From ff5facd75e2f5a1124aef6cd5007f3baf36a18cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 12 Nov 2011 01:26:44 +0000 Subject: [PATCH 1/7] I can has 0.6.x branch git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.6.x@1201142 13f79535-47bb-0310-9956-ffa450edef68 From 3bd6015cdb15b290f0faae1f2056f4d826bd0d3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 12 Nov 2011 01:29:59 +0000 Subject: [PATCH 2/7] I can has 0.6.x branch git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.6.x@1201143 13f79535-47bb-0310-9956-ffa450edef68 --- trunk/.coveragerc | 31 + trunk/.gitignore | 11 + trunk/.ratignore | 18 + trunk/CHANGES | 484 +++++ trunk/HACKING | 74 + trunk/LICENSE | 202 ++ trunk/MANIFEST.in | 24 + trunk/NOTICE | 8 + trunk/README | 58 + trunk/RELEASING | 64 + trunk/demos/compute_demo.py | 114 ++ trunk/demos/secrets.py-dist | 36 + trunk/dist/hash-sign.sh | 147 ++ trunk/dist/release.sh | 39 + trunk/doap_libcloud.rdf | 107 + trunk/example_compute.py | 36 + trunk/example_dns.py | 29 + trunk/example_loadbalancer.py | 71 + trunk/example_storage.py | 29 + trunk/libcloud/__init__.py | 64 + trunk/libcloud/base.py | 41 + trunk/libcloud/common/__init__.py | 0 trunk/libcloud/common/aws.py | 18 + trunk/libcloud/common/base.py | 731 +++++++ trunk/libcloud/common/cloudstack.py | 115 ++ trunk/libcloud/common/gandi.py | 217 +++ trunk/libcloud/common/gogrid.py | 168 ++ trunk/libcloud/common/linode.py | 160 ++ trunk/libcloud/common/openstack.py | 296 +++ trunk/libcloud/common/rackspace.py | 26 + trunk/libcloud/common/types.py | 114 ++ trunk/libcloud/compute/__init__.py | 3 + trunk/libcloud/compute/base.py | 691 +++++++ trunk/libcloud/compute/deployment.py | 145 ++ trunk/libcloud/compute/drivers/__init__.py | 40 + trunk/libcloud/compute/drivers/bluebox.py | 223 +++ trunk/libcloud/compute/drivers/brightbox.py | 222 +++ trunk/libcloud/compute/drivers/cloudsigma.py | 553 ++++++ 
trunk/libcloud/compute/drivers/cloudstack.py | 282 +++ trunk/libcloud/compute/drivers/dreamhost.py | 244 +++ trunk/libcloud/compute/drivers/dummy.py | 306 +++ trunk/libcloud/compute/drivers/ec2.py | 1165 +++++++++++ trunk/libcloud/compute/drivers/ecp.py | 360 ++++ .../libcloud/compute/drivers/elastichosts.py | 156 ++ .../libcloud/compute/drivers/elasticstack.py | 452 +++++ trunk/libcloud/compute/drivers/gandi.py | 398 ++++ trunk/libcloud/compute/drivers/gogrid.py | 401 ++++ trunk/libcloud/compute/drivers/ibm_sbc.py | 184 ++ trunk/libcloud/compute/drivers/linode.py | 486 +++++ trunk/libcloud/compute/drivers/ninefold.py | 27 + trunk/libcloud/compute/drivers/opennebula.py | 429 ++++ trunk/libcloud/compute/drivers/openstack.py | 1062 ++++++++++ trunk/libcloud/compute/drivers/opsource.py | 567 ++++++ trunk/libcloud/compute/drivers/rackspace.py | 66 + trunk/libcloud/compute/drivers/rimuhosting.py | 310 +++ trunk/libcloud/compute/drivers/serverlove.py | 82 + trunk/libcloud/compute/drivers/skalicloud.py | 82 + trunk/libcloud/compute/drivers/slicehost.py | 232 +++ trunk/libcloud/compute/drivers/softlayer.py | 442 +++++ trunk/libcloud/compute/drivers/vcloud.py | 613 ++++++ trunk/libcloud/compute/drivers/voxel.py | 307 +++ trunk/libcloud/compute/drivers/vpsnet.py | 183 ++ trunk/libcloud/compute/providers.py | 101 + trunk/libcloud/compute/ssh.py | 200 ++ trunk/libcloud/compute/types.py | 140 ++ trunk/libcloud/data/pricing.json | 170 ++ trunk/libcloud/deployment.py | 31 + trunk/libcloud/dns/__init__.py | 0 trunk/libcloud/dns/base.py | 301 +++ trunk/libcloud/dns/drivers/__init__.py | 0 trunk/libcloud/dns/drivers/dummy.py | 181 ++ trunk/libcloud/dns/drivers/linode.py | 270 +++ trunk/libcloud/dns/drivers/rackspace.py | 351 ++++ trunk/libcloud/dns/drivers/zerigo.py | 451 +++++ trunk/libcloud/dns/providers.py | 34 + trunk/libcloud/dns/types.py | 102 + trunk/libcloud/drivers/__init__.py | 38 + trunk/libcloud/drivers/brightbox.py | 19 + trunk/libcloud/drivers/cloudsigma.py | 21 + 
trunk/libcloud/drivers/dreamhost.py | 19 + trunk/libcloud/drivers/dummy.py | 19 + trunk/libcloud/drivers/ec2.py | 19 + trunk/libcloud/drivers/ecp.py | 19 + trunk/libcloud/drivers/elastichosts.py | 19 + trunk/libcloud/drivers/gogrid.py | 19 + trunk/libcloud/drivers/ibm_sbc.py | 19 + trunk/libcloud/drivers/linode.py | 19 + trunk/libcloud/drivers/opennebula.py | 22 + trunk/libcloud/drivers/rackspace.py | 19 + trunk/libcloud/drivers/rimuhosting.py | 19 + trunk/libcloud/drivers/slicehost.py | 19 + trunk/libcloud/drivers/softlayer.py | 19 + trunk/libcloud/drivers/vcloud.py | 19 + trunk/libcloud/drivers/voxel.py | 19 + trunk/libcloud/drivers/vpsnet.py | 19 + trunk/libcloud/httplib_ssl.py | 161 ++ trunk/libcloud/loadbalancer/__init__.py | 26 + trunk/libcloud/loadbalancer/base.py | 227 +++ .../libcloud/loadbalancer/drivers/__init__.py | 19 + .../loadbalancer/drivers/cloudstack.py | 123 ++ trunk/libcloud/loadbalancer/drivers/gogrid.py | 217 +++ .../libcloud/loadbalancer/drivers/ninefold.py | 27 + .../loadbalancer/drivers/rackspace.py | 186 ++ trunk/libcloud/loadbalancer/providers.py | 37 + trunk/libcloud/loadbalancer/types.py | 51 + trunk/libcloud/pricing.py | 139 ++ trunk/libcloud/providers.py | 27 + trunk/libcloud/security.py | 59 + trunk/libcloud/ssh.py | 30 + trunk/libcloud/storage/__init__.py | 3 + trunk/libcloud/storage/base.py | 729 +++++++ trunk/libcloud/storage/drivers/__init__.py | 23 + trunk/libcloud/storage/drivers/atmos.py | 414 ++++ trunk/libcloud/storage/drivers/cloudfiles.py | 522 +++++ trunk/libcloud/storage/drivers/dummy.py | 410 ++++ .../storage/drivers/google_storage.py | 135 ++ trunk/libcloud/storage/drivers/ninefold.py | 24 + trunk/libcloud/storage/drivers/s3.py | 512 +++++ trunk/libcloud/storage/providers.py | 45 + trunk/libcloud/storage/types.py | 95 + trunk/libcloud/types.py | 28 + trunk/libcloud/utils.py | 318 +++ trunk/setup.py | 234 +++ trunk/test/__init__.py | 288 +++ trunk/test/common/__init__.py | 14 + trunk/test/common/test_cloudstack.py | 187 
++ trunk/test/compute/__init__.py | 90 + .../bluebox/api_block_products_json.json | 1 + .../bluebox/api_block_templates_json.json | 1 + ...878c_6e5c_4945_a635_d94da9fd3146_json.json | 1 + ...5c_4945_a635_d94da9fd3146_json_delete.json | 1 + ...5c_4945_a635_d94da9fd3146_reboot_json.json | 1 + .../fixtures/bluebox/api_blocks_json.json | 1 + .../bluebox/api_blocks_json_post.json | 1 + .../fixtures/brightbox/create_server.json | 62 + .../fixtures/brightbox/list_images.json | 21 + .../fixtures/brightbox/list_server_types.json | 8 + .../fixtures/brightbox/list_servers.json | 62 + .../fixtures/brightbox/list_zones.json | 3 + .../compute/fixtures/brightbox/token.json | 1 + .../fixtures/cloudsigma/drives_clone.txt | 19 + .../fixtures/cloudsigma/drives_info.txt | 39 + .../cloudsigma/drives_single_info.txt | 19 + .../cloudsigma/drives_standard_info.txt | 1735 +++++++++++++++++ .../cloudsigma/resources_ip_create.txt | 13 + .../fixtures/cloudsigma/resources_ip_list.txt | 3 + .../fixtures/cloudsigma/servers_create.txt | 26 + .../fixtures/cloudsigma/servers_info.txt | 26 + .../fixtures/cloudsigma/servers_set.txt | 26 + .../deployVirtualMachine_default.json | 1 + .../deployVirtualMachine_deployfail.json | 1 + .../deployVirtualMachine_deployfail2.json | 1 + .../destroyVirtualMachine_default.json | 1 + .../cloudstack/listNetworks_default.json | 1 + .../cloudstack/listNetworks_deployfail.json | 1 + .../cloudstack/listNetworks_deployfail2.json | 1 + .../listPublicIpAddresses_default.json | 1 + .../listServiceOfferings_default.json | 1 + .../cloudstack/listTemplates_default.json | 1 + .../listVirtualMachines_default.json | 1 + .../cloudstack/listZones_default.json | 1 + .../cloudstack/listZones_deployfail.json | 1 + .../cloudstack/listZones_deployfail2.json | 1 + .../cloudstack/queryAsyncJobResult_17164.json | 1 + .../cloudstack/queryAsyncJobResult_17165.json | 1 + .../cloudstack/queryAsyncJobResult_17166.json | 1 + .../cloudstack/queryAsyncJobResult_17177.json | 1 + 
.../rebootVirtualMachine_default.json | 1 + .../test/compute/fixtures/ec2/create_tags.xml | 4 + .../test/compute/fixtures/ec2/delete_tags.xml | 4 + .../fixtures/ec2/describe_addresses.xml | 9 + .../fixtures/ec2/describe_addresses_multi.xml | 17 + .../ec2/describe_addresses_single.xml | 9 + .../ec2/describe_availability_zones.xml | 17 + .../compute/fixtures/ec2/describe_images.xml | 16 + .../fixtures/ec2/describe_instances.xml | 72 + .../ec2/describe_instances_with_tags.xml | 53 + .../compute/fixtures/ec2/describe_tags.xml | 23 + .../ec2/modify_instance_attribute.xml | 4 + .../compute/fixtures/ec2/reboot_instances.xml | 4 + .../compute/fixtures/ec2/run_instances.xml | 31 + .../fixtures/ec2/run_instances_idem.xml | 32 + .../ec2/run_instances_idem_mismatch.xml | 12 + .../fixtures/ec2/terminate_instances.xml | 16 + .../compute/fixtures/ecp/htemplate_list.json | 9 + .../compute/fixtures/ecp/network_list.json | 1 + .../compute/fixtures/ecp/ptemplate_list.json | 6 + .../fixtures/ecp/vm_1_action_delete.json | 1 + .../fixtures/ecp/vm_1_action_start.json | 3 + .../fixtures/ecp/vm_1_action_stop.json | 3 + trunk/test/compute/fixtures/ecp/vm_1_get.json | 3 + trunk/test/compute/fixtures/ecp/vm_list.json | 10 + trunk/test/compute/fixtures/ecp/vm_put.json | 1 + .../fixtures/elastichosts/drives_create.json | 12 + .../fixtures/elastichosts/drives_info.json | 12 + .../fixtures/elastichosts/servers_create.json | 25 + .../fixtures/elastichosts/servers_info.json | 27 + .../compute/fixtures/gandi/account_info.xml | 317 +++ .../fixtures/gandi/datacenter_list.xml | 53 + .../compute/fixtures/gandi/disk_attach.xml | 53 + .../fixtures/gandi/disk_create_from.xml | 53 + .../compute/fixtures/gandi/disk_detach.xml | 53 + .../test/compute/fixtures/gandi/disk_list.xml | 200 ++ .../compute/fixtures/gandi/disk_update.xml | 53 + .../compute/fixtures/gandi/iface_attach.xml | 53 + .../compute/fixtures/gandi/iface_detach.xml | 53 + .../compute/fixtures/gandi/iface_list.xml | 99 + 
.../compute/fixtures/gandi/image_list_dc0.xml | 493 +++++ trunk/test/compute/fixtures/gandi/ip_list.xml | 261 +++ .../compute/fixtures/gandi/operation_info.xml | 45 + .../compute/fixtures/gandi/vm_create_from.xml | 147 ++ .../test/compute/fixtures/gandi/vm_delete.xml | 53 + trunk/test/compute/fixtures/gandi/vm_info.xml | 330 ++++ trunk/test/compute/fixtures/gandi/vm_list.xml | 141 ++ .../test/compute/fixtures/gandi/vm_reboot.xml | 53 + trunk/test/compute/fixtures/gandi/vm_stop.xml | 53 + .../compute/fixtures/gogrid/image_list.json | 180 ++ .../compute/fixtures/gogrid/image_save.json | 62 + .../test/compute/fixtures/gogrid/ip_list.json | 69 + .../fixtures/gogrid/ip_list_empty.json | 12 + .../gogrid/lookup_list_ip_datacenter.json | 24 + .../fixtures/gogrid/password_list.json | 102 + .../compute/fixtures/gogrid/server_add.json | 96 + .../fixtures/gogrid/server_delete.json | 97 + .../compute/fixtures/gogrid/server_edit.json | 97 + .../compute/fixtures/gogrid/server_list.json | 98 + .../compute/fixtures/gogrid/server_power.json | 97 + .../fixtures/gogrid/server_power_fail.json | 97 + .../test/compute/fixtures/ibm_sbc/create.xml | 1 + .../test/compute/fixtures/ibm_sbc/delete.xml | 1 + .../test/compute/fixtures/ibm_sbc/images.xml | 2 + .../compute/fixtures/ibm_sbc/instances.xml | 1 + .../fixtures/ibm_sbc/instances_deleted.xml | 1 + .../compute/fixtures/ibm_sbc/locations.xml | 1 + .../fixtures/ibm_sbc/reboot_active.xml | 1 + trunk/test/compute/fixtures/ibm_sbc/sizes.xml | 1 + .../test/compute/fixtures/meta/helloworld.txt | 1 + .../compute/fixtures/opennebula/compute.xml | 15 + .../compute/fixtures/opennebula/computes.xml | 5 + .../test/compute/fixtures/opennebula/disk.xml | 7 + .../compute/fixtures/opennebula/storage.xml | 5 + .../fixtures/openstack/_v1_1__auth.json | 1 + .../openstack/_v1_1__auth_mssing_token.json | 1 + .../openstack/_v1_1__auth_unauthorized.json | 1 + .../fixtures/openstack/_v2_0__auth.json | 1 + .../openstack/v1_slug_flavors_detail.xml | 10 + 
.../openstack/v1_slug_images_detail.xml | 15 + .../openstack/v1_slug_images_post.xml | 3 + .../fixtures/openstack/v1_slug_limits.xml | 15 + .../fixtures/openstack/v1_slug_servers.xml | 12 + .../openstack/v1_slug_servers_detail.xml | 14 + ...slug_servers_detail_deployment_missing.xml | 14 + ...slug_servers_detail_deployment_pending.xml | 14 + ...ug_servers_detail_deployment_same_uuid.xml | 26 + ...slug_servers_detail_deployment_success.xml | 14 + .../v1_slug_servers_detail_empty.xml | 2 + .../v1_slug_servers_detail_metadata.xml | 16 + .../openstack/v1_slug_servers_ips.xml | 10 + .../openstack/v1_slug_servers_metadata.xml | 15 + .../openstack/v1_slug_shared_ip_group.xml | 6 + .../openstack/v1_slug_shared_ip_groups.xml | 5 + .../v1_slug_shared_ip_groups_detail.xml | 16 + .../compute/fixtures/openstack_v1.1/README | 7 + .../fixtures/openstack_v1.1/_flavors_7.json | 1 + .../openstack_v1.1/_flavors_detail.json | 1 + .../fixtures/openstack_v1.1/_images_13.json | 1 + .../openstack_v1.1/_images_detail.json | 1 + .../_os_quota_sets_aTenantId.json | 1 + .../fixtures/openstack_v1.1/_servers.json | 78 + .../_servers_12063_metadata_two_keys.json | 6 + .../openstack_v1.1/_servers_12064.json | 1 + .../_servers_12064_updated_name_bob.json | 1 + .../openstack_v1.1/_servers_detail.json | 1 + .../openstack_v1.1/_servers_detail_EMPTY.json | 3 + ...abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml | 6 + ...2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml | 12 + ..._9cbc_8dabe5a7d0e4_networkWithLocation.xml | 11 + ...d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml | 7 + ...abe5a7d0e4_server_11_delete_INPROGRESS.xml | 7 + ...a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml | 7 + ...e5a7d0e4_server_11_poweroff_INPROGRESS.xml | 7 + ...8a_9cbc_8dabe5a7d0e4_server_11_restart.xml | 6 + ...be5a7d0e4_server_11_restart_INPROGRESS.xml | 7 + ...a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml | 6 + ...e5a7d0e4_server_11_shutdown_INPROGRESS.xml | 7 + ...4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml | 7 + 
...dabe5a7d0e4_server_11_start_INPROGRESS.xml | 7 + ...4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml | 45 + ...9cbc_8dabe5a7d0e4_server_pendingDeploy.xml | 26 + .../fixtures/opsource/oec_0_9_base_image.xml | 339 ++++ .../fixtures/opsource/oec_0_9_myaccount.xml | 26 + .../fixtures/rimuhosting/r_distributions.json | 22 + .../fixtures/rimuhosting/r_orders.json | 27 + .../rimuhosting/r_orders_new_vps.json | 62 + ...rs_order_88833465_api_ivan_net_nz_vps.json | 13 + ...465_api_ivan_net_nz_vps_running_state.json | 40 + .../fixtures/rimuhosting/r_pricing_plans.json | 26 + .../compute/fixtures/slicehost/flavors.xml | 45 + .../compute/fixtures/slicehost/images.xml | 47 + .../fixtures/slicehost/slices_1_reboot.xml | 15 + .../slicehost/slices_1_reboot_forbidden.xml | 3 + .../fixtures/slicehost/slices_errors.xml | 4 + .../compute/fixtures/slicehost/slices_get.xml | 17 + .../fixtures/slicehost/slices_post.xml | 16 + .../v3_SoftLayer_Account_getVirtualGuests.xml | 1066 ++++++++++ ...yer_Location_Datacenter_getDatacenters.xml | 99 + .../terremark/api_v0_8_catalogItem_5.xml | 6 + .../fixtures/terremark/api_v0_8_login.xml | 3 + .../fixtures/terremark/api_v0_8_org_240.xml | 5 + .../terremark/api_v0_8_task_10496.xml | 4 + .../terremark/api_v0_8_task_11001.xml | 4 + .../terremark/api_v0_8_vapp_14031.xml | 132 ++ .../api_v0_8_vapp_14031_action_deploy.xml | 4 + .../terremark/api_v0_8_vapp_14031_get.xml | 132 ++ ...i_v0_8_vapp_14031_power_action_powerOn.xml | 4 + ..._v0_8_vapp_14031_power_action_poweroff.xml | 4 + ...api_v0_8_vapp_14031_power_action_reset.xml | 4 + .../fixtures/terremark/api_v0_8_vdc_224.xml | 12 + ...vdc_224_action_instantiateVAppTemplate.xml | 3 + .../terremark/api_v0_8_vdc_224_catalog.xml | 5 + .../compute/fixtures/voxel/create_node.xml | 7 + trunk/test/compute/fixtures/voxel/failure.xml | 1 + trunk/test/compute/fixtures/voxel/images.xml | 19 + .../test/compute/fixtures/voxel/locations.xml | 10 + trunk/test/compute/fixtures/voxel/nodes.xml | 38 + 
trunk/test/compute/fixtures/voxel/success.xml | 1 + .../compute/fixtures/voxel/unauthorized.xml | 11 + .../compute/test_backward_compatibility.py | 54 + trunk/test/compute/test_base.py | 53 + trunk/test/compute/test_bluebox.py | 112 ++ trunk/test/compute/test_brightbox.py | 132 ++ trunk/test/compute/test_cloudsigma.py | 204 ++ trunk/test/compute/test_cloudstack.py | 94 + trunk/test/compute/test_deployment.py | 362 ++++ trunk/test/compute/test_dreamhost.py | 279 +++ trunk/test/compute/test_ec2.py | 445 +++++ trunk/test/compute/test_ecp.py | 128 ++ trunk/test/compute/test_elasticstack.py | 243 +++ trunk/test/compute/test_gandi.py | 233 +++ trunk/test/compute/test_gogrid.py | 280 +++ trunk/test/compute/test_ibm_sbc.py | 206 ++ trunk/test/compute/test_linode.py | 148 ++ trunk/test/compute/test_opennebula.py | 148 ++ trunk/test/compute/test_openstack.py | 785 ++++++++ trunk/test/compute/test_opsource.py | 222 +++ trunk/test/compute/test_rackspace.py | 39 + trunk/test/compute/test_rimuhosting.py | 107 + trunk/test/compute/test_slicehost.py | 155 ++ trunk/test/compute/test_softlayer.py | 91 + trunk/test/compute/test_ssh_client.py | 41 + trunk/test/compute/test_vcloud.py | 142 ++ trunk/test/compute/test_voxel.py | 161 ++ trunk/test/compute/test_vpsnet.py | 209 ++ trunk/test/dns/__init__.py | 0 .../dns/fixtures/linode/create_domain.json | 7 + .../create_domain_validation_error.json | 1 + .../dns/fixtures/linode/create_resource.json | 7 + .../dns/fixtures/linode/delete_domain.json | 7 + .../linode/delete_domain_does_not_exist.json | 1 + .../dns/fixtures/linode/delete_resource.json | 7 + .../delete_resource_does_not_exist.json | 1 + .../test/dns/fixtures/linode/domain_list.json | 32 + .../test/dns/fixtures/linode/get_record.json | 18 + .../linode/get_record_does_not_exist.json | 5 + trunk/test/dns/fixtures/linode/get_zone.json | 19 + .../linode/get_zone_does_not_exist.json | 5 + .../dns/fixtures/linode/resource_list.json | 30 + .../linode/resource_list_does_not_exist.json | 1 
+ .../dns/fixtures/linode/update_domain.json | 7 + .../dns/fixtures/linode/update_resource.json | 7 + .../test/dns/fixtures/rackspace/auth_1_1.json | 31 + .../rackspace/create_record_success.json | 21 + .../rackspace/create_zone_success.json | 29 + .../create_zone_validation_error.json | 1 + .../rackspace/delete_record_success.json | 8 + .../rackspace/delete_zone_success.json | 7 + .../fixtures/rackspace/does_not_exist.json | 1 + .../rackspace/get_record_success.json | 10 + .../fixtures/rackspace/get_zone_success.json | 51 + .../rackspace/list_records_no_results.json | 22 + .../rackspace/list_records_success.json | 51 + .../rackspace/list_zones_no_results.json | 5 + .../rackspace/list_zones_success.json | 53 + .../dns/fixtures/rackspace/unauthorized.json | 1 + .../rackspace/update_record_success.json | 8 + .../rackspace/update_zone_success.json | 8 + .../dns/fixtures/zerigo/create_record.xml | 13 + .../test/dns/fixtures/zerigo/create_zone.xml | 18 + .../zerigo/create_zone_validation_error.xml | 4 + trunk/test/dns/fixtures/zerigo/get_record.xml | 13 + trunk/test/dns/fixtures/zerigo/get_zone.xml | 32 + .../test/dns/fixtures/zerigo/list_records.xml | 15 + .../zerigo/list_records_no_results.xml | 1 + trunk/test/dns/fixtures/zerigo/list_zones.xml | 17 + .../fixtures/zerigo/list_zones_no_results.xml | 1 + trunk/test/dns/test_linode.py | 324 +++ trunk/test/dns/test_rackspace.py | 413 ++++ trunk/test/dns/test_zerigo.py | 357 ++++ trunk/test/file_fixtures.py | 67 + trunk/test/loadbalancer/__init__.py | 14 + .../assignToLoadBalancerRule_default.json | 1 + .../associateIpAddress_default.json | 1 + .../createLoadBalancerRule_default.json | 1 + .../deleteLoadBalancerRule_default.json | 1 + .../disassociateIpAddress_default.json | 1 + ...listLoadBalancerRuleInstances_default.json | 1 + .../listLoadBalancerRules_default.json | 1 + .../cloudstack/listZones_default.json | 1 + .../cloudstack/queryAsyncJobResult_17340.json | 1 + .../cloudstack/queryAsyncJobResult_17341.json | 1 + 
.../cloudstack/queryAsyncJobResult_17342.json | 1 + .../cloudstack/queryAsyncJobResult_17344.json | 1 + .../cloudstack/queryAsyncJobResult_17346.json | 1 + .../removeFromLoadBalancerRule_default.json | 1 + .../loadbalancer/fixtures/gogrid/ip_list.json | 316 +++ .../fixtures/gogrid/loadbalancer_add.json | 141 ++ .../fixtures/gogrid/loadbalancer_edit.json | 164 ++ .../fixtures/gogrid/loadbalancer_get.json | 141 ++ .../fixtures/gogrid/loadbalancer_list.json | 224 +++ .../fixtures/gogrid/unexpected_error.json | 1 + .../rackspace/v1_slug_loadbalancers.json | 48 + .../rackspace/v1_slug_loadbalancers_8290.json | 46 + .../v1_slug_loadbalancers_8290_nodes.json | 18 + ...v1_slug_loadbalancers_8290_nodes_post.json | 12 + .../rackspace/v1_slug_loadbalancers_post.json | 48 + .../v1_slug_loadbalancers_protocols.json | 43 + trunk/test/loadbalancer/test_cloudstack.py | 101 + trunk/test/loadbalancer/test_gogrid.py | 183 ++ trunk/test/loadbalancer/test_rackspace.py | 176 ++ trunk/test/pricing_test.json | 10 + trunk/test/secrets.py-dist | 45 + trunk/test/storage/__init__.py | 0 .../storage/fixtures/atmos/already_exists.xml | 5 + .../atmos/empty_directory_listing.xml | 4 + .../fixtures/atmos/list_containers.xml | 45 + .../test/storage/fixtures/atmos/not_empty.xml | 5 + .../test/storage/fixtures/atmos/not_found.xml | 5 + .../cloudfiles/list_container_objects.json | 14 + .../list_container_objects_empty.json | 1 + ...list_container_objects_not_exhausted1.json | 11 + ...list_container_objects_not_exhausted2.json | 8 + .../fixtures/cloudfiles/list_containers.json | 5 + .../cloudfiles/list_containers_empty.json | 1 + .../fixtures/cloudfiles/meta_data.json | 1 + .../google_storage/list_container_objects.xml | 18 + .../list_container_objects_empty.xml | 8 + .../list_container_objects_not_exhausted1.xml | 38 + .../list_container_objects_not_exhausted2.xml | 28 + .../google_storage/list_containers.xml | 16 + .../google_storage/list_containers_empty.xml | 9 + 
.../fixtures/s3/list_container_objects.xml | 18 + .../s3/list_container_objects_empty.xml | 8 + .../list_container_objects_not_exhausted1.xml | 38 + .../list_container_objects_not_exhausted2.xml | 28 + .../storage/fixtures/s3/list_containers.xml | 16 + .../fixtures/s3/list_containers_empty.xml | 9 + trunk/test/storage/test_atmos.py | 570 ++++++ trunk/test/storage/test_base.py | 141 ++ trunk/test/storage/test_cloudfiles.py | 700 +++++++ trunk/test/storage/test_google_storage.py | 43 + trunk/test/storage/test_s3.py | 652 +++++++ trunk/test/test_file_fixtures.py | 31 + trunk/test/test_httplib_ssl.py | 140 ++ trunk/test/test_pricing.py | 106 + trunk/test/test_response_classes.py | 92 + trunk/test/test_types.py | 112 ++ trunk/test/test_utils.py | 162 ++ trunk/tox.ini | 11 + 468 files changed, 42934 insertions(+) create mode 100644 trunk/.coveragerc create mode 100644 trunk/.gitignore create mode 100644 trunk/.ratignore create mode 100644 trunk/CHANGES create mode 100644 trunk/HACKING create mode 100644 trunk/LICENSE create mode 100644 trunk/MANIFEST.in create mode 100644 trunk/NOTICE create mode 100644 trunk/README create mode 100644 trunk/RELEASING create mode 100644 trunk/demos/compute_demo.py create mode 100644 trunk/demos/secrets.py-dist create mode 100755 trunk/dist/hash-sign.sh create mode 100755 trunk/dist/release.sh create mode 100644 trunk/doap_libcloud.rdf create mode 100644 trunk/example_compute.py create mode 100644 trunk/example_dns.py create mode 100644 trunk/example_loadbalancer.py create mode 100644 trunk/example_storage.py create mode 100644 trunk/libcloud/__init__.py create mode 100644 trunk/libcloud/base.py create mode 100644 trunk/libcloud/common/__init__.py create mode 100644 trunk/libcloud/common/aws.py create mode 100644 trunk/libcloud/common/base.py create mode 100644 trunk/libcloud/common/cloudstack.py create mode 100644 trunk/libcloud/common/gandi.py create mode 100644 trunk/libcloud/common/gogrid.py create mode 100644 
trunk/libcloud/common/linode.py create mode 100644 trunk/libcloud/common/openstack.py create mode 100644 trunk/libcloud/common/rackspace.py create mode 100644 trunk/libcloud/common/types.py create mode 100644 trunk/libcloud/compute/__init__.py create mode 100644 trunk/libcloud/compute/base.py create mode 100644 trunk/libcloud/compute/deployment.py create mode 100644 trunk/libcloud/compute/drivers/__init__.py create mode 100644 trunk/libcloud/compute/drivers/bluebox.py create mode 100644 trunk/libcloud/compute/drivers/brightbox.py create mode 100644 trunk/libcloud/compute/drivers/cloudsigma.py create mode 100644 trunk/libcloud/compute/drivers/cloudstack.py create mode 100644 trunk/libcloud/compute/drivers/dreamhost.py create mode 100644 trunk/libcloud/compute/drivers/dummy.py create mode 100644 trunk/libcloud/compute/drivers/ec2.py create mode 100644 trunk/libcloud/compute/drivers/ecp.py create mode 100644 trunk/libcloud/compute/drivers/elastichosts.py create mode 100644 trunk/libcloud/compute/drivers/elasticstack.py create mode 100644 trunk/libcloud/compute/drivers/gandi.py create mode 100644 trunk/libcloud/compute/drivers/gogrid.py create mode 100644 trunk/libcloud/compute/drivers/ibm_sbc.py create mode 100644 trunk/libcloud/compute/drivers/linode.py create mode 100644 trunk/libcloud/compute/drivers/ninefold.py create mode 100644 trunk/libcloud/compute/drivers/opennebula.py create mode 100644 trunk/libcloud/compute/drivers/openstack.py create mode 100644 trunk/libcloud/compute/drivers/opsource.py create mode 100644 trunk/libcloud/compute/drivers/rackspace.py create mode 100644 trunk/libcloud/compute/drivers/rimuhosting.py create mode 100644 trunk/libcloud/compute/drivers/serverlove.py create mode 100644 trunk/libcloud/compute/drivers/skalicloud.py create mode 100644 trunk/libcloud/compute/drivers/slicehost.py create mode 100644 trunk/libcloud/compute/drivers/softlayer.py create mode 100644 trunk/libcloud/compute/drivers/vcloud.py create mode 100644 
trunk/libcloud/compute/drivers/voxel.py create mode 100644 trunk/libcloud/compute/drivers/vpsnet.py create mode 100644 trunk/libcloud/compute/providers.py create mode 100644 trunk/libcloud/compute/ssh.py create mode 100644 trunk/libcloud/compute/types.py create mode 100644 trunk/libcloud/data/pricing.json create mode 100644 trunk/libcloud/deployment.py create mode 100644 trunk/libcloud/dns/__init__.py create mode 100644 trunk/libcloud/dns/base.py create mode 100644 trunk/libcloud/dns/drivers/__init__.py create mode 100644 trunk/libcloud/dns/drivers/dummy.py create mode 100644 trunk/libcloud/dns/drivers/linode.py create mode 100644 trunk/libcloud/dns/drivers/rackspace.py create mode 100644 trunk/libcloud/dns/drivers/zerigo.py create mode 100644 trunk/libcloud/dns/providers.py create mode 100644 trunk/libcloud/dns/types.py create mode 100644 trunk/libcloud/drivers/__init__.py create mode 100644 trunk/libcloud/drivers/brightbox.py create mode 100644 trunk/libcloud/drivers/cloudsigma.py create mode 100644 trunk/libcloud/drivers/dreamhost.py create mode 100644 trunk/libcloud/drivers/dummy.py create mode 100644 trunk/libcloud/drivers/ec2.py create mode 100644 trunk/libcloud/drivers/ecp.py create mode 100644 trunk/libcloud/drivers/elastichosts.py create mode 100644 trunk/libcloud/drivers/gogrid.py create mode 100644 trunk/libcloud/drivers/ibm_sbc.py create mode 100644 trunk/libcloud/drivers/linode.py create mode 100644 trunk/libcloud/drivers/opennebula.py create mode 100644 trunk/libcloud/drivers/rackspace.py create mode 100644 trunk/libcloud/drivers/rimuhosting.py create mode 100644 trunk/libcloud/drivers/slicehost.py create mode 100644 trunk/libcloud/drivers/softlayer.py create mode 100644 trunk/libcloud/drivers/vcloud.py create mode 100644 trunk/libcloud/drivers/voxel.py create mode 100644 trunk/libcloud/drivers/vpsnet.py create mode 100644 trunk/libcloud/httplib_ssl.py create mode 100644 trunk/libcloud/loadbalancer/__init__.py create mode 100644 
trunk/libcloud/loadbalancer/base.py create mode 100644 trunk/libcloud/loadbalancer/drivers/__init__.py create mode 100644 trunk/libcloud/loadbalancer/drivers/cloudstack.py create mode 100644 trunk/libcloud/loadbalancer/drivers/gogrid.py create mode 100644 trunk/libcloud/loadbalancer/drivers/ninefold.py create mode 100644 trunk/libcloud/loadbalancer/drivers/rackspace.py create mode 100644 trunk/libcloud/loadbalancer/providers.py create mode 100644 trunk/libcloud/loadbalancer/types.py create mode 100644 trunk/libcloud/pricing.py create mode 100644 trunk/libcloud/providers.py create mode 100644 trunk/libcloud/security.py create mode 100644 trunk/libcloud/ssh.py create mode 100644 trunk/libcloud/storage/__init__.py create mode 100644 trunk/libcloud/storage/base.py create mode 100644 trunk/libcloud/storage/drivers/__init__.py create mode 100644 trunk/libcloud/storage/drivers/atmos.py create mode 100644 trunk/libcloud/storage/drivers/cloudfiles.py create mode 100644 trunk/libcloud/storage/drivers/dummy.py create mode 100644 trunk/libcloud/storage/drivers/google_storage.py create mode 100644 trunk/libcloud/storage/drivers/ninefold.py create mode 100644 trunk/libcloud/storage/drivers/s3.py create mode 100644 trunk/libcloud/storage/providers.py create mode 100644 trunk/libcloud/storage/types.py create mode 100644 trunk/libcloud/types.py create mode 100644 trunk/libcloud/utils.py create mode 100644 trunk/setup.py create mode 100644 trunk/test/__init__.py create mode 100644 trunk/test/common/__init__.py create mode 100644 trunk/test/common/test_cloudstack.py create mode 100644 trunk/test/compute/__init__.py create mode 100644 trunk/test/compute/fixtures/bluebox/api_block_products_json.json create mode 100644 trunk/test/compute/fixtures/bluebox/api_block_templates_json.json create mode 100644 trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json create mode 100644 
trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json create mode 100644 trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json create mode 100644 trunk/test/compute/fixtures/bluebox/api_blocks_json.json create mode 100644 trunk/test/compute/fixtures/bluebox/api_blocks_json_post.json create mode 100644 trunk/test/compute/fixtures/brightbox/create_server.json create mode 100644 trunk/test/compute/fixtures/brightbox/list_images.json create mode 100644 trunk/test/compute/fixtures/brightbox/list_server_types.json create mode 100644 trunk/test/compute/fixtures/brightbox/list_servers.json create mode 100644 trunk/test/compute/fixtures/brightbox/list_zones.json create mode 100644 trunk/test/compute/fixtures/brightbox/token.json create mode 100644 trunk/test/compute/fixtures/cloudsigma/drives_clone.txt create mode 100644 trunk/test/compute/fixtures/cloudsigma/drives_info.txt create mode 100644 trunk/test/compute/fixtures/cloudsigma/drives_single_info.txt create mode 100644 trunk/test/compute/fixtures/cloudsigma/drives_standard_info.txt create mode 100644 trunk/test/compute/fixtures/cloudsigma/resources_ip_create.txt create mode 100644 trunk/test/compute/fixtures/cloudsigma/resources_ip_list.txt create mode 100644 trunk/test/compute/fixtures/cloudsigma/servers_create.txt create mode 100644 trunk/test/compute/fixtures/cloudsigma/servers_info.txt create mode 100644 trunk/test/compute/fixtures/cloudsigma/servers_set.txt create mode 100644 trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json create mode 100644 trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json create mode 100644 trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json create mode 100644 trunk/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json create mode 100644 trunk/test/compute/fixtures/cloudstack/listNetworks_default.json create mode 
100644 trunk/test/compute/fixtures/cloudstack/listNetworks_deployfail.json create mode 100644 trunk/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json create mode 100644 trunk/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json create mode 100644 trunk/test/compute/fixtures/cloudstack/listServiceOfferings_default.json create mode 100644 trunk/test/compute/fixtures/cloudstack/listTemplates_default.json create mode 100644 trunk/test/compute/fixtures/cloudstack/listVirtualMachines_default.json create mode 100644 trunk/test/compute/fixtures/cloudstack/listZones_default.json create mode 100644 trunk/test/compute/fixtures/cloudstack/listZones_deployfail.json create mode 100644 trunk/test/compute/fixtures/cloudstack/listZones_deployfail2.json create mode 100644 trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json create mode 100644 trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json create mode 100644 trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json create mode 100644 trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json create mode 100644 trunk/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json create mode 100644 trunk/test/compute/fixtures/ec2/create_tags.xml create mode 100644 trunk/test/compute/fixtures/ec2/delete_tags.xml create mode 100644 trunk/test/compute/fixtures/ec2/describe_addresses.xml create mode 100644 trunk/test/compute/fixtures/ec2/describe_addresses_multi.xml create mode 100644 trunk/test/compute/fixtures/ec2/describe_addresses_single.xml create mode 100644 trunk/test/compute/fixtures/ec2/describe_availability_zones.xml create mode 100644 trunk/test/compute/fixtures/ec2/describe_images.xml create mode 100644 trunk/test/compute/fixtures/ec2/describe_instances.xml create mode 100644 trunk/test/compute/fixtures/ec2/describe_instances_with_tags.xml create mode 100644 trunk/test/compute/fixtures/ec2/describe_tags.xml create mode 100644 
trunk/test/compute/fixtures/ec2/modify_instance_attribute.xml create mode 100644 trunk/test/compute/fixtures/ec2/reboot_instances.xml create mode 100644 trunk/test/compute/fixtures/ec2/run_instances.xml create mode 100644 trunk/test/compute/fixtures/ec2/run_instances_idem.xml create mode 100644 trunk/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml create mode 100644 trunk/test/compute/fixtures/ec2/terminate_instances.xml create mode 100644 trunk/test/compute/fixtures/ecp/htemplate_list.json create mode 100644 trunk/test/compute/fixtures/ecp/network_list.json create mode 100644 trunk/test/compute/fixtures/ecp/ptemplate_list.json create mode 100644 trunk/test/compute/fixtures/ecp/vm_1_action_delete.json create mode 100644 trunk/test/compute/fixtures/ecp/vm_1_action_start.json create mode 100644 trunk/test/compute/fixtures/ecp/vm_1_action_stop.json create mode 100644 trunk/test/compute/fixtures/ecp/vm_1_get.json create mode 100644 trunk/test/compute/fixtures/ecp/vm_list.json create mode 100644 trunk/test/compute/fixtures/ecp/vm_put.json create mode 100644 trunk/test/compute/fixtures/elastichosts/drives_create.json create mode 100644 trunk/test/compute/fixtures/elastichosts/drives_info.json create mode 100644 trunk/test/compute/fixtures/elastichosts/servers_create.json create mode 100644 trunk/test/compute/fixtures/elastichosts/servers_info.json create mode 100644 trunk/test/compute/fixtures/gandi/account_info.xml create mode 100644 trunk/test/compute/fixtures/gandi/datacenter_list.xml create mode 100644 trunk/test/compute/fixtures/gandi/disk_attach.xml create mode 100644 trunk/test/compute/fixtures/gandi/disk_create_from.xml create mode 100644 trunk/test/compute/fixtures/gandi/disk_detach.xml create mode 100644 trunk/test/compute/fixtures/gandi/disk_list.xml create mode 100644 trunk/test/compute/fixtures/gandi/disk_update.xml create mode 100644 trunk/test/compute/fixtures/gandi/iface_attach.xml create mode 100644 
trunk/test/compute/fixtures/gandi/iface_detach.xml create mode 100644 trunk/test/compute/fixtures/gandi/iface_list.xml create mode 100644 trunk/test/compute/fixtures/gandi/image_list_dc0.xml create mode 100644 trunk/test/compute/fixtures/gandi/ip_list.xml create mode 100644 trunk/test/compute/fixtures/gandi/operation_info.xml create mode 100644 trunk/test/compute/fixtures/gandi/vm_create_from.xml create mode 100644 trunk/test/compute/fixtures/gandi/vm_delete.xml create mode 100644 trunk/test/compute/fixtures/gandi/vm_info.xml create mode 100644 trunk/test/compute/fixtures/gandi/vm_list.xml create mode 100644 trunk/test/compute/fixtures/gandi/vm_reboot.xml create mode 100644 trunk/test/compute/fixtures/gandi/vm_stop.xml create mode 100644 trunk/test/compute/fixtures/gogrid/image_list.json create mode 100644 trunk/test/compute/fixtures/gogrid/image_save.json create mode 100644 trunk/test/compute/fixtures/gogrid/ip_list.json create mode 100644 trunk/test/compute/fixtures/gogrid/ip_list_empty.json create mode 100644 trunk/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json create mode 100644 trunk/test/compute/fixtures/gogrid/password_list.json create mode 100644 trunk/test/compute/fixtures/gogrid/server_add.json create mode 100644 trunk/test/compute/fixtures/gogrid/server_delete.json create mode 100644 trunk/test/compute/fixtures/gogrid/server_edit.json create mode 100644 trunk/test/compute/fixtures/gogrid/server_list.json create mode 100644 trunk/test/compute/fixtures/gogrid/server_power.json create mode 100644 trunk/test/compute/fixtures/gogrid/server_power_fail.json create mode 100644 trunk/test/compute/fixtures/ibm_sbc/create.xml create mode 100644 trunk/test/compute/fixtures/ibm_sbc/delete.xml create mode 100644 trunk/test/compute/fixtures/ibm_sbc/images.xml create mode 100644 trunk/test/compute/fixtures/ibm_sbc/instances.xml create mode 100644 trunk/test/compute/fixtures/ibm_sbc/instances_deleted.xml create mode 100644 
trunk/test/compute/fixtures/ibm_sbc/locations.xml create mode 100644 trunk/test/compute/fixtures/ibm_sbc/reboot_active.xml create mode 100644 trunk/test/compute/fixtures/ibm_sbc/sizes.xml create mode 100644 trunk/test/compute/fixtures/meta/helloworld.txt create mode 100644 trunk/test/compute/fixtures/opennebula/compute.xml create mode 100644 trunk/test/compute/fixtures/opennebula/computes.xml create mode 100644 trunk/test/compute/fixtures/opennebula/disk.xml create mode 100644 trunk/test/compute/fixtures/opennebula/storage.xml create mode 100644 trunk/test/compute/fixtures/openstack/_v1_1__auth.json create mode 100644 trunk/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json create mode 100644 trunk/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json create mode 100644 trunk/test/compute/fixtures/openstack/_v2_0__auth.json create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_images_detail.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_images_post.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_limits.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_servers.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_servers_detail.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml create mode 100644 
trunk/test/compute/fixtures/openstack/v1_slug_servers_ips.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml create mode 100644 trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/README create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_flavors_7.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_flavors_detail.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_images_13.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_images_detail.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_servers.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_servers_12064.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_servers_detail.json create mode 100644 trunk/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json create mode 100644 trunk/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml create mode 100644 
trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_base_image.xml create mode 100644 trunk/test/compute/fixtures/opsource/oec_0_9_myaccount.xml create mode 100644 trunk/test/compute/fixtures/rimuhosting/r_distributions.json create mode 100644 trunk/test/compute/fixtures/rimuhosting/r_orders.json create mode 100644 trunk/test/compute/fixtures/rimuhosting/r_orders_new_vps.json create mode 100644 trunk/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json create mode 100644 
trunk/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json create mode 100644 trunk/test/compute/fixtures/rimuhosting/r_pricing_plans.json create mode 100644 trunk/test/compute/fixtures/slicehost/flavors.xml create mode 100644 trunk/test/compute/fixtures/slicehost/images.xml create mode 100644 trunk/test/compute/fixtures/slicehost/slices_1_reboot.xml create mode 100644 trunk/test/compute/fixtures/slicehost/slices_1_reboot_forbidden.xml create mode 100644 trunk/test/compute/fixtures/slicehost/slices_errors.xml create mode 100644 trunk/test/compute/fixtures/slicehost/slices_get.xml create mode 100644 trunk/test/compute/fixtures/slicehost/slices_post.xml create mode 100644 trunk/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml create mode 100644 trunk/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_login.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_org_240.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_task_10496.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_task_11001.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml create mode 100644 
trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml create mode 100644 trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml create mode 100644 trunk/test/compute/fixtures/voxel/create_node.xml create mode 100644 trunk/test/compute/fixtures/voxel/failure.xml create mode 100644 trunk/test/compute/fixtures/voxel/images.xml create mode 100644 trunk/test/compute/fixtures/voxel/locations.xml create mode 100644 trunk/test/compute/fixtures/voxel/nodes.xml create mode 100644 trunk/test/compute/fixtures/voxel/success.xml create mode 100644 trunk/test/compute/fixtures/voxel/unauthorized.xml create mode 100644 trunk/test/compute/test_backward_compatibility.py create mode 100644 trunk/test/compute/test_base.py create mode 100644 trunk/test/compute/test_bluebox.py create mode 100644 trunk/test/compute/test_brightbox.py create mode 100644 trunk/test/compute/test_cloudsigma.py create mode 100644 trunk/test/compute/test_cloudstack.py create mode 100644 trunk/test/compute/test_deployment.py create mode 100644 trunk/test/compute/test_dreamhost.py create mode 100644 trunk/test/compute/test_ec2.py create mode 100644 trunk/test/compute/test_ecp.py create mode 100644 trunk/test/compute/test_elasticstack.py create mode 100644 trunk/test/compute/test_gandi.py create mode 100644 trunk/test/compute/test_gogrid.py create mode 100644 trunk/test/compute/test_ibm_sbc.py create mode 100644 trunk/test/compute/test_linode.py create mode 100644 trunk/test/compute/test_opennebula.py create mode 100644 trunk/test/compute/test_openstack.py create mode 100644 trunk/test/compute/test_opsource.py create mode 100644 trunk/test/compute/test_rackspace.py create mode 100644 trunk/test/compute/test_rimuhosting.py create mode 100644 trunk/test/compute/test_slicehost.py create mode 100644 trunk/test/compute/test_softlayer.py create mode 100644 trunk/test/compute/test_ssh_client.py create mode 100644 trunk/test/compute/test_vcloud.py create mode 100644 
trunk/test/compute/test_voxel.py create mode 100644 trunk/test/compute/test_vpsnet.py create mode 100644 trunk/test/dns/__init__.py create mode 100644 trunk/test/dns/fixtures/linode/create_domain.json create mode 100644 trunk/test/dns/fixtures/linode/create_domain_validation_error.json create mode 100644 trunk/test/dns/fixtures/linode/create_resource.json create mode 100644 trunk/test/dns/fixtures/linode/delete_domain.json create mode 100644 trunk/test/dns/fixtures/linode/delete_domain_does_not_exist.json create mode 100644 trunk/test/dns/fixtures/linode/delete_resource.json create mode 100644 trunk/test/dns/fixtures/linode/delete_resource_does_not_exist.json create mode 100644 trunk/test/dns/fixtures/linode/domain_list.json create mode 100644 trunk/test/dns/fixtures/linode/get_record.json create mode 100644 trunk/test/dns/fixtures/linode/get_record_does_not_exist.json create mode 100644 trunk/test/dns/fixtures/linode/get_zone.json create mode 100644 trunk/test/dns/fixtures/linode/get_zone_does_not_exist.json create mode 100644 trunk/test/dns/fixtures/linode/resource_list.json create mode 100644 trunk/test/dns/fixtures/linode/resource_list_does_not_exist.json create mode 100644 trunk/test/dns/fixtures/linode/update_domain.json create mode 100644 trunk/test/dns/fixtures/linode/update_resource.json create mode 100644 trunk/test/dns/fixtures/rackspace/auth_1_1.json create mode 100644 trunk/test/dns/fixtures/rackspace/create_record_success.json create mode 100644 trunk/test/dns/fixtures/rackspace/create_zone_success.json create mode 100644 trunk/test/dns/fixtures/rackspace/create_zone_validation_error.json create mode 100644 trunk/test/dns/fixtures/rackspace/delete_record_success.json create mode 100644 trunk/test/dns/fixtures/rackspace/delete_zone_success.json create mode 100644 trunk/test/dns/fixtures/rackspace/does_not_exist.json create mode 100644 trunk/test/dns/fixtures/rackspace/get_record_success.json create mode 100644 
trunk/test/dns/fixtures/rackspace/get_zone_success.json create mode 100644 trunk/test/dns/fixtures/rackspace/list_records_no_results.json create mode 100644 trunk/test/dns/fixtures/rackspace/list_records_success.json create mode 100644 trunk/test/dns/fixtures/rackspace/list_zones_no_results.json create mode 100644 trunk/test/dns/fixtures/rackspace/list_zones_success.json create mode 100644 trunk/test/dns/fixtures/rackspace/unauthorized.json create mode 100644 trunk/test/dns/fixtures/rackspace/update_record_success.json create mode 100644 trunk/test/dns/fixtures/rackspace/update_zone_success.json create mode 100644 trunk/test/dns/fixtures/zerigo/create_record.xml create mode 100644 trunk/test/dns/fixtures/zerigo/create_zone.xml create mode 100644 trunk/test/dns/fixtures/zerigo/create_zone_validation_error.xml create mode 100644 trunk/test/dns/fixtures/zerigo/get_record.xml create mode 100644 trunk/test/dns/fixtures/zerigo/get_zone.xml create mode 100644 trunk/test/dns/fixtures/zerigo/list_records.xml create mode 100644 trunk/test/dns/fixtures/zerigo/list_records_no_results.xml create mode 100644 trunk/test/dns/fixtures/zerigo/list_zones.xml create mode 100644 trunk/test/dns/fixtures/zerigo/list_zones_no_results.xml create mode 100644 trunk/test/dns/test_linode.py create mode 100644 trunk/test/dns/test_rackspace.py create mode 100644 trunk/test/dns/test_zerigo.py create mode 100644 trunk/test/file_fixtures.py create mode 100644 trunk/test/loadbalancer/__init__.py create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json create mode 100644 
trunk/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/listZones_default.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json create mode 100644 trunk/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json create mode 100644 trunk/test/loadbalancer/fixtures/gogrid/ip_list.json create mode 100644 trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json create mode 100644 trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json create mode 100644 trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json create mode 100644 trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json create mode 100644 trunk/test/loadbalancer/fixtures/gogrid/unexpected_error.json create mode 100644 trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json create mode 100644 trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json create mode 100644 trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json create mode 100644 trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json create mode 100644 trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json create mode 100644 trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json create mode 100644 trunk/test/loadbalancer/test_cloudstack.py create mode 100644 
trunk/test/loadbalancer/test_gogrid.py create mode 100644 trunk/test/loadbalancer/test_rackspace.py create mode 100644 trunk/test/pricing_test.json create mode 100644 trunk/test/secrets.py-dist create mode 100644 trunk/test/storage/__init__.py create mode 100644 trunk/test/storage/fixtures/atmos/already_exists.xml create mode 100644 trunk/test/storage/fixtures/atmos/empty_directory_listing.xml create mode 100644 trunk/test/storage/fixtures/atmos/list_containers.xml create mode 100644 trunk/test/storage/fixtures/atmos/not_empty.xml create mode 100644 trunk/test/storage/fixtures/atmos/not_found.xml create mode 100644 trunk/test/storage/fixtures/cloudfiles/list_container_objects.json create mode 100644 trunk/test/storage/fixtures/cloudfiles/list_container_objects_empty.json create mode 100644 trunk/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json create mode 100644 trunk/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json create mode 100644 trunk/test/storage/fixtures/cloudfiles/list_containers.json create mode 100644 trunk/test/storage/fixtures/cloudfiles/list_containers_empty.json create mode 100644 trunk/test/storage/fixtures/cloudfiles/meta_data.json create mode 100644 trunk/test/storage/fixtures/google_storage/list_container_objects.xml create mode 100644 trunk/test/storage/fixtures/google_storage/list_container_objects_empty.xml create mode 100644 trunk/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml create mode 100644 trunk/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml create mode 100644 trunk/test/storage/fixtures/google_storage/list_containers.xml create mode 100644 trunk/test/storage/fixtures/google_storage/list_containers_empty.xml create mode 100644 trunk/test/storage/fixtures/s3/list_container_objects.xml create mode 100644 trunk/test/storage/fixtures/s3/list_container_objects_empty.xml create mode 100644 
trunk/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml create mode 100644 trunk/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml create mode 100644 trunk/test/storage/fixtures/s3/list_containers.xml create mode 100644 trunk/test/storage/fixtures/s3/list_containers_empty.xml create mode 100644 trunk/test/storage/test_atmos.py create mode 100644 trunk/test/storage/test_base.py create mode 100644 trunk/test/storage/test_cloudfiles.py create mode 100644 trunk/test/storage/test_google_storage.py create mode 100644 trunk/test/storage/test_s3.py create mode 100644 trunk/test/test_file_fixtures.py create mode 100644 trunk/test/test_httplib_ssl.py create mode 100644 trunk/test/test_pricing.py create mode 100644 trunk/test/test_response_classes.py create mode 100644 trunk/test/test_types.py create mode 100644 trunk/test/test_utils.py create mode 100644 trunk/tox.ini diff --git a/trunk/.coveragerc b/trunk/.coveragerc new file mode 100644 index 0000000000..09d73084fd --- /dev/null +++ b/trunk/.coveragerc @@ -0,0 +1,31 @@ +# .coveragerc to control coverage.py +[run] +branch = True +source = libcloud + +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain about missing debug-only code: + def __repr__ + if self\.debug + + # Don't complain if tests don't hit defensive assertion code: + raise AssertionError + raise NotImplementedError + + # Don't complain if non-runnable code isn't run: + if 0: + if __name__ == .__main__.: + __all__ + import + deprecated_warning + in_development_warning + +ignore_errors = True + +[html] +directory = coverage_html_report diff --git a/trunk/.gitignore b/trunk/.gitignore new file mode 100644 index 0000000000..1c8d9576c3 --- /dev/null +++ b/trunk/.gitignore @@ -0,0 +1,11 @@ +*.py[co] +*.sw[po] +test/secrets.py +demos/secrets.py +*~ +_trial_temp +build +MANIFEST +/.ropeproject/config.py +/.coverage 
+coverage_html_report/ diff --git a/trunk/.ratignore b/trunk/.ratignore new file mode 100644 index 0000000000..f113a4d195 --- /dev/null +++ b/trunk/.ratignore @@ -0,0 +1,18 @@ +MANIFEST +MANIFEST.in +.gitignore +apidocs/ +CHANGES +HACKING +test/storage/fixtures/ +test/compute/fixtures/ +test/loadbalancer/fixtures/ +coverage_html_report/ +.coverage +.coveragerc +libcloud/data/pricing.json +libcloud/common/__init__.py +libcloud/compute/__init__.py +libcloud/storage/__init__.py +test/storage/__init__.py +test/pricing_test.json diff --git a/trunk/CHANGES b/trunk/CHANGES new file mode 100644 index 0000000000..fb3d952a8e --- /dev/null +++ b/trunk/CHANGES @@ -0,0 +1,484 @@ + -*- coding: utf-8 -*- + +Changes with Apache Libcloud in development: + + *) Compute: + + - Add support for Auth 2.0 API (keystone) to the OpenStack Auth + connection class. + [Brad Morgan] + + - Add list_locations method to the OpenStack driver and fix some + inconsistencies in the OpenStack driver extension method signatures. + [Brad Morgan] + + - Update Amazon EC2 driver and pricing data to support a new region - + US West 2 (Oregon) + [Tomaz Muraus] + + *) Storage: + + - Update Amazon S3 driver to support a new region - US West 2 (Oregon) + [Tomaz Muraus] + +Changes with Apache Libcloud 0.6.1: + + *) General: + + - Modify ParamikoSSHClient.connect so it supports authentication using a + key file ; LIBCLOUD-116 + [Jay Doane] + + - User must now explicitly specify a path when using LIBCLOUD_DEBUG + environment variable ; LIBCLOUD-95. + [daveb, Tomaz Muraus] + + - Add new XmlResponse and JsonResponse base class and modify all the + driver-specific response classes to inherit from one of those two + classes where applicable. + [Caio Romão] + + - Add new 'PollingConnection' class. This class can work with 'async' + APIs. It sends an initial request and then periodically polls the API + until the job has completed or a timeout has been reached. 
+ [Tomaz Muraus] + + *) Compute: + + - Add 24GB size to the GoGrid driver + [Roman Bogorodskiy] + + - Fix API endpoint URL in the Softlayer driver + [Tomaz Muraus] + + - Add support for OpenNebula 3.0 API ; LIBCLOUD-120 + [Hutson Betts] + + - Add more attributes to the extra dictionary in the EC2 driver ; + GITHUB-31 + [Juan Carlos Moreno] + + - Fix IP address assignment in the EC2 driver. Don't include "None" in the + public_ip and private_ip Node list attribute. + [Tomaz Muraus] + + - Make deploy_node functionality more robust and don't start deployment if + node public_ip attribute is an empty list. + [Tomaz Muraus] + + - Support SSH key authentication when using deploy_node. + [Russell Haering, Tomaz Muraus] + + - Enable deploy_node functionality in the EC2 driver using SSH key + authentication + [Russell Haering, Tomaz Muraus] + + - Enable paramiko library debug log level if LIBCLOUD_DEBUG is used and + paramiko is installed. + [Tomaz Muraus] + + - Fix the request signature generation in the base EC2 compute driver. + If the endpoint is using a non-standard port (Eucalyptus based + installations), append it to the hostname used to generate the + signature. + [Simon Delamare] + + - Add new "unavailable" state to the BrightboxNodeDriver class. + [Tim Fletcher] + + - Increase a PollingConnection timeout in the CloudStack connection + and fix the context dictionary creation in the _async_request method. + [gigimon] + + - Fix networks retrieval in the CloudStack driver create_node method. + Also only pass 'networkids' field to the API if there are any networks + available. + [gigimon, Tomaz Muraus] + + - Fix list_nodes in the CloudStack driver. Private IPs aren't always + available. + [Tomaz Muraus] + + *) Load-balancer: + + - Add a missing argument to the method call inside + LoadBalancer.attach_compute_node and Driver.balancer_attach_compute_node. + [Tim Fletcher, Tomaz Muraus] + + - Add missing destroy() method to the LoadBalancer class. 
+ [Tomaz Muraus] + + *) DNS: + + - New drivers for Rackspace Cloud DNS (US and UK region) + [Tomaz Muraus] + + - Add list_record_types() method. This method returns a list of record + types supported by the provider. + [Tomaz Muraus] + +Changes with Apache Libcloud 0.6.0-beta1 + + *) General: + + - All the driver classes now inherit from the BaseDriver class + [Tomaz Muraus] + + - Prefer simplejson (if available) over json module; LIBCLOUD-112 + [Geoff Greer] + + - Update compute demo and change the syntax of test credentials stored in + test/secrets.py-dist ; LIBCLOUD-111 + [Mike Nerone] + + - Enable SSL certificate verification by default and throw an exception + if CA certificate files cannot be found. This can be overridden by + setting libcloud.security.VERIFY_SSL_CERT_STRICT to False. + [Tomaz Muraus] + + *) Compute: + + - Support for 1.1 API and many other improvements in the OpenStack driver ; + LIBCLOUD-83 + [Mike Nerone, Paul Querna, Brad Morgan, Tomaz Muraus] + + - Add some extra methods to the Gandi.net driver ; LIBCLOUD-115 + [Aymeric Barantal] + + - Add ex_delete_image method to the Rackspace driver ; GITHUB-27 + [David Busby] + + - Linode driver now supports new 'Japan' location + [Jed Smith] + + - Rackspace driver now inherits from the OpenStack one instead of doing + it vice versa.; LIBCLOUD-110 + [Mike Nerone] + + - Properly populate NodeImage "details" dictionary in the Rackspace + compute driver.; LIBCLOUD-107 + [Lucy Mendel] + + - Fix a bug in Eucalyptus driver ex_describe_addresses method. + [Tomaz Muraus] + + - Add the following new extension methods to the Rackspace driver: + ex_resize, ex_confirm_resize, ex_revert_resize. + [Tomaz Muraus] + + - Also allow user to pass in Node object to some ex_ methods in + the Rackspace compute driver. 
+ [Tomaz Muraus] + + - Throw an exception in deploy_node if paramiko library is not + available + [Tomaz Muraus] + + - Fix chmod argument value which is passed to the sftpclient.put + method; GITHUB-17 + [John Carr] + + - New driver for Ninefold.com; LIBCLOUD-98 + [Benno Rice] + + *) Storage: + + - New driver for Google Storage based on the v1.0 / legacy API + [Tomaz Muraus] + + - New driver for Ninefold.com; GITHUB-19 + [Benno Rice] + + - Fix a bug in uploading an object with some versions of Python 2.7 + where httplib library doesn't automatically call str() on the + header values. + [Tomaz Muraus] + + - Allow users to upload (create) 0-bytes large (empty) objects + [Tomaz Muraus] + + *) Load-balancer: + + - New driver for Rackspace UK location + [Tomaz Muraus] + + - New driver for Ninefold.com; LIBCLOUD-98 + [Benno Rice] + + *) DNS: + + - Drivers for Linode DNS and Zerigo DNS + [Tomaz Muraus] + + - Brand new DNS API! + [Tomaz Muraus] + +Changes with Apache Libcloud 0.5.2 + + *) Compute: + - New driver for serverlove.com and skalicloud.com + [Tomaz Muraus] + + - Fix node name and tag handling in the Amazon EC2 driver + [Wiktor Kolodziej] + + - Fix pricing and response handling in the OpenStack driver + [Andrey Zhuchkov] + + - Fix deploy_node() method and make it more robust + [Tomaz Muraus] + + - Users can now pass file like objects to ScriptDeployment and + SSHKeyDeployment constructor. + [Tomaz Muraus] + + - Include node tags when calling list_nodes() in the Amazon EC2 + driver + [Trevor Pounds] + + - Properly handle response errors in the Rackspace driver and + only throw InvalidCredsError if the returned status code is 401 + [Brad Morgan] + + - Fix the create_node method in the Nimbus driver and make the + "ex_create_tag" method a no-op, because Nimbus doesn't support creating + tags. + [Tomaz Muraus] + + *) Storage: + + - Fix handling of the containers with a lot of objects. 
Now a LazyList + object is returned when user calls list_container_objects() method + and this object transparently handles pagination. + [Danny Clark, Wiktor Kolodziej] + +Changes with Apache Libcloud 0.5.0 + + *) Existing APIs directly on the libcloud.* module have been + deprecated and will be removed in version 0.6.0. Most methods + were moved to the libcloud.compute.* module. + + *) Add new libcloud.loadbalancers API, with initial support for: + - GoGrid Load Balancers + - Rackspace Load Balancers + [Roman Bogorodskiy] + + *) Add new libcloud.storage API, with initial support for: + - Amazon S3 + - Rackspace CloudFiles + [Tomaz Muraus] + + *) Add new libcloud.compute drivers for: + - Bluebox [Christian Paredes] + - Gandi.net [Aymeric Barantal] + - Nimbus [David LaBissoniere] + - OpenStack [Roman Bogorodskiy] + - Opsource.net [Joe Miller] + + *) Added "pricing" module and improved pricing handling. + [Tomaz Muraus] + + *) Updates to the GoGrid compute driver: + - Use API version 1.0. + - Remove sandbox flag. + - Add ex_list_ips() to list IP addresses assigned to the account. + - Implement ex_edit_image method which allows changing image attributes + like name, description and make image public or private. + [Roman Bogorodskiy] + + *) Updates to the Amazon EC2 compute driver: + - When creating a Node, use the name argument to set a Tag with the + value. [Tomaz Muraus] + - Add extension method for modifying node attributes and changing the + node size. [Tomaz Muraus] + - Add support for the new Amazon Region (Tokyo). [Tomaz Muraus] + - Added ex_create_tags and ex_delete_tags. [Brandon Rhodes] + - Include node Elastic IP addresses in the node public_ip attribute + for the EC2 nodes. [Tomaz Muraus] + - Use ipAddress and privateIpAddress attribute for the EC2 node public + and private ip. [Tomaz Muraus] + - Add ex_describe_addresses method to the EC2 driver. 
[Tomaz Muraus] + + *) Updates to the Rackspace CloudServers compute driver: + - Add ex_rebuild() and ex_get_node_details() [Andrew Klochkov] + - Expose URI of a Rackspace node to the node meta data. [Paul Querna] + + *) Minor fixes to get the library and tests working on Python 2.7 and PyPy. + [Tomaz Muraus] + +Changes with Apache Libcloud 0.4.2 (Released January 18, 2011) + + *) Fix EC2 create_node to become backward compatible for + NodeLocation. + [Tomaž Muraus] + + *) Update code for compatibility with CPython 2.5 + [Jerry Chen] + + *) Implement ex_edit_node method for GoGrid driver which allows + changing node attributes like amount of RAM or description. + [Roman Bogorodskiy] + + *) Add ex_set_password and ex_set_server_name to Rackspace driver. + [Peter Herndon, Paul Querna] + + *) Add Hard and Soft reboot methods to Rackspace driver. + [Peter Herndon] + + *) EC2 Driver availability zones, via ex_list_availability_zones; + list_locations rewrite to include availablity zones + [Tomaž Muraus] + + *) EC2 Driver Idempotency capability in create_node; LIBCLOUD-69 + [David LaBissoniere] + + *) SSL Certificate Name Verification: + - libcloud.security module + - LibcloudHTTPSConnection, LibcloudHTTPConnection (alias) + - Emits warning when not verifying, or CA certs not found + + *) Append ORD1 to available Rackspace location, but keep in the + same node as DFW1, because it's not readable or writeable from + the API. + [Per suggestion of Grig Gheorghiu] + + *) ex_create_ip_group, ex_list_ip_groups, ex_delete_ip_group, + ex_share_ip, ex_unshare_ip, ex_list_ip_addresses additions + to Rackspace driver + [Andrew Klochkov] + + *) New driver for CloudSigma + [Tomaž Muraus] + + *) New driver for Brightbox Cloud. 
LIBCLOUD-63 + [Tim Fletcher] + + *) Deployment capability to ElasticHosts + [Tomaž Muraus] + + *) Allow deploy_node to use non-standard SSH username and port + [Tomaž Muraus] + + *) Added Rackspace UK (London) support + [Chmouel Boudjnah] + + *) GoGrid driver: add support for locations, i.e. listing + of locations and creation of a node in specified + location + [Roman Bogorodskiy] + + *) GoGrid and Rackspace drivers: add ex_save_image() extra + call to convert running node to an image + [Roman Bogorodskiy] + + *) GoGrid driver: add support for creating 'sandbox' server + and populate isSandbox flag in node's extra information. + [Roman Bogorodskiy] + + *) Add ImportKeyPair and DescribeKeyPair to EC2. LIBCLOUD-62 + [Philip Schwartz] + + *) Update EC2 driver and test fixtures for new API. + [Philip Schwartz] + +Changes with Apache Libcloud 0.4.0 [Released October 6, 2010] + + *) Add create keypair functionality to EC2 Drivers. LIBCLOUD-57 + [Grig Gheorghiu] + + *) Improve handling of GoGrid accounts with limited access + API keys. [Paul Querna] + + *) New Driver for ElasticHosts. LIBCLOUD-45 + [Tomaz Muraus] + + *) Use more consistent name for GoGrid driver and use http + POST method for 'unsafe' operations + [Russell Haering] + + *) Implement password handling and add deployment support + for GoGrid nodes. + [Roman Bogorodskiy] + + *) Fix behavior of GoGrid's create_node to wait for a Node ID. + [Roman Bogorodskiy] + + *) Add ex_create_node_nowait to GoGrid driver if you don't need to + wait for a Node ID when creating a node. + [Roman Bogorodskiy] + + *) Removed libcloud.interfaces module. + [Paul Querna] + + *) Removed dependency on zope.interfaces. + [Paul Querna] + + *) RimuHosting moved API endpoint address. + [Paul Querna] + + *) Fix regression and error in GoGrid driver for parsing node objects. + [Roman Bogorodskiy] + + *) Added more test cases for GoGrid driver. 
LIBCLOUD-34 + [Roman Bogorodskiy, Jerry Chen] + + *) Fix parsing of Slicehost nodes with multiple Public IP addresses. + [Paul Querna] + + *) Add exit_status to ScriptDeployment. LIBCLOUD-36 + [Paul Querna] + + *) Update prices for several drivers. + [Brad Morgan, Paul Querna] + + *) Update Linode driver to reflect new plan sizes. + [Jed Smith] + + *) Change default of 'location' in Linode create_node. LIBCLOUD-41 + [Jed Smith, Steve Steiner] + + *) Document the Linode driver. + [Jed Smith] + + *) Request a private, LAN IP address at Linode creation. + [Jed Smith] + +Changes with Apache Libcloud 0.3.1 [Released May 11, 2010] + + *) Updates to Apache License blocks to correctly reflect status as an + Apache Project. + + *) Fix NOTICE file to use 2010 copyright date. + + *) Improve error messages for when running the test cases without + first setting up a secrets.py + +Changes with Apache Libcloud 0.3.0 [Tagged May 6, 2010, not released] + + *) New Drivers for: + - Dreamhost + - Eucalyptus + - Enomaly ECP + - IBM Developer Cloud + - OpenNebula + - SoftLayer + + *) Added new deployment and bootstrap API. + + *) Improved Voxel driver. + + *) Added support for Amazon EC2 Asia Pacific (Singapore) Region. + + *) Improved test coverage for all drivers. + + *) Add support for multiple security groups in EC2. + + *) Fixed bug in Rackspace and RimuHosting when using multiple threads. + + *) Improved debugging and logging of HTTP requests. + + *) Improved documentation for all classes and methods. + +Changes with Apache Libcloud 0.2.0 [Tagged February 2, 2010] + + *) First public release. 
diff --git a/trunk/HACKING b/trunk/HACKING new file mode 100644 index 0000000000..c43a415c76 --- /dev/null +++ b/trunk/HACKING @@ -0,0 +1,74 @@ +General Information +=================== + * URL: http://libcloud.apache.org/libcloud/devinfo.html + +Git Repositories +=================== + * Official Git Mirror: git://git.apache.org/libcloud.git + * Github Mirror: git://github.com/apache/libcloud.git + +Using The Git-SVN Bridge (For Committers) +========================================= + + $ git clone git://git.apache.org/libcloud libcloud + $ cd libcloud + + $ curl http://git.apache.org/authors.txt > .git/authors.txt + $ git config svn.authorsfile ".git/authors.txt" + + # Optionally, set your Apache commiter info, if different from global + $ git config user.name "Your Name" + $ git config user.email "you@example.org" + + $ git svn init \ + --prefix=origin/ \ + --tags=tags \ + --trunk=trunk \ + --branches=branches \ + https://svn.apache.org/repos/asf/incubator/libcloud + + $ git svn rebase + + To push commits back to SVN: + $ git svn dcommit + +Testing +======= + + To run the libcloud test suite you need to have the following extra + dependencies installed: + + * mock (pip install mock) + * coverage (pip install coverage) - you only need this library if you + want to generate a test coverage report + + Libcloud includes an example secrets.py file at: + test/secrets.py-dist + + To run the test cases, you most likely want to run: + $ cp test/secrets.py-dist test/secrets.py + + This is done to prevent accidental commits of a developers provider credentials. + + To run all suites: + + libcloud$ python setup.py test + running test + ................................................................................................ + ---------------------------------------------------------------------- + Ran 96 tests in 0.182s + + OK + + To run specific tests: + + libcloud$ PYTHONPATH=. python test/compute/test_base.py + ....... 
+ ---------------------------------------------------------------------- + Ran 7 tests in 0.001s + + OK + + To generate test coverage report run: + + libcloud$ PYTHONPATH=. python setup.py coverage diff --git a/trunk/LICENSE b/trunk/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/trunk/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/trunk/MANIFEST.in b/trunk/MANIFEST.in new file mode 100644 index 0000000000..8e1cd486f3 --- /dev/null +++ b/trunk/MANIFEST.in @@ -0,0 +1,24 @@ +include LICENSE +include NOTICE +include DISCLAIMER +include example_*.py +include CONTRIBUTORS +include CHANGES +include HACKING +include RELEASING +include README +include tox.ini +include libcloud/data/pricing.json +prune test/secrets.py +include demos/* +include test/*.py +include test/pricing_test.json +include test/secrets.py-dist +include test/compute/*.py +include test/storage/*.py +include test/loadbalancer/*.py +include test/dns/*.py +include test/compute/fixtures/*/* +include test/storage/fixtures/*/* +include test/loadbalancer/fixtures/*/* +include test/dns/fixtures/*/* diff --git a/trunk/NOTICE b/trunk/NOTICE new file mode 100644 index 0000000000..3a62dab2a7 --- /dev/null +++ b/trunk/NOTICE @@ -0,0 +1,8 @@ +Apache Libcloud +Copyright (c) 2010 The Apache Software Foundation + +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). + +This product includes software developed by +Cloudkick (http://www.cloudkick.com/). 
diff --git a/trunk/README b/trunk/README new file mode 100644 index 0000000000..74bc2592a9 --- /dev/null +++ b/trunk/README @@ -0,0 +1,58 @@ +Apache libcloud - a unified interface into the cloud +==================================================== + +The goal of this project is to create a basic yet functional standard library +into various cloud providers. + +Apache libcloud is an Apache project, see for +more information. + +For API documentation and examples, see: + + +Important Security Note +======================= + +Python's built-in SSL module does not do certificate validation. + +To address this, we've introduced the libcloud.security module with tunable +parameters. + +View the entire guide at: + +Enabling SSL Certificate Check +============================== + + import libcloud.security + libcloud.security.VERIFY_SSL_CERT = True + + # optionally, add to CA_CERTS_PATH + libcloud.security.CA_CERTS_PATH.append("/path/to/your/cacerts.txt") + +CA_CERTS_PATH contains common paths to CA bundle installations on the +following platforms: + + * openssl on CentOS/Fedora + * ca-certificates on Debian/Ubuntu/Arch/Gentoo + * ca_root_nss on FreeBSD + * curl-ca-bundle on Mac OS X + +Note for OS X Users +=================== + +OS X root certificates are stored in the Keychain format, unlike the standard +PEM format available on other *nix platforms. For this reason, it is not +possible to include the standard OS X root certificates with CA_CERTS_PATH. + +Acquiring CA Certificates +========================= + +If the above packages are unavailable to you, and you don't wish to roll your +own, the makers of cURL provides an excellent resource, generated from +Mozilla: http://curl.haxx.se/docs/caextract.html + +Feedback +======== + +Please send feedback to the mailing list at , +or the JIRA at . 
diff --git a/trunk/RELEASING b/trunk/RELEASING new file mode 100644 index 0000000000..4589ee769a --- /dev/null +++ b/trunk/RELEASING @@ -0,0 +1,64 @@ +Making a release (for release managers) +======================================= + +1. Pre-release check list + +- Make sure tests pass on all the supported Python versions (sudo tox) +- Make sure CHANGES file is up to date +- Make sure __version__ string in libcloud/__init__.py is up to date + +2. Creating release artifacts + + We have a script that runs the required setup.py commands and then hashes + and signs the files. To run it: + + cd dist + ./release.sh -u yourusername@apache.org + + This should result in a set of apache-libcloud-${VERSION}.{tar.bz2,zip}{,asc,md5,sha1} + files that are suitable to be uploaded for a release. + +3. Uploading release artifacts + + - Add release artifacts to the SVN repository at + https://dist.apache.org/repos/dist/release/libcloud/ + + It may take up to a day for the artifacts to be available on all the + Apache mirrors, but they should be instantly available at + http://www.apache.org/dist/libcloud/. + + - If there is more than one older release in the repository, delete the + oldest one. Old releases are automatically archived and available at + https://dist.apache.org/repos/dist/release/libcloud/. + +4. Tagging a release + + svn copy https://svn.apache.org/repos/asf/libcloud/trunk/ \ + https://svn.apache.org/repos/asf/libcloud/tags/ \ + -r \ + -m "Release " + +5. Publishing package to PyPi + + TODO + +6. Updating a website and doap_libcloud + + - Update "News" page + + - Update "Downloads" page + + - Update "Get it" section in the sidebar + + - Update doap_libcloud.py and add info about the new version + +7. Sending announcements + + - Send a release announcement to {dev,users}@libcloud.apache.org. If it's a + major release also send it to announce@apache.org. 
+ + - Send a release announcement to Twitter + +Miscellaneous: + +- If needed, use Apache URL shortening service - http://s.apache.org/ diff --git a/trunk/demos/compute_demo.py b/trunk/demos/compute_demo.py new file mode 100644 index 0000000000..ce896942cb --- /dev/null +++ b/trunk/demos/compute_demo.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This example provides both a running script (invoke from command line) +# and an importable module one can play with in Interactive Mode. +# +# See docstrings for usage examples. +# + +try: + import secrets +except ImportError: + secrets = None + +import os.path +import sys + +# Add parent dir of this file's dir to sys.path (OS-agnostically) +sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), os.path.pardir))) + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.types import Provider +from libcloud.providers import get_driver + +from pprint import pprint + +def get_demo_driver(provider_name='RACKSPACE', *args, **kwargs): + """An easy way to play with a driver interactively. 
+ + # Load credentials from secrets.py: + >>> from compute_demo import get_demo_driver + >>> driver = get_demo_driver('RACKSPACE') + + # Or, provide credentials: + >>> from compute_demo import get_demo_driver + >>> driver = get_demo_driver('RACKSPACE', 'username', 'api_key') + # Note that these parameters vary by driver ^^^ + + # Do things like the demo: + >>> driver.load_nodes() + >>> images = driver.load_images() + >>> sizes = driver.load_sizes() + + # And maybe do more than that: + >>> node = driver.create_node( + name='my_first_node', + image=images[0], + size=sizes[0], + ) + >>> node.destroy() + """ + provider_name = provider_name.upper() + + DriverClass = get_driver(getattr(Provider, provider_name)) + + if not args: + args = getattr(secrets, provider_name + '_PARAMS', ()) + if not kwargs: + kwargs = getattr(secrets, provider_name + '_KEYWORD_PARAMS', {}) + + try: + return DriverClass(*args, **kwargs) + except InvalidCredsError: + raise InvalidCredsError( + 'valid values should be put in secrets.py' + ) + +def main(argv): + """Main Compute Demo + + When invoked from the command line, it will connect using secrets.py + (see secrets.py-dist for instructions and examples), and perform the + following tasks: + + - List current nodes + - List available images (up to 10) + - List available sizes (up to 10) + """ + try: + driver = get_demo_driver() + except InvalidCredsError as ex: + print("Invalid Credentials: %s" % (ex.value,)) + return 1 + + try: + print(">> Loading nodes...") + pprint(driver.list_nodes()) + + print(">> Loading images... (showing up to 10)") + pprint(driver.list_images()[:10]) + + print(">> Loading sizes... 
(showing up to 10)") + pprint(driver.list_sizes()[:10]) + except Exception as ex: + print("A fatal error occurred: %s" % (ex,)) + return 1 + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/trunk/demos/secrets.py-dist b/trunk/demos/secrets.py-dist new file mode 100644 index 0000000000..f7e3fc668f --- /dev/null +++ b/trunk/demos/secrets.py-dist @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make a copy of this file named 'secrets.py' and add your credentials there. +# Note you can run unit tests without setting your credentials. 
+ +BLUEBOX_PARAMS = ('customer_id', 'api_key') +BRIGHTBOX_PARAMS = ('client_id', 'client_secret') +DREAMHOST_PARAMS = ('key',) +EC2_PARAMS = ('access_id', 'secret') +ECP_PARAMS = ('user_name', 'password') +GANDI_PARAMS = ('user',) +HOSTINGCOM_PARAMS = ('user', 'secret') +IBM_PARAMS = ('user', 'secret') +# OPENSTACK_PARAMS = ('user_name', 'api_key', secure_bool, 'host', port_int) +OPENSTACK_PARAMS = ('user_name', 'api_key', False, 'host', 8774) +OPENNEBULA_PARAMS = ('user', 'key') +OPSOURCE_PARAMS = ('user', 'password') +RACKSPACE_PARAMS = ('user', 'key') +SLICEHOST_PARAMS = ('key',) +SOFTLAYER_PARAMS = ('user', 'api_key') +VCLOUD_PARAMS = ('user', 'secret') +VOXEL_PARAMS = ('key', 'secret') +VPSNET_PARAMS = ('user', 'key') diff --git a/trunk/dist/hash-sign.sh b/trunk/dist/hash-sign.sh new file mode 100755 index 0000000000..840b6a8921 --- /dev/null +++ b/trunk/dist/hash-sign.sh @@ -0,0 +1,147 @@ +#!/bin/sh +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# hash-sign.sh : hash and sign the specified files +# +# USAGE: hash-sign.sh file1 file2 ... 
+# + +user="" +case "$1" in + -u) + shift + user="$1" + shift + ;; +esac + +allfiles=$* + + +split="---------------------------------------------------------------------" + +echo $split +echo "" +echo "Generating MD5/SHA1 checksum files ..." +echo "" + +# check for executables +gpg="`which gpg 2> /dev/null | head -1`" +pgp="`which pgp 2> /dev/null | head -1`" +openssl="`which openssl 2> /dev/null | head -1`" +md5sum="`which md5sum 2> /dev/null | head -1`" +sha1sum="`which sha1sum 2> /dev/null | head -1`" +md5="`which md5 2> /dev/null | head -1`" +sha1="`which sha1 2> /dev/null | head -1`" + +# if found we use openssl for generating the checksums +# and convert the results into machine-readable format. +if test -x "${openssl}"; then + for file in ${allfiles}; do + if test -f "${file}"; then + echo "openssl: creating md5 checksum file for ${file} ..." + ${openssl} md5 ${file} |\ + sed -e 's#^MD5(\(.*\))= \([0-9a-f]*\)$#\2 *\1#' > ${file}.md5 + echo "openssl: creating sha1 checksum file for ${file} ..." + ${openssl} sha1 ${file} |\ + sed -e 's#^SHA1(\(.*\))= \([0-9a-f]*\)$#\2 *\1#' > ${file}.sha1 + fi + done +# no openssl found - check if we have gpg +elif test -x "${gpg}"; then + for file in ${allfiles}; do + if test -f "${file}"; then + echo "gpg: creating md5 checksum file for ${file} ..." + ${gpg} --print-md md5 ${file} |\ + sed -e '{N;s#\n##;}' |\ + sed -e 's#\(.*\): \(.*\)#\2::\1#;s#[\r\n]##g;s# ##g' \ + -e 'y#ABCDEF#abcdef#;s#::# *#' > ${file}.md5 + echo "gpg: creating sha1 checksum file for ${file} ..." + ${gpg} --print-md sha1 ${file} |\ + sed -e '{N;s#\n##;}' |\ + sed -e 's#\(.*\): \(.*\)#\2::\1#;s#[\r\n]##g;s# ##g' \ + -e 'y#ABCDEF#abcdef#;s#::# *#' > ${file}.sha1 + fi + done +else + # no openssl or gpg found - check for md5sum + if test -x "${md5sum}"; then + for file in ${allfiles}; do + if test -f "${file}"; then + echo "md5sum: creating md5 checksum file for ${file} ..." 
+ ${md5sum} -b ${file} > ${file}.md5 + fi + done + # no openssl or gpg found - check for md5 + elif test -x "${md5}"; then + for file in ${allfiles}; do + if test -f "${file}"; then + echo "md5: creating md5 checksum file for ${file} ..." + ${md5} -r ${file} | sed -e 's# # *#' > ${file}.md5 + fi + done + fi + # no openssl or gpg found - check for sha1sum + if test -x "${sha1sum}"; then + for file in ${allfiles}; do + if test -f "${file}"; then + echo "sha1sum: creating sha1 checksum file for ${file} ..." + ${sha1sum} -b ${file} > ${file}.sha1 + fi + done + # no openssl or gpg found - check for sha1 + elif test -x "${sha1}"; then + for file in ${allfiles}; do + if test -f "${file}"; then + echo "sha1: creating sha1 checksum file for ${file} ..." + ${sha1} -r ${file} | sed -e 's# # *#' > ${file}.sha1 + fi + done + fi +fi + +echo $split +echo "" +echo "Signing the files ..." +echo "" + +# if found we use pgp for signing the files +if test -x "${pgp}"; then + if test -n "${user}"; then + args="-u ${user}" + fi + for file in ${allfiles}; do + if test -f "${file}"; then + echo "pgp: creating asc signature file for ${file} ..." + ${pgp} -sba ${file} ${args} + fi + done +# no pgp found - check for gpg +elif test -x "${gpg}"; then + if test -z "${user}"; then + args="--default-key ${args}" + else + args="-u ${user} ${args}" + fi + for file in ${allfiles}; do + if test -f "${file}"; then + echo "gpg: creating asc signature file for ${file} ..." + ${gpg} --armor ${args} --detach-sign ${file} + fi + done +else + echo "PGP or GnuPG not found! Not signing release!" +fi diff --git a/trunk/dist/release.sh b/trunk/dist/release.sh new file mode 100755 index 0000000000..15a809c803 --- /dev/null +++ b/trunk/dist/release.sh @@ -0,0 +1,39 @@ +#!/bin/sh +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +user="" +case "$1" in + -u) + shift + user="$1" + shift + ;; +esac + +if test -z "${user}"; then + echo "must pass -u with a gpg id" + exit 1 +fi + +cd .. + +python setup.py sdist --formats=bztar,zip + +cd dist + +./hash-sign.sh -u ${user} *.tar.bz2 *.zip diff --git a/trunk/doap_libcloud.rdf b/trunk/doap_libcloud.rdf new file mode 100644 index 0000000000..8aebe85c2f --- /dev/null +++ b/trunk/doap_libcloud.rdf @@ -0,0 +1,107 @@ + + + + + + 2011-05-29 + + Apache Libcloud + + + Apache Libcloud is a standard Python library that abstracts away differences among multiple cloud provider APIs. + Apache Libcloud is a standard Python library that abstracts away differences among multiple cloud provider APIs. It allows users to manage cloud servers, cloud storage and load-balancers. 
+ + + + Python + + + + 0.2.0 + 2010-02-09 + 0.2.0 + + + + + 0.3.0 + 2010-05-11 + 0.3.0 + + + + + 0.4.0 + 2010-10-12 + 0.4.0 + + + + + 0.4.1 + 2011-01-17 + 0.4.0 + + + + + 0.5.0 + 2011-05-25 + 0.5.0 + + + + + 0.5.2 + 2011-07-05 + 0.5.2 + + + + + 0.6.0-beta1 + 2011-10-19 + 0.6.0-beta1 + + + + + 0.6.1 + 2011-11-09 + 0.6.1 + + + + + + + + + + + + Tomaz Muraus + + + + + diff --git a/trunk/example_compute.py b/trunk/example_compute.py new file mode 100644 index 0000000000..43aed9d0c8 --- /dev/null +++ b/trunk/example_compute.py @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +EC2 = get_driver(Provider.EC2_US_EAST) +Slicehost = get_driver(Provider.SLICEHOST) +Rackspace = get_driver(Provider.RACKSPACE) + +drivers = [ EC2('access key id', 'secret key'), + Slicehost('api key'), + Rackspace('username', 'api key') ] + +nodes = [ driver.list_nodes() for driver in drivers ] + +print nodes +# [ , +# , ... 
] + +# grab the node named "test" +node = filter(lambda x: x.name == 'test', nodes)[0] + +# reboot "test" +node.reboot() diff --git a/trunk/example_dns.py b/trunk/example_dns.py new file mode 100644 index 0000000000..b99756be01 --- /dev/null +++ b/trunk/example_dns.py @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pprint import pprint + +from libcloud.dns.types import Provider +from libcloud.dns.providers import get_driver + +Zerigo = get_driver(Provider.ZERIGO) + +driver = Zerigo('email', 'key') + +zones = driver.list_zones() +pprint(zones) + +records = zones[0].list_records() +pprint(records) diff --git a/trunk/example_loadbalancer.py b/trunk/example_loadbalancer.py new file mode 100644 index 0000000000..61ad5ac22c --- /dev/null +++ b/trunk/example_loadbalancer.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import time + +from libcloud.loadbalancer.base import Member, Algorithm +from libcloud.loadbalancer.types import Provider, State +from libcloud.loadbalancer.providers import get_driver + +def main(): + Rackspace = get_driver(Provider.RACKSPACE_US) + + driver = Rackspace('username', 'api key') + + balancers = driver.list_balancers() + + print balancers + + # creating a balancer which balances traffic across two + # nodes: 192.168.86.1:80 and 192.168.86.2:8080. Balancer + # itself listens on port 80/tcp + new_balancer_name = 'testlb' + os.urandom(4).encode('hex') + new_balancer = driver.create_balancer(name=new_balancer_name, + algorithm=Algorithm.ROUND_ROBIN, + port=80, + protocol='http', + members=(Member(None, '192.168.86.1', 80), + Member(None, '192.168.86.2', 8080)) + ) + + print new_balancer + + # wait for balancer to become ready + # NOTE: in real life code add timeout to not end up in + # endless loop when things go wrong on provider side + while True: + balancer = driver.get_balancer(balancer_id=new_balancer.id) + + if balancer.state == State.RUNNING: + break + + print "sleeping for 30 seconds for balancers to become ready" + time.sleep(30) + + # fetch list of members + members = balancer.list_members() + print members + + # remove first member + balancer.detach_member(members[0]) + + # remove the balancer + driver.destroy_balancer(new_balancer) + +if __name__ == "__main__": + main() diff --git a/trunk/example_storage.py b/trunk/example_storage.py new file mode 100644 index 0000000000..87b5402608 --- /dev/null +++ 
b/trunk/example_storage.py @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pprint import pprint + +from libcloud.storage.types import Provider +from libcloud.storage.providers import get_driver + +CloudFiles = get_driver(Provider.CLOUDFILES_UK) + +driver = CloudFiles('access key id', 'secret key') + +containers = driver.list_containers() +container_objects = driver.list_container_objects(containers[0]) + +pprint(containers) +pprint(container_objects) diff --git a/trunk/libcloud/__init__.py b/trunk/libcloud/__init__.py new file mode 100644 index 0000000000..44347cff4d --- /dev/null +++ b/trunk/libcloud/__init__.py @@ -0,0 +1,64 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +libcloud provides a unified interface to the cloud computing resources. + +@var __version__: Current version of libcloud +""" + +__all__ = ["__version__", "enable_debug"] +__version__ = '0.6.1' + +try: + import paramiko + have_paramiko = True +except ImportError: + have_paramiko = False + + +def enable_debug(fo): + """ + Enable library wide debugging to a file-like object. + + @param fo: Where to append debugging information + @type fo: File like object, only write operations are used. + """ + from libcloud.common.base import (Connection, + LoggingHTTPConnection, + LoggingHTTPSConnection) + LoggingHTTPSConnection.log = fo + LoggingHTTPConnection.log = fo + Connection.conn_classes = (LoggingHTTPConnection, + LoggingHTTPSConnection) + + +def _init_once(): + """ + Utility function that is ran once on Library import. + + This checks for the LIBCLOUD_DEBUG enviroment variable, which if it exists + is where we will log debug information about the provider transports. + """ + import os + path = os.getenv('LIBCLOUD_DEBUG') + if path: + fo = open(path, 'a') + enable_debug(fo) + + if have_paramiko: + paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG) + +_init_once() diff --git a/trunk/libcloud/base.py b/trunk/libcloud/base.py new file mode 100644 index 0000000000..ef340cb884 --- /dev/null +++ b/trunk/libcloud/base.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.common.base import RawResponse, Response, LoggingConnection +from libcloud.common.base import LoggingHTTPSConnection, LoggingHTTPConnection +from libcloud.common.base import ConnectionKey, ConnectionUserAndKey +from libcloud.compute.base import Node, NodeSize, NodeImage +from libcloud.compute.base import NodeLocation, NodeAuthSSHKey, NodeAuthPassword +from libcloud.compute.base import NodeDriver, is_private_subnet + +__all__ = ['RawResponse', + 'Response', + 'LoggingConnection', + 'LoggingHTTPSConnection', + 'LoggingHTTPConnection', + 'ConnectionKey', + 'ConnectionUserAndKey', + 'Node', + 'NodeSize', + 'NodeImage', + 'NodeLocation', + 'NodeAuthSSHKey', + 'NodeAuthPassword', + 'NodeDriver', + 'is_private_subnet'] + +from libcloud.utils import deprecated_warning + +deprecated_warning(__name__) diff --git a/trunk/libcloud/common/__init__.py b/trunk/libcloud/common/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/trunk/libcloud/common/aws.py b/trunk/libcloud/common/aws.py new file mode 100644 index 0000000000..206caef00b --- /dev/null +++ b/trunk/libcloud/common/aws.py @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.common.base import XmlResponse + +class AWSBaseResponse(XmlResponse): pass diff --git a/trunk/libcloud/common/base.py b/trunk/libcloud/common/base.py new file mode 100644 index 0000000000..5c0a2ec8d4 --- /dev/null +++ b/trunk/libcloud/common/base.py @@ -0,0 +1,731 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import httplib +import urllib +import StringIO +import ssl +import time + +from xml.etree import ElementTree as ET +from pipes import quote as pquote +import urlparse + +try: + import simplejson as json +except: + import json + +import libcloud +from libcloud.common.types import LibcloudError, MalformedResponseError + +from libcloud.httplib_ssl import LibcloudHTTPSConnection +from httplib import HTTPConnection as LibcloudHTTPConnection + +class Response(object): + """ + A Base Response class to derive from. + """ + NODE_STATE_MAP = {} + + object = None + body = None + status = httplib.OK + headers = {} + error = None + connection = None + parse_zero_length_body = False + + def __init__(self, response, connection): + self.body = response.read().strip() + self.status = response.status + self.headers = dict(response.getheaders()) + self.error = response.reason + self.connection = connection + + if not self.success(): + raise Exception(self.parse_error()) + + self.object = self.parse_body() + + def parse_body(self): + """ + Parse response body. + + Override in a provider's subclass. + + @return: Parsed body. + """ + return self.body + + def parse_error(self): + """ + Parse the error messages. + + Override in a provider's subclass. + + @return: Parsed error. + """ + return self.body + + def success(self): + """ + Determine if our request was successful. + + The meaning of this can be arbitrary; did we receive OK status? Did + the node get created? Were we authenticated? + + @return: C{True} or C{False} + """ + return self.status == httplib.OK or self.status == httplib.CREATED + + +class JsonResponse(Response): + """ + A Base JSON Response class to derive from. 
+ """ + def parse_body(self): + if len(self.body) == 0 and not self.parse_zero_length_body: + return self.body + + try: + body = json.loads(self.body) + except: + raise MalformedResponseError( + "Failed to parse JSON", + body=self.body, + driver=self.connection.driver) + return body + + parse_error = parse_body + + +class XmlResponse(Response): + """ + A Base XML Response class to derive from. + """ + def parse_body(self): + if len(self.body) == 0 and not self.parse_zero_length_body: + return self.body + + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError("Failed to parse XML", + body=self.body, + driver=self.connection.driver) + return body + + parse_error = parse_body + + +class RawResponse(Response): + + def __init__(self, connection): + self._status = None + self._response = None + self._headers = {} + self._error = None + self._reason = None + self.connection = connection + + + @property + def response(self): + if not self._response: + response = self.connection.connection.getresponse() + self._response, self.body = response, response + if not self.success(): + self.parse_error() + return self._response + + @property + def status(self): + if not self._status: + self._status = self.response.status + return self._status + + @property + def headers(self): + if not self._headers: + self._headers = dict(self.response.getheaders()) + return self._headers + + @property + def reason(self): + if not self._reason: + self._reason = self.response.reason + return self._reason + + +#TODO: Move this to a better location/package +class LoggingConnection(): + """ + Debug class to log all HTTP(s) requests as they could be made + with the C{curl} command. + + @cvar log: file-like object that logs entries are written to. 
+ """ + log = None + + def _log_response(self, r): + rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r)) + ht = "" + v = r.version + if r.version == 10: + v = "HTTP/1.0" + if r.version == 11: + v = "HTTP/1.1" + ht += "%s %s %s\r\n" % (v, r.status, r.reason) + body = r.read() + for h in r.getheaders(): + ht += "%s: %s\r\n" % (h[0].title(), h[1]) + ht += "\r\n" + # this is evil. laugh with me. ha arharhrhahahaha + class fakesock: + def __init__(self, s): + self.s = s + def makefile(self, mode, foo): + return StringIO.StringIO(self.s) + rr = r + if r.chunked: + ht += "%x\r\n" % (len(body)) + ht += body + ht += "\r\n0\r\n" + else: + ht += body + rr = httplib.HTTPResponse(fakesock(ht), + method=r._method, + debuglevel=r.debuglevel) + rr.begin() + rv += ht + rv += ("\n# -------- end %d:%d response ----------\n" + % (id(self), id(r))) + return (rr, rv) + + def _log_curl(self, method, url, body, headers): + cmd = ["curl", "-i"] + + cmd.extend(["-X", pquote(method)]) + + for h in headers: + cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))]) + + # TODO: in python 2.6, body can be a file-like object. 
+ if body is not None and len(body) > 0: + cmd.extend(["--data-binary", pquote(body)]) + + cmd.extend([pquote("https://%s:%d%s" % (self.host, self.port, url))]) + return " ".join(cmd) + +class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection): + """ + Utility Class for logging HTTPS connections + """ + + def getresponse(self): + r = LibcloudHTTPSConnection.getresponse(self) + if self.log is not None: + r, rv = self._log_response(r) + self.log.write(rv + "\n") + self.log.flush() + return r + + def request(self, method, url, body=None, headers=None): + headers.update({'X-LC-Request-ID': str(id(self))}) + if self.log is not None: + pre = "# -------- begin %d request ----------\n" % id(self) + self.log.write(pre + + self._log_curl(method, url, body, headers) + "\n") + self.log.flush() + return LibcloudHTTPSConnection.request(self, method, url, body, headers) + +class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection): + """ + Utility Class for logging HTTP connections + """ + + def getresponse(self): + r = LibcloudHTTPConnection.getresponse(self) + if self.log is not None: + r, rv = self._log_response(r) + self.log.write(rv + "\n") + self.log.flush() + return r + + def request(self, method, url, body=None, headers=None): + headers.update({'X-LC-Request-ID': str(id(self))}) + if self.log is not None: + pre = "# -------- begin %d request ----------\n" % id(self) + self.log.write(pre + + self._log_curl(method, url, body, headers) + "\n") + self.log.flush() + return LibcloudHTTPConnection.request(self, method, url, + body, headers) + + +class Connection(object): + """ + A Base Connection class to derive from. 
+ """ + #conn_classes = (LoggingHTTPSConnection) + conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection) + + responseCls = Response + rawResponseCls = RawResponse + connection = None + host = '127.0.0.1' + port = 443 + secure = 1 + driver = None + action = None + + def __init__(self, secure=True, host=None, port=None, url=None): + self.secure = secure and 1 or 0 + self.ua = [] + self.context = {} + + self.request_path = '' + + if host: + self.host = host + + if port != None: + self.port = port + else: + if self.secure == 1: + self.port = 443 + else: + self.port = 80 + + if url: + (self.host, self.port, self.secure, self.request_path) = self._tuple_from_url(url) + + def set_context(self, context): + self.context = context + + def _tuple_from_url(self, url): + secure = 1 + port = None + scheme, netloc, request_path, param, query, fragment = urlparse.urlparse(url) + + if scheme not in ['http', 'https']: + raise LibcloudError('Invalid scheme: %s in url %s' % (scheme, url)) + + if scheme == "http": + secure = 0 + + if ":" in netloc: + netloc, port = netloc.rsplit(":") + port = port + + if not port: + if scheme == "http": + port = 80 + else: + port = 443 + + host = netloc + + return (host, port, secure, request_path) + + def connect(self, host=None, port=None, base_url = None): + """ + Establish a connection with the API server. 
+ + @type host: C{str} + @param host: Optional host to override our default + + @type port: C{int} + @param port: Optional port to override our default + + @returns: A connection + """ + # prefer the attribute base_url if its set or sent + connection = None + secure = self.secure + + if getattr(self, 'base_url', None) and base_url == None: + (host, port, secure, request_path) = self._tuple_from_url(self.base_url) + elif base_url != None: + (host, port, secure, request_path) = self._tuple_from_url(base_url) + else: + host = host or self.host + port = port or self.port + + kwargs = {'host': host, 'port': int(port)} + + connection = self.conn_classes[secure](**kwargs) + # You can uncoment this line, if you setup a reverse proxy server + # which proxies to your endpoint, and lets you easily capture + # connections in cleartext when you setup the proxy to do SSL + # for you + #connection = self.conn_classes[False]("127.0.0.1", 8080) + + self.connection = connection + + def _user_agent(self): + return 'libcloud/%s (%s)%s' % ( + libcloud.__version__, + self.driver.name, + "".join([" (%s)" % x for x in self.ua])) + + def user_agent_append(self, token): + """ + Append a token to a user agent string. + + Users of the library should call this to uniquely identify thier requests + to a provider. + + @type token: C{str} + @param token: Token to add to the user agent. + """ + self.ua.append(token) + + def request(self, + action, + params=None, + data='', + headers=None, + method='GET', + raw=False): + """ + Request a given `action`. + + Basically a wrapper around the connection + object's `request` that does some helpful pre-processing. + + @type action: C{str} + @param action: A path + + @type params: C{dict} + @param params: Optional mapping of additional parameters to send. If + None, leave as an empty C{dict}. + + @type data: C{unicode} + @param data: A body of data to send with the request. 
+ + @type headers: C{dict} + @param headers: Extra headers to add to the request + None, leave as an empty C{dict}. + + @type method: C{str} + @param method: An HTTP method such as "GET" or "POST". + + @type raw: C{bool} + @param raw: True to perform a "raw" request aka only send the headers + and use the rawResponseCls class. This is used with + storage API when uploading a file. + + @return: An instance of type I{responseCls} + """ + if params is None: + params = {} + if headers is None: + headers = {} + + action = self.morph_action_hook(action) + self.action = action + self.method = method + # Extend default parameters + params = self.add_default_params(params) + # Extend default headers + headers = self.add_default_headers(headers) + # We always send a user-agent header + headers.update({'User-Agent': self._user_agent()}) + + p = int(self.port) + + if p not in (80, 443): + headers.update({'Host': "%s:%d" % (self.host, p)}) + else: + headers.update({'Host': self.host}) + + # Encode data if necessary + if data != '' and data != None: + data = self.encode_data(data) + + if data is not None: + headers.update({'Content-Length': str(len(data))}) + + params, headers = self.pre_connect_hook(params, headers) + + if params: + url = '?'.join((action, urllib.urlencode(params))) + else: + url = action + + # Removed terrible hack...this a less-bad hack that doesn't execute a + # request twice, but it's still a hack. + self.connect() + try: + # @TODO: Should we just pass File object as body to request method + # instead of dealing with splitting and sending the file ourselves? 
+ if raw: + self.connection.putrequest(method, url) + + for key, value in headers.iteritems(): + self.connection.putheader(key, str(value)) + + self.connection.endheaders() + else: + self.connection.request(method=method, url=url, body=data, + headers=headers) + except ssl.SSLError, e: + raise ssl.SSLError(str(e)) + + if raw: + response = self.rawResponseCls(connection=self) + else: + response = self.responseCls(response=self.connection.getresponse(), + connection=self) + + return response + + def morph_action_hook(self, action): + return self.request_path + action + + def add_default_params(self, params): + """ + Adds default parameters (such as API key, version, etc.) + to the passed `params` + + Should return a dictionary. + """ + return params + + def add_default_headers(self, headers): + """ + Adds default headers (such as Authorization, X-Foo-Bar) + to the passed `headers` + + Should return a dictionary. + """ + return headers + + def pre_connect_hook(self, params, headers): + """ + A hook which is called before connecting to the remote server. + This hook can perform a final manipulation on the params, headers and + url parameters. + + @type params: C{dict} + @param params: Request parameters. + + @type headers: C{dict} + @param headers: Request headers. + """ + return params, headers + + def encode_data(self, data): + """ + Encode body data. + + Override in a provider's subclass. + """ + return data + +class PollingConnection(Connection): + """ + Connection class which can also work with the async APIs. + + After initial requests, this class periodically polls for jobs status and + waits until the job has finished. + If job doesn't finish in timeout seconds, an Exception thrown. + """ + poll_interval = 0.5 + timeout = 10 + request_method = 'request' + + def async_request(self, action, params=None, data='', headers=None, + method='GET', context=None): + """ + Perform an 'async' request to the specified path. 
Keep in mind that + this function is *blocking* and 'async' in this case means that the + hit URL only returns a job ID which is the periodically polled until + the job has completed. + + This function works like this: + + - Perform a request to the specified path. Response should contain a + 'job_id'. + + - Returned 'job_id' is then used to construct a URL which is used for + retrieving job status. Constructed URL is then periodically polled + until the response indicates that the job has completed or the timeout + of 'self.timeout' seconds has been reached. + + @type action: C{str} + @param action: A path + + @type params: C{dict} + @param params: Optional mapping of additional parameters to send. If + None, leave as an empty C{dict}. + + @type data: C{unicode} + @param data: A body of data to send with the request. + + @type headers: C{dict} + @param headers: Extra headers to add to the request + None, leave as an empty C{dict}. + + @type method: C{str} + @param method: An HTTP method such as "GET" or "POST". + + @type context: C{dict} + @param context: Context dictionary which is passed to the functions + which construct initial and poll URL. 
+ + @return: An instance of type I{responseCls} + """ + + request = getattr(self, self.request_method) + kwargs = self.get_request_kwargs(action=action, params=params, + data=data, headers=headers, + method=method, + context=context) + response = request(**kwargs) + kwargs = self.get_poll_request_kwargs(response=response, + context=context) + + end = time.time() + self.timeout + completed = False + while time.time() < end and not completed: + response = request(**kwargs) + completed = self.has_completed(response=response) + + if not completed: + raise LibcloudError('Job did not complete in %s seconds' % + (self.timeout)) + + return response + + def get_request_kwargs(self, action, params=None, data='', headers=None, + method='GET', context=None): + """ + Arguments which are passed to the initial request() call inside + async_request. + """ + kwargs = {'action': action, 'params': params, 'data': data, + 'headers': headers, 'method': method} + return kwargs + + def get_poll_request_kwargs(self, response, context): + """ + Return keyword arguments which are passed to the request() method when + polling for the job status. + + @param response: Response object returned by poll request. + @type response: C{HTTPResponse} + + @return C{dict} Keyword arguments + """ + raise NotImplementedError('get_poll_request_kwargs not implemented') + + def has_completed(self, response): + """ + Return job completion status. + + @param response: Response object returned by poll request. + @type response: C{HTTPResponse} + + @return C{bool} True if the job has completed, False otherwise. + """ + raise NotImplementedError('has_completed not implemented') + + +class ConnectionKey(Connection): + """ + A Base Connection class to derive from, which includes a + """ + def __init__(self, key, secure=True, host=None, port=None, url=None): + """ + Initialize `user_id` and `key`; set `secure` to an C{int} based on + passed value. 
+ """ + super(ConnectionKey, self).__init__(secure=secure, host=host, port=port, url=url) + self.key = key + +class ConnectionUserAndKey(ConnectionKey): + """ + Base connection which accepts a user_id and key + """ + + user_id = None + + def __init__(self, user_id, key, secure=True, host=None, port=None, url=None): + super(ConnectionUserAndKey, self).__init__(key, secure=secure, + host=host, port=port, url=url) + self.user_id = user_id + + +class BaseDriver(object): + """ + Base driver class from which other classes can inherit from. + """ + + connectionCls = ConnectionKey + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + api_version=None): + """ + @keyword key: API key or username to used + @type key: str + + @keyword secret: Secret password to be used + @type secret: str + + @keyword secure: Weither to use HTTPS or HTTP. Note: Some providers + only support HTTPS, and it is on by default. + @type secure: bool + + @keyword host: Override hostname used for connections. + @type host: str + + @keyword port: Override port used for connections. + @type port: int + + @keyword api_version: Optional API version. Only used by drivers + which support multiple API versions. + @type api_version: str + + """ + self.key = key + self.secret = secret + self.secure = secure + args = [self.key] + + if self.secret is not None: + args.append(self.secret) + + args.append(secure) + + if host is not None: + args.append(host) + + if port is not None: + args.append(port) + + self.connection = self.connectionCls(*args, **self._ex_connection_class_kwargs()) + + self.connection.driver = self + self.connection.connect() + + def _ex_connection_class_kwargs(self): + """ + Return extra connection keyword arguments which are passed to the + Connection class constructor. 
+ """ + return {} diff --git a/trunk/libcloud/common/cloudstack.py b/trunk/libcloud/common/cloudstack.py new file mode 100644 index 0000000000..fcf12818c8 --- /dev/null +++ b/trunk/libcloud/common/cloudstack.py @@ -0,0 +1,115 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 +import hashlib +import hmac +import time +import urllib + +from libcloud.common.base import ConnectionUserAndKey, PollingConnection +from libcloud.common.base import JsonResponse +from libcloud.common.types import MalformedResponseError + +class CloudStackResponse(JsonResponse): pass + +class CloudStackConnection(ConnectionUserAndKey, PollingConnection): + responseCls = CloudStackResponse + poll_interval = 1 + request_method = '_sync_request' + timeout = 600 + + ASYNC_PENDING = 0 + ASYNC_SUCCESS = 1 + ASYNC_FAILURE = 2 + + def _make_signature(self, params): + signature = [(k.lower(), v) for k, v in params.items()] + signature.sort(key=lambda x: x[0]) + signature = urllib.urlencode(signature) + signature = signature.lower().replace('+', '%20') + signature = hmac.new(self.key, msg=signature, digestmod=hashlib.sha1) + return base64.b64encode(signature.digest()) + + def add_default_params(self, params): + params['apiKey'] = self.user_id + params['response'] = 'json' + + return params + + def pre_connect_hook(self, params, headers): + params['signature'] = self._make_signature(params) + + return params, headers + + def _async_request(self, command, **kwargs): + context = {'command': command} + context.update(kwargs) + result = super(CloudStackConnection, self).async_request(action=None, + params=None, + data=None, + headers=None, + method=None, + context=context) + return result['jobresult'] + + def get_request_kwargs(self, action, params=None, data='', headers=None, + method='GET', context=None): + return context + + def get_poll_request_kwargs(self, response, context): + job_id = response['jobid'] + kwargs = {'command': 'queryAsyncJobResult', 'jobid': job_id} + return kwargs + + def has_completed(self, response): + status = response.get('jobstatus', self.ASYNC_PENDING) + + if status == self.ASYNC_FAILURE: + raise Exception(status) + + return status == self.ASYNC_SUCCESS + + def _sync_request(self, command, **kwargs): + """This method handles 
synchronous calls which are generally fast + information retrieval requests and thus return 'quickly'.""" + + kwargs['command'] = command + result = self.request(self.driver.path, params=kwargs) + command = command.lower() + 'response' + if command not in result.object: + raise MalformedResponseError( + "Unknown response format", + body=result.body, + driver=self.driver) + result = result.object[command] + return result + +class CloudStackDriverMixIn(object): + host = None + path = None + + connectionCls = CloudStackConnection + + def __init__(self, key, secret=None, secure=True, host=None, port=None): + host = host or self.host + super(CloudStackDriverMixIn, self).__init__(key, secret, secure, host, + port) + + def _sync_request(self, command, **kwargs): + return self.connection._sync_request(command, **kwargs) + + def _async_request(self, command, **kwargs): + return self.connection._async_request(command, **kwargs) diff --git a/trunk/libcloud/common/gandi.py b/trunk/libcloud/common/gandi.py new file mode 100644 index 0000000000..f02399fa78 --- /dev/null +++ b/trunk/libcloud/common/gandi.py @@ -0,0 +1,217 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Gandi driver base classes +""" + +import time +import hashlib +import xmlrpclib + +import libcloud +from libcloud.common.base import ConnectionKey +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, Node, \ + NodeLocation, NodeSize, NodeImage + +# Global constants +API_VERSION = '2.0' +API_PREFIX = "https://rpc.gandi.net/xmlrpc/%s/" % API_VERSION + +DEFAULT_TIMEOUT = 600 # operation pooling max seconds +DEFAULT_INTERVAL = 20 # seconds between 2 operation.info + + +class GandiException(Exception): + """ + Exception class for Gandi driver + """ + def __str__(self): + return "(%u) %s" % (self.args[0], self.args[1]) + + def __repr__(self): + return "" % (self.args[0], self.args[1]) + + +class GandiSafeTransport(xmlrpclib.SafeTransport): + pass + + +class GandiTransport(xmlrpclib.Transport): + pass + + +class GandiProxy(xmlrpclib.ServerProxy): + transportCls = (GandiTransport, GandiSafeTransport) + + def __init__(self, user_agent, verbose=0): + cls = self.transportCls[0] + if API_PREFIX.startswith("https://"): + cls = self.transportCls[1] + t = cls(use_datetime=0) + t.user_agent = user_agent + xmlrpclib.ServerProxy.__init__( + self, + uri="%s" % (API_PREFIX), + transport=t, + verbose=verbose, + allow_none=True + ) + + +class GandiConnection(ConnectionKey): + """ + Connection class for the Gandi driver + """ + + proxyCls = GandiProxy + + def __init__(self, key, password=None): + super(GandiConnection, self).__init__(key) + self.driver = BaseGandiDriver + + try: + self._proxy = self.proxyCls(self._user_agent()) + except xmlrpclib.Fault, e: + raise GandiException(1000, e) + + def request(self, method, *args): + """ Request xmlrpc method with given args""" + try: + return getattr(self._proxy, method)(self.key, *args) + except xmlrpclib.Fault, e: + raise GandiException(1001, e) + + +class BaseGandiDriver(object): + """ + Gandi base driver + + """ + connectionCls = GandiConnection + name = 'Gandi' + + def __init__(self, 
key, secret=None, secure=False): + self.key = key + self.secret = secret + self.connection = self.connectionCls(key, secret) + self.connection.driver = self + + # Specific methods for gandi + def _wait_operation(self, id, \ + timeout=DEFAULT_TIMEOUT, check_interval=DEFAULT_INTERVAL): + """ Wait for an operation to succeed""" + + for i in range(0, timeout, check_interval): + try: + op = self.connection.request('operation.info', int(id)) + + if op['step'] == 'DONE': + return True + if op['step'] in ['ERROR', 'CANCEL']: + return False + except (KeyError, IndexError): + pass + except Exception, e: + raise GandiException(1002, e) + + time.sleep(check_interval) + return False + + +class BaseObject(object): + """Base class for objects not conventional""" + + uuid_prefix = '' + + def __init__(self, id, state, driver): + self.id = str(id) if id else None + self.state = state + self.driver = driver + self.uuid = self.get_uuid() + + def get_uuid(self): + """Unique hash for this object + + @return: C{string} + + The hash is a function of an SHA1 hash of prefix, the object's ID and + its driver which means that it should be unique between all + interfaces. + TODO : to review + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> vif = driver.create_interface() + >>> vif.get_uuid() + 'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f' + + Note, for example, that this example will always produce the + same UUID! 
+ """ + return hashlib.sha1("%s:%s:%d" % \ + (self.uuid_prefix, self.id, self.driver.type)).hexdigest() + + +class IPAddress(BaseObject): + """ + Provide a common interface for ip addresses + """ + + uuid_prefix = 'inet:' + + def __init__(self, id, state, inet, driver, version=4, extra=None): + super(IPAddress, self).__init__(id, state, driver) + self.inet = inet + self.version = version + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.inet, self.state, self.driver.name)) + + +class NetworkInterface(BaseObject): + """ + Provide a common interface for network interfaces + """ + + uuid_prefix = 'if:' + + def __init__(self, id, state, mac_address, driver, + ips=None, node_id=None, extra=None): + super(NetworkInterface, self).__init__(id, state, driver) + self.mac = mac_address + self.ips = ips or {} + self.node_id = node_id + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.mac, self.state, self.driver.name)) + + +class Disk(BaseObject): + """ + Gandi disk component + """ + def __init__(self, id, state, name, driver, size, extra=None): + super(Disk, self).__init__(id, state, driver) + self.name = name + self.size = size + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.name, self.state, self.size, self.driver.name)) diff --git a/trunk/libcloud/common/gogrid.py b/trunk/libcloud/common/gogrid.py new file mode 100644 index 0000000000..38c07e5ee3 --- /dev/null +++ b/trunk/libcloud/common/gogrid.py @@ -0,0 +1,168 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import hashlib +import time + +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.common.types import MalformedResponseError +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.compute.base import NodeLocation + +HOST = 'api.gogrid.com' +PORTS_BY_SECURITY = { True: 443, False: 80 } +API_VERSION = '1.8' + +__all__ = ["GoGridResponse", + "GoGridConnection", + "GoGridIpAddress", + "BaseGoGridDriver", +] + +class GoGridResponse(JsonResponse): + + def __init__(self, *args, **kwargs): + self.driver = BaseGoGridDriver + super(GoGridResponse, self).__init__(*args, **kwargs) + + def success(self): + if self.status == 403: + raise InvalidCredsError('Invalid credentials', self.driver) + if self.status == 401: + raise InvalidCredsError('API Key has insufficient rights', self.driver) + if not self.body: + return None + try: + return self.parse_body()['status'] == 'success' + except ValueError: + raise MalformedResponseError('Malformed reply', + body=self.body, driver=self.driver) + + def parse_error(self): + try: + return self.parse_body()["list"][0]["message"] + except (ValueError, KeyError): + return None + +class GoGridConnection(ConnectionUserAndKey): + """ + Connection class for the GoGrid driver + """ + + host = HOST + responseCls = GoGridResponse + + def add_default_params(self, params): + params["api_key"] = self.user_id + params["v"] = API_VERSION + params["format"] = 'json' + params["sig"] = self.get_signature(self.user_id, self.key) + + return params + + def get_signature(self, key, secret): + """ 
create sig from md5 of key + secret + time """ + m = hashlib.md5(key+secret+str(int(time.time()))) + return m.hexdigest() + +class GoGridIpAddress(object): + """ + IP Address + """ + + def __init__(self, id, ip, public, state, subnet): + self.id = id + self.ip = ip + self.public = public + self.state = state + self.subnet = subnet + +class BaseGoGridDriver(object): + """GoGrid has common object model for services they + provide, like locations and IP, so keep handling of + these things in a single place.""" + + name = "GoGrid" + + def _get_ip(self, element): + return element.get('ip').get('ip') + + def _to_ip(self, element): + ip = GoGridIpAddress(id=element['id'], + ip=element['ip'], + public=element['public'], + subnet=element['subnet'], + state=element["state"]["name"]) + ip.location = self._to_location(element['datacenter']) + return ip + + def _to_ips(self, object): + return [ self._to_ip(el) + for el in object['list'] ] + + def _to_location(self, element): + location = NodeLocation(id=element['id'], + name=element['name'], + country="US", + driver=self.connection.driver) + return location + + def _to_locations(self, object): + return [self._to_location(el) + for el in object['list']] + + + def ex_list_ips(self, **kwargs): + """Return list of IP addresses assigned to + the account. + + @keyword public: set to True to list only + public IPs or False to list only + private IPs. 
Set to None or not specify + at all not to filter by type + @type public: C{bool} + @keyword assigned: set to True to list only addresses + assigned to servers, False to list unassigned + addresses and set to None or don't set at all + not no filter by state + @type assigned: C{bool} + @keyword location: filter IP addresses by location + @type location: L{NodeLocation} + @return: C{list} of L{GoGridIpAddress}es + """ + + params = {} + + if "public" in kwargs and kwargs["public"] is not None: + params["ip.type"] = {True: "Public", + False: "Private"}[kwargs["public"]] + if "assigned" in kwargs and kwargs["assigned"] is not None: + params["ip.state"] = {True: "Assigned", + False: "Unassigned"}[kwargs["assigned"]] + if "location" in kwargs and kwargs['location'] is not None: + params['datacenter'] = kwargs['location'].id + + ips = self._to_ips( + self.connection.request('/api/grid/ip/list', + params=params).object) + return ips + + def _get_first_ip(self, location=None): + ips = self.ex_list_ips(public=True, assigned=False, location=location) + try: + return ips[0].ip + except IndexError: + raise LibcloudError('No public unassigned IPs left', + self.driver) diff --git a/trunk/libcloud/common/linode.py b/trunk/libcloud/common/linode.py new file mode 100644 index 0000000000..eeb9eae511 --- /dev/null +++ b/trunk/libcloud/common/linode.py @@ -0,0 +1,160 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.common.base import ConnectionKey, JsonResponse +from libcloud.common.types import InvalidCredsError + +__all__ = [ + 'API_HOST', + 'API_ROOT', + 'LinodeException', + 'LinodeResponse', + 'LinodeConnection' +] + +# Endpoint for the Linode API +API_HOST = 'api.linode.com' +API_ROOT = '/' + +# Constants that map a RAM figure to a PlanID (updated 6/28/10) +LINODE_PLAN_IDS = {512:'1', + 768:'2', + 1024:'3', + 1536:'4', + 2048:'5', + 4096:'6', + 8192:'7', + 12288:'8', + 16384:'9', + 20480:'10'} + + +class LinodeException(Exception): + """Error originating from the Linode API + + This class wraps a Linode API error, a list of which is available in the + API documentation. All Linode API errors are a numeric code and a + human-readable description. + """ + def __init__(self, code, message): + self.code = code + self.message = message + self.args = (code, message) + + def __str__(self): + return "(%u) %s" % (self.code, self.message) + + def __repr__(self): + return "" % (self.code, self.message) + + +class LinodeResponse(JsonResponse): + """Linode API response + + Wraps the HTTP response returned by the Linode API, which should be JSON in + this structure: + + { + "ERRORARRAY": [ ... ], + "DATA": [ ... ], + "ACTION": " ... " + } + + libcloud does not take advantage of batching, so a response will always + reflect the above format. 
A few weird quirks are caught here as well.""" + def __init__(self, response, connection): + """Instantiate a LinodeResponse from the HTTP response + + @keyword response: The raw response returned by urllib + @return: parsed L{LinodeResponse}""" + self.body = response.read() + self.status = response.status + self.headers = dict(response.getheaders()) + self.error = response.reason + self.connection = connection + self.invalid = LinodeException(0xFF, + "Invalid JSON received from server") + + # Move parse_body() to here; we can't be sure of failure until we've + # parsed the body into JSON. + self.objects, self.errors = self.parse_body() + if not self.success(): + # Raise the first error, as there will usually only be one + raise self.errors[0] + + def parse_body(self): + """Parse the body of the response into JSON objects + + If the response chokes the parser, action and data will be returned as + None and errorarray will indicate an invalid JSON exception. + + @return: C{list} of objects and C{list} of errors""" + js = super(LinodeResponse, self).parse_body() + + try: + if isinstance(js, dict): + # solitary response - promote to list + js = [js] + ret = [] + errs = [] + for obj in js: + if ("DATA" not in obj or "ERRORARRAY" not in obj + or "ACTION" not in obj): + ret.append(None) + errs.append(self.invalid) + continue + ret.append(obj["DATA"]) + errs.extend(self._make_excp(e) for e in obj["ERRORARRAY"]) + return (ret, errs) + except: + return (None, [self.invalid]) + + def success(self): + """Check the response for success + + The way we determine success is by the presence of an error in + ERRORARRAY. If one is there, we assume the whole request failed. 
+ + @return: C{bool} indicating a successful request""" + return len(self.errors) == 0 + + def _make_excp(self, error): + """Convert an API error to a LinodeException instance + + @keyword error: JSON object containing C{ERRORCODE} and C{ERRORMESSAGE} + @type error: dict""" + if "ERRORCODE" not in error or "ERRORMESSAGE" not in error: + return None + if error["ERRORCODE"] == 4: + return InvalidCredsError(error["ERRORMESSAGE"]) + return LinodeException(error["ERRORCODE"], error["ERRORMESSAGE"]) + + +class LinodeConnection(ConnectionKey): + """A connection to the Linode API + + Wraps SSL connections to the Linode API, automagically injecting the + parameters that the API needs for each request.""" + host = API_HOST + responseCls = LinodeResponse + + def add_default_params(self, params): + """Add parameters that are necessary for every request + + This method adds C{api_key} and C{api_responseFormat} to the request.""" + params["api_key"] = self.key + # Be explicit about this in case the default changes. + params["api_responseFormat"] = "json" + return params diff --git a/trunk/libcloud/common/openstack.py b/trunk/libcloud/common/openstack.py new file mode 100644 index 0000000000..ba2f884b07 --- /dev/null +++ b/trunk/libcloud/common/openstack.py @@ -0,0 +1,296 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Common utilities for OpenStack +""" +import httplib +from urllib2 import urlparse +from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.compute.types import LibcloudError, InvalidCredsError, MalformedResponseError + +try: + import simplejson as json +except ImportError: + import json + +AUTH_API_VERSION = 'v1.0' + +__all__ = [ + "OpenStackBaseConnection", + "OpenStackAuthConnection", + ] + + +# @TODO: Refactor for re-use by other openstack drivers +class OpenStackAuthResponse(Response): + def success(self): + return True + + def parse_body(self): + if not self.body: + return None + + if 'content-type' in self.headers: + key = 'content-type' + elif 'Content-Type' in self.headers: + key = 'Content-Type' + else: + raise LibcloudError('Missing content-type header', driver=OpenStackAuthConnection) + + content_type = self.headers[key] + if content_type.find(';') != -1: + content_type = content_type.split(';')[0] + + if content_type == 'application/json': + try: + data = json.loads(self.body) + except: + raise MalformedResponseError('Failed to parse JSON', + body=self.body, + driver=OpenStackAuthConnection) + elif content_type == 'text/plain': + data = self.body + else: + data = self.body + + return data + +class OpenStackAuthConnection(ConnectionUserAndKey): + + responseCls = OpenStackAuthResponse + name = 'OpenStack Auth' + + def __init__(self, parent_conn, auth_url, auth_version, user_id, key): + self.parent_conn = parent_conn + # enable tests to use the same mock connection classes. 
+ self.conn_classes = parent_conn.conn_classes + + super(OpenStackAuthConnection, self).__init__( + user_id, key, url=auth_url) + + self.auth_version = auth_version + self.auth_url = auth_url + self.urls = {} + self.driver = self.parent_conn.driver + + def add_default_headers(self, headers): + headers['Accept'] = 'application/json' + headers['Content-Type'] = 'application/json; charset=UTF-8' + return headers + + def authenticate(self): + if self.auth_version == "1.0": + return self.authenticate_1_0() + elif self.auth_version == "1.1": + return self.authenticate_1_1() + elif self.auth_version == "2.0": + return self.authenticate_2_0() + else: + raise LibcloudError('Unsupported Auth Version requested') + + def authenticate_1_0(self): + resp = self.request("/", + headers={ + 'X-Auth-User': self.user_id, + 'X-Auth-Key': self.key, + }, + method='GET') + + if resp.status == httplib.UNAUTHORIZED: + # HTTP UNAUTHORIZED (401): auth failed + raise InvalidCredsError() + elif resp.status != httplib.NO_CONTENT: + raise MalformedResponseError('Malformed response', + body='code: %s body:%s headers:%s' % (resp.status, resp.body, resp.headers), + driver=self.driver) + else: + headers = resp.headers + # emulate the auth 1.1 URL list + self.urls = {} + self.urls['cloudServers'] = [{'publicURL': headers.get('x-server-management-url', None)}] + self.urls['cloudFilesCDN'] = [{'publicURL': headers.get('x-cdn-management-url', None)}] + self.urls['cloudFiles'] = [{'publicURL': headers.get('x-storage-url', None)}] + self.auth_token = headers.get('x-auth-token', None) + + if not self.auth_token: + raise MalformedResponseError('Missing X-Auth-Token in response headers') + + def authenticate_1_1(self): + reqbody = json.dumps({'credentials': {'username': self.user_id, 'key': self.key}}) + resp = self.request("/auth", + data=reqbody, + headers={}, + method='POST') + + if resp.status == httplib.UNAUTHORIZED: + # HTTP UNAUTHORIZED (401): auth failed + raise InvalidCredsError() + elif resp.status 
!= httplib.OK: + raise MalformedResponseError('Malformed response', + body='code: %s body:%s' % (resp.status, resp.body), + driver=self.driver) + else: + try: + body = json.loads(resp.body) + except Exception, e: + raise MalformedResponseError('Failed to parse JSON', e) + try: + self.auth_token = body['auth']['token']['id'] + self.urls = body['auth']['serviceCatalog'] + except KeyError, e: + raise MalformedResponseError('Auth JSON response is missing required elements', e) + + # 'keystone' - http://docs.openstack.org/api/openstack-identity-service/2.0/content/Identity-Service-Concepts-e1362.html + def authenticate_2_0(self): + reqbody = json.dumps({'auth':{'passwordCredentials':{'username':self.user_id, 'password':self.key}}}) + resp = self.request('tokens/', + data=reqbody, + headers={'Content-Type':'application/json'}, + method='POST') + if resp.status == httplib.UNAUTHORIZED: + raise InvalidCredsError() + elif resp.status not in [httplib.OK, httplib.NON_AUTHORITATIVE_INFORMATION]: + raise MalformedResponseError('Malformed response', + body='code: %s body: %s' % (resp.status, resp.body), + driver=self.driver) + else: + try: + body = json.loads(resp.body) + except Exception, e: + raise MalformedResponseError('Failed to parse JSON', e) + try: + self.auth_token = body['access']['token']['id'] + self.urls = body['access']['serviceCatalog'] + except KeyError, e: + raise MalformedResponseError('Auth JSON response is missing required elements', e) + +class OpenStackBaseConnection(ConnectionUserAndKey): + + auth_url = None + + def __init__(self, user_id, key, secure=True, + host=None, port=None, + ex_force_base_url=None, + ex_force_auth_url=None, + ex_force_auth_version=None): + self.server_url = None + self.cdn_management_url = None + self.storage_url = None + self.lb_url = None + self.auth_token = None + self._force_base_url = ex_force_base_url + self._ex_force_auth_url = ex_force_auth_url + self._auth_version = ex_force_auth_version + + if not self._auth_version: + 
self._auth_version = '1.1' + + super(OpenStackBaseConnection, self).__init__( + user_id, key, secure=secure) + + def add_default_headers(self, headers): + headers['X-Auth-Token'] = self.auth_token + headers['Accept'] = self.accept_format + return headers + + def morph_action_hook(self, action): + if self._force_base_url: + _, _, request_path, _, _, _ = urlparse.urlparse(self._force_base_url) + return request_path + action + + value = getattr(self, self._url_key, None) + if not value: + self._populate_hosts_and_request_paths() + request_path = getattr(self, '__request_path_%s' % (self._url_key), '') + action = request_path + action + + return action + + @property + def base_url(self): + return self._get_base_url(url_key=self._url_key) + + def _get_base_url(self, url_key): + value = getattr(self, url_key, None) + if not value: + self._populate_hosts_and_request_paths() + value = getattr(self, url_key, None) + if self._force_base_url != None: + value = self._force_base_url + return value + + def _get_default_region(self, arr): + if len(arr): + return arr[0]['publicURL'] + return None + + def request(self, **kwargs): + self._populate_hosts_and_request_paths() + return super(OpenStackBaseConnection, self).request(**kwargs) + + def _populate_hosts_and_request_paths(self): + """ + OpenStack uses a separate host for API calls which is only provided + after an initial authentication request. If we haven't made that + request yet, do it here. Otherwise, just return the management host. 
+ """ + if not self.auth_token: + aurl = self.auth_url + + if self._ex_force_auth_url != None: + aurl = self._ex_force_auth_url + + if aurl == None: + raise LibcloudError('OpenStack instance must have auth_url set') + + osa = OpenStackAuthConnection(self, aurl, self._auth_version, self.user_id, self.key) + + # may throw InvalidCreds, etc + osa.authenticate() + + self.auth_token = osa.auth_token + + # TODO: Multi-region support + if self._auth_version == '2.0': + for service in osa.urls: + if service.get('type') == 'compute': + self.server_url = self._get_default_region(service.get('endpoints', [])) + elif self._auth_version in ['1.1', '1.0']: + self.server_url = self._get_default_region(osa.urls.get('cloudServers', [])) + self.cdn_management_url = self._get_default_region(osa.urls.get('cloudFilesCDN', [])) + self.storage_url = self._get_default_region(osa.urls.get('cloudFiles', [])) + # TODO: this is even more broken, the service catalog does NOT show load + # balanacers :( You must hard code in the Rackspace Load balancer URLs... 
+ self.lb_url = self.server_url.replace("servers", "ord.loadbalancers") + self.dns_url = self.server_url.replace("servers", "dns") + else: + raise LibcloudError('auth version "%s" not supported' % (self._auth_version)) + + for key in ['server_url', 'storage_url', 'cdn_management_url', + 'lb_url', 'dns_url']: + base_url = None + if self._force_base_url != None: + base_url = self._force_base_url + else: + base_url = getattr(self, key) + + scheme, server, request_path, param, query, fragment = ( + urlparse.urlparse(base_url)) + # Set host to where we want to make further requests to + setattr(self, '__%s' % (key), server+request_path) + setattr(self, '__request_path_%s' % (key), request_path) + + (self.host, self.port, self.secure, self.request_path) = self._tuple_from_url(self.base_url) diff --git a/trunk/libcloud/common/rackspace.py b/trunk/libcloud/common/rackspace.py new file mode 100644 index 0000000000..d0802d74b8 --- /dev/null +++ b/trunk/libcloud/common/rackspace.py @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Common settings for Rackspace Cloud Servers and Cloud Files +""" + +AUTH_URL_US = 'https://auth.api.rackspacecloud.com/v1.1/' +AUTH_URL_UK = 'https://lon.auth.api.rackspacecloud.com/v1.1/' + +__all__ = [ + "AUTH_URL_US", + "AUTH_URL_UK", + ] diff --git a/trunk/libcloud/common/types.py b/trunk/libcloud/common/types.py new file mode 100644 index 0000000000..17e7b84661 --- /dev/null +++ b/trunk/libcloud/common/types.py @@ -0,0 +1,114 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + "LibcloudError", + "MalformedResponseError", + "InvalidCredsError", + "InvalidCredsException", + "LazyList" + ] + + +class LibcloudError(Exception): + """The base class for other libcloud exceptions""" + + def __init__(self, value, driver=None): + self.value = value + self.driver = driver + + def __str__(self): + return ("") + + +class MalformedResponseError(LibcloudError): + """Exception for the cases when a provider returns a malformed + response, e.g. you request JSON and provider returns + '
<h3>something</h3>
' due to some error on their side.""" + + def __init__(self, value, body=None, driver=None): + self.value = value + self.driver = driver + self.body = body + + def __str__(self): + return (": " + + repr(self.body)) + + +class InvalidCredsError(LibcloudError): + """Exception used when invalid credentials are used on a provider.""" + + def __init__(self, value='Invalid credentials with the provider', + driver=None): + self.value = value + self.driver = driver + + def __str__(self): + return repr(self.value) + + +# Deprecated alias of L{InvalidCredsError} +InvalidCredsException = InvalidCredsError + + +class LazyList(object): + + def __init__(self, get_more, value_dict=None): + self._data = [] + self._last_key = None + self._exhausted = False + self._all_loaded = False + self._get_more = get_more + self._value_dict = value_dict or {} + + def __iter__(self): + if not self._all_loaded: + self._load_all() + + data = self._data + for i in data: + yield i + + def __getitem__(self, index): + if index >= len(self._data) and not self._all_loaded: + self._load_all() + + return self._data[index] + + def __len__(self): + self._load_all() + return len(self._data) + + def __repr__(self): + self._load_all() + repr_string = ', ' .join([repr(item) for item in self._data]) + repr_string = '[%s]' % (repr_string) + return repr_string + + def _load_all(self): + while not self._exhausted: + newdata, self._last_key, self._exhausted = \ + self._get_more(last_key=self._last_key, + value_dict=self._value_dict) + self._data.extend(newdata) + self._all_loaded = True diff --git a/trunk/libcloud/compute/__init__.py b/trunk/libcloud/compute/__init__.py new file mode 100644 index 0000000000..6d0970a00c --- /dev/null +++ b/trunk/libcloud/compute/__init__.py @@ -0,0 +1,3 @@ +""" +Module for working with Cloud Servers +""" diff --git a/trunk/libcloud/compute/base.py b/trunk/libcloud/compute/base.py new file mode 100644 index 0000000000..b31896e785 --- /dev/null +++ b/trunk/libcloud/compute/base.py @@ 
-0,0 +1,691 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides base classes for working with drivers +""" + +import time +import hashlib +import os +import socket +import struct + +import libcloud.compute.ssh +from libcloud.pricing import get_size_price +from libcloud.compute.types import NodeState, DeploymentError +from libcloud.compute.ssh import SSHClient + +# @@TR: are the imports below part of the public api for this +# module? They aren't used in here ... +from libcloud.common.base import ConnectionKey, ConnectionUserAndKey +from libcloud.common.base import BaseDriver +from libcloud.httplib_ssl import LibcloudHTTPSConnection +from libcloud.common.base import LibcloudHTTPConnection +from libcloud.common.types import LibcloudError + + +# How long to wait for the node to come online after creating it +NODE_ONLINE_WAIT_TIMEOUT = 10 * 60 + +# How long to try connecting to a remote SSH server when running a deployment +# script. +SSH_CONNECT_TIMEOUT = 5 * 60 + + +__all__ = [ + "Node", + "NodeState", + "NodeSize", + "NodeImage", + "NodeLocation", + "NodeAuthSSHKey", + "NodeAuthPassword", + "NodeDriver", + + # @@TR: do the following need exporting? 
+ "ConnectionKey", + "ConnectionUserAndKey", + "LibcloudHTTPSConnection", + "LibcloudHTTPConnection" + ] + + +class Node(object): + """ + Provide a common interface for handling nodes of all types. + + The Node object provides the interface in libcloud through which + we can manipulate nodes in different cloud providers in the same + way. Node objects don't actually do much directly themselves, + instead the node driver handles the connection to the node. + + You don't normally create a node object yourself; instead you use + a driver and then have that create the node for you. + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> node = driver.create_node() + >>> node.public_ip[0] + '127.0.0.3' + >>> node.name + 'dummy-3' + + You can also get nodes from the driver's list_node function. + + >>> node = driver.list_nodes()[0] + >>> node.name + 'dummy-1' + + the node keeps a reference to its own driver which means that we + can work on nodes from different providers without having to know + which is which. + + >>> driver = DummyNodeDriver(72) + >>> node2 = driver.create_node() + >>> node.driver.creds + 0 + >>> node2.driver.creds + 72 + + Althrough Node objects can be subclassed, this isn't normally + done. Instead, any driver specific information is stored in the + "extra" proproperty of the node. + + >>> node.extra + {'foo': 'bar'} + + """ + + def __init__(self, id, name, state, public_ip, private_ip, + driver, size=None, image=None, extra=None): + self.id = str(id) if id else None + self.name = name + self.state = state + self.public_ip = public_ip + self.private_ip = private_ip + self.driver = driver + self.uuid = self.get_uuid() + self.size = size + self.image = image + self.extra = extra or {} + + def get_uuid(self): + """Unique hash for this node + + @return: C{string} + + The hash is a function of an SHA1 hash of the node's ID and + its driver which means that it should be unique between all + nodes. 
In some subclasses (e.g. GoGrid) there is no ID + available so the public IP address is used. This means that, + unlike a properly done system UUID, the same UUID may mean a + different system install at a different time + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> node = driver.create_node() + >>> node.get_uuid() + 'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f' + + Note, for example, that this example will always produce the + same UUID! + """ + return hashlib.sha1("%s:%d" % (self.id, self.driver.type)).hexdigest() + + def reboot(self): + """Reboot this node + + @return: C{bool} + + This calls the node's driver and reboots the node + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> node = driver.create_node() + >>> from libcloud.compute.types import NodeState + >>> node.state == NodeState.RUNNING + True + >>> node.state == NodeState.REBOOTING + False + >>> node.reboot() + True + >>> node.state == NodeState.REBOOTING + True + """ + return self.driver.reboot_node(self) + + def destroy(self): + """Destroy this node + + @return: C{bool} + + This calls the node's driver and destroys the node + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> from libcloud.compute.types import NodeState + >>> node = driver.create_node() + >>> node.state == NodeState.RUNNING + True + >>> node.destroy() + True + >>> node.state == NodeState.RUNNING + False + + """ + return self.driver.destroy_node(self) + + def __repr__(self): + return (('') + % (self.uuid, self.name, self.state, self.public_ip, + self.driver.name)) + + +class NodeSize(object): + """ + A Base NodeSize class to derive from. + + NodeSizes are objects which are typically returned a driver's + list_sizes function. They contain a number of different + parameters which define how big an image is. + + The exact parameters available depends on the provider. 
+ + N.B. Where a parameter is "unlimited" (for example bandwidth in + Amazon) this will be given as 0. + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> size = driver.list_sizes()[0] + >>> size.ram + 128 + >>> size.bandwidth + 500 + >>> size.price + 4 + """ + + def __init__(self, id, name, ram, disk, bandwidth, price, driver): + self.id = str(id) + self.name = name + self.ram = ram + self.disk = disk + self.bandwidth = bandwidth + self.price = price + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.ram, self.disk, self.bandwidth, + self.price, self.driver.name)) + + +class NodeImage(object): + """ + An operating system image. + + NodeImage objects are typically returned by the driver for the + cloud provider in response to the list_images function + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> image = driver.list_images()[0] + >>> image.name + 'Ubuntu 9.10' + + Apart from name and id, there is no further standard information; + other parameters are stored in a driver specific "extra" variable + + When creating a node, a node image should be given as an argument + to the create_node function to decide which OS image to use. + + >>> node = driver.create_node(image=image) + + """ + + def __init__(self, id, name, driver, extra=None): + self.id = str(id) + self.name = name + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.name, self.driver.name)) + + +class NodeLocation(object): + """ + A physical location where nodes can be. 
+ + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> location = driver.list_locations()[0] + >>> location.country + 'US' + """ + + def __init__(self, id, name, country, driver): + self.id = str(id) + self.name = name + self.country = country + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.country, self.driver.name)) + + +class NodeAuthSSHKey(object): + """ + An SSH key to be installed for authentication to a node. + + This is the actual contents of the users ssh public key which will + normally be installed as root's public key on the node. + + >>> pubkey = '...' # read from file + >>> from libcloud.compute.base import NodeAuthSSHKey + >>> k = NodeAuthSSHKey(pubkey) + >>> k + + """ + + def __init__(self, pubkey): + self.pubkey = pubkey + + def __repr__(self): + return '' + + +class NodeAuthPassword(object): + """ + A password to be used for authentication to a node. + """ + def __init__(self, password): + self.password = password + + def __repr__(self): + return '' + + +class NodeDriver(BaseDriver): + """ + A base NodeDriver class to derive from + + This class is always subclassed by a specific driver. For + examples of base behavior of most functions (except deploy node) + see the dummy driver. + + """ + + connectionCls = ConnectionKey + name = None + type = None + port = None + features = {"create_node": []} + """ + List of available features for a driver. + - L{create_node} + - ssh_key: Supports L{NodeAuthSSHKey} as an authentication method + for nodes. + - password: Supports L{NodeAuthPassword} as an authentication + method for nodes. + - generates_password: Returns a password attribute on the Node + object returned from creation. 
+ """ + + NODE_STATE_MAP = {} + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + api_version=None): + super(NodeDriver, self).__init__(key=key, secret=secret, secure=secure, + host=host, port=port, + api_version=api_version) + + def create_node(self, **kwargs): + """Create a new node instance. + + @keyword name: String with a name for this new node (required) + @type name: str + + @keyword size: The size of resources allocated to this node. + (required) + @type size: L{NodeSize} + + @keyword image: OS Image to boot on node. (required) + @type image: L{NodeImage} + + @keyword location: Which data center to create a node in. If empty, + undefined behavoir will be selected. (optional) + @type location: L{NodeLocation} + + @keyword auth: Initial authentication information for the node + (optional) + @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + + @return: The newly created L{Node}. + """ + raise NotImplementedError( + 'create_node not implemented for this driver') + + def destroy_node(self, node): + """Destroy a node. + + Depending upon the provider, this may destroy all data associated with + the node, including backups. + + @return: C{bool} True if the destroy was successful, otherwise False + """ + raise NotImplementedError( + 'destroy_node not implemented for this driver') + + def reboot_node(self, node): + """ + Reboot a node. 
+ @return: C{bool} True if the reboot was successful, otherwise False + """ + raise NotImplementedError( + 'reboot_node not implemented for this driver') + + def list_nodes(self): + """ + List all nodes + @return: C{list} of L{Node} objects + """ + raise NotImplementedError( + 'list_nodes not implemented for this driver') + + def list_images(self, location=None): + """ + List images on a provider + @return: C{list} of L{NodeImage} objects + """ + raise NotImplementedError( + 'list_images not implemented for this driver') + + def list_sizes(self, location=None): + """ + List sizes on a provider + @return: C{list} of L{NodeSize} objects + """ + raise NotImplementedError( + 'list_sizes not implemented for this driver') + + def list_locations(self): + """ + List data centers for a provider + @return: C{list} of L{NodeLocation} objects + """ + raise NotImplementedError( + 'list_locations not implemented for this driver') + + def deploy_node(self, **kwargs): + """ + Create a new node, and start deployment. + + Depends on a Provider Driver supporting either using a specific + password or returning a generated password. + + This function may raise a L{DeploymentException}, if a create_node + call was successful, but there is a later error (like SSH failing or + timing out). This exception includes a Node object which you may want + to destroy if incomplete deployments are not desirable. + + @keyword deploy: Deployment to run once machine is online and + availble to SSH. 
+ @type deploy: L{Deployment} + + @keyword ssh_username: Optional name of the account which is used + when connecting to + SSH server (default is root) + @type ssh_username: C{str} + + @keyword ssh_port: Optional SSH server port (default is 22) + @type ssh_port: C{int} + + @keyword ssh_timeout: Optional SSH connection timeout in seconds + (default is None) + @type ssh_timeout: C{float} + + @keyword auth: Initial authentication information for the node + (optional) + @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + + @keyword ssh_key: A path (or paths) to an SSH private key with which + to attempt to authenticate. (optional) + @type ssh_key: C{string} or C{list} of C{string}s + + See L{NodeDriver.create_node} for more keyword args. + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> from libcloud.compute.deployment import ScriptDeployment + >>> from libcloud.compute.deployment import MultiStepDeployment + >>> from libcloud.compute.base import NodeAuthSSHKey + >>> driver = DummyNodeDriver(0) + >>> key = NodeAuthSSHKey('...') # read from file + >>> script = ScriptDeployment("yum -y install emacs strace tcpdump") + >>> msd = MultiStepDeployment([key, script]) + >>> def d(): + ... try: + ... node = driver.deploy_node(deploy=msd) + ... except NotImplementedError: + ... print "not implemented for dummy driver" + >>> d() + not implemented for dummy driver + + Deploy node is typically not overridden in subclasses. The + existing implementation should be able to handle most such. + """ + if not libcloud.compute.ssh.have_paramiko: + raise RuntimeError('paramiko is not installed. 
You can install ' + + 'it using pip: pip install paramiko') + + password = None + + if 'create_node' not in self.features: + raise NotImplementedError( + 'deploy_node not implemented for this driver') + elif 'generates_password' not in self.features["create_node"]: + if 'password' not in self.features["create_node"] and \ + 'ssh_key' not in self.features["create_node"]: + raise NotImplementedError( + 'deploy_node not implemented for this driver') + + if 'auth' not in kwargs: + kwargs['auth'] = NodeAuthPassword(os.urandom(16).encode('hex')) + + if 'ssh_key' not in kwargs: + password = kwargs['auth'].password + + node = self.create_node(**kwargs) + + if 'generates_password' in self.features['create_node']: + password = node.extra.get('password') + + try: + # Wait until node is up and running and has public IP assigned + node = self._wait_until_running(node=node, wait_period=3, + timeout=NODE_ONLINE_WAIT_TIMEOUT) + + ssh_username = kwargs.get('ssh_username', 'root') + ssh_port = kwargs.get('ssh_port', 22) + ssh_timeout = kwargs.get('ssh_timeout', 10) + ssh_key_file = kwargs.get('ssh_key', None) + + ssh_client = SSHClient(hostname=node.public_ip[0], + port=ssh_port, username=ssh_username, + password=password, + key=ssh_key_file, + timeout=ssh_timeout) + + # Connect to the SSH server running on the node + ssh_client = self._ssh_client_connect(ssh_client=ssh_client, + timeout=SSH_CONNECT_TIMEOUT) + + # Execute the deployment task + self._run_deployment_script(task=kwargs['deploy'], + node=node, + ssh_client=ssh_client, + max_tries=3) + except Exception, e: + raise DeploymentError(node, e) + return node + + def _wait_until_running(self, node, wait_period=3, timeout=600): + """ + Block until node is fully booted and has an IP address assigned. + + @keyword node: Node instance. 
+ @type node: C{Node} + + @keyword wait_period: How many seconds to between each loop + iteration (default is 3) + @type wait_period: C{int} + + @keyword timeout: How many seconds to wait before timing out + (default is 600) + @type timeout: C{int} + + @return: C{Node} Node instance on success. + """ + start = time.time() + end = start + timeout + + while time.time() < end: + nodes = self.list_nodes() + nodes = filter(lambda n: n.uuid == node.uuid, nodes) + + if len(nodes) == 0: + raise LibcloudError(value=('Booted node[%s] ' % node + + 'is missing from list_nodes.'), + driver=self) + + if len(nodes) > 1: + raise LibcloudError(value=('Booted single node[%s], ' % node + + 'but multiple nodes have same UUID'), + driver=self) + + node = nodes[0] + + if (node.public_ip and node.state == NodeState.RUNNING): + return node + else: + time.sleep(wait_period) + continue + + raise LibcloudError(value='Timed out after %s seconds' % (timeout), + driver=self) + + def _ssh_client_connect(self, ssh_client, timeout=300): + """ + Try to connect to the remote SSH server. If a connection times out or + is refused it is retried up to timeout number of seconds. + + @keyword ssh_client: A configured SSHClient instance + @type ssh_client: C{SSHClient} + + @keyword timeout: How many seconds to wait before timing out + (default is 600) + @type timeout: C{int} + + @return: C{SSHClient} on success + """ + start = time.time() + end = start + timeout + + while time.time() < end: + try: + ssh_client.connect() + except (IOError, socket.gaierror, socket.error): + # Retry if a connection is refused or timeout + # occurred + ssh_client.close() + continue + else: + return ssh_client + + raise LibcloudError(value='Could not connect to the remote SSH ' + + 'server. Giving up.', driver=self) + + def _run_deployment_script(self, task, node, ssh_client, max_tries=3): + """ + Run the deployment script on the provided node. At this point it is + assumed that SSH connection has already been established. 
+ + @keyword task: Deployment task to run on the node. + @type task: C{Deployment} + + @keyword node: Node to operate one + @type node: C{Node} + + @keyword ssh_client: A configured and connected SSHClient instance + @type ssh_client: C{SSHClient} + + @keyword max_tries: How many times to retry if a deployment fails + before giving up (default is 3) + @type max_tries: C{int} + + @return: C{Node} Node instance on success. + """ + tries = 0 + while tries < max_tries: + try: + node = task.run(node, ssh_client) + except Exception: + tries += 1 + if tries >= max_tries: + raise LibcloudError(value='Failed after %d tries' + % (max_tries), driver=self) + else: + ssh_client.close() + return node + + def _get_size_price(self, size_id): + return get_size_price(driver_type='compute', + driver_name=self.api_name, + size_id=size_id) + + +def is_private_subnet(ip): + """ + Utility function to check if an IP address is inside a private subnet. + + @type ip: C{str} + @keyword ip: IP address to check + + @return: C{bool} if the specified IP address is private. + """ + priv_subnets = [{'subnet': '10.0.0.0', 'mask': '255.0.0.0'}, + {'subnet': '172.16.0.0', 'mask': '255.240.0.0'}, + {'subnet': '192.168.0.0', 'mask': '255.255.0.0'}] + + ip = struct.unpack('I', socket.inet_aton(ip))[0] + + for network in priv_subnets: + subnet = struct.unpack('I', socket.inet_aton(network['subnet']))[0] + mask = struct.unpack('I', socket.inet_aton(network['mask']))[0] + + if (ip & mask) == (subnet & mask): + return True + + return False + + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/trunk/libcloud/compute/deployment.py b/trunk/libcloud/compute/deployment.py new file mode 100644 index 0000000000..6f26b0b16f --- /dev/null +++ b/trunk/libcloud/compute/deployment.py @@ -0,0 +1,145 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides generic deployment steps for machines post boot. +""" +import os + +class Deployment(object): + """ + Base class for deployment tasks. + """ + + def run(self, node, client): + """ + Runs this deployment task on C{node} using the C{client} provided. + + @type node: L{Node} + @keyword node: Node to operate one + + @type client: L{BaseSSHClient} + @keyword client: Connected SSH client to use. + + @return: L{Node} + """ + raise NotImplementedError, \ + 'run not implemented for this deployment' + + def _get_string_value(self, argument_name, argument_value): + if not isinstance(argument_value, basestring) and \ + not hasattr(argument_value, 'read'): + raise TypeError('%s argument must be a string or a file-like ' + 'object' % (argument_name)) + + if hasattr(argument_value, 'read'): + argument_value = argument_value.read() + + return argument_value + + +class SSHKeyDeployment(Deployment): + """ + Installs a public SSH Key onto a host. 
+ """ + + def __init__(self, key): + """ + @type key: C{str} + @keyword key: Contents of the public key write + """ + self.key = self._get_string_value(argument_name='key', + argument_value=key) + + def run(self, node, client): + """ + Installs SSH key into C{.ssh/authorized_keys} + + See also L{Deployment.run} + """ + client.put(".ssh/authorized_keys", contents=self.key) + return node + +class ScriptDeployment(Deployment): + """ + Runs an arbitrary Shell Script task. + """ + + def __init__(self, script, name=None, delete=False): + """ + @type script: C{str} + @keyword script: Contents of the script to run + + @type name: C{str} + @keyword name: Name of the script to upload it as, if not specified, a random name will be choosen. + + @type delete: C{bool} + @keyword delete: Whether to delete the script on completion. + """ + script = self._get_string_value(argument_name='script', + argument_value=script) + + self.script = script + self.stdout = None + self.stderr = None + self.exit_status = None + self.delete = delete + self.name = name + if self.name is None: + self.name = "/root/deployment_%s.sh" % (os.urandom(4).encode('hex')) + + def run(self, node, client): + """ + Uploads the shell script and then executes it. + + See also L{Deployment.run} + """ + client.put(path=self.name, chmod=0755, contents=self.script) + self.stdout, self.stderr, self.exit_status = client.run(self.name) + if self.delete: + client.delete(self.name) + return node + +class MultiStepDeployment(Deployment): + """ + Runs a chain of Deployment steps. + """ + def __init__(self, add = None): + """ + @type add: C{list} + @keyword add: Deployment steps to add. + """ + self.steps = [] + self.add(add) + + def add(self, add): + """Add a deployment to this chain. + + @type add: Single L{Deployment} or a C{list} of L{Deployment} + @keyword add: Adds this deployment to the others already in this object. 
+ """ + if add is not None: + add = add if isinstance(add, (list, tuple)) else [add] + self.steps.extend(add) + + def run(self, node, client): + """ + Run each deployment that has been added. + + See also L{Deployment.run} + """ + for s in self.steps: + node = s.run(node, client) + return node diff --git a/trunk/libcloud/compute/drivers/__init__.py b/trunk/libcloud/compute/drivers/__init__.py new file mode 100644 index 0000000000..1f16cf4d53 --- /dev/null +++ b/trunk/libcloud/compute/drivers/__init__.py @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Drivers for working with different providers +""" + +__all__ = [ + 'brightbox', + 'bluebox', + 'dummy', + 'ec2', + 'ecp', + 'elasticstack', + 'elastichosts', + 'cloudsigma', + 'gogrid', + 'ibm_sbc', + 'linode', + 'opennebula', + 'rackspace', + 'rimuhosting', + 'slicehost', + 'softlayer', + 'vcloud', + 'voxel', + 'vpsnet', +] diff --git a/trunk/libcloud/compute/drivers/bluebox.py b/trunk/libcloud/compute/drivers/bluebox.py new file mode 100644 index 0000000000..21b7fcdaf6 --- /dev/null +++ b/trunk/libcloud/compute/drivers/bluebox.py @@ -0,0 +1,223 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +libcloud driver for the Blue Box Blocks API + +This driver implements all libcloud functionality for the Blue Box Blocks API. + +Blue Box home page http://bluebox.net +Blue Box API documentation https://boxpanel.bluebox.net/public/the_vault/index.php/Blocks_API +""" + +import copy +import urllib +import base64 + +from libcloud.common.base import JsonResponse, ConnectionUserAndKey +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState, InvalidCredsError +from libcloud.common.types import MalformedResponseError +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation +from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey + +# Current end point for Blue Box API. +BLUEBOX_API_HOST = "boxpanel.bluebox.net" + +# The API doesn't currently expose all of the required values for libcloud, +# so we simply list what's available right now, along with all of the various +# attributes that are needed by libcloud. 
+BLUEBOX_INSTANCE_TYPES = { + '1gb': { + 'id': '94fd37a7-2606-47f7-84d5-9000deda52ae', + 'name': 'Block 1GB Virtual Server', + 'ram': 1024, + 'disk': 20, + 'cpu': 0.5 + }, + '2gb': { + 'id': 'b412f354-5056-4bf0-a42f-6ddd998aa092', + 'name': 'Block 2GB Virtual Server', + 'ram': 2048, + 'disk': 25, + 'cpu': 1 + }, + '4gb': { + 'id': '0cd183d3-0287-4b1a-8288-b3ea8302ed58', + 'name': 'Block 4GB Virtual Server', + 'ram': 4096, + 'disk': 50, + 'cpu': 2 + }, + '8gb': { + 'id': 'b9b87a5b-2885-4a2e-b434-44a163ca6251', + 'name': 'Block 8GB Virtual Server', + 'ram': 8192, + 'disk': 100, + 'cpu': 4 + } +} + +RAM_PER_CPU = 2048 + +NODE_STATE_MAP = { 'queued': NodeState.PENDING, + 'building': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'error': NodeState.TERMINATED, + 'unknown': NodeState.UNKNOWN } + +class BlueboxResponse(JsonResponse): + def parse_error(self): + if int(self.status) == 401: + if not self.body: + raise InvalidCredsError(str(self.status) + ': ' + self.error) + else: + raise InvalidCredsError(self.body) + return self.body + +class BlueboxNodeSize(NodeSize): + def __init__(self, id, name, cpu, ram, disk, price, driver): + self.id = id + self.name = name + self.cpu = cpu + self.ram = ram + self.disk = disk + self.price = price + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.cpu, self.ram, self.disk, self.price, self.driver.name)) + +class BlueboxConnection(ConnectionUserAndKey): + """ + Connection class for the Bluebox driver + """ + + host = BLUEBOX_API_HOST + secure = True + responseCls = BlueboxResponse + + def add_default_headers(self, headers): + user_b64 = base64.b64encode('%s:%s' % (self.user_id, self.key)) + headers['Authorization'] = 'Basic %s' % (user_b64) + return headers + +class BlueboxNodeDriver(NodeDriver): + """ + Bluebox Blocks node driver + """ + + connectionCls = BlueboxConnection + type = Provider.BLUEBOX + api_name = 'bluebox' + name = 'Bluebox Blocks' + + def list_nodes(self): + result = 
self.connection.request('/api/blocks.json') + return [self._to_node(i) for i in result.object] + + def list_sizes(self, location=None): + sizes = [] + for key, values in BLUEBOX_INSTANCE_TYPES.iteritems(): + attributes = copy.deepcopy(values) + attributes.update({ 'price': self._get_size_price(size_id=key) }) + sizes.append(BlueboxNodeSize(driver=self.connection.driver, + **attributes)) + + return sizes + + def list_images(self, location=None): + result = self.connection.request('/api/block_templates.json') + images = [] + for image in result.object: + images.extend([self._to_image(image)]) + + return images + + def create_node(self, **kwargs): + headers = { 'Content-Type': 'application/x-www-form-urlencoded' } + size = kwargs["size"] + + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + + try: + auth = kwargs['auth'] + except Exception: + raise Exception("SSH public key or password required.") + + data = { + 'hostname': name, + 'product': size.id, + 'template': image.id + } + + ssh = None + password = None + + if isinstance(auth, NodeAuthSSHKey): + ssh = auth.pubkey + data.update(ssh_public_key=ssh) + elif isinstance(auth, NodeAuthPassword): + password = auth.password + data.update(password=password) + + if "ex_username" in kwargs: + data.update(username=kwargs["ex_username"]) + + if not ssh and not password: + raise Exception("SSH public key or password required.") + + params = urllib.urlencode(data) + result = self.connection.request('/api/blocks.json', headers=headers, data=params, method='POST') + node = self._to_node(result.object) + return node + + def destroy_node(self, node): + """ + Destroy node by passing in the node object + """ + url = '/api/blocks/%s.json' % (node.id) + result = self.connection.request(url, method='DELETE') + + return result.status == 200 + + def list_locations(self): + return [NodeLocation(0, "Blue Box Seattle US", 'US', self)] + + def reboot_node(self, node): + url = '/api/blocks/%s/reboot.json' % (node.id) + 
result = self.connection.request(url, method="PUT") + return result.status == 200 + + def _to_node(self, vm): + state = NODE_STATE_MAP[vm.get('status', NodeState.UNKNOWN)] + n = Node(id=vm['id'], + name=vm['hostname'], + state=state, + public_ip=[ ip['address'] for ip in vm['ips'] ], + private_ip=[], + extra={'storage':vm['storage'], 'cpu':vm['cpu']}, + driver=self.connection.driver) + return n + + def _to_image(self, image): + image = NodeImage(id=image['id'], + name=image['description'], + driver=self.connection.driver) + return image diff --git a/trunk/libcloud/compute/drivers/brightbox.py b/trunk/libcloud/compute/drivers/brightbox.py new file mode 100644 index 0000000000..2315c35e16 --- /dev/null +++ b/trunk/libcloud/compute/drivers/brightbox.py @@ -0,0 +1,222 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Brightbox Driver +""" +import httplib +import base64 + +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.compute.types import Provider, NodeState, InvalidCredsError +from libcloud.compute.base import NodeDriver +from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation + +try: + import simplejson as json +except ImportError: + import json + +API_VERSION = '1.0' + + +class BrightboxResponse(JsonResponse): + def success(self): + return self.status >= 200 and self.status < 400 + + def parse_body(self): + if self.headers['content-type'].split('; ')[0] == 'application/json': + return super(BrightboxResponse, self).parse_body() + else: + return self.body + + def parse_error(self): + return super(BrightboxResponse, self).parse_body()['error'] + + +class BrightboxConnection(ConnectionUserAndKey): + """ + Connection class for the Brightbox driver + """ + + host = 'api.gb1.brightbox.com' + responseCls = BrightboxResponse + + def _fetch_oauth_token(self): + body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'}) + + authorization = 'Basic ' + base64.encodestring('%s:%s' % (self.user_id, self.key)).rstrip() + + self.connect() + + response = self.connection.request(method='POST', url='/token', body=body, headers={ + 'Host': self.host, + 'User-Agent': self._user_agent(), + 'Authorization': authorization, + 'Content-Type': 'application/json', + 'Content-Length': str(len(body)) + }) + + response = self.connection.getresponse() + + if response.status == 200: + return json.loads(response.read())['access_token'] + else: + message = '%s (%s)' % (json.loads(response.read())['error'], response.status) + + raise InvalidCredsError, message + + def add_default_headers(self, headers): + try: + headers['Authorization'] = 'OAuth ' + self.token + except AttributeError: + self.token = self._fetch_oauth_token() + + headers['Authorization'] = 'OAuth ' + self.token + + return headers + + def encode_data(self, data): + return 
json.dumps(data) + + +class BrightboxNodeDriver(NodeDriver): + """ + Brightbox node driver + """ + + connectionCls = BrightboxConnection + + type = Provider.BRIGHTBOX + name = 'Brightbox' + + NODE_STATE_MAP = { 'creating': NodeState.PENDING, + 'active': NodeState.RUNNING, + 'inactive': NodeState.UNKNOWN, + 'deleting': NodeState.UNKNOWN, + 'deleted': NodeState.TERMINATED, + 'failed': NodeState.UNKNOWN, + 'unavailable': NodeState.UNKNOWN } + + def _to_node(self, data): + return Node( + id = data['id'], + name = data['name'], + state = self.NODE_STATE_MAP[data['status']], + public_ip = map(lambda cloud_ip: cloud_ip['public_ip'], data['cloud_ips']), + private_ip = map(lambda interface: interface['ipv4_address'], data['interfaces']), + driver = self.connection.driver, + extra = { + 'status': data['status'], + 'interfaces': data['interfaces'] + } + ) + + def _to_image(self, data): + return NodeImage( + id = data['id'], + name = data['name'], + driver = self, + extra = { + 'description': data['description'], + 'arch': data['arch'] + } + ) + + def _to_size(self, data): + return NodeSize( + id = data['id'], + name = data['name'], + ram = data['ram'], + disk = data['disk_size'], + bandwidth = 0, + price = '', + driver = self + ) + + def _to_location(self, data): + return NodeLocation( + id = data['id'], + name = data['handle'], + country = 'GB', + driver = self + ) + + def _post(self, path, data={}): + headers = {'Content-Type': 'application/json'} + + return self.connection.request(path, data=data, headers=headers, method='POST') + + def create_node(self, **kwargs): + data = { + 'name': kwargs['name'], + 'server_type': kwargs['size'].id, + 'image': kwargs['image'].id, + 'user_data': '' + } + + if kwargs.has_key('location'): + data['zone'] = kwargs['location'].id + else: + data['zone'] = '' + + data = self._post('/%s/servers' % API_VERSION, data).object + + return self._to_node(data) + + def destroy_node(self, node): + response = self.connection.request('/%s/servers/%s' % 
(API_VERSION, node.id), method='DELETE') + + return response.status == httplib.ACCEPTED + + def list_nodes(self): + data = self.connection.request('/%s/servers' % API_VERSION).object + + return map(self._to_node, data) + + def list_images(self): + data = self.connection.request('/%s/images' % API_VERSION).object + + return map(self._to_image, data) + + def list_sizes(self): + data = self.connection.request('/%s/server_types' % API_VERSION).object + + return map(self._to_size, data) + + def list_locations(self): + data = self.connection.request('/%s/zones' % API_VERSION).object + + return map(self._to_location, data) + + def ex_list_cloud_ips(self): + return self.connection.request('/%s/cloud_ips' % API_VERSION).object + + def ex_create_cloud_ip(self): + return self._post('/%s/cloud_ips' % API_VERSION).object + + def ex_map_cloud_ip(self, cloud_ip_id, interface_id): + response = self._post('/%s/cloud_ips/%s/map' % (API_VERSION, cloud_ip_id), {'interface': interface_id}) + + return response.status == httplib.ACCEPTED + + def ex_unmap_cloud_ip(self, cloud_ip_id): + response = self._post('/%s/cloud_ips/%s/unmap' % (API_VERSION, cloud_ip_id)) + + return response.status == httplib.ACCEPTED + + def ex_destroy_cloud_ip(self, cloud_ip_id): + response = self.connection.request('/%s/cloud_ips/%s' % (API_VERSION, cloud_ip_id), method='DELETE') + + return response.status == httplib.OK diff --git a/trunk/libcloud/compute/drivers/cloudsigma.py b/trunk/libcloud/compute/drivers/cloudsigma.py new file mode 100644 index 0000000000..ee50ebbac0 --- /dev/null +++ b/trunk/libcloud/compute/drivers/cloudsigma.py @@ -0,0 +1,553 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +CloudSigma Driver +""" +import re +import time +import base64 + +from libcloud.utils import str2dicts, str2list, dict2str +from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.common.types import InvalidCredsError +from libcloud.compute.types import NodeState, Provider +from libcloud.compute.base import NodeDriver, NodeSize, Node +from libcloud.compute.base import NodeImage + +# API end-points +API_ENDPOINTS = { + 'zrh': { + 'name': 'Zurich', + 'country': 'Switzerland', + 'host': 'api.cloudsigma.com' + }, +} + +# Default API end-point for the base connection clase. +DEFAULT_ENDPOINT = 'zrh' + +# CloudSigma doesn't specify special instance types. +# Basically for CPU any value between 0.5 GHz and 20.0 GHz should work, 500 MB to 32000 MB for ram +# and 1 GB to 1024 GB for hard drive size. 
+# Plans in this file are based on examples listed on http://www.cloudsigma.com/en/pricing/price-schedules +INSTANCE_TYPES = { + 'micro-regular': { + 'id': 'micro-regular', + 'name': 'Micro/Regular instance', + 'cpu': 1100, + 'memory': 640, + 'disk': 50, + 'bandwidth': None, + }, + 'micro-high-cpu': { + 'id': 'micro-high-cpu', + 'name': 'Micro/High CPU instance', + 'cpu': 2200, + 'memory': 640, + 'disk': 80, + 'bandwidth': None, + }, + 'standard-small': { + 'id': 'standard-small', + 'name': 'Standard/Small instance', + 'cpu': 1100, + 'memory': 1741, + 'disk': 50, + 'bandwidth': None, + }, + 'standard-large': { + 'id': 'standard-large', + 'name': 'Standard/Large instance', + 'cpu': 4400, + 'memory': 7680, + 'disk': 250, + 'bandwidth': None, + }, + 'standard-extra-large': { + 'id': 'standard-extra-large', + 'name': 'Standard/Extra Large instance', + 'cpu': 8800, + 'memory': 15360, + 'disk': 500, + 'bandwidth': None, + }, + 'high-memory-extra-large': { + 'id': 'high-memory-extra-large', + 'name': 'High Memory/Extra Large instance', + 'cpu': 7150, + 'memory': 17510, + 'disk': 250, + 'bandwidth': None, + }, + 'high-memory-double-extra-large': { + 'id': 'high-memory-double-extra-large', + 'name': 'High Memory/Double Extra Large instance', + 'cpu': 14300, + 'memory': 32768, + 'disk': 500, + 'bandwidth': None, + }, + 'high-cpu-medium': { + 'id': 'high-cpu-medium', + 'name': 'High CPU/Medium instance', + 'cpu': 5500, + 'memory': 1741, + 'disk': 150, + 'bandwidth': None, + }, + 'high-cpu-extra-large': { + 'id': 'high-cpu-extra-large', + 'name': 'High CPU/Extra Large instance', + 'cpu': 20000, + 'memory': 7168, + 'disk': 500, + 'bandwidth': None, + } +} + +NODE_STATE_MAP = { + 'active': NodeState.RUNNING, + 'stopped': NodeState.TERMINATED, + 'dead': NodeState.TERMINATED, + 'dumped': NodeState.TERMINATED, +} + +# Default timeout (in seconds) for the drive imaging process +IMAGING_TIMEOUT = 20 * 60 + +class CloudSigmaException(Exception): + def __str__(self): + return 
class CloudSigmaNodeSize(NodeSize):
    """
    NodeSize variant that additionally carries the CPU frequency (MHz)
    allocated to the instance type.
    """

    def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver):
        self.id = id
        self.name = name
        self.cpu = cpu
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver

    def __repr__(self):
        # The format string must contain the conversion specifiers:
        # applying '%' with eight arguments to an empty string raises
        # TypeError, making repr() on a size object blow up.
        return (('<NodeSize: id=%s, name=%s, cpu=%s, ram=%s, disk=%s, '
                 'bandwidth=%s, price=%s, driver=%s ...>')
                % (self.id, self.name, self.cpu, self.ram, self.disk,
                   self.bandwidth, self.price, self.driver.name))
+ """ + node = self._get_node(node.id) + state = node.state + + if state == NodeState.RUNNING: + stopped = self.ex_stop_node(node) + else: + stopped = True + + if not stopped: + raise CloudSigmaException('Could not stop node with id %s' % (node.id)) + + success = self.ex_start_node(node) + + return success + + def destroy_node(self, node): + """ + Destroy a node (all the drives associated with it are NOT destroyed). + + If a node is still running, it's stopped before it's destroyed. + """ + node = self._get_node(node.id) + state = node.state + + # Node cannot be destroyed while running so it must be stopped first + if state == NodeState.RUNNING: + stopped = self.ex_stop_node(node) + else: + stopped = True + + if not stopped: + raise CloudSigmaException('Could not stop node with id %s' % (node.id)) + + response = self.connection.request(action = '/servers/%s/destroy' % (node.id), + method = 'POST') + return response.status == 204 + + def list_images(self, location=None): + """ + Return a list of available standard images (this call might take up to 15 seconds to return). + """ + response = self.connection.request(action = '/drives/standard/info').object + + images = [] + for value in response: + if value.get('type'): + if value['type'] == 'disk': + image = NodeImage(id = value['drive'], name = value['name'], driver = self.connection.driver, + extra = {'size': value['size']}) + images.append(image) + + return images + + def list_sizes(self, location = None): + """ + Return a list of available node sizes. + """ + sizes = [] + for key, value in INSTANCE_TYPES.iteritems(): + size = CloudSigmaNodeSize(id = value['id'], name = value['name'], + cpu = value['cpu'], ram = value['memory'], + disk = value['disk'], bandwidth = value['bandwidth'], + price = self._get_size_price(size_id=key), + driver = self.connection.driver) + sizes.append(size) + + return sizes + + def list_nodes(self): + """ + Return a list of nodes. 
+ """ + response = self.connection.request(action = '/servers/info').object + + nodes = [] + for data in response: + node = self._to_node(data) + if node: + nodes.append(node) + return nodes + + def create_node(self, **kwargs): + """ + Creates a CloudSigma instance + + See L{NodeDriver.create_node} for more keyword args. + + @keyword name: String with a name for this new node (required) + @type name: C{string} + + @keyword smp: Number of virtual processors or None to calculate based on the cpu speed + @type smp: C{int} + + @keyword nic_model: e1000, rtl8139 or virtio (is not specified, e1000 is used) + @type nic_model: C{string} + + @keyword vnc_password: If not set, VNC access is disabled. + @type vnc_password: C{bool} + """ + size = kwargs['size'] + image = kwargs['image'] + smp = kwargs.get('smp', 'auto') + nic_model = kwargs.get('nic_model', 'e1000') + vnc_password = kwargs.get('vnc_password', None) + + if nic_model not in ['e1000', 'rtl8139', 'virtio']: + raise CloudSigmaException('Invalid NIC model specified') + + drive_data = {} + drive_data.update({'name': kwargs['name'], 'size': '%sG' % (kwargs['size'].disk)}) + + response = self.connection.request(action = '/drives/%s/clone' % image.id, data = dict2str(drive_data), + method = 'POST').object + + if not response: + raise CloudSigmaException('Drive creation failed') + + drive_uuid = response[0]['drive'] + + response = self.connection.request(action = '/drives/%s/info' % (drive_uuid)).object + imaging_start = time.time() + while response[0].has_key('imaging'): + response = self.connection.request(action = '/drives/%s/info' % (drive_uuid)).object + elapsed_time = time.time() - imaging_start + if response[0].has_key('imaging') and elapsed_time >= IMAGING_TIMEOUT: + raise CloudSigmaException('Drive imaging timed out') + time.sleep(1) + + node_data = {} + node_data.update({'name': kwargs['name'], 'cpu': size.cpu, 'mem': size.ram, 'ide:0:0': drive_uuid, + 'boot': 'ide:0:0', 'smp': smp}) + 
node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'}) + + if vnc_password: + node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password}) + + response = self.connection.request(action = '/servers/create', data = dict2str(node_data), + method = 'POST').object + + if not isinstance(response, list): + response = [ response ] + + node = self._to_node(response[0]) + if node is None: + # Insufficient funds, destroy created drive + self.ex_drive_destroy(drive_uuid) + raise CloudSigmaInsufficientFundsException('Insufficient funds, node creation failed') + + # Start the node after it has been created + started = self.ex_start_node(node) + + if started: + node.state = NodeState.RUNNING + + return node + + def ex_destroy_node_and_drives(self, node): + """ + Destroy a node and all the drives associated with it. + """ + node = self._get_node_info(node) + + drive_uuids = [] + for key, value in node.iteritems(): + if (key.startswith('ide:') or key.startswith('scsi') or key.startswith('block')) and \ + not (key.endswith(':bytes') or key.endswith(':requests') or key.endswith('media')): + drive_uuids.append(value) + + node_destroyed = self.destroy_node(self._to_node(node)) + + if not node_destroyed: + return False + + for drive_uuid in drive_uuids: + self.ex_drive_destroy(drive_uuid) + + return True + + def ex_static_ip_list(self): + """ + Return a list of available static IP addresses. + """ + response = self.connection.request(action = '/resources/ip/list', method = 'GET') + + if response.status != 200: + raise CloudSigmaException('Could not retrieve IP list') + + ips = str2list(response.body) + return ips + + def ex_drives_list(self): + """ + Return a list of all the available drives. + """ + response = self.connection.request(action = '/drives/info', method = 'GET') + + result = str2dicts(response.body) + return result + + def ex_static_ip_create(self): + """ + Create a new static IP address. 
+ """ + response = self.connection.request(action = '/resources/ip/create', method = 'GET') + + result = str2dicts(response.body) + return result + + def ex_static_ip_destroy(self, ip_address): + """ + Destroy a static IP address. + """ + response = self.connection.request(action = '/resources/ip/%s/destroy' % (ip_address), method = 'GET') + + return response.status == 204 + + def ex_drive_destroy(self, drive_uuid): + """ + Destroy a drive with a specified uuid. + If the drive is currently mounted an exception is thrown. + """ + response = self.connection.request(action = '/drives/%s/destroy' % (drive_uuid), method = 'POST') + + return response.status == 204 + + + def ex_set_node_configuration(self, node, **kwargs): + """ + Update a node configuration. + Changing most of the parameters requires node to be stopped. + """ + valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', '^boot$', '^nic:0:model$', '^nic:0:dhcp', + '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', '^vnc:ip$', '^vnc:password$', '^vnc:tls', + '^ide:[0-1]:[0-1](:media)?$', '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$') + + invalid_keys = [] + for key in kwargs.keys(): + matches = False + for regex in valid_keys: + if re.match(regex, key): + matches = True + break + if not matches: + invalid_keys.append(key) + + if invalid_keys: + raise CloudSigmaException('Invalid configuration key specified: %s' % (',' .join(invalid_keys))) + + response = self.connection.request(action = '/servers/%s/set' % (node.id), data = dict2str(kwargs), + method = 'POST') + + return (response.status == 200 and response.body != '') + + def ex_start_node(self, node): + """ + Start a node. + """ + response = self.connection.request(action = '/servers/%s/start' % (node.id), + method = 'POST') + + return response.status == 200 + + def ex_stop_node(self, node): + """ + Stop (shutdown) a node. 
+ """ + response = self.connection.request(action = '/servers/%s/stop' % (node.id), + method = 'POST') + return response.status == 204 + + def ex_shutdown_node(self, node): + """ + Stop (shutdown) a node. + """ + return self.ex_stop_node(node) + + def ex_destroy_drive(self, drive_uuid): + """ + Destroy a drive. + """ + response = self.connection.request(action = '/drives/%s/destroy' % (drive_uuid), + method = 'POST') + return response.status == 204 + + def _to_node(self, data): + if data: + try: + state = NODE_STATE_MAP[data['status']] + except KeyError: + state = NodeState.UNKNOWN + + if 'server' not in data: + # Response does not contain server UUID if the server + # creation failed because of insufficient funds. + return None + + public_ip = [] + if data.has_key('nic:0:dhcp'): + if isinstance(data['nic:0:dhcp'], list): + public_ip = data['nic:0:dhcp'] + else: + public_ip = [data['nic:0:dhcp']] + + extra = {} + extra_keys = [ ('cpu', 'int'), ('smp', 'auto'), ('mem', 'int'), ('status', 'str') ] + for key, value_type in extra_keys: + if data.has_key(key): + value = data[key] + + if value_type == 'int': + value = int(value) + elif value_type == 'auto': + try: + value = int(value) + except ValueError: + pass + + extra.update({key: value}) + + if data.has_key('vnc:ip') and data.has_key('vnc:password'): + extra.update({'vnc_ip': data['vnc:ip'], 'vnc_password': data['vnc:password']}) + + node = Node(id = data['server'], name = data['name'], state = state, + public_ip = public_ip, private_ip = None, driver = self.connection.driver, + extra = extra) + + return node + return None + + def _get_node(self, node_id): + nodes = self.list_nodes() + node = [node for node in nodes if node.id == node.id] + + if not node: + raise CloudSigmaException('Node with id %s does not exist' % (node_id)) + + return node[0] + + def _get_node_info(self, node): + response = self.connection.request(action = '/servers/%s/info' % (node.id)) + + result = str2dicts(response.body) + return result[0] + 
+class CloudSigmaZrhConnection(CloudSigmaBaseConnection): + """ + Connection class for the CloudSigma driver for the Zurich end-point + """ + host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + +class CloudSigmaZrhNodeDriver(CloudSigmaBaseNodeDriver): + """ + CloudSigma node driver for the Zurich end-point + """ + connectionCls = CloudSigmaZrhConnection + api_name = 'cloudsigma_zrh' diff --git a/trunk/libcloud/compute/drivers/cloudstack.py b/trunk/libcloud/compute/drivers/cloudstack.py new file mode 100644 index 0000000000..fc6dfe78d3 --- /dev/null +++ b/trunk/libcloud/compute/drivers/cloudstack.py @@ -0,0 +1,282 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.common.cloudstack import CloudStackConnection, \ + CloudStackDriverMixIn +from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation, \ + NodeSize +from libcloud.compute.types import DeploymentError, NodeState + +class CloudStackNode(Node): + "Subclass of Node so we can expose our extension methods." + + def ex_allocate_public_ip(self): + "Allocate a public IP and bind it to this node." + return self.driver.ex_allocate_public_ip(self) + + def ex_release_public_ip(self, address): + "Release a public IP that this node holds." 
class CloudStackAddress(object):
    """
    A public IP address.

    Remembers the node it was allocated for so it can release itself.
    """

    def __init__(self, node, id, address):
        self.node = node
        self.id = id
        self.address = address

    def release(self):
        # Delegates to the node, which in turn calls the driver.
        self.node.ex_release_public_ip(self)

    def __str__(self):
        return self.address

    def __eq__(self, other):
        return self.__class__ is other.__class__ and self.id == other.id

    def __ne__(self, other):
        # Python 2 does not derive != from ==, so define it explicitly.
        return not self.__eq__(other)

    def __hash__(self):
        # __eq__ without __hash__ leaves identity hashing in place, so
        # equal addresses would hash differently in sets/dicts; keep the
        # hash consistent with __eq__ (equal ids -> equal hashes).
        return hash(self.id)
+ @type async_poll_frequency: C{int}""" + + api_name = 'cloudstack' + + NODE_STATE_MAP = { + 'Running': NodeState.RUNNING, + 'Starting': NodeState.REBOOTING, + 'Stopped': NodeState.TERMINATED, + 'Stopping': NodeState.TERMINATED, + 'Destroyed': NodeState.TERMINATED + } + + def list_images(self, location=None): + args = { + 'templatefilter': 'executable' + } + if location is not None: + args['zoneid'] = location.id + imgs = self._sync_request('listTemplates', **args) + images = [] + for img in imgs['template']: + images.append(NodeImage(img['id'], img['name'], self, { + 'hypervisor': img['hypervisor'], + 'format': img['format'], + 'os': img['ostypename'], + })) + return images + + def list_locations(self): + locs = self._sync_request('listZones') + locations = [] + for loc in locs['zone']: + locations.append(NodeLocation(loc['id'], loc['name'], 'AU', self)) + return locations + + def list_nodes(self): + vms = self._sync_request('listVirtualMachines') + addrs = self._sync_request('listPublicIpAddresses') + + public_ips = {} + for addr in addrs.get('publicipaddress', []): + if 'virtualmachineid' not in addr: + continue + vm_id = addr['virtualmachineid'] + if vm_id not in public_ips: + public_ips[vm_id] = {} + public_ips[vm_id][addr['ipaddress']] = addr['id'] + + nodes = [] + + for vm in vms.get('virtualmachine', []): + private_ips = [] + + for nic in vm['nic']: + if 'ipaddress' in nic: + private_ips.append(nic['ipaddress']) + + node = CloudStackNode( + id=vm['id'], + name=vm.get('displayname', None), + state=self.NODE_STATE_MAP[vm['state']], + public_ip=public_ips.get(vm['id'], {}).keys(), + private_ip=private_ips, + driver=self, + extra={ + 'zoneid': vm['zoneid'], + } + ) + + addrs = public_ips.get(vm['id'], {}).items() + addrs = [CloudStackAddress(node, v, k) for k, v in addrs] + node.extra['ip_addresses'] = addrs + + rules = [] + for addr in addrs: + result = self._sync_request('listIpForwardingRules') + for r in result.get('ipforwardingrule', []): + rule = 
CloudStackForwardingRule(node, r['id'], addr, + r['protocol'].upper(), + r['startport'], + r['endport']) + rules.append(rule) + node.extra['ip_forwarding_rules'] = rules + + nodes.append(node) + + return nodes + + def list_sizes(self, location=None): + szs = self._sync_request('listServiceOfferings') + sizes = [] + for sz in szs['serviceoffering']: + sizes.append(NodeSize(sz['id'], sz['name'], sz['memory'], 0, 0, + 0, self)) + return sizes + + def create_node(self, name, size, image, location=None, **kwargs): + extra_args = {} + if location is None: + location = self.list_locations()[0] + + + network_id = kwargs.pop('network_id', None) + if network_id is None: + networks = self._sync_request('listNetworks') + + if networks: + extra_args['networkids'] = networks['network'][0]['id'] + else: + extra_args['networkids'] = network_id + + result = self._async_request('deployVirtualMachine', + name=name, + displayname=name, + serviceofferingid=size.id, + templateid=image.id, + zoneid=location.id, + **extra_args + ) + + node = result['virtualmachine'] + + return Node( + id=node['id'], + name=node['displayname'], + state=self.NODE_STATE_MAP[node['state']], + public_ip=[], + private_ip=[], + driver=self, + extra={ + 'zoneid': location.id, + 'ip_addresses': [], + 'forwarding_rules': [], + } + ) + + def destroy_node(self, node): + self._async_request('destroyVirtualMachine', id=node.id) + return True + + def reboot_node(self, node): + self._async_request('rebootVirtualMachine', id=node.id) + return True + + def ex_allocate_public_ip(self, node): + "Allocate a public IP and bind it to a node." 
    def ex_add_ip_forwarding_rule(self, node, address, protocol,
                                  start_port, end_port=None):
        """
        Add a NAT/firewall forwarding rule.

        @param node: Node the rule is created for.
        @param address: CloudStackAddress the rule forwards through.
        @param protocol: 'TCP' or 'UDP' (case-insensitive).
        @param start_port: First port of the forwarded range.
        @param end_port: Optional last port of the forwarded range.
        @return: The new CloudStackForwardingRule, or None when an
                 unsupported protocol was requested.
        """

        # Only TCP and UDP are supported; anything else is rejected
        # without contacting the API.
        protocol = protocol.upper()
        if protocol not in ('TCP', 'UDP'):
            return None

        args = {
            'ipaddressid': address.id,
            'protocol': protocol,
            'startport': int(start_port)
        }
        if end_port is not None:
            args['endport'] = int(end_port)

        # createIpForwardingRule is asynchronous; _async_request polls
        # until the job completes and returns its result.
        result = self._async_request('createIpForwardingRule', **args)
        result = result['ipforwardingrule']
        rule = CloudStackForwardingRule(node, result['id'], address,
                                        protocol, start_port, end_port)
        # Keep the node's cached rule list in sync with the server side.
        node.extra['ip_forwarding_rules'].append(rule)
        return rule
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +DreamHost Driver +""" + +import copy + +from libcloud.common.base import ConnectionKey, JsonResponse +from libcloud.common.types import InvalidCredsError +from libcloud.compute.base import Node, NodeDriver, NodeSize +from libcloud.compute.base import NodeImage +from libcloud.compute.types import Provider, NodeState + +# DreamHost Private Servers can be resized on the fly, but Libcloud doesn't +# currently support extensions to its interface, so we'll put some basic sizes +# in for node creation. 
class DreamhostAPIException(Exception):
    """
    Raised when the DreamHost API returns an error response.

    The error message returned by the API is stored as the first
    positional argument.
    """

    def __str__(self):
        return self.args[0]

    def __repr__(self):
        # The original used an empty format string ("" % (self.args[0])),
        # which raises "TypeError: not all arguments converted" whenever
        # repr() is taken.  Emit a conventional repr instead.
        return "<DreamhostAPIException '%s'>" % (self.args[0])
+ """ + params['key'] = self.key + params['format'] = self.format + #params['unique_id'] = generate_unique_id() + return params + + +class DreamhostNodeDriver(NodeDriver): + """ + Node Driver for DreamHost PS + """ + type = Provider.DREAMHOST + api_name = 'dreamhost' + name = "Dreamhost" + connectionCls = DreamhostConnection + + _sizes = DH_PS_SIZES + + def create_node(self, **kwargs): + """Create a new Dreamhost node + + See L{NodeDriver.create_node} for more keyword args. + + @keyword ex_movedata: Copy all your existing users to this new PS + @type ex_movedata: C{str} + """ + size = kwargs['size'].ram + params = { + 'cmd' : 'dreamhost_ps-add_ps', + 'movedata' : kwargs.get('movedata', 'no'), + 'type' : kwargs['image'].name, + 'size' : size + } + data = self.connection.request('/', params).object + return Node( + id = data['added_web'], + name = data['added_web'], + state = NodeState.PENDING, + public_ip = [], + private_ip = [], + driver = self.connection.driver, + extra = { + 'type' : kwargs['image'].name + } + ) + + def destroy_node(self, node): + params = { + 'cmd' : 'dreamhost_ps-remove_ps', + 'ps' : node.id + } + try: + return self.connection.request('/', params).success() + except DreamhostAPIException: + return False + + def reboot_node(self, node): + params = { + 'cmd' : 'dreamhost_ps-reboot', + 'ps' : node.id + } + try: + return self.connection.request('/', params).success() + except DreamhostAPIException: + return False + + def list_nodes(self, **kwargs): + data = self.connection.request( + '/', {'cmd': 'dreamhost_ps-list_ps'}).object + return [self._to_node(n) for n in data] + + def list_images(self, **kwargs): + data = self.connection.request( + '/', {'cmd': 'dreamhost_ps-list_images'}).object + images = [] + for img in data: + images.append(NodeImage( + id = img['image'], + name = img['image'], + driver = self.connection.driver + )) + return images + + def list_sizes(self, **kwargs): + sizes = [] + for key, values in self._sizes.iteritems(): + 
    def _resize_node(self, node, size):
        """
        Resize an existing DreamHost PS.

        @type node: L{Node}
        @param node: The private server to resize.

        @type size: C{int}
        @param size: Desired RAM in MB.  Values outside 300-4000 are
                     rejected here, matching the bounds of DH_PS_SIZES.

        @return: C{True} on success, C{False} if validation or the API
                 call fails.
        """
        # Reject sizes outside the range this driver supports.
        if (size < 300 or size > 4000):
            return False

        params = {
            'cmd' : 'dreamhost_ps-set_size',
            'ps' : node.id,
            'size' : size
        }
        try:
            return self.connection.request('/', params).success()
        except DreamhostAPIException:
            # Best-effort: API-level failures are reported as False.
            return False
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Dummy Driver + +@note: This driver is out of date +""" +import uuid +import socket +import struct + +from libcloud.common.base import ConnectionKey +from libcloud.compute.base import NodeImage, NodeSize, Node +from libcloud.compute.base import NodeDriver, NodeLocation +from libcloud.compute.types import Provider,NodeState + +class DummyConnection(ConnectionKey): + """ + Dummy connection class + """ + + def connect(self, host=None, port=None): + pass + +class DummyNodeDriver(NodeDriver): + """ + Dummy node driver + + This is a fake driver which appears to always create or destroy + nodes successfully. + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> node=driver.create_node() + >>> node.public_ip[0] + '127.0.0.3' + >>> node.name + 'dummy-3' + + If the credentials you give convert to an integer then the next + node to be created will be one higher. + + Each time you create a node you will get a different IP address. 
    def __init__(self, creds):
        """
        @param creds: Fake credentials.  If they convert to an C{int} N,
                      the driver is pre-seeded with N nodes whose IPs
                      count upwards from 127.0.0.1; otherwise it starts
                      with two default nodes.
        """
        self.creds = creds
        try:
            num = int(creds)
        except ValueError:
            num = None
        # NOTE: num == 0 is falsy, so DummyNodeDriver(0) also falls
        # through to the default two-node list below.
        if num:
            self.nl = []
            startip = _ip_to_int('127.0.0.1')
            for i in xrange(num):
                ip = _int_to_ip(startip + i)
                self.nl.append(
                    Node(id=i,
                         name='dummy-%d' % (i),
                         state=NodeState.RUNNING,
                         public_ip=[ip],
                         private_ip=[],
                         driver=self,
                         extra={'foo': 'bar'})
                )
        else:
            self.nl = [
                Node(id=1,
                     name='dummy-1',
                     state=NodeState.RUNNING,
                     public_ip=['127.0.0.1'],
                     private_ip=[],
                     driver=self,
                     extra={'foo': 'bar'}),
                Node(id=2,
                     name='dummy-2',
                     state=NodeState.RUNNING,
                     public_ip=['127.0.0.1'],
                     private_ip=[],
                     driver=self,
                     extra={'foo': 'bar'}),
            ]
        self.connection = DummyConnection(self.creds)
+ + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> node=driver.create_node() + >>> from libcloud.compute.types import NodeState + >>> node.state == NodeState.RUNNING + True + >>> node.state == NodeState.REBOOTING + False + >>> driver.reboot_node(node) + True + >>> node.state == NodeState.REBOOTING + True + + Please note, dummy nodes never recover from the reboot. + """ + + node.state = NodeState.REBOOTING + return True + + def destroy_node(self, node): + """ + Sets the node state to terminated and removes it from the node list + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> from libcloud.compute.types import NodeState + >>> node = [node for node in driver.list_nodes() if node.name == 'dummy-1'][0] + >>> node.state == NodeState.RUNNING + True + >>> driver.destroy_node(node) + True + >>> node.state == NodeState.RUNNING + False + >>> [node for node in driver.list_nodes() if node.name == 'dummy-1'] + [] + """ + + node.state = NodeState.TERMINATED + self.nl.remove(node) + return True + + def list_images(self, location=None): + """ + Returns a list of images as a cloud provider might have + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> sorted([image.name for image in driver.list_images()]) + ['Slackware 4', 'Ubuntu 9.04', 'Ubuntu 9.10'] + """ + return [ + NodeImage(id=1, name="Ubuntu 9.10", driver=self), + NodeImage(id=2, name="Ubuntu 9.04", driver=self), + NodeImage(id=3, name="Slackware 4", driver=self), + ] + + def list_sizes(self, location=None): + """ + Returns a list of node sizes as a cloud provider might have + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> sorted([size.ram for size in driver.list_sizes()]) + [128, 512, 4096, 8192] + """ + + return [ + NodeSize(id=1, + name="Small", + ram=128, + disk=4, + bandwidth=500, + price=4, + 
driver=self), + NodeSize(id=2, + name="Medium", + ram=512, + disk=16, + bandwidth=1500, + price=8, + driver=self), + NodeSize(id=3, + name="Big", + ram=4096, + disk=32, + bandwidth=2500, + price=32, + driver=self), + NodeSize(id=4, + name="XXL Big", + ram=4096*2, + disk=32*4, + bandwidth=2500*3, + price=32*2, + driver=self), + ] + + def list_locations(self): + """ + Returns a list of locations of nodes + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> sorted([loc.name + " in " + loc.country for loc in driver.list_locations()]) + ['Island Datacenter in FJ', 'London Loft in GB', "Paul's Room in US"] + """ + return [ + NodeLocation(id=1, + name="Paul's Room", + country='US', + driver=self), + NodeLocation(id=2, + name="London Loft", + country='GB', + driver=self), + NodeLocation(id=3, + name="Island Datacenter", + country='FJ', + driver=self), + ] + + def create_node(self, **kwargs): + """ + Creates a dummy node; the node id is equal to the number of + nodes in the node list + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> sorted([node.name for node in driver.list_nodes()]) + ['dummy-1', 'dummy-2'] + >>> nodeA = driver.create_node() + >>> sorted([node.name for node in driver.list_nodes()]) + ['dummy-1', 'dummy-2', 'dummy-3'] + >>> driver.create_node().name + 'dummy-4' + >>> driver.destroy_node(nodeA) + True + >>> sorted([node.name for node in driver.list_nodes()]) + ['dummy-1', 'dummy-2', 'dummy-4'] + """ + l = len(self.nl) + 1 + n = Node(id=l, + name='dummy-%d' % l, + state=NodeState.RUNNING, + public_ip=['127.0.0.%d' % l], + private_ip=[], + driver=self, + size=NodeSize(id='s1', name='foo', ram=2048, + disk=160, bandwidth=None, price=0.0, + driver=self), + image=NodeImage(id='i2', name='image', driver=self), + extra={'foo': 'bar'}) + self.nl.append(n) + return n + +def _ip_to_int(ip): + return socket.htonl(struct.unpack('I', socket.inet_aton(ip))[0]) + 
+def _int_to_ip(ip): + return socket.inet_ntoa(struct.pack('I', socket.ntohl(ip))) + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/trunk/libcloud/compute/drivers/ec2.py b/trunk/libcloud/compute/drivers/ec2.py new file mode 100644 index 0000000000..1a1879c5c7 --- /dev/null +++ b/trunk/libcloud/compute/drivers/ec2.py @@ -0,0 +1,1165 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Amazon EC2 driver +""" +from __future__ import with_statement + +import base64 +import hmac +import os +import time +import urllib +import copy + +from hashlib import sha256 +from xml.etree import ElementTree as ET + +from libcloud.utils import fixxpath, findtext, findattr, findall +from libcloud.common.base import ConnectionUserAndKey +from libcloud.common.aws import AWSBaseResponse +from libcloud.common.types import (InvalidCredsError, MalformedResponseError, + LibcloudError) +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize +from libcloud.compute.base import NodeImage + +EC2_US_EAST_HOST = 'ec2.us-east-1.amazonaws.com' +EC2_US_WEST_HOST = 'ec2.us-west-1.amazonaws.com' +EC2_US_WEST_OREGON_HOST = 'ec2.us-west-2.amazonaws.com' +EC2_EU_WEST_HOST = 'ec2.eu-west-1.amazonaws.com' +EC2_AP_SOUTHEAST_HOST = 'ec2.ap-southeast-1.amazonaws.com' +EC2_AP_NORTHEAST_HOST = 'ec2.ap-northeast-1.amazonaws.com' + +API_VERSION = '2010-08-31' + +NAMESPACE = "http://ec2.amazonaws.com/doc/%s/" % (API_VERSION) + +""" +Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them. 
+From http://aws.amazon.com/ec2/instance-types/ +""" +EC2_INSTANCE_TYPES = { + 't1.micro': { + 'id': 't1.micro', + 'name': 'Micro Instance', + 'ram': 613, + 'disk': 15, + 'bandwidth': None + }, + 'm1.small': { + 'id': 'm1.small', + 'name': 'Small Instance', + 'ram': 1740, + 'disk': 160, + 'bandwidth': None + }, + 'm1.large': { + 'id': 'm1.large', + 'name': 'Large Instance', + 'ram': 7680, + 'disk': 850, + 'bandwidth': None + }, + 'm1.xlarge': { + 'id': 'm1.xlarge', + 'name': 'Extra Large Instance', + 'ram': 15360, + 'disk': 1690, + 'bandwidth': None + }, + 'c1.medium': { + 'id': 'c1.medium', + 'name': 'High-CPU Medium Instance', + 'ram': 1740, + 'disk': 350, + 'bandwidth': None + }, + 'c1.xlarge': { + 'id': 'c1.xlarge', + 'name': 'High-CPU Extra Large Instance', + 'ram': 7680, + 'disk': 1690, + 'bandwidth': None + }, + 'm2.xlarge': { + 'id': 'm2.xlarge', + 'name': 'High-Memory Extra Large Instance', + 'ram': 17510, + 'disk': 420, + 'bandwidth': None + }, + 'm2.2xlarge': { + 'id': 'm2.2xlarge', + 'name': 'High-Memory Double Extra Large Instance', + 'ram': 35021, + 'disk': 850, + 'bandwidth': None + }, + 'm2.4xlarge': { + 'id': 'm2.4xlarge', + 'name': 'High-Memory Quadruple Extra Large Instance', + 'ram': 70042, + 'disk': 1690, + 'bandwidth': None + }, + 'cg1.4xlarge': { + 'id': 'cg1.4xlarge', + 'name': 'Cluster GPU Quadruple Extra Large Instance', + 'ram': 22528, + 'disk': 1690, + 'bandwidth': None + }, + 'cc1.4xlarge': { + 'id': 'cc1.4xlarge', + 'name': 'Cluster Compute Quadruple Extra Large Instance', + 'ram': 23552, + 'disk': 1690, + 'bandwidth': None + }, + } + +CLUSTER_INSTANCES_IDS = ['cg1.4xlarge', 'cc1.4xlarge'] + +EC2_US_EAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) +EC2_US_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) +EC2_EU_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) +EC2_AP_SOUTHEAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) +EC2_AP_NORTHEAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) +EC2_US_WEST_OREGON_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) + + 
class EC2Response(AWSBaseResponse):
    """
    EC2 specific response parsing and error handling.
    """

    # API error codes that indicate bad or unauthorized credentials; all
    # of these are escalated to InvalidCredsError.
    CREDENTIAL_ERROR_CODES = ('InvalidClientTokenId',
                              'SignatureDoesNotMatch',
                              'AuthFailure',
                              'OptInRequired')

    def parse_error(self):
        """
        Collect every <Errors/Error> entry in the body and return them
        joined into a single message.

        @raise InvalidCredsError: for credential-related error codes and
            for Eucalyptus-style bare 403 responses.
        @raise MalformedResponseError: if the body is not parseable XML.
        @raise IdempotentParamError: on IdempotentParameterMismatch.
        """
        err_list = []
        # Eucalyptus returns a 403 with no XML body when the
        # user/password is wrong, so handle that before parsing.
        msg = "Failure: 403 Forbidden"
        if self.status == 403 and self.body[:len(msg)] == msg:
            raise InvalidCredsError(msg)

        try:
            body = ET.XML(self.body)
        except Exception:
            # Was a bare "except:"; narrowed so KeyboardInterrupt and
            # SystemExit are not swallowed.
            raise MalformedResponseError("Failed to parse XML",
                                         body=self.body,
                                         driver=EC2NodeDriver)

        for err in body.findall('Errors/Error'):
            code, message = err.getchildren()
            err_list.append("%s: %s" % (code.text, message.text))
            if code.text in self.CREDENTIAL_ERROR_CODES:
                raise InvalidCredsError(err_list[-1])
            if code.text == "IdempotentParameterMismatch":
                raise IdempotentParamError(err_list[-1])
        return "\n".join(err_list)
_get_aws_auth_param(self, params, secret_key, path='/'): + """ + Creates the signature required for AWS, per + http://bit.ly/aR7GaQ [docs.amazonwebservices.com]: + + StringToSign = HTTPVerb + "\n" + + ValueOfHostHeaderInLowercase + "\n" + + HTTPRequestURI + "\n" + + CanonicalizedQueryString + """ + keys = params.keys() + keys.sort() + pairs = [] + for key in keys: + pairs.append(urllib.quote(key, safe='') + '=' + + urllib.quote(params[key], safe='-_~')) + + qs = '&'.join(pairs) + + hostname = self.host + if (self.secure and self.port != 443) or \ + (not self.secure and self.port != 80): + hostname += ":" + str(self.port) + + string_to_sign = '\n'.join(('GET', hostname, path, qs)) + + b64_hmac = base64.b64encode( + hmac.new(secret_key, string_to_sign, digestmod=sha256).digest() + ) + return b64_hmac + + +class ExEC2AvailabilityZone(object): + """ + Extension class which stores information about an EC2 availability zone. + + Note: This class is EC2 specific. + """ + + def __init__(self, name, zone_state, region_name): + self.name = name + self.zone_state = zone_state + self.region_name = region_name + + def __repr__(self): + return (('') + % (self.name, self.zone_state, self.region_name)) + + +class EC2NodeDriver(NodeDriver): + """ + Amazon EC2 node driver + """ + + connectionCls = EC2Connection + type = Provider.EC2 + api_name = 'ec2_us_east' + name = 'Amazon EC2 (us-east-1)' + friendly_name = 'Amazon US N. Virginia' + country = 'US' + region_name = 'us-east-1' + path = '/' + + _instance_types = EC2_US_EAST_INSTANCE_TYPES + features = {'create_node': ['ssh_key']} + + NODE_STATE_MAP = { + 'pending': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'shutting-down': NodeState.TERMINATED, + 'terminated': NodeState.TERMINATED + } + + def _pathlist(self, key, arr): + """ + Converts a key and an array of values into AWS query param format. 
+ """ + params = {} + i = 0 + for value in arr: + i += 1 + params["%s.%s" % (key, i)] = value + return params + + def _get_boolean(self, element): + tag = "{%s}%s" % (NAMESPACE, 'return') + return element.findtext(tag) == 'true' + + def _get_terminate_boolean(self, element): + status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name')) + return any([term_status == status + for term_status + in ('shutting-down', 'terminated')]) + + def _to_nodes(self, object, xpath, groups=None): + return [self._to_node(el, groups=groups) + for el in object.findall(fixxpath(xpath=xpath, + namespace=NAMESPACE))] + + def _to_node(self, element, groups=None): + try: + state = self.NODE_STATE_MAP[ + findattr(element=element, xpath="instanceState/name", + namespace=NAMESPACE) + ] + except KeyError: + state = NodeState.UNKNOWN + + instance_id = findtext(element=element, xpath='instanceId', + namespace=NAMESPACE) + tags = dict((findtext(element=item, xpath='key', namespace=NAMESPACE), + findtext(element=item, xpath='value', + namespace=NAMESPACE)) + for item in findall(element=element, xpath='tagSet/item', + namespace=NAMESPACE)) + + name = tags.get('Name', instance_id) + + public_ip = findtext(element=element, xpath='ipAddress', + namespace=NAMESPACE) + public_ips = [public_ip] if public_ip else [] + private_ip = findtext(element=element, xpath='privateIpAddress', + namespace=NAMESPACE) + private_ips = [private_ip] if private_ip else [] + + n = Node( + id=findtext(element=element, xpath='instanceId', + namespace=NAMESPACE), + name=name, + state=state, + public_ip=public_ips, + private_ip=private_ips, + driver=self.connection.driver, + extra={ + 'dns_name': findattr(element=element, xpath="dnsName", + namespace=NAMESPACE), + 'instanceId': findattr(element=element, xpath="instanceId", + namespace=NAMESPACE), + 'imageId': findattr(element=element, xpath="imageId", + namespace=NAMESPACE), + 'private_dns': findattr(element=element, + xpath="privateDnsName", + namespace=NAMESPACE), + 'status': 
findattr(element=element, xpath="instanceState/name", + namespace=NAMESPACE), + 'keyname': findattr(element=element, xpath="keyName", + namespace=NAMESPACE), + 'launchindex': findattr(element=element, + xpath="amiLaunchIndex", + namespace=NAMESPACE), + 'productcode': + [p.text for p in findall(element=element, + xpath="productCodesSet/item/productCode", + namespace=NAMESPACE + )], + 'instancetype': findattr(element=element, xpath="instanceType", + namespace=NAMESPACE), + 'launchdatetime': findattr(element=element, xpath="launchTime", + namespace=NAMESPACE), + 'availability': findattr(element, + xpath="placement/availabilityZone", + namespace=NAMESPACE), + 'kernelid': findattr(element=element, xpath="kernelId", + namespace=NAMESPACE), + 'ramdiskid': findattr(element=element, xpath="ramdiskId", + namespace=NAMESPACE), + 'clienttoken': findattr(element=element, xpath="clientToken", + namespace=NAMESPACE), + 'groups': groups, + 'tags': tags + } + ) + return n + + def _to_images(self, object): + return [self._to_image(el) + for el in object.findall( + fixxpath(xpath='imagesSet/item', namespace=NAMESPACE) + )] + + def _to_image(self, element): + n = NodeImage(id=findtext(element=element, xpath='imageId', + namespace=NAMESPACE), + name=findtext(element=element, xpath='imageLocation', + namespace=NAMESPACE), + driver=self.connection.driver, + extra={ + 'state': findattr(element=element, + xpath="imageState", + namespace=NAMESPACE), + 'ownerid': findattr(element=element, + xpath="imageOwnerId", + namespace=NAMESPACE), + 'owneralias': findattr(element=element, + xpath="imageOwnerAlias", + namespace=NAMESPACE), + 'ispublic': findattr(element=element, + xpath="isPublic", + namespace=NAMESPACE), + 'architecture': findattr(element=element, + xpath="architecture", + namespace=NAMESPACE), + 'imagetype': findattr(element=element, + xpath="imageType", + namespace=NAMESPACE), + 'platform': findattr(element=element, + xpath="platform", + namespace=NAMESPACE), + 'rootdevicetype': 
findattr(element=element, + xpath="rootDeviceType", + namespace=NAMESPACE), + 'virtualizationtype': findattr(element=element, + xpath="virtualizationType", + namespace=NAMESPACE), + 'hypervisor': findattr(element=element, + xpath="hypervisor", + namespace=NAMESPACE) + } + ) + return n + + def list_nodes(self): + params = {'Action': 'DescribeInstances'} + elem = self.connection.request(self.path, params=params).object + nodes = [] + for rs in findall(element=elem, xpath='reservationSet/item', + namespace=NAMESPACE): + groups = [g.findtext('') + for g in findall(element=rs, + xpath='groupSet/item/groupId', + namespace=NAMESPACE)] + nodes += self._to_nodes(rs, 'instancesSet/item', groups) + + nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes) + for node in nodes: + ips = nodes_elastic_ips_mappings[node.id] + node.public_ip.extend(ips) + return nodes + + def list_sizes(self, location=None): + # Cluster instances are currently only available + # in the US - N. Virginia Region + include_ci = self.region_name == 'us-east-1' + sizes = self._get_sizes(include_cluser_instances=include_ci) + return sizes + + def _get_sizes(self, include_cluser_instances=False): + sizes = [] + for key, values in self._instance_types.iteritems(): + if not include_cluser_instances and\ + key in CLUSTER_INSTANCES_IDS: + continue + attributes = copy.deepcopy(values) + attributes.update({'price': self._get_size_price(size_id=key)}) + sizes.append(NodeSize(driver=self, **attributes)) + return sizes + + def list_images(self, location=None): + params = {'Action': 'DescribeImages'} + images = self._to_images( + self.connection.request(self.path, params=params).object + ) + return images + + def list_locations(self): + locations = [] + for index, availability_zone in \ + enumerate(self.ex_list_availability_zones()): + locations.append(EC2NodeLocation(index, + self.friendly_name, + self.country, + self, + availability_zone)) + return locations + + def ex_create_keypair(self, name): + 
"""Creates a new keypair + + @note: This is a non-standard extension API, and + only works for EC2. + + @type name: C{str} + @param name: The name of the keypair to Create. This must be + unique, otherwise an InvalidKeyPair.Duplicate + exception is raised. + """ + params = { + 'Action': 'CreateKeyPair', + 'KeyName': name, + } + response = self.connection.request(self.path, params=params).object + key_material = findtext(element=response, xpath='keyMaterial', + namespace=NAMESPACE) + key_fingerprint = findtext(element=response, xpath='keyFingerprint', + namespace=NAMESPACE) + return { + 'keyMaterial': key_material, + 'keyFingerprint': key_fingerprint, + } + + def ex_import_keypair(self, name, keyfile): + """imports a new public key + + @note: This is a non-standard extension API, and only works for EC2. + + @type name: C{str} + @param name: The name of the public key to import. This must be unique, + otherwise an InvalidKeyPair.Duplicate exception is raised. + + @type keyfile: C{str} + @param keyfile: The filename with path of the public key to import. + + """ + with open(os.path.expanduser(keyfile)) as fh: + content = fh.read() + + base64key = base64.b64encode(content) + + params = {'Action': 'ImportKeyPair', + 'KeyName': name, + 'PublicKeyMaterial': base64key + } + + response = self.connection.request(self.path, params=params).object + key_name = findtext(element=response, xpath='keyName', + namespace=NAMESPACE) + key_fingerprint = findtext(element=response, xpath='keyFingerprint', + namespace=NAMESPACE) + return { + 'keyName': key_name, + 'keyFingerprint': key_fingerprint, + } + + def ex_describe_keypairs(self, name): + """Describes a keypiar by name + + @note: This is a non-standard extension API, and only works for EC2. + + @type name: C{str} + @param name: The name of the keypair to describe. 
+ + """ + + params = {'Action': 'DescribeKeyPairs', + 'KeyName.1': name + } + + response = self.connection.request(self.path, params=params).object + key_name = findattr(element=response, xpath='keySet/item/keyName', + namespace=NAMESPACE) + return { + 'keyName': key_name + } + + def ex_create_security_group(self, name, description): + """Creates a new Security Group + + @note: This is a non-standard extension API, and only works for EC2. + + @type name: C{str} + @param name: The name of the security group to Create. + This must be unique. + + @type description: C{str} + @param description: Human readable description of a Security Group. + """ + params = {'Action': 'CreateSecurityGroup', + 'GroupName': name, + 'GroupDescription': description} + return self.connection.request(self.path, params=params).object + + def ex_authorize_security_group_permissive(self, name): + """Edit a Security Group to allow all traffic. + + @note: This is a non-standard extension API, and only works for EC2. + + @type name: C{str} + @param name: The name of the security group to edit + """ + + results = [] + params = {'Action': 'AuthorizeSecurityGroupIngress', + 'GroupName': name, + 'IpProtocol': 'tcp', + 'FromPort': '0', + 'ToPort': '65535', + 'CidrIp': '0.0.0.0/0'} + try: + results.append( + self.connection.request(self.path, params=params.copy()).object + ) + except Exception, e: + if e.args[0].find("InvalidPermission.Duplicate") == -1: + raise e + params['IpProtocol'] = 'udp' + + try: + results.append( + self.connection.request(self.path, params=params.copy()).object + ) + except Exception, e: + if e.args[0].find("InvalidPermission.Duplicate") == -1: + raise e + + params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'}) + + try: + results.append( + self.connection.request(self.path, params=params.copy()).object + ) + except Exception, e: + if e.args[0].find("InvalidPermission.Duplicate") == -1: + raise e + return results + + def ex_list_availability_zones(self, 
only_available=True): + """ + Return a list of L{ExEC2AvailabilityZone} objects for the + current region. + + Note: This is an extension method and is only available for EC2 + driver. + + @keyword only_available: If true, return only availability zones + with state 'available' + @type only_available: C{string} + """ + params = {'Action': 'DescribeAvailabilityZones'} + + if only_available: + params.update({'Filter.0.Name': 'state'}) + params.update({'Filter.0.Value.0': 'available'}) + + params.update({'Filter.1.Name': 'region-name'}) + params.update({'Filter.1.Value.0': self.region_name}) + + result = self.connection.request(self.path, + params=params.copy()).object + + availability_zones = [] + for element in findall(element=result, + xpath='availabilityZoneInfo/item', + namespace=NAMESPACE): + name = findtext(element=element, xpath='zoneName', + namespace=NAMESPACE) + zone_state = findtext(element=element, xpath='zoneState', + namespace=NAMESPACE) + region_name = findtext(element=element, xpath='regionName', + namespace=NAMESPACE) + + availability_zone = ExEC2AvailabilityZone( + name=name, + zone_state=zone_state, + region_name=region_name + ) + availability_zones.append(availability_zone) + + return availability_zones + + def ex_describe_tags(self, node): + """ + Return a dictionary of tags for this instance. 
+ + @type node: C{Node} + @param node: Node instance + + @return dict Node tags + """ + params = {'Action': 'DescribeTags', + 'Filter.0.Name': 'resource-id', + 'Filter.0.Value.0': node.id, + 'Filter.1.Name': 'resource-type', + 'Filter.1.Value.0': 'instance', + } + + result = self.connection.request(self.path, + params=params.copy()).object + + tags = {} + for element in findall(element=result, xpath='tagSet/item', + namespace=NAMESPACE): + key = findtext(element=element, xpath='key', namespace=NAMESPACE) + value = findtext(element=element, + xpath='value', namespace=NAMESPACE) + + tags[key] = value + return tags + + def ex_create_tags(self, node, tags): + """ + Create tags for an instance. + + @type node: C{Node} + @param node: Node instance + @param tags: A dictionary or other mapping of strings to strings, + associating tag names with tag values. + """ + if not tags: + return + + params = {'Action': 'CreateTags', + 'ResourceId.0': node.id} + for i, key in enumerate(tags): + params['Tag.%d.Key' % i] = key + params['Tag.%d.Value' % i] = tags[key] + + self.connection.request(self.path, + params=params.copy()).object + + def ex_delete_tags(self, node, tags): + """ + Delete tags from an instance. + + @type node: C{Node} + @param node: Node instance + @param tags: A dictionary or other mapping of strings to strings, + specifying the tag names and tag values to be deleted. + """ + if not tags: + return + + params = {'Action': 'DeleteTags', + 'ResourceId.0': node.id} + for i, key in enumerate(tags): + params['Tag.%d.Key' % i] = key + params['Tag.%d.Value' % i] = tags[key] + + self.connection.request(self.path, + params=params.copy()).object + + def _add_instance_filter(self, params, node): + """ + Add instance filter to the provided params dictionary. + """ + params.update({ + 'Filter.0.Name': 'instance-id', + 'Filter.0.Value.0': node.id + }) + + def ex_describe_addresses(self, nodes): + """ + Return Elastic IP addresses for all the nodes in the provided list. 
+ + @type nodes: C{list} + @param nodes: List of C{Node} instances + + @return dict Dictionary where a key is a node ID and the value is a + list with the Elastic IP addresses associated with + this node. + """ + if not nodes: + return {} + + params = {'Action': 'DescribeAddresses'} + + if len(nodes) == 1: + self._add_instance_filter(params, nodes[0]) + + result = self.connection.request(self.path, + params=params.copy()).object + + node_instance_ids = [node.id for node in nodes] + nodes_elastic_ip_mappings = {} + + for node_id in node_instance_ids: + nodes_elastic_ip_mappings.setdefault(node_id, []) + for element in findall(element=result, xpath='addressesSet/item', + namespace=NAMESPACE): + instance_id = findtext(element=element, xpath='instanceId', + namespace=NAMESPACE) + ip_address = findtext(element=element, xpath='publicIp', + namespace=NAMESPACE) + + if instance_id not in node_instance_ids: + continue + + nodes_elastic_ip_mappings[instance_id].append(ip_address) + return nodes_elastic_ip_mappings + + def ex_describe_addresses_for_node(self, node): + """ + Return a list of Elastic IP addresses associated with this node. + + @type node: C{Node} + @param node: Node instance + + @return list Elastic IP addresses attached to this node. + """ + node_elastic_ips = self.ex_describe_addresses([node]) + return node_elastic_ips[node.id] + + def ex_modify_instance_attribute(self, node, attributes): + """ + Modify node attributes. + A list of valid attributes can be found at http://goo.gl/gxcj8 + + @type node: C{Node} + @param node: Node instance + + @type attributes: C{dict} + @param attributes: Dictionary with node attributes + + @return bool True on success, False otherwise. 
+ """ + attributes = attributes or {} + attributes.update({'InstanceId': node.id}) + + params = {'Action': 'ModifyInstanceAttribute'} + params.update(attributes) + + result = self.connection.request(self.path, + params=params.copy()).object + element = findtext(element=result, xpath='return', + namespace=NAMESPACE) + return element == 'true' + + def ex_change_node_size(self, node, new_size): + """ + Change the node size. + Note: Node must be turned of before changing the size. + + @type node: C{Node} + @param node: Node instance + + @type new_size: C{NodeSize} + @param new_size: NodeSize intance + + @return bool True on success, False otherwise. + """ + if 'instancetype' in node.extra: + current_instance_type = node.extra['instancetype'] + + if current_instance_type == new_size.id: + raise ValueError('New instance size is the same as' + + 'the current one') + + attributes = {'InstanceType.Value': new_size.id} + return self.ex_modify_instance_attribute(node, attributes) + + def create_node(self, **kwargs): + """Create a new EC2 node + + See L{NodeDriver.create_node} for more keyword args. 
+ Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com] + + @keyword ex_mincount: Minimum number of instances to launch + @type ex_mincount: C{int} + + @keyword ex_maxcount: Maximum number of instances to launch + @type ex_maxcount: C{int} + + @keyword ex_securitygroup: Name of security group + @type ex_securitygroup: C{str} + + @keyword ex_keyname: The name of the key pair + @type ex_keyname: C{str} + + @keyword ex_userdata: User data + @type ex_userdata: C{str} + + @keyword ex_clienttoken: Unique identifier to ensure idempotency + @type ex_clienttoken: C{str} + """ + image = kwargs["image"] + size = kwargs["size"] + params = { + 'Action': 'RunInstances', + 'ImageId': image.id, + 'MinCount': kwargs.get('ex_mincount', '1'), + 'MaxCount': kwargs.get('ex_maxcount', '1'), + 'InstanceType': size.id + } + + if 'ex_securitygroup' in kwargs: + if not isinstance(kwargs['ex_securitygroup'], list): + kwargs['ex_securitygroup'] = [kwargs['ex_securitygroup']] + for sig in range(len(kwargs['ex_securitygroup'])): + params['SecurityGroup.%d' % (sig + 1,)] = \ + kwargs['ex_securitygroup'][sig] + + if 'location' in kwargs: + availability_zone = getattr(kwargs['location'], + 'availability_zone', None) + if availability_zone: + if availability_zone.region_name != self.region_name: + raise AttributeError('Invalid availability zone: %s' + % (availability_zone.name)) + params['Placement.AvailabilityZone'] = availability_zone.name + + if 'ex_keyname' in kwargs: + params['KeyName'] = kwargs['ex_keyname'] + + if 'ex_userdata' in kwargs: + params['UserData'] = base64.b64encode(kwargs['ex_userdata']) + + if 'ex_clienttoken' in kwargs: + params['ClientToken'] = kwargs['ex_clienttoken'] + + object = self.connection.request(self.path, params=params).object + nodes = self._to_nodes(object, 'instancesSet/item') + + for node in nodes: + tags = {'Name': kwargs['name']} + + try: + self.ex_create_tags(node=node, tags=tags) + except Exception: + continue + + node.name = kwargs['name'] + 
node.extra.update({'tags': tags}) + + if len(nodes) == 1: + return nodes[0] + else: + return nodes + + def reboot_node(self, node): + """ + Reboot the node by passing in the node object + """ + params = {'Action': 'RebootInstances'} + params.update(self._pathlist('InstanceId', [node.id])) + res = self.connection.request(self.path, params=params).object + return self._get_boolean(res) + + def destroy_node(self, node): + """ + Destroy node by passing in the node object + """ + params = {'Action': 'TerminateInstances'} + params.update(self._pathlist('InstanceId', [node.id])) + res = self.connection.request(self.path, params=params).object + return self._get_terminate_boolean(res) + + +class IdempotentParamError(LibcloudError): + """ + Request used the same client token as a previous, + but non-identical request. + """ + + def __str__(self): + return repr(self.value) + + +class EC2EUConnection(EC2Connection): + """ + Connection class for EC2 in the Western Europe Region + """ + host = EC2_EU_WEST_HOST + + +class EC2EUNodeDriver(EC2NodeDriver): + """ + Driver class for EC2 in the Western Europe Region + """ + + api_name = 'ec2_eu_west' + name = 'Amazon EC2 (eu-west-1)' + friendly_name = 'Amazon Europe Ireland' + country = 'IE' + region_name = 'eu-west-1' + connectionCls = EC2EUConnection + _instance_types = EC2_EU_WEST_INSTANCE_TYPES + + +class EC2USWestConnection(EC2Connection): + """ + Connection class for EC2 in the Western US Region + """ + + host = EC2_US_WEST_HOST + + +class EC2USWestNodeDriver(EC2NodeDriver): + """ + Driver class for EC2 in the Western US Region + """ + + api_name = 'ec2_us_west' + name = 'Amazon EC2 (us-west-1)' + friendly_name = 'Amazon US N. California' + country = 'US' + region_name = 'us-west-1' + connectionCls = EC2USWestConnection + _instance_types = EC2_US_WEST_INSTANCE_TYPES + + +class EC2USWestOregonConnection(EC2Connection): + """ + Connection class for EC2 in the Western US Region (Oregon). 
+ """ + + host = EC2_US_WEST_OREGON_HOST + + +class EC2USWestOregonNodeDriver(EC2NodeDriver): + """ + Driver class for EC2 in the US West Oregon region. + """ + + api_name = 'ec2_us_west_oregon' + name = 'Amazon EC2 (us-west-2)' + friendly_name = 'Amazon US West - Oregon' + country = 'US' + region_name = 'us-west-2' + connectionCls = EC2USWestOregonConnection + _instance_types = EC2_US_WEST_OREGON_INSTANCE_TYPES + + +class EC2APSEConnection(EC2Connection): + """ + Connection class for EC2 in the Southeast Asia Pacific Region + """ + + host = EC2_AP_SOUTHEAST_HOST + + +class EC2APNEConnection(EC2Connection): + """ + Connection class for EC2 in the Northeast Asia Pacific Region + """ + + host = EC2_AP_NORTHEAST_HOST + + +class EC2APSENodeDriver(EC2NodeDriver): + """ + Driver class for EC2 in the Southeast Asia Pacific Region + """ + + api_name = 'ec2_ap_southeast' + name = 'Amazon EC2 (ap-southeast-1)' + friendly_name = 'Amazon Asia-Pacific Singapore' + country = 'SG' + region_name = 'ap-southeast-1' + connectionCls = EC2APSEConnection + _instance_types = EC2_AP_SOUTHEAST_INSTANCE_TYPES + + +class EC2APNENodeDriver(EC2NodeDriver): + """ + Driver class for EC2 in the Northeast Asia Pacific Region + """ + + api_name = 'ec2_ap_northeast' + name = 'Amazon EC2 (ap-northeast-1)' + friendly_name = 'Amazon Asia-Pacific Tokyo' + country = 'JP' + region_name = 'ap-northeast-1' + connectionCls = EC2APNEConnection + _instance_types = EC2_AP_NORTHEAST_INSTANCE_TYPES + + +class EucConnection(EC2Connection): + """ + Connection class for Eucalyptus + """ + + host = None + + +class EucNodeDriver(EC2NodeDriver): + """ + Driver class for Eucalyptus + """ + + name = 'Eucalyptus' + connectionCls = EucConnection + _instance_types = EC2_US_WEST_INSTANCE_TYPES + + def __init__(self, key, secret=None, secure=True, host=None, + path=None, port=None): + super(EucNodeDriver, self).__init__(key, secret, secure, host, port) + if path is None: + path = "/services/Eucalyptus" + self.path = path + + 
def list_locations(self): + raise NotImplementedError( + 'list_locations not implemented for this driver') + + def _add_instance_filter(self, params, node): + """ + Eucalyptus driver doesn't support filtering on instance id so this is a + no-op. + """ + pass + +# Nimbus clouds have 3 EC2-style instance types but their particular RAM +# allocations are configured by the admin +NIMBUS_INSTANCE_TYPES = { + 'm1.small': { + 'id': 'm1.small', + 'name': 'Small Instance', + 'ram': None, + 'disk': None, + 'bandwidth': None, + }, + 'm1.large': { + 'id': 'm1.large', + 'name': 'Large Instance', + 'ram': None, + 'disk': None, + 'bandwidth': None, + }, + 'm1.xlarge': { + 'id': 'm1.xlarge', + 'name': 'Extra Large Instance', + 'ram': None, + 'disk': None, + 'bandwidth': None, + }, + } + + +class NimbusConnection(EC2Connection): + """ + Connection class for Nimbus + """ + + host = None + + +class NimbusNodeDriver(EC2NodeDriver): + """ + Driver class for Nimbus + """ + + type = Provider.NIMBUS + name = 'Nimbus' + api_name = 'nimbus' + region_name = 'nimbus' + friendly_name = 'Nimbus Private Cloud' + connectionCls = NimbusConnection + _instance_types = NIMBUS_INSTANCE_TYPES + + def ex_describe_addresses(self, nodes): + """ + Nimbus doesn't support elastic IPs, so this is a passthrough + """ + nodes_elastic_ip_mappings = {} + for node in nodes: + # empty list per node + nodes_elastic_ip_mappings[node.id] = [] + return nodes_elastic_ip_mappings + + def ex_create_tags(self, node, tags): + """ + Nimbus doesn't support creating tags, so this is a passthrough + """ + pass diff --git a/trunk/libcloud/compute/drivers/ecp.py b/trunk/libcloud/compute/drivers/ecp.py new file mode 100644 index 0000000000..2305549b11 --- /dev/null +++ b/trunk/libcloud/compute/drivers/ecp.py @@ -0,0 +1,360 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Enomaly ECP driver +""" +import time +import base64 +import httplib +import socket +import os + +# JSON is included in the standard library starting with Python 2.6. For 2.5 +# and 2.4, there's a simplejson egg at: http://pypi.python.org/pypi/simplejson +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.base import Response, ConnectionUserAndKey +from libcloud.compute.base import NodeDriver, NodeSize, NodeLocation +from libcloud.compute.base import NodeImage, Node +from libcloud.compute.types import Provider, NodeState, InvalidCredsError +from libcloud.compute.base import is_private_subnet + +#Defaults +API_HOST = '' +API_PORT = (80,443) + +class ECPResponse(Response): + + def success(self): + if self.status == httplib.OK or self.status == httplib.CREATED: + try: + j_body = json.loads(self.body) + except ValueError: + self.error = "JSON response cannot be decoded." 
+ return False + if j_body['errno'] == 0: + return True + else: + self.error = "ECP error: %s" % j_body['message'] + return False + elif self.status == httplib.UNAUTHORIZED: + raise InvalidCredsError() + else: + self.error = "HTTP Error Code: %s" % self.status + return False + + def parse_error(self): + return self.error + + #Interpret the json responses - no error checking required + def parse_body(self): + return json.loads(self.body) + + def getheaders(self): + return self.headers + +class ECPConnection(ConnectionUserAndKey): + """ + Connection class for the Enomaly ECP driver + """ + + responseCls = ECPResponse + host = API_HOST + port = API_PORT + + def add_default_headers(self, headers): + #Authentication + username = self.user_id + password = self.key + base64string = base64.encodestring( + '%s:%s' % (username, password))[:-1] + authheader = "Basic %s" % base64string + headers['Authorization']= authheader + + return headers + + def _encode_multipart_formdata(self, fields): + """ + Based on Wade Leftwich's function: + http://code.activestate.com/recipes/146306/ + """ + #use a random boundary that does not appear in the fields + boundary = '' + while boundary in ''.join(fields): + boundary = os.urandom(16).encode('hex') + L = [] + for i in fields: + L.append('--' + boundary) + L.append('Content-Disposition: form-data; name="%s"' % i) + L.append('') + L.append(fields[i]) + L.append('--' + boundary + '--') + L.append('') + body = '\r\n'.join(L) + content_type = 'multipart/form-data; boundary=%s' % boundary + header = {'Content-Type':content_type} + return header, body + + +class ECPNodeDriver(NodeDriver): + """ + Enomaly ECP node driver + """ + + name = "Enomaly Elastic Computing Platform" + type = Provider.ECP + connectionCls = ECPConnection + + def list_nodes(self): + """ + Returns a list of all running Nodes + """ + + #Make the call + res = self.connection.request('/rest/hosting/vm/list').parse_body() + + #Put together a list of node objects + nodes=[] + for 
vm in res['vms']: + node = self._to_node(vm) + if not node == None: + nodes.append(node) + + #And return it + return nodes + + + def _to_node(self, vm): + """ + Turns a (json) dictionary into a Node object. + This returns only running VMs. + """ + + #Check state + if not vm['state'] == "running": + return None + + #IPs + iplist = [interface['ip'] for interface in vm['interfaces'] if interface['ip'] != '127.0.0.1'] + + public_ips = [] + private_ips = [] + for ip in iplist: + try: + socket.inet_aton(ip) + except socket.error: + # not a valid ip + continue + if is_private_subnet(ip): + private_ips.append(ip) + else: + public_ips.append(ip) + + #Create the node object + n = Node( + id=vm['uuid'], + name=vm['name'], + state=NodeState.RUNNING, + public_ip=public_ips, + private_ip=private_ips, + driver=self, + ) + + return n + + def reboot_node(self, node): + """ + Shuts down a VM and then starts it again. + """ + + #Turn the VM off + #Black magic to make the POST requests work + d = self.connection._encode_multipart_formdata({'action':'stop'}) + self.connection.request( + '/rest/hosting/vm/%s' % node.id, + method='POST', + headers=d[0], + data=d[1] + ).parse_body() + + node.state = NodeState.REBOOTING + #Wait for it to turn off and then continue (to turn it on again) + while node.state == NodeState.REBOOTING: + #Check if it's off. + response = self.connection.request( + '/rest/hosting/vm/%s' % node.id + ).parse_body() + if response['vm']['state'] == 'off': + node.state = NodeState.TERMINATED + else: + time.sleep(5) + + + #Turn the VM back on. + #Black magic to make the POST requests work + d = self.connection._encode_multipart_formdata({'action':'start'}) + self.connection.request( + '/rest/hosting/vm/%s' % node.id, + method='POST', + headers=d[0], + data=d[1] + ).parse_body() + + node.state = NodeState.RUNNING + return True + + def destroy_node(self, node): + """ + Shuts down and deletes a VM. 
+ """ + + #Shut down first + #Black magic to make the POST requests work + d = self.connection._encode_multipart_formdata({'action':'stop'}) + self.connection.request( + '/rest/hosting/vm/%s' % node.id, + method = 'POST', + headers=d[0], + data=d[1] + ).parse_body() + + #Ensure there was no applicationl level error + node.state = NodeState.PENDING + #Wait for the VM to turn off before continuing + while node.state == NodeState.PENDING: + #Check if it's off. + response = self.connection.request( + '/rest/hosting/vm/%s' % node.id + ).parse_body() + if response['vm']['state'] == 'off': + node.state = NodeState.TERMINATED + else: + time.sleep(5) + + #Delete the VM + #Black magic to make the POST requests work + d = self.connection._encode_multipart_formdata({'action':'delete'}) + self.connection.request( + '/rest/hosting/vm/%s' % (node.id), + method='POST', + headers=d[0], + data=d[1] + ).parse_body() + + return True + + def list_images(self, location=None): + """ + Returns a list of all package templates aka appiances aka images + """ + + #Make the call + response = self.connection.request( + '/rest/hosting/ptemplate/list').parse_body() + + #Turn the response into an array of NodeImage objects + images = [] + for ptemplate in response['packages']: + images.append(NodeImage( + id = ptemplate['uuid'], + name= '%s: %s' % (ptemplate['name'], ptemplate['description']), + driver = self, + )) + + return images + + + def list_sizes(self, location=None): + """ + Returns a list of all hardware templates + """ + + #Make the call + response = self.connection.request( + '/rest/hosting/htemplate/list').parse_body() + + #Turn the response into an array of NodeSize objects + sizes = [] + for htemplate in response['templates']: + sizes.append(NodeSize( + id = htemplate['uuid'], + name = htemplate['name'], + ram = htemplate['memory'], + disk = 0, #Disk is independent of hardware template + bandwidth = 0, #There is no way to keep track of bandwidth + price = 0, #The billing system is 
external + driver = self, + )) + + return sizes + + def list_locations(self): + """ + This feature does not exist in ECP. Returns hard coded dummy location. + """ + return [ + NodeLocation(id=1, + name="Cloud", + country='', + driver=self), + ] + + def create_node(self, **kwargs): + """ + Creates a virtual machine. + + Parameters: name (string), image (NodeImage), size (NodeSize) + """ + + #Find out what network to put the VM on. + res = self.connection.request('/rest/hosting/network/list').parse_body() + + #Use the first / default network because there is no way to specific + #which one + network = res['networks'][0]['uuid'] + + #Prepare to make the VM + data = { + 'name' : str(kwargs['name']), + 'package' : str(kwargs['image'].id), + 'hardware' : str(kwargs['size'].id), + 'network_uuid' : str(network), + 'disk' : '' + } + + #Black magic to make the POST requests work + d = self.connection._encode_multipart_formdata(data) + response = self.connection.request( + '/rest/hosting/vm/', + method='PUT', + headers = d[0], + data=d[1] + ).parse_body() + + #Create a node object and return it. + n = Node( + id=response['machine_id'], + name=data['name'], + state=NodeState.PENDING, + public_ip=[], + private_ip=[], + driver=self, + ) + + return n diff --git a/trunk/libcloud/compute/drivers/elastichosts.py b/trunk/libcloud/compute/drivers/elastichosts.py new file mode 100644 index 0000000000..516d26f463 --- /dev/null +++ b/trunk/libcloud/compute/drivers/elastichosts.py @@ -0,0 +1,156 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +ElasticHosts Driver +""" + +from libcloud.compute.types import Provider +from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver +from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection + + +# API end-points +API_ENDPOINTS = { + 'uk-1': { + 'name': 'London Peer 1', + 'country': 'United Kingdom', + 'host': 'api.lon-p.elastichosts.com' + }, + 'uk-2': { + 'name': 'London BlueSquare', + 'country': 'United Kingdom', + 'host': 'api.lon-b.elastichosts.com' + }, + 'us-1': { + 'name': 'San Antonio Peer 1', + 'country': 'United States', + 'host': 'api.sat-p.elastichosts.com' + }, +} + +# Default API end-point for the base connection class. 
+DEFAULT_ENDPOINT = 'us-1' + +# Retrieved from http://www.elastichosts.com/cloud-hosting/api +STANDARD_DRIVES = { + '38df0986-4d85-4b76-b502-3878ffc80161': { + 'uuid': '38df0986-4d85-4b76-b502-3878ffc80161', + 'description': 'CentOS Linux 5.5', + 'size_gunzipped': '3GB', + 'supports_deployment': True, + }, + '980cf63c-f21e-4382-997b-6541d5809629': { + 'uuid': '980cf63c-f21e-4382-997b-6541d5809629', + 'description': 'Debian Linux 5.0', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + 'aee5589a-88c3-43ef-bb0a-9cab6e64192d': { + 'uuid': 'aee5589a-88c3-43ef-bb0a-9cab6e64192d', + 'description': 'Ubuntu Linux 10.04', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0': { + 'uuid': 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0', + 'description': 'Windows Web Server 2008', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + '30824e97-05a4-410c-946e-2ba5a92b07cb': { + 'uuid': '30824e97-05a4-410c-946e-2ba5a92b07cb', + 'description': 'Windows Web Server 2008 R2', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + '9ecf810e-6ad1-40ef-b360-d606f0444671': { + 'uuid': '9ecf810e-6ad1-40ef-b360-d606f0444671', + 'description': 'Windows Web Server 2008 R2 + SQL Server', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + '10a88d1c-6575-46e3-8d2c-7744065ea530': { + 'uuid': '10a88d1c-6575-46e3-8d2c-7744065ea530', + 'description': 'Windows Server 2008 Standard R2', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47': { + 'uuid': '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47', + 'description': 'Windows Server 2008 Standard R2 + SQL Server', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, +} + + +class ElasticHostsBaseConnection(ElasticStackBaseConnection): + host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + + +class ElasticHostsBaseNodeDriver(ElasticStackBaseNodeDriver): + type = Provider.ELASTICHOSTS + api_name = 
'elastichosts' + name = 'ElasticHosts' + connectionCls = ElasticHostsBaseConnection + features = {"create_node": ["generates_password"]} + _standard_drives = STANDARD_DRIVES + + +class ElasticHostsUK1Connection(ElasticStackBaseConnection): + """ + Connection class for the ElasticHosts driver for + the London Peer 1 end-point + """ + + host = API_ENDPOINTS['uk-1']['host'] + + +class ElasticHostsUK1NodeDriver(ElasticHostsBaseNodeDriver): + """ + ElasticHosts node driver for the London Peer 1 end-point + """ + connectionCls = ElasticHostsUK1Connection + + +class ElasticHostsUK2Connection(ElasticStackBaseConnection): + """ + Connection class for the ElasticHosts driver for + the London Bluesquare end-point + """ + host = API_ENDPOINTS['uk-2']['host'] + + +class ElasticHostsUK2NodeDriver(ElasticHostsBaseNodeDriver): + """ + ElasticHosts node driver for the London Bluesquare end-point + """ + connectionCls = ElasticHostsUK2Connection + + +class ElasticHostsUS1Connection(ElasticStackBaseConnection): + """ + Connection class for the ElasticHosts driver for + the San Antonio Peer 1 end-point + """ + host = API_ENDPOINTS['us-1']['host'] + + +class ElasticHostsUS1NodeDriver(ElasticHostsBaseNodeDriver): + """ + ElasticHosts node driver for the San Antonio Peer 1 end-point + """ + connectionCls = ElasticHostsUS1Connection diff --git a/trunk/libcloud/compute/drivers/elasticstack.py b/trunk/libcloud/compute/drivers/elasticstack.py new file mode 100644 index 0000000000..f48a16bdaf --- /dev/null +++ b/trunk/libcloud/compute/drivers/elasticstack.py @@ -0,0 +1,452 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Base driver for the providers based on the ElasticStack platform - +http://www.elasticstack.com. +""" + +import re +import time +import base64 +import httplib + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.common.types import InvalidCredsError +from libcloud.compute.types import NodeState +from libcloud.compute.base import NodeDriver, NodeSize, Node +from libcloud.compute.base import NodeImage +from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment +from libcloud.compute.deployment import MultiStepDeployment + + +NODE_STATE_MAP = { + 'active': NodeState.RUNNING, + 'dead': NodeState.TERMINATED, + 'dumped': NodeState.TERMINATED, +} + +# Default timeout (in seconds) for the drive imaging process +IMAGING_TIMEOUT = 10 * 60 + +# ElasticStack doesn't specify special instance types, so I just specified +# some plans based on the other provider offerings. +# +# Basically for CPU any value between 500Mhz and 20000Mhz should work, +# 256MB to 8192MB for ram and 1GB to 2TB for disk. 
+INSTANCE_TYPES = { + 'small': { + 'id': 'small', + 'name': 'Small instance', + 'cpu': 2000, + 'memory': 1700, + 'disk': 160, + 'bandwidth': None, + }, + 'medium': { + 'id': 'medium', + 'name': 'Medium instance', + 'cpu': 3000, + 'memory': 4096, + 'disk': 500, + 'bandwidth': None, + }, + 'large': { + 'id': 'large', + 'name': 'Large instance', + 'cpu': 4000, + 'memory': 7680, + 'disk': 850, + 'bandwidth': None, + }, + 'extra-large': { + 'id': 'extra-large', + 'name': 'Extra Large instance', + 'cpu': 8000, + 'memory': 8192, + 'disk': 1690, + 'bandwidth': None, + }, + 'high-cpu-medium': { + 'id': 'high-cpu-medium', + 'name': 'High-CPU Medium instance', + 'cpu': 5000, + 'memory': 1700, + 'disk': 350, + 'bandwidth': None, + }, + 'high-cpu-extra-large': { + 'id': 'high-cpu-extra-large', + 'name': 'High-CPU Extra Large instance', + 'cpu': 20000, + 'memory': 7168, + 'disk': 1690, + 'bandwidth': None, + }, +} + + +class ElasticStackException(Exception): + def __str__(self): + return self.args[0] + + def __repr__(self): + return "" % (self.args[0]) + + +class ElasticStackResponse(JsonResponse): + def success(self): + if self.status == 401: + raise InvalidCredsError() + + return self.status >= 200 and self.status <= 299 + + def parse_error(self): + error_header = self.headers.get('x-elastic-error', '') + return 'X-Elastic-Error: %s (%s)' % (error_header, self.body.strip()) + + +class ElasticStackNodeSize(NodeSize): + def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver): + self.id = id + self.name = name + self.cpu = cpu + self.ram = ram + self.disk = disk + self.bandwidth = bandwidth + self.price = price + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.cpu, self.ram, + self.disk, self.bandwidth, self.price, self.driver.name)) + + +class ElasticStackBaseConnection(ConnectionUserAndKey): + """ + Base connection class for the ElasticStack driver + """ + + host = None + responseCls = ElasticStackResponse + + def 
add_default_headers(self, headers): + headers['Accept'] = 'application/json' + headers['Content-Type'] = 'application/json' + headers['Authorization'] = ('Basic %s' + % (base64.b64encode('%s:%s' + % (self.user_id, + self.key)))) + return headers + + +class ElasticStackBaseNodeDriver(NodeDriver): + connectionCls = ElasticStackBaseConnection + features = {"create_node": ["generates_password"]} + + def reboot_node(self, node): + # Reboots the node + response = self.connection.request( + action='/servers/%s/reset' % (node.id), + method='POST' + ) + return response.status == 204 + + def destroy_node(self, node): + # Kills the server immediately + response = self.connection.request( + action='/servers/%s/destroy' % (node.id), + method='POST' + ) + return response.status == 204 + + def list_images(self, location=None): + # Returns a list of available pre-installed system drive images + images = [] + for key, value in self._standard_drives.iteritems(): + image = NodeImage( + id=value['uuid'], + name=value['description'], + driver=self.connection.driver, + extra={ + 'size_gunzipped': value['size_gunzipped'] + } + ) + images.append(image) + + return images + + def list_sizes(self, location=None): + sizes = [] + for key, value in INSTANCE_TYPES.iteritems(): + size = ElasticStackNodeSize( + id=value['id'], + name=value['name'], cpu=value['cpu'], ram=value['memory'], + disk=value['disk'], bandwidth=value['bandwidth'], + price=self._get_size_price(size_id=value['id']), + driver=self.connection.driver + ) + sizes.append(size) + + return sizes + + def list_nodes(self): + # Returns a list of active (running) nodes + response = self.connection.request(action='/servers/info').object + + nodes = [] + for data in response: + node = self._to_node(data) + nodes.append(node) + + return nodes + + def create_node(self, **kwargs): + """Creates a ElasticStack instance + + See L{NodeDriver.create_node} for more keyword args. 
+ + @keyword name: String with a name for this new node (required) + @type name: C{string} + + @keyword smp: Number of virtual processors or None to calculate + based on the cpu speed + @type smp: C{int} + + @keyword nic_model: e1000, rtl8139 or virtio + (if not specified, e1000 is used) + @type nic_model: C{string} + + @keyword vnc_password: If set, the same password is also used for + SSH access with user toor, + otherwise VNC access is disabled and + no SSH login is possible. + @type vnc_password: C{string} + """ + size = kwargs['size'] + image = kwargs['image'] + smp = kwargs.get('smp', 'auto') + nic_model = kwargs.get('nic_model', 'e1000') + vnc_password = ssh_password = kwargs.get('vnc_password', None) + + if nic_model not in ('e1000', 'rtl8139', 'virtio'): + raise ElasticStackException('Invalid NIC model specified') + + # check that drive size is not smaller then pre installed image size + + # First we create a drive with the specified size + drive_data = {} + drive_data.update({'name': kwargs['name'], + 'size': '%sG' % (kwargs['size'].disk)}) + + response = self.connection.request(action='/drives/create', + data=json.dumps(drive_data), + method='POST').object + + if not response: + raise ElasticStackException('Drive creation failed') + + drive_uuid = response['drive'] + + # Then we image the selected pre-installed system drive onto it + response = self.connection.request( + action='/drives/%s/image/%s/gunzip' % (drive_uuid, image.id), + method='POST' + ) + + if response.status != 204: + raise ElasticStackException('Drive imaging failed') + + # We wait until the drive is imaged and then boot up the node + # (in most cases, the imaging process shouldn't take longer + # than a few minutes) + response = self.connection.request( + action='/drives/%s/info' % (drive_uuid) + ).object + + imaging_start = time.time() + while 'imaging' in response: + response = self.connection.request( + action='/drives/%s/info' % (drive_uuid) + ).object + + elapsed_time = time.time() 
- imaging_start + if ('imaging' in response + and elapsed_time >= IMAGING_TIMEOUT): + raise ElasticStackException('Drive imaging timed out') + + time.sleep(1) + + node_data = {} + node_data.update({'name': kwargs['name'], + 'cpu': size.cpu, + 'mem': size.ram, + 'ide:0:0': drive_uuid, + 'boot': 'ide:0:0', + 'smp': smp}) + node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'}) + + if vnc_password: + node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password}) + + response = self.connection.request( + action='/servers/create', data=json.dumps(node_data), + method='POST' + ).object + + if isinstance(response, list): + nodes = [self._to_node(node, ssh_password) for node in response] + else: + nodes = self._to_node(response, ssh_password) + + return nodes + + # Extension methods + def ex_set_node_configuration(self, node, **kwargs): + # Changes the configuration of the running server + valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', + '^boot$', '^nic:0:model$', '^nic:0:dhcp', + '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', + '^vnc:ip$', '^vnc:password$', '^vnc:tls', + '^ide:[0-1]:[0-1](:media)?$', + '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$') + + invalid_keys = [] + for key in kwargs.keys(): + matches = False + for regex in valid_keys: + if re.match(regex, key): + matches = True + break + if not matches: + invalid_keys.append(key) + + if invalid_keys: + raise ElasticStackException( + 'Invalid configuration key specified: %s' + % (',' .join(invalid_keys)) + ) + + response = self.connection.request( + action='/servers/%s/set' % (node.id), data=json.dumps(kwargs), + method='POST' + ) + + return (response.status == httplib.OK and response.body != '') + + def deploy_node(self, **kwargs): + """ + Create a new node, and start deployment. + + @keyword enable_root: If true, root password will be set to + vnc_password (this will enable SSH access) + and default 'toor' account will be deleted. 
+ @type enable_root: C{bool} + + For detailed description and keywords args, see + L{NodeDriver.deploy_node}. + """ + image = kwargs['image'] + vnc_password = kwargs.get('vnc_password', None) + enable_root = kwargs.get('enable_root', False) + + if not vnc_password: + raise ValueError('You need to provide vnc_password argument ' + 'if you want to use deployment') + + if (image in self._standard_drives + and not self._standard_drives[image]['supports_deployment']): + raise ValueError('Image %s does not support deployment' + % (image.id)) + + if enable_root: + script = ("unset HISTFILE;" + "echo root:%s | chpasswd;" + "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;" + "history -c") % vnc_password + root_enable_script = ScriptDeployment(script=script, + delete=True) + deploy = kwargs.get('deploy', None) + if deploy: + if (isinstance(deploy, ScriptDeployment) + or isinstance(deploy, SSHKeyDeployment)): + deployment = MultiStepDeployment([deploy, + root_enable_script]) + elif isinstance(deploy, MultiStepDeployment): + deployment = deploy + deployment.add(root_enable_script) + else: + deployment = root_enable_script + + kwargs['deploy'] = deployment + + if not kwargs.get('ssh_username', None): + kwargs['ssh_username'] = 'toor' + + return super(ElasticStackBaseNodeDriver, self).deploy_node(**kwargs) + + def ex_shutdown_node(self, node): + # Sends the ACPI power-down event + response = self.connection.request( + action='/servers/%s/shutdown' % (node.id), + method='POST' + ) + return response.status == 204 + + def ex_destroy_drive(self, drive_uuid): + # Deletes a drive + response = self.connection.request( + action='/drives/%s/destroy' % (drive_uuid), + method='POST' + ) + return response.status == 204 + + # Helper methods + def _to_node(self, data, ssh_password=None): + try: + state = NODE_STATE_MAP[data['status']] + except KeyError: + state = NodeState.UNKNOWN + + if isinstance(data['nic:0:dhcp'], list): + public_ip = data['nic:0:dhcp'] + else: + public_ip = 
[data['nic:0:dhcp']] + + extra = {'cpu': data['cpu'], + 'smp': data['smp'], + 'mem': data['mem'], + 'started': data['started']} + + if 'vnc:ip' in data and 'vnc:password' in data: + extra.update({'vnc_ip': data['vnc:ip'], + 'vnc_password': data['vnc:password']}) + + if ssh_password: + extra.update({'password': ssh_password}) + + node = Node(id=data['server'], name=data['name'], state=state, + public_ip=public_ip, private_ip=None, + driver=self.connection.driver, + extra=extra) + + return node diff --git a/trunk/libcloud/compute/drivers/gandi.py b/trunk/libcloud/compute/drivers/gandi.py new file mode 100644 index 0000000000..8542f188e7 --- /dev/null +++ b/trunk/libcloud/compute/drivers/gandi.py @@ -0,0 +1,398 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Gandi driver for compute +""" +from datetime import datetime + +from libcloud.common.gandi import BaseGandiDriver, GandiException, \ + NetworkInterface, IPAddress, Disk +from libcloud.compute.types import NodeState, Provider +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation + + +NODE_STATE_MAP = { + 'running': NodeState.RUNNING, + 'halted': NodeState.TERMINATED, + 'paused': NodeState.TERMINATED, + 'locked': NodeState.TERMINATED, + 'being_created': NodeState.PENDING, + 'invalid': NodeState.UNKNOWN, + 'legally_locked': NodeState.PENDING, + 'deleted': NodeState.TERMINATED +} + +NODE_PRICE_HOURLY_USD = 0.02 + + +class GandiNodeDriver(BaseGandiDriver, NodeDriver): + """ + Gandi node driver + + """ + api_name = 'gandi' + friendly_name = 'Gandi.net' + country = 'FR' + type = Provider.GANDI + # TODO : which features to enable ? + features = {} + + def _node_info(self, id): + try: + obj = self.connection.request('vm.info', int(id)) + return obj + except Exception, e: + raise GandiException(1003, e) + return None + + # Generic methods for driver + def _to_node(self, vm): + return Node( + id=vm['id'], + name=vm['hostname'], + state=NODE_STATE_MAP.get( + vm['state'], + NodeState.UNKNOWN + ), + public_ip=vm.get('ip'), + private_ip='', + driver=self, + extra={ + 'ai_active': vm.get('ai_active'), + 'datacenter_id': vm.get('datacenter_id'), + 'description': vm.get('description') + } + ) + + def _to_nodes(self, vms): + return [self._to_node(v) for v in vms] + + def list_nodes(self): + vms = self.connection.request('vm.list') + ips = self.connection.request('ip.list') + for vm in vms: + for ip in ips: + if vm['ifaces_id'][0] == ip['iface_id']: + vm['ip'] = ip.get('ip') + + nodes = self._to_nodes(vms) + return nodes + + def reboot_node(self, node): + op = self.connection.request('vm.reboot', int(node.id)) + op_res = self._wait_operation(op['id']) + vm = self.connection.request('vm.info', int(node.id)) + if 
vm['state'] == 'running': + return True + return False + + def destroy_node(self, node): + vm = self._node_info(node.id) + if vm['state'] == 'running': + # Send vm_stop and wait for accomplish + op_stop = self.connection.request('vm.stop', int(node.id)) + if not self._wait_operation(op_stop['id']): + raise GandiException(1010, 'vm.stop failed') + # Delete + op = self.connection.request('vm.delete', int(node.id)) + if self._wait_operation(op['id']): + return True + return False + + def deploy_node(self, **kwargs): + raise NotImplementedError( + 'deploy_node not implemented for gandi driver') + + def create_node(self, **kwargs): + """Create a new Gandi node + + @keyword name: String with a name for this new node (required) + @type name: str + + @keyword image: OS Image to boot on node. (required) + @type image: L{NodeImage} + + @keyword location: Which data center to create a node in. If empty, + undefined behavoir will be selected. (optional) + @type location: L{NodeLocation} + + @keyword size: The size of resources allocated to this node. 
+ (required) + @type size: L{NodeSize} + + @keyword login: user name to create for login on machine (required) + @type login: String + + @keyword password: password for user that'll be created (required) + @type password: String + + @keywork inet_family: version of ip to use, default 4 (optional) + @type inet_family: int + """ + + if kwargs.get('login') is None or kwargs.get('password') is None: + raise GandiException(1020, + 'login and password must be defined for node creation') + + location = kwargs.get('location') + if location and isinstance(location, NodeLocation): + dc_id = int(location.id) + else: + raise GandiException(1021, + 'location must be a subclass of NodeLocation') + + size = kwargs.get('size') + if not size and not isinstance(size, NodeSize): + raise GandiException(1022, + 'size must be a subclass of NodeSize') + + src_disk_id = int(kwargs['image'].id) + + disk_spec = { + 'datacenter_id': dc_id, + 'name': 'disk_%s' % kwargs['name'] + } + + vm_spec = { + 'datacenter_id': dc_id, + 'hostname': kwargs['name'], + 'login': kwargs['login'], + 'password': kwargs['password'], # TODO : use NodeAuthPassword + 'memory': int(size.ram), + 'cores': int(size.id), + 'bandwidth': int(size.bandwidth), + 'ip_version': kwargs.get('inet_family', 4), + } + + # Call create_from helper api. 
Return 3 operations : disk_create, + # iface_create,vm_create + (op_disk, op_iface, op_vm) = self.connection.request( + 'vm.create_from', + vm_spec, disk_spec, src_disk_id + ) + + # We wait for vm_create to finish + if self._wait_operation(op_vm['id']): + # after successful operation, get ip information + # thru first interface + node = self._node_info(op_vm['vm_id']) + ifaces = node.get('ifaces') + if len(ifaces) > 0: + ips = ifaces[0].get('ips') + if len(ips) > 0: + node['ip'] = ips[0]['ip'] + return self._to_node(node) + + return None + + def _to_image(self, img): + return NodeImage( + id=img['disk_id'], + name=img['label'], + driver=self.connection.driver + ) + + def list_images(self, location=None): + try: + if location: + filtering = {'datacenter_id': int(location.id)} + else: + filtering = {} + images = self.connection.request('image.list', filtering) + return [self._to_image(i) for i in images] + except Exception, e: + raise GandiException(1011, e) + + def _to_size(self, id, size): + return NodeSize( + id=id, + name='%s cores' % id, + ram=size['memory'], + disk=size['disk'], + bandwidth=size['bandwidth'], + price=(self._get_size_price(size_id='1') * id), + driver=self.connection.driver, + ) + + def list_sizes(self, location=None): + account = self.connection.request('account.info') + # Look for available shares, and return a list of share_definition + available_res = account['resources']['available'] + + if available_res['shares'] == 0: + return None + else: + share_def = account['share_definition'] + available_cores = available_res['cores'] + # 0.75 core given when creating a server + max_core = int(available_cores + 0.75) + shares = [] + if available_res['servers'] < 1: + # No server quota, no way + return shares + for i in range(1, max_core + 1): + share = {id: i} + share_is_available = True + for k in ['memory', 'disk', 'bandwidth']: + if share_def[k] * i > available_res[k]: + # We run out for at least one resource inside + share_is_available = False + 
else: + share[k] = share_def[k] * i + if share_is_available: + nb_core = i + shares.append(self._to_size(nb_core, share)) + return shares + + def _to_loc(self, loc): + return NodeLocation( + id=loc['id'], + name=loc['name'], + country=loc['country'], + driver=self + ) + + def list_locations(self): + res = self.connection.request("datacenter.list") + return [self._to_loc(l) for l in res] + + def _to_iface(self, iface): + ips = [] + for ip in iface.get('ips', []): + new_ip = IPAddress( + ip['id'], + NODE_STATE_MAP.get( + ip['state'], + NodeState.UNKNOWN + ), + ip['ip'], + self.connection.driver, + version=ip.get('version'), + extra={'reverse': ip['reverse']} + ) + ips.append(new_ip) + return NetworkInterface( + iface['id'], + NODE_STATE_MAP.get( + iface['state'], + NodeState.UNKNOWN + ), + mac_address=None, + driver=self.connection.driver, + ips=ips, + node_id=iface.get('vm_id'), + extra={'bandwidth': iface['bandwidth']}, + ) + + def _to_ifaces(self, ifaces): + return [self._to_iface(i) for i in ifaces] + + def ex_list_interfaces(self): + """Specific method to list network interfaces""" + ifaces = self.connection.request('iface.list') + ips = self.connection.request('ip.list') + for iface in ifaces: + iface['ips'] = filter(lambda i: i['iface_id'] == iface['id'], ips) + return self._to_ifaces(ifaces) + + def _to_disk(self, element): + disk = Disk( + id=element['id'], + state=NODE_STATE_MAP.get( + element['state'], + NodeState.UNKNOWN + ), + name=element['name'], + driver=self.connection.driver, + size=element['size'], + extra={'can_snapshot': element['can_snapshot']} + ) + return disk + + def _to_disks(self, elements): + return [self._to_disk(el) for el in elements] + + def ex_list_disks(self): + """Specific method to list all disk""" + res = self.connection.request('disk.list', {}) + disks = [] + return self._to_disks(res) + + def ex_node_attach_disk(self, node, disk): + """Specific method to attach a disk to a node""" + op = self.connection.request('vm.disk_attach', 
+ int(node.id), int(disk.id)) + if self._wait_operation(op['id']): + return True + return False + + def ex_node_detach_disk(self, node, disk): + """Specific method to detach a disk from a node""" + op = self.connection.request('vm.disk_detach', + int(node.id), int(disk.id)) + if self._wait_operation(op['id']): + return True + return False + + def ex_node_attach_interface(self, node, iface): + """Specific method to attach an interface to a node""" + op = self.connection.request('vm.iface_attach', + int(node.id), int(iface.id)) + if self._wait_operation(op['id']): + return True + return False + + def ex_node_detach_interface(self, node, iface): + """Specific method to detach an interface from a node""" + op = self.connection.request('vm.iface_detach', + int(node.id), int(iface.id)) + if self._wait_operation(op['id']): + return True + return False + + def ex_snapshot_disk(self, disk, name=None): + """Specific method to make a snapshot of a disk""" + if not disk.extra.get('can_snapshot'): + raise GandiException(1021, "Disk %s can't snapshot" % disk.id) + if not name: + suffix = datetime.today().strftime("%Y%m%d") + name = "snap_%s" % (suffix) + op = self.connection.request('disk.create_from', + { + 'name': name, + 'type': 'snapshot', + }, + int(disk.id), + ) + if self._wait_operation(op['id']): + return True + return False + + def ex_update_disk(self, disk, new_size=None, new_name=None): + """Specific method to update size or name of a disk + WARNING: if a server is attached it'll be rebooted + """ + params = {} + if new_size: + params.update({'size': new_size}) + if new_name: + params.update({'name': new_name}) + op = self.connection.request('disk.update', + int(disk.id), + params) + if self._wait_operation(op['id']): + return True + return False diff --git a/trunk/libcloud/compute/drivers/gogrid.py b/trunk/libcloud/compute/drivers/gogrid.py new file mode 100644 index 0000000000..a1f8a5bd18 --- /dev/null +++ b/trunk/libcloud/compute/drivers/gogrid.py @@ -0,0 +1,401 @@ 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GoGrid driver
"""
import time
import hashlib
import copy

from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.gogrid import GoGridConnection, BaseGoGridDriver
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation

# GoGrid server state name -> libcloud NodeState.
STATE = {
    "Starting": NodeState.PENDING,
    "On": NodeState.RUNNING,
    "On/Saving": NodeState.RUNNING,
    "Off": NodeState.PENDING,
    "Restarting": NodeState.REBOOTING,
    "Saving": NodeState.PENDING,
    "Restoring": NodeState.PENDING,
}

# Static size catalog; GoGrid identifies sizes by their RAM amount.
GOGRID_INSTANCE_TYPES = {
    '512MB': {'id': '512MB',
              'name': '512MB',
              'ram': 512,
              'disk': 30,
              'bandwidth': None},
    '1GB': {'id': '1GB',
            'name': '1GB',
            'ram': 1024,
            'disk': 60,
            'bandwidth': None},
    '2GB': {'id': '2GB',
            'name': '2GB',
            'ram': 2048,
            'disk': 120,
            'bandwidth': None},
    '4GB': {'id': '4GB',
            'name': '4GB',
            'ram': 4096,
            'disk': 240,
            'bandwidth': None},
    '8GB': {'id': '8GB',
            'name': '8GB',
            'ram': 8192,
            'disk': 480,
            'bandwidth': None},
    '16GB': {'id': '16GB',
             'name': '16GB',
             'ram': 16384,
             'disk': 960,
             'bandwidth': None},
    '24GB': {'id': '24GB',
             'name': '24GB',
             'ram': 24576,
             'disk': 960,
             'bandwidth': None},
}


class GoGridNode(Node):
    # Generating uuid based on public ip to get around missing id on
    # create_node in gogrid api
    #
    # Used public ip since it is not mutable and specified at create time,
    # so uuid of node should not change after add is completed
    def get_uuid(self):
        return hashlib.sha1(
            "%s:%d" % (self.public_ip, self.driver.type)
        ).hexdigest()


class GoGridNodeDriver(BaseGoGridDriver, NodeDriver):
    """
    GoGrid node driver
    """

    connectionCls = GoGridConnection
    type = Provider.GOGRID
    api_name = 'gogrid'
    name = 'GoGrid'
    features = {"create_node": ["generates_password"]}

    _instance_types = GOGRID_INSTANCE_TYPES

    def _get_state(self, element):
        # Narrowed from a bare ``except``: only a missing/odd 'state'
        # entry should fall back to UNKNOWN.
        try:
            return STATE[element['state']['name']]
        except (KeyError, TypeError):
            return NodeState.UNKNOWN

    def _get_ip(self, element):
        return element.get('ip').get('ip')

    def _get_id(self, element):
        return element.get('id')

    def _to_node(self, element, password=None):
        state = self._get_state(element)
        ip = self._get_ip(element)
        id = self._get_id(element)
        n = GoGridNode(id=id,
                       name=element['name'],
                       state=state,
                       public_ip=[ip],
                       private_ip=[],
                       extra={'ram': element.get('ram').get('name'),
                              'description': element.get('description', '')},
                       driver=self.connection.driver)
        if password:
            n.extra['password'] = password

        return n

    def _to_image(self, element):
        n = NodeImage(id=element['id'],
                      name=element['friendlyName'],
                      driver=self.connection.driver)
        return n

    def _to_images(self, object):
        return [self._to_image(el) for el in object['list']]

    def _to_location(self, element):
        location = NodeLocation(id=element['id'],
                                name=element['name'],
                                country="US",
                                driver=self.connection.driver)
        return location

    def _to_locations(self, object):
        return [self._to_location(el) for el in object['list']]

    def list_images(self, location=None):
        params = {}
        if location is not None:
            params["datacenter"] = location.id
        images = self._to_images(
            self.connection.request('/api/grid/image/list', params).object)
        return images

    def list_nodes(self):
        passwords_map = {}

        res = self._server_list()
        try:
            for password in self._password_list()['list']:
                try:
                    passwords_map[password['server']['id']] = \
                        password['password']
                except KeyError:
                    pass
        except InvalidCredsError:
            # some gogrid API keys don't have permission to access the
            # password list.
            pass

        return [self._to_node(el, passwords_map.get(el.get('id')))
                for el
                in res['list']]

    def reboot_node(self, node):
        id = node.id
        power = 'restart'
        res = self._server_power(id, power)
        if not res.success():
            raise Exception(res.parse_error())
        return True

    def destroy_node(self, node):
        id = node.id
        res = self._server_delete(id)
        if not res.success():
            raise Exception(res.parse_error())
        return True

    def _server_list(self):
        return self.connection.request('/api/grid/server/list').object

    def _password_list(self):
        return self.connection.request('/api/support/password/list').object

    def _server_power(self, id, power):
        # power in ['start', 'stop', 'restart']
        params = {'id': id, 'power': power}
        return self.connection.request("/api/grid/server/power", params,
                                       method='POST')

    def _server_delete(self, id):
        params = {'id': id}
        return self.connection.request("/api/grid/server/delete", params,
                                       method='POST')

    def _get_first_ip(self, location=None):
        ips = self.ex_list_ips(public=True, assigned=False, location=location)
        try:
            return ips[0].ip
        except IndexError:
            raise LibcloudError('No public unassigned IPs left',
                                GoGridNodeDriver)

    def list_sizes(self, location=None):
        sizes = []
        for key, values in self._instance_types.items():
            attributes = copy.deepcopy(values)
            attributes.update({'price': self._get_size_price(size_id=key)})
            sizes.append(NodeSize(driver=self.connection.driver, **attributes))

        return sizes

    def list_locations(self):
        locations = self._to_locations(
            self.connection.request('/api/common/lookup/list',
                                    params={'lookup': 'ip.datacenter'}).object)
        return locations

    def ex_create_node_nowait(self, **kwargs):
        """Don't block until GoGrid allocates id for a node
        but return right away with id == None.

        The existance of this method is explained by the fact
        that GoGrid assigns id to a node only few minutes after
        creation."""
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']
        try:
            ip = kwargs['ex_ip']
        except KeyError:
            ip = self._get_first_ip(kwargs.get('location'))

        params = {'name': name,
                  'image': image.id,
                  'description': kwargs.get('ex_description', ''),
                  'server.ram': size.id,
                  'ip': ip}

        object = self.connection.request('/api/grid/server/add',
                                         params=params, method='POST').object
        node = self._to_node(object['list'][0])

        return node

    def create_node(self, **kwargs):
        """Create a new GoGird node

        See L{NodeDriver.create_node} for more keyword args.

        @keyword ex_description: Description of a Node
        @type ex_description: C{string}
        @keyword ex_ip: Public IP address to use for a Node. If not
                        specified, first available IP address will be picked
        @type ex_ip: C{string}
        """
        node = self.ex_create_node_nowait(**kwargs)

        timeout = 60 * 20
        waittime = 0
        interval = 2 * 60

        # Poll until GoGrid assigns an id to the freshly created node.
        while node.id is None and waittime < timeout:
            nodes = self.list_nodes()

            for i in nodes:
                if i.public_ip[0] == node.public_ip[0] and i.id is not None:
                    return i

            waittime += interval
            time.sleep(interval)

        # BUG FIX: the original tested ``if id is None`` - the builtin
        # ``id`` function is never None, so the timeout error could
        # never be raised.  Test the node's id instead.
        if node.id is None:
            raise Exception("Wasn't able to wait for id allocation for "
                            "the node %s" % str(node))

        return node

    def ex_save_image(self, node, name):
        """Create an image for node.

        Please refer to GoGrid documentation to get info
        how prepare a node for image creation:

        http://wiki.gogrid.com/wiki/index.php/MyGSI

        @keyword node: node to use as a base for image
        @type node: L{Node}
        @keyword name: name for new image
        @type name: C{string}
        """
        params = {'server': node.id,
                  'friendlyName': name}
        object = self.connection.request('/api/grid/image/save',
                                         params=params,
                                         method='POST').object

        return self._to_images(object)[0]

    def ex_edit_node(self, **kwargs):
        """Change attributes of a node.

        @keyword node: node to be edited
        @type node: L{Node}
        @keyword size: new size of a node
        @type size: L{NodeSize}
        @keyword ex_description: new description of a node
        @type ex_description: C{string}
        """
        node = kwargs['node']
        size = kwargs['size']

        params = {'id': node.id,
                  'server.ram': size.id}

        if 'ex_description' in kwargs:
            params['description'] = kwargs['ex_description']

        object = self.connection.request('/api/grid/server/edit',
                                         params=params).object

        return self._to_node(object['list'][0])

    def ex_edit_image(self, **kwargs):
        """Edit metadata of a server image.

        @keyword image: image to be edited
        @type image: L{NodeImage}
        @keyword public: should be the image public?
        @type public: C{bool}
        @keyword ex_description: description of the image (optional)
        @type ex_description: C{string}
        @keyword name: name of the image
        @type name C{string}

        """

        image = kwargs['image']
        public = kwargs['public']

        params = {'id': image.id,
                  'isPublic': str(public).lower()}

        if 'ex_description' in kwargs:
            params['description'] = kwargs['ex_description']

        if 'name' in kwargs:
            params['friendlyName'] = kwargs['name']

        object = self.connection.request('/api/grid/image/edit',
                                         params=params).object

        return self._to_image(object['list'][0])

    def ex_list_ips(self, **kwargs):
        """Return list of IP addresses assigned to
        the account.

        @keyword public: set to True to list only
                         public IPs or False to list only
                         private IPs. Set to None or not specify
                         at all not to filter by type
        @type public: C{bool}
        @keyword assigned: set to True to list only addresses
                           assigned to servers, False to list unassigned
                           addresses and set to None or don't set at all
                           not no filter by state
        @type assigned: C{bool}
        @keyword location: filter IP addresses by location
        @type location: L{NodeLocation}
        @return: C{list} of L{GoGridIpAddress}es
        """

        params = {}

        if "public" in kwargs and kwargs["public"] is not None:
            params["ip.type"] = {True: "Public",
                                 False: "Private"}[kwargs["public"]]
        if "assigned" in kwargs and kwargs["assigned"] is not None:
            params["ip.state"] = {True: "Assigned",
                                  False: "Unassigned"}[kwargs["assigned"]]
        if "location" in kwargs and kwargs['location'] is not None:
            params['datacenter'] = kwargs['location'].id

        ips = self._to_ips(
            self.connection.request('/api/grid/ip/list',
                                    params=params).object)
        return ips
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Driver for the IBM Developer Cloud.
"""
import base64
import urllib

from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import NodeDriver, Node, NodeImage, NodeSize, NodeLocation, NodeAuthSSHKey

HOST = 'www-147.ibm.com'
REST_BASE = '/computecloud/enterprise/api/rest/20100331'


class IBMResponse(XmlResponse):
    """XML response wrapper for the IBM Developer Cloud REST API."""

    def success(self):
        return int(self.status) == 200

    def parse_error(self):
        # A 401 always means bad credentials; prefer the response body
        # as the message when one is present.
        if int(self.status) == 401:
            if self.body:
                raise InvalidCredsError(self.body)
            raise InvalidCredsError(str(self.status) + ': ' + self.error)
        return self.body


class IBMConnection(ConnectionUserAndKey):
    """
    Connection class for the IBM Developer Cloud driver
    """

    host = HOST
    responseCls = IBMResponse

    def add_default_headers(self, headers):
        # XML payloads with HTTP Basic authentication; callers may
        # override Content-Type (e.g. for form-encoded POSTs).
        headers['Accept'] = 'text/xml'
        headers['Authorization'] = ('Basic %s' % (base64.b64encode('%s:%s' % (self.user_id, self.key))))
        headers.setdefault('Content-Type', 'text/xml')
        return headers

    def encode_data(self, data):
        return urllib.urlencode(data)


class IBMNodeDriver(NodeDriver):
    """
    IBM Developer Cloud node driver.
    """
    connectionCls = IBMConnection
    type = Provider.IBM
    name = "IBM Developer Cloud"

    # IBM numeric instance status -> libcloud NodeState.
    NODE_STATE_MAP = {0: NodeState.PENDING,      # New
                      1: NodeState.PENDING,      # Provisioning
                      2: NodeState.TERMINATED,   # Failed
                      3: NodeState.TERMINATED,   # Removed
                      4: NodeState.TERMINATED,   # Rejected
                      5: NodeState.RUNNING,      # Active
                      6: NodeState.UNKNOWN,      # Unknown
                      7: NodeState.PENDING,      # Deprovisioning
                      8: NodeState.REBOOTING,    # Restarting
                      9: NodeState.PENDING,      # Starting
                      10: NodeState.PENDING,     # Stopping
                      11: NodeState.TERMINATED,  # Stopped
                      12: NodeState.PENDING,     # Deprovision Pending
                      13: NodeState.PENDING,     # Restart Pending
                      14: NodeState.PENDING,     # Attaching
                      15: NodeState.PENDING}     # Detaching

    def create_node(self, **kwargs):
        """
        Creates a node in the IBM Developer Cloud.

        See L{NodeDriver.create_node} for more keyword args.

        @keyword ex_configurationData: Image-specific configuration parameters.
                                       Configuration parameters are defined in
                                       the parameters.xml file.  The URL to
                                       this file is defined in the NodeImage
                                       at extra[parametersURL].
        @type ex_configurationData: C{dict}
        """
        # Build the form-encoded request body.
        data = {
            'name': kwargs['name'],
            'imageID': kwargs['image'].id,
            'instanceType': kwargs['size'].id,
        }
        if 'location' in kwargs:
            data['location'] = kwargs['location'].id
        else:
            data['location'] = '1'
        if 'auth' in kwargs and isinstance(kwargs['auth'], NodeAuthSSHKey):
            data['publicKey'] = kwargs['auth'].pubkey
        if 'ex_configurationData' in kwargs:
            data.update(kwargs['ex_configurationData'])

        # Fire off the instance-creation request.
        resp = self.connection.request(action=REST_BASE + '/instances',
                                       headers={'Content-Type': 'application/x-www-form-urlencoded'},
                                       method='POST',
                                       data=data).object
        return self._to_nodes(resp)[0]

    def destroy_node(self, node):
        url = REST_BASE + '/instances/%s' % (node.id)
        resp = self.connection.request(action=url, method='DELETE')
        return int(resp.status) == 200

    def reboot_node(self, node):
        url = REST_BASE + '/instances/%s' % (node.id)
        resp = self.connection.request(action=url,
                                       method='PUT',
                                       headers={'Content-Type': 'application/x-www-form-urlencoded'},
                                       data={'state': 'restart'})
        return int(resp.status) == 200

    def list_nodes(self):
        doc = self.connection.request(REST_BASE + '/instances').object
        return self._to_nodes(doc)

    def list_images(self, location=None):
        doc = self.connection.request(REST_BASE + '/offerings/image').object
        return self._to_images(doc)

    def list_sizes(self, location=None):
        # The size catalog is static; the API publishes no CPU/RAM/disk
        # or price metadata here, hence the None fields.
        catalog = [
            ('BRZ32.1/2048/60*175', 'Bronze 32 bit'),
            ('BRZ64.2/4096/60*500*350', 'Bronze 64 bit'),
            ('COP32.1/2048/60', 'Copper 32 bit'),
            ('COP64.2/4096/60', 'Copper 64 bit'),
            ('SLV32.2/4096/60*350', 'Silver 32 bit'),
            ('SLV64.4/8192/60*500*500', 'Silver 64 bit'),
            ('GLD32.4/4096/60*350', 'Gold 32 bit'),
            ('GLD64.8/16384/60*500*500', 'Gold 64 bit'),
            ('PLT64.16/16384/60*500*500*500*500', 'Platinum 64 bit'),
        ]
        return [NodeSize(size_id, size_name, None, None, None, None,
                         self.connection.driver)
                for size_id, size_name in catalog]

    def list_locations(self):
        doc = self.connection.request(REST_BASE + '/locations').object
        return self._to_locations(doc)

    def _to_nodes(self, doc):
        return [self._to_node(instance) for instance in doc.findall('Instance')]

    def _to_node(self, instance):
        return Node(id=instance.findtext('ID'),
                    name=instance.findtext('Name'),
                    state=self.NODE_STATE_MAP[int(instance.findtext('Status'))],
                    public_ip=instance.findtext('IP'),
                    private_ip=None,
                    driver=self.connection.driver)

    def _to_images(self, doc):
        return [self._to_image(image) for image in doc.findall('Image')]

    def _to_image(self, image):
        return NodeImage(id=image.findtext('ID'),
                         name=image.findtext('Name'),
                         driver=self.connection.driver,
                         extra={'parametersURL': image.findtext('Manifest')})

    def _to_locations(self, doc):
        return [self._to_location(location) for location in doc.findall('Location')]

    def _to_location(self, location):
        # NOTE: country currently hardcoded
        return NodeLocation(id=location.findtext('ID'),
                            name=location.findtext('Name'),
                            country='US',
                            driver=self.connection.driver)
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""libcloud driver for the Linode(R) API + +This driver implements all libcloud functionality for the Linode API. Since the +API is a bit more fine-grained, create_node abstracts a significant amount of +work (and may take a while to run). + +Linode home page http://www.linode.com/ +Linode API documentation http://www.linode.com/api/ +Alternate bindings for reference http://github.com/tjfontaine/linode-python + +Linode(R) is a registered trademark of Linode, LLC. + +""" + +try: + import simplejson as json +except ImportError: + import json + +import itertools +import os + +from copy import copy + +from libcloud.common.linode import (API_ROOT, LinodeException, LinodeConnection, + LINODE_PLAN_IDS) +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation +from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey +from libcloud.compute.base import NodeImage + +class LinodeNodeDriver(NodeDriver): + """libcloud driver for the Linode API + + Rough mapping of which is which: + + list_nodes linode.list + reboot_node linode.reboot + destroy_node linode.delete + create_node linode.create, linode.update, + linode.disk.createfromdistribution, + linode.disk.create, linode.config.create, + linode.ip.addprivate, linode.boot + list_sizes avail.linodeplans + list_images avail.distributions + list_locations avail.datacenters + + For more information on the Linode API, be sure to read the reference: + + http://www.linode.com/api/ + """ + type = Provider.LINODE + name = "Linode" + connectionCls 
= LinodeConnection + _linode_plan_ids = LINODE_PLAN_IDS + + def __init__(self, key): + """Instantiate the driver with the given API key + + @keyword key: the API key to use + @type key: C{str}""" + self.datacenter = None + NodeDriver.__init__(self, key) + + # Converts Linode's state from DB to a NodeState constant. + LINODE_STATES = { + -2: NodeState.UNKNOWN, # Boot Failed + -1: NodeState.PENDING, # Being Created + 0: NodeState.PENDING, # Brand New + 1: NodeState.RUNNING, # Running + 2: NodeState.TERMINATED, # Powered Off + 3: NodeState.REBOOTING, # Shutting Down + 4: NodeState.UNKNOWN # Reserved + } + + def list_nodes(self): + """List all Linodes that the API key can access + + This call will return all Linodes that the API key in use has access to. + If a node is in this list, rebooting will work; however, creation and + destruction are a separate grant. + + @return: C{list} of L{Node} objects that the API key can access""" + params = { "api_action": "linode.list" } + data = self.connection.request(API_ROOT, params=params).objects[0] + return self._to_nodes(data) + + def reboot_node(self, node): + """Reboot the given Linode + + Will issue a shutdown job followed by a boot job, using the last booted + configuration. In most cases, this will be the only configuration. + + @keyword node: the Linode to reboot + @type node: L{Node}""" + params = { "api_action": "linode.reboot", "LinodeID": node.id } + self.connection.request(API_ROOT, params=params) + return True + + def destroy_node(self, node): + """Destroy the given Linode + + Will remove the Linode from the account and issue a prorated credit. A + grant for removing Linodes from the account is required, otherwise this + method will fail. + + In most cases, all disk images must be removed from a Linode before the + Linode can be removed; however, this call explicitly skips those + safeguards. There is no going back from this method. 
+ + @keyword node: the Linode to destroy + @type node: L{Node}""" + params = { "api_action": "linode.delete", "LinodeID": node.id, + "skipChecks": True } + self.connection.request(API_ROOT, params=params) + return True + + def create_node(self, **kwargs): + """Create a new Linode, deploy a Linux distribution, and boot + + This call abstracts much of the functionality of provisioning a Linode + and getting it booted. A global grant to add Linodes to the account is + required, as this call will result in a billing charge. + + Note that there is a safety valve of 5 Linodes per hour, in order to + prevent a runaway script from ruining your day. + + @keyword name: the name to assign the Linode (mandatory) + @type name: C{str} + + @keyword image: which distribution to deploy on the Linode (mandatory) + @type image: L{NodeImage} + + @keyword size: the plan size to create (mandatory) + @type size: L{NodeSize} + + @keyword auth: an SSH key or root password (mandatory) + @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + + @keyword location: which datacenter to create the Linode in + @type location: L{NodeLocation} + + @keyword ex_swap: size of the swap partition in MB (128) + @type ex_swap: C{int} + + @keyword ex_rsize: size of the root partition in MB (plan size - swap). + @type ex_rsize: C{int} + + @keyword ex_kernel: a kernel ID from avail.kernels (Latest 2.6 Stable). 
+ @type ex_kernel: C{str} + + @keyword ex_payment: one of 1, 12, or 24; subscription length (1) + @type ex_payment: C{int} + + @keyword ex_comment: a small comment for the configuration (libcloud) + @type ex_comment: C{str} + + @keyword ex_private: whether or not to request a private IP (False) + @type ex_private: C{bool} + + @keyword lconfig: what to call the configuration (generated) + @type lconfig: C{str} + + @keyword lroot: what to call the root image (generated) + @type lroot: C{str} + + @keyword lswap: what to call the swap space (generated) + @type lswap: C{str} + + @return: a L{Node} representing the newly-created Linode + """ + name = kwargs["name"] + image = kwargs["image"] + size = kwargs["size"] + auth = kwargs["auth"] + + # Pick a location (resolves LIBCLOUD-41 in JIRA) + if "location" in kwargs: + chosen = kwargs["location"].id + elif self.datacenter: + chosen = self.datacenter + else: + raise LinodeException(0xFB, "Need to select a datacenter first") + + # Step 0: Parameter validation before we purchase + # We're especially careful here so we don't fail after purchase, rather + # than getting halfway through the process and having the API fail. 
+ + # Plan ID + plans = self.list_sizes() + if size.id not in [p.id for p in plans]: + raise LinodeException(0xFB, "Invalid plan ID -- avail.plans") + + # Payment schedule + payment = "1" if "ex_payment" not in kwargs else str(kwargs["ex_payment"]) + if payment not in ["1", "12", "24"]: + raise LinodeException(0xFB, "Invalid subscription (1, 12, 24)") + + ssh = None + root = None + # SSH key and/or root password + if isinstance(auth, NodeAuthSSHKey): + ssh = auth.pubkey + elif isinstance(auth, NodeAuthPassword): + root = auth.password + + if not ssh and not root: + raise LinodeException(0xFB, "Need SSH key or root password") + if not root is None and len(root) < 6: + raise LinodeException(0xFB, "Root password is too short") + + # Swap size + try: swap = 128 if "ex_swap" not in kwargs else int(kwargs["ex_swap"]) + except: raise LinodeException(0xFB, "Need an integer swap size") + + # Root partition size + imagesize = (size.disk - swap) if "ex_rsize" not in kwargs else \ + int(kwargs["ex_rsize"]) + if (imagesize + swap) > size.disk: + raise LinodeException(0xFB, "Total disk images are too big") + + # Distribution ID + distros = self.list_images() + if image.id not in [d.id for d in distros]: + raise LinodeException(0xFB, + "Invalid distro -- avail.distributions") + + # Kernel + if "ex_kernel" in kwargs: + kernel = kwargs["ex_kernel"] + else: + if image.extra['64bit']: + kernel = 111 if image.extra['pvops'] else 107 + else: + kernel = 110 if image.extra['pvops'] else 60 + params = { "api_action": "avail.kernels" } + kernels = self.connection.request(API_ROOT, params=params).objects[0] + if kernel not in [z["KERNELID"] for z in kernels]: + raise LinodeException(0xFB, "Invalid kernel -- avail.kernels") + + # Comments + comments = "Created by Apache libcloud " if \ + "ex_comment" not in kwargs else kwargs["ex_comment"] + + # Labels + label = { + "lconfig": "[%s] Configuration Profile" % name, + "lroot": "[%s] %s Disk Image" % (name, image.name), + "lswap": "[%s] Swap 
Space" % name + } + for what in ["lconfig", "lroot", "lswap"]: + if what in kwargs: + label[what] = kwargs[what] + + # Step 1: linode.create + params = { + "api_action": "linode.create", + "DatacenterID": chosen, + "PlanID": size.id, + "PaymentTerm": payment + } + data = self.connection.request(API_ROOT, params=params).objects[0] + linode = { "id": data["LinodeID"] } + + # Step 1b. linode.update to rename the Linode + params = { + "api_action": "linode.update", + "LinodeID": linode["id"], + "Label": name + } + self.connection.request(API_ROOT, params=params) + + # Step 1c. linode.ip.addprivate if it was requested + if "ex_private" in kwargs and kwargs["ex_private"]: + params = { + "api_action": "linode.ip.addprivate", + "LinodeID": linode["id"] + } + self.connection.request(API_ROOT, params=params) + + # Step 2: linode.disk.createfromdistribution + if not root: + root = os.urandom(8).encode('hex') + params = { + "api_action": "linode.disk.createfromdistribution", + "LinodeID": linode["id"], + "DistributionID": image.id, + "Label": label["lroot"], + "Size": imagesize, + "rootPass": root, + } + if ssh: params["rootSSHKey"] = ssh + data = self.connection.request(API_ROOT, params=params).objects[0] + linode["rootimage"] = data["DiskID"] + + # Step 3: linode.disk.create for swap + params = { + "api_action": "linode.disk.create", + "LinodeID": linode["id"], + "Label": label["lswap"], + "Type": "swap", + "Size": swap + } + data = self.connection.request(API_ROOT, params=params).objects[0] + linode["swapimage"] = data["DiskID"] + + # Step 4: linode.config.create for main profile + disks = "%s,%s,,,,,,," % (linode["rootimage"], linode["swapimage"]) + params = { + "api_action": "linode.config.create", + "LinodeID": linode["id"], + "KernelID": kernel, + "Label": label["lconfig"], + "Comments": comments, + "DiskList": disks + } + data = self.connection.request(API_ROOT, params=params).objects[0] + linode["config"] = data["ConfigID"] + + # Step 5: linode.boot + params = { + 
"api_action": "linode.boot", + "LinodeID": linode["id"], + "ConfigID": linode["config"] + } + self.connection.request(API_ROOT, params=params) + + # Make a node out of it and hand it back + params = { "api_action": "linode.list", "LinodeID": linode["id"] } + data = self.connection.request(API_ROOT, params=params).objects[0] + return self._to_nodes(data) + + def list_sizes(self, location=None): + """List available Linode plans + + Gets the sizes that can be used for creating a Linode. Since available + Linode plans vary per-location, this method can also be passed a + location to filter the availability. + + @keyword location: the facility to retrieve plans in + @type location: NodeLocation + + @return: a C{list} of L{NodeSize}s""" + params = { "api_action": "avail.linodeplans" } + data = self.connection.request(API_ROOT, params=params).objects[0] + sizes = [] + for obj in data: + n = NodeSize(id=obj["PLANID"], name=obj["LABEL"], ram=obj["RAM"], + disk=(obj["DISK"] * 1024), bandwidth=obj["XFER"], + price=obj["PRICE"], driver=self.connection.driver) + sizes.append(n) + return sizes + + def list_images(self): + """List available Linux distributions + + Retrieve all Linux distributions that can be deployed to a Linode. + + @return: a C{list} of L{NodeImage}s""" + params = { "api_action": "avail.distributions" } + data = self.connection.request(API_ROOT, params=params).objects[0] + distros = [] + for obj in data: + i = NodeImage(id=obj["DISTRIBUTIONID"], + name=obj["LABEL"], + driver=self.connection.driver, + extra={'pvops': obj['REQUIRESPVOPSKERNEL'], + '64bit': obj['IS64BIT']}) + distros.append(i) + return distros + + def list_locations(self): + """List available facilities for deployment + + Retrieve all facilities that a Linode can be deployed in. 
+ + @return: a C{list} of L{NodeLocation}s""" + params = { "api_action": "avail.datacenters" } + data = self.connection.request(API_ROOT, params=params).objects[0] + nl = [] + for dc in data: + country = None + if "USA" in dc["LOCATION"]: country = "US" + elif "UK" in dc["LOCATION"]: country = "GB" + elif "JP" in dc["LOCATION"]: country = "JP" + else: country = "??" + nl.append(NodeLocation(dc["DATACENTERID"], + dc["LOCATION"], + country, + self)) + return nl + + def linode_set_datacenter(self, dc): + """Set the default datacenter for Linode creation + + Since Linodes must be created in a facility, this function sets the + default that L{create_node} will use. If a C{location} keyword is not + passed to L{create_node}, this method must have already been used. + + @keyword dc: the datacenter to create Linodes in unless specified + @type dc: L{NodeLocation}""" + did = dc.id + params = { "api_action": "avail.datacenters" } + data = self.connection.request(API_ROOT, params=params).objects[0] + for datacenter in data: + if did == dc["DATACENTERID"]: + self.datacenter = did + return + + dcs = ", ".join([d["DATACENTERID"] for d in data]) + self.datacenter = None + raise LinodeException(0xFD, "Invalid datacenter (use one of %s)" % dcs) + + def _to_nodes(self, objs): + """Convert returned JSON Linodes into Node instances + + @keyword objs: C{list} of JSON dictionaries representing the Linodes + @type objs: C{list} + @return: C{list} of L{Node}s""" + + # Get the IP addresses for the Linodes + nodes = {} + batch = [] + for o in objs: + lid = o["LINODEID"] + nodes[lid] = n = Node(id=lid, name=o["LABEL"], public_ip=[], + private_ip=[], state=self.LINODE_STATES[o["STATUS"]], + driver=self.connection.driver) + n.extra = copy(o) + n.extra["PLANID"] = self._linode_plan_ids.get(o.get("TOTALRAM")) + batch.append({"api_action": "linode.ip.list", "LinodeID": lid}) + + # Avoid batch limitation + ip_answers = [] + args = [iter(batch)] * 25 + izip_longest = getattr(itertools, 
'izip_longest', _izip_longest) + for twenty_five in izip_longest(*args): + twenty_five = [q for q in twenty_five if q] + params = { "api_action": "batch", + "api_requestArray": json.dumps(twenty_five) } + req = self.connection.request(API_ROOT, params=params) + if not req.success() or len(req.objects) == 0: + return None + ip_answers.extend(req.objects) + + # Add the returned IPs to the nodes and return them + for ip_list in ip_answers: + for ip in ip_list: + lid = ip["LINODEID"] + which = nodes[lid].public_ip if ip["ISPUBLIC"] == 1 else \ + nodes[lid].private_ip + which.append(ip["IPADDRESS"]) + return nodes.values() + + features = {"create_node": ["ssh_key", "password"]} + +def _izip_longest(*args, **kwds): + """Taken from Python docs + + http://docs.python.org/library/itertools.html#itertools.izip + """ + fillvalue = kwds.get('fillvalue') + def sentinel(counter = ([fillvalue]*(len(args)-1)).pop): + yield counter() # yields the fillvalue, or raises IndexError + fillers = itertools.repeat(fillvalue) + iters = [itertools.chain(it, sentinel(), fillers) for it in args] + try: + for tup in itertools.izip(*iters): + yield tup + except IndexError: + pass diff --git a/trunk/libcloud/compute/drivers/ninefold.py b/trunk/libcloud/compute/drivers/ninefold.py new file mode 100644 index 0000000000..945c72e7b8 --- /dev/null +++ b/trunk/libcloud/compute/drivers/ninefold.py @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.compute.providers import Provider + +from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver + +class NinefoldNodeDriver(CloudStackNodeDriver): + "Driver for Ninefold's Compute platform." + + host = 'api.ninefold.com' + path = '/compute/v1.0/' + + type = Provider.NINEFOLD + name = 'Ninefold' diff --git a/trunk/libcloud/compute/drivers/opennebula.py b/trunk/libcloud/compute/drivers/opennebula.py new file mode 100644 index 0000000000..d3b3c38225 --- /dev/null +++ b/trunk/libcloud/compute/drivers/opennebula.py @@ -0,0 +1,429 @@ +# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad +# Complutense de Madrid (dsa-research.org) +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +OpenNebula Driver +""" + +try: + import simplejson as json +except ImportError: + import json + +from xml.etree import ElementTree as ET +from base64 import b64encode +import hashlib +import httplib + +from libcloud.compute.base import NodeState, NodeDriver, Node, NodeLocation +from libcloud.common.base import ConnectionUserAndKey, XmlResponse +from libcloud.compute.base import NodeImage, NodeSize +from libcloud.common.types import InvalidCredsError +from libcloud.compute.providers import Provider + +API_HOST = '' +API_PORT = (4567, 443) +API_SECURE = True +DEFAULT_API_VERSION = '3.0' + + +class OpenNebulaResponse(XmlResponse): + """ + Response class for the OpenNebula driver. + """ + + def success(self): + i = int(self.status) + return i >= 200 and i <= 299 + + def parse_error(self): + if int(self.status) == httplib.UNAUTHORIZED: + raise InvalidCredsError(self.body) + return self.body + + +class OpenNebulaConnection(ConnectionUserAndKey): + """ + Connection class for the OpenNebula driver. + """ + + host = API_HOST + port = API_PORT + secure = API_SECURE + responseCls = OpenNebulaResponse + + def add_default_headers(self, headers): + pass_sha1 = hashlib.sha1(self.key).hexdigest() + headers['Authorization'] = ('Basic %s' % b64encode('%s:%s' % + (self.user_id, pass_sha1))) + return headers + + +class OpenNebulaNodeSize(NodeSize): + + def __init__(self, id, name, ram, disk, bandwidth, price, driver, + cpu=None, vcpu=None): + self.cpu = cpu + self.vcpu = vcpu + super(OpenNebulaNodeSize, self).__init__(id=id, name=name, ram=ram, + disk=disk, + bandwidth=bandwidth, + price=price, driver=driver) + + def __repr__(self): + return (('') + % (self.id, self.name, self.ram, self.disk, self.bandwidth, + self.price, self.driver.name, self.cpu)) + + +class OpenNebulaNetwork(object): + """ + A virtual network. + + NodeNetwork objects are analogous to physical switches connecting 2 + or more physical nodes together. 
+ + Apart from name and id, there is no further standard information; + other parameters are stored in a driver specific "extra" variable + """ + + def __init__(self, id, name, driver, extra=None): + self.id = str(id) + self.name = name + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.name, self.driver.name)) + + +class OpenNebulaNodeDriver(NodeDriver): + """ + OpenNebula node driver. + """ + + connectionCls = OpenNebulaConnection + name = 'OpenNebula' + type = Provider.OPENNEBULA + + NODE_STATE_MAP = { + 'PENDING': NodeState.PENDING, + 'HOLD': NodeState.PENDING, + 'PROLOG': NodeState.PENDING, + 'RUNNING': NodeState.RUNNING, + 'MIGRATE': NodeState.PENDING, + 'EPILOG': NodeState.TERMINATED, + 'STOPPED': NodeState.TERMINATED, + 'SUSPENDED': NodeState.PENDING, + 'FAILED': NodeState.TERMINATED, + 'UNKNOWN': NodeState.UNKNOWN, + 'DONE': NodeState.TERMINATED + } + + def __new__(cls, key, secret=None, api_version=DEFAULT_API_VERSION, + **kwargs): + if cls is OpenNebulaNodeDriver: + if api_version == '1.4': + cls = OpenNebula_1_4_NodeDriver + elif api_version == '3.0': + cls = OpenNebula_3_0_NodeDriver + else: + raise NotImplementedError( + "No OpenNebulaNodeDriver found for API version %s" % + (api_version)) + return super(OpenNebulaNodeDriver, cls).__new__(cls) + + def list_sizes(self, location=None): + return [ + OpenNebulaNodeSize(id=1, + name='small', + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + OpenNebulaNodeSize(id=2, + name='medium', + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + OpenNebulaNodeSize(id=3, + name='large', + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + ] + + def list_nodes(self): + return self._to_nodes(self.connection.request('/compute').object) + + def list_images(self, location=None): + return self._to_images(self.connection.request('/storage').object) + + def list_locations(self): + return 
[NodeLocation(0, 'OpenNebula', 'ONE', self)] + + def reboot_node(self, node): + compute_id = str(node.id) + + url = '/compute/%s' % compute_id + resp1 = self.connection.request(url, method='PUT', + data=self._xml_action(compute_id, + 'STOPPED')) + + if resp1.status == 400: + return False + + resp2 = self.connection.request(url, method='PUT', + data=self._xml_action(compute_id, + 'RESUME')) + + if resp2.status == 400: + return False + + return True + + def destroy_node(self, node): + url = '/compute/%s' % (str(node.id)) + resp = self.connection.request(url, method='DELETE') + + return resp.status == 204 + + def create_node(self, **kwargs): + """Create a new OpenNebula node + + See L{NodeDriver.create_node} for more keyword args. + """ + compute = ET.Element('COMPUTE') + + name = ET.SubElement(compute, 'NAME') + name.text = kwargs['name'] + + xml = ET.tostring(compute) + node = self.connection.request('/compute', method='POST', + data=xml).object + + return self._to_node(node) + + def ex_list_networks(self, location=None): + """ + List virtual networks on a provider + @return: C{list} of L{OpenNebulaNetwork} objects + """ + return self._to_networks(self.connection.request('/network').object) + + def _to_images(self, object): + images = [] + for element in object.findall('DISK'): + image_id = element.attrib['href'].partition('/storage/')[2] + image = self.connection.request(('/storage/%s' % ( + image_id))).object + images.append(self._to_image(image)) + + return images + + def _to_image(self, image): + return NodeImage(id=image.findtext('ID'), + name=image.findtext('NAME'), + driver=self.connection.driver, + extra={'size': image.findtext('SIZE'), + 'url': image.findtext('URL')}) + + def _to_networks(self, object): + networks = [] + for element in object.findall('NETWORK'): + network_id = element.attrib['href'].partition('/network/')[2] + network_element = self.connection.request(('/network/%s' % ( + network_id))).object + 
networks.append(self._to_network(network_element)) + + return networks + + def _to_network(self, element): + return OpenNebulaNetwork(id=element.findtext('ID'), + name=element.findtext('NAME'), + driver=self.connection.driver, + extra={'address': element.findtext('ADDRESS'), + 'size': element.findtext('SIZE')}) + + def _to_nodes(self, object): + computes = [] + for element in object.findall('COMPUTE'): + compute_id = element.attrib['href'].partition('/compute/')[2] + compute = self.connection.request(('/compute/%s' % ( + compute_id))).object + computes.append(self._to_node(compute)) + + return computes + + def _extract_networks(self, compute): + networks = [] + + network_list = compute.find('NETWORK') + for element in network_list.findall('NIC'): + networks.append( + OpenNebulaNetwork(id=element.attrib.get('network', None), + name=None, + driver=self.connection.driver, + extra={'ip': element.attrib.get('ip', None)})) + + return networks + + def _to_node(self, compute): + try: + state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()] + except KeyError: + state = NodeState.UNKNOWN + + networks = self._extract_networks(compute) + + return Node(id=compute.findtext('ID'), + name=compute.findtext('NAME'), + state=state, + public_ip=networks, + private_ip=[], + driver=self.connection.driver) + + def _xml_action(self, compute_id, action): + compute = ET.Element('COMPUTE') + + compute_id = ET.SubElement(compute, 'ID') + compute_id.text = str(compute_id) + + state = ET.SubElement(compute, 'STATE') + state.text = action + + xml = ET.tostring(compute) + return xml + + +class OpenNebula_1_4_NodeDriver(OpenNebulaNodeDriver): + pass + + +class OpenNebula_3_0_NodeDriver(OpenNebulaNodeDriver): + def create_node(self, **kwargs): + """Create a new OpenNebula node + + See L{NodeDriver.create_node} for more keyword args. 
+ """ + compute = ET.Element('COMPUTE') + + name = ET.SubElement(compute, 'NAME') + name.text = kwargs['name'] + + instance_type = ET.SubElement(compute, 'INSTANCE_TYPE') + instance_type.text = kwargs['size'].name + + disk = ET.SubElement(compute, 'DISK') + storage = ET.SubElement(disk, 'STORAGE', {'href': '/storage/%s' % + (str(kwargs['image'].id))}) + + xml = ET.tostring(compute) + node = self.connection.request('/compute', method='POST', + data=xml).object + + return self._to_node(node) + + def list_sizes(self, location=None): + return [ + OpenNebulaNodeSize(id=1, + name='small', + ram=1024, + cpu=1, + disk=None, + bandwidth=None, + price=None, + driver=self), + OpenNebulaNodeSize(id=2, + name='medium', + ram=4096, + cpu=4, + disk=None, + bandwidth=None, + price=None, + driver=self), + OpenNebulaNodeSize(id=3, + name='large', + ram=8192, + cpu=8, + disk=None, + bandwidth=None, + price=None, + driver=self), + OpenNebulaNodeSize(id=4, + name='custom', + ram=0, + cpu=0, + disk=None, + bandwidth=None, + price=None, + driver=self), + ] + + def ex_list_networks(self, location=None): + """ + List virtual networks on a provider + @return: C{list} of L{OpenNebulaNetwork} objects + """ + return self._to_networks(self.connection.request('/network').object) + + def _to_images(self, object): + images = [] + for element in object.findall('STORAGE'): + image_id = element.attrib["href"].partition("/storage/")[2] + image = self.connection.request(("/storage/%s" % + (image_id))).object + images.append(self._to_image(image)) + + return images + + def _to_image(self, image): + return NodeImage(id=image.findtext('ID'), + name=image.findtext('NAME'), + driver=self.connection.driver, + extra={'description': image.findtext('DESCRIPTION'), + 'TYPE': image.findtext('TYPE'), + 'size': image.findtext('SIZE'), + 'fstype': image.findtext('FSTYPE', None)}) + + def _extract_networks(self, compute): + networks = [] + + for element in compute.findall('NIC'): + network = element.find('NETWORK') + 
network_id = network.attrib['href'].partition('/network/')[2] + + ips = [] + for ip in element.findall('IP'): + ips.append(ip) + + networks.append( + OpenNebulaNetwork(id=network_id, + name=network.attrib['name'], + driver=self.connection.driver, + extra={'ip': ips, + 'mac': element.findtext('MAC'), + })) + + return networks diff --git a/trunk/libcloud/compute/drivers/openstack.py b/trunk/libcloud/compute/drivers/openstack.py new file mode 100644 index 0000000000..1dcdb2f0d6 --- /dev/null +++ b/trunk/libcloud/compute/drivers/openstack.py @@ -0,0 +1,1062 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def parse_body(self):
    """
    Parse the raw response body based on its declared content type.

    @return: An ElementTree element for XML responses, a deserialized
             object for JSON responses, C{None} for empty / 204
             responses, and the raw body string for anything else.
    @raise MalformedResponseError: if the body cannot be parsed as the
                                   content type it claims to be.
    """
    # 204 responses carry no body by definition.
    if self.status == httplib.NO_CONTENT or not self.body:
        return None

    if self.has_content_type('application/xml'):
        try:
            return ET.XML(self.body)
        # Use 'except Exception' rather than the original bare
        # 'except:' so that KeyboardInterrupt/SystemExit are not
        # swallowed and rewrapped as a parse error.
        except Exception:
            raise MalformedResponseError(
                'Failed to parse XML',
                body=self.body,
                driver=self.node_driver)

    if self.has_content_type('application/json'):
        try:
            return json.loads(self.body)
        except Exception:
            raise MalformedResponseError(
                'Failed to parse JSON',
                body=self.body,
                driver=self.node_driver)

    # Unknown content type - hand the raw body back to the caller.
    return self.body
def request(self, action, params=None, data='', headers=None,
            method='GET'):
    """
    Issue a request against the compute API.

    Fills in driver defaults: a Content-Type header for body-carrying
    methods, and a cache-busting query parameter on GETs (some
    deployments sit behind aggressive HTTP caches).
    """
    if not headers:
        headers = {}
    if not params:
        params = {}

    if method in ("POST", "PUT"):
        # Set the content type on top of any caller-supplied headers.
        # The original code replaced the whole dict here, silently
        # discarding every header the caller passed in.
        headers['Content-Type'] = self.default_content_type

    if method == "GET":
        params['cache-busting'] = os.urandom(8).encode('hex')

    return super(OpenStackComputeConnection, self).request(
        action=action,
        params=params, data=data,
        method=method, headers=headers)
# Maps OpenStack server status strings onto libcloud NodeState values.
# NOTE: the original literal listed the 'REBUILD' key twice (with the
# same value); the redundant duplicate entry has been removed.
NODE_STATE_MAP = {
    'BUILD': NodeState.PENDING,
    'REBUILD': NodeState.PENDING,
    'ACTIVE': NodeState.RUNNING,
    'SUSPENDED': NodeState.TERMINATED,
    'QUEUE_RESIZE': NodeState.PENDING,
    'PREP_RESIZE': NodeState.PENDING,
    'VERIFY_RESIZE': NodeState.RUNNING,
    'PASSWORD': NodeState.PENDING,
    'RESCUE': NodeState.PENDING,
    'REBOOT': NodeState.REBOOTING,
    'HARD_REBOOT': NodeState.REBOOTING,
    'SHARE_IP': NodeState.PENDING,
    'SHARE_IP_NO_CONFIG': NodeState.PENDING,
    'DELETE_IP': NodeState.PENDING,
    'UNKNOWN': NodeState.UNKNOWN,
}
+ return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) + + def reboot_node(self, node): + return self._reboot_node(node, reboot_type='HARD') + + def list_nodes(self): + return self._to_nodes(self.connection.request('/servers/detail') + .object) + + def list_images(self, location=None, ex_only_active=True): + return self._to_images(self.connection.request('/images/detail') + .object, ex_only_active) + + def list_sizes(self, location=None): + return self._to_sizes(self.connection.request('/flavors/detail') + .object) + + def list_locations(self): + return [NodeLocation(0, '', '', self)] + + def _ex_connection_class_kwargs(self): + rv = {} + if self._ex_force_base_url: + rv['ex_force_base_url'] = self._ex_force_base_url + if self._ex_force_auth_url: + rv['ex_force_auth_url'] = self._ex_force_auth_url + if self._ex_force_auth_version: + rv['ex_force_auth_version'] = self._ex_force_auth_version + return rv + + def ex_get_node_details(self, node_id): + # @TODO: Remove this if in 0.6 + if isinstance(node_id, Node): + node_id = node_id.id + + uri = '/servers/%s' % (node_id) + resp = self.connection.request(uri, method='GET') + if resp.status == httplib.NOT_FOUND: + return None + + return self._to_node_from_obj(resp.object) + + def ex_soft_reboot_node(self, node): + return self._reboot_node(node, reboot_type='SOFT') + + def ex_hard_reboot_node(self, node): + return self._reboot_node(node, reboot_type='HARD') + + +class OpenStack_1_0_Response(OpenStackResponse): + + def __init__(self, *args, **kwargs): + # done because of a circular reference from + # NodeDriver -> Connection -> Response + self.node_driver = OpenStack_1_0_NodeDriver + super(OpenStack_1_0_Response, self).__init__(*args, **kwargs) + + +class OpenStack_1_0_Connection(OpenStackComputeConnection): + responseCls = OpenStack_1_0_Response + _url_key = "server_url" + default_content_type = 'application/xml; charset=UTF-8' + accept_format = 'application/xml' + XML_NAMESPACE = 
def _to_image(self, element):
    """
    Convert an <image> XML element into a L{NodeImage}.

    The raw 'updated', 'created', 'status', 'serverId' and 'progress'
    attributes are preserved in the image's C{extra} dict.
    """
    extra = dict((attr, element.get(attr))
                 for attr in ('updated', 'created', 'status',
                              'serverId', 'progress'))
    return NodeImage(id=element.get('id'),
                     name=element.get('name'),
                     driver=self.connection.driver,
                     extra=extra)
def create_node(self, **kwargs):
    """
    Create a new node

    See L{NodeDriver.create_node} for more keyword args.
    @keyword ex_metadata: Key/Value metadata to associate with a node
    @type ex_metadata: C{dict}

    @keyword ex_files: File Path => File contents to create on
                       the node
    @type ex_files: C{dict}
    """
    attributes = {'xmlns': self.XML_NAMESPACE,
                  'name': kwargs['name'],
                  'imageId': str(kwargs['image'].id),
                  'flavorId': str(kwargs['size'].id)}

    if 'ex_shared_ip_group' in kwargs:
        # Deprecated: callers must pass the group *id* (not its name)
        # via ex_shared_ip_group_id instead.
        warnings.warn('ex_shared_ip_group argument is deprecated. Please' +
                      ' use ex_shared_ip_group_id')

    if 'ex_shared_ip_group_id' in kwargs:
        attributes['sharedIpGroupId'] = kwargs['ex_shared_ip_group_id']

    server_elm = ET.Element('server', attributes)

    # Optional <metadata> and <personality> sub-elements; the helpers
    # return None when the corresponding kwarg dict is empty.
    for child in (self._metadata_to_xml(kwargs.get('ex_metadata', {})),
                  self._files_to_xml(kwargs.get('ex_files', {}))):
        if child is not None:
            server_elm.append(child)

    resp = self.connection.request('/servers',
                                   method='POST',
                                   data=ET.tostring(server_elm))
    return self._to_node(resp.object)
def ex_confirm_resize(self, node):
    """
    Confirm a resize request which is currently in progress. If a resize
    request is not explicitly confirmed or reverted it's automatically
    confirmed after 24 hours.

    For more info refer to the API documentation: http://goo.gl/zjFI1

    @keyword node: node for which the resize request will be confirmed.
    @param node: C{Node}
    """
    body = ET.tostring(ET.Element('confirmResize',
                                  {'xmlns': self.XML_NAMESPACE}))
    response = self.connection.request("/servers/%s/action" % (node.id),
                                       method='POST',
                                       data=body)
    return response.status == httplib.NO_CONTENT
def ex_list_ip_groups(self, details=False):
    """
    List the shared IP groups that exist on this account.

    @keyword details: when True, query the /detail variant of the
                      endpoint, which also includes member servers.
    """
    if details:
        uri = '/shared_ip_groups/detail'
    else:
        uri = '/shared_ip_groups'
    response = self.connection.request(uri, method='GET')
    elements = findall(response.object, 'sharedIpGroup',
                       self.XML_NAMESPACE)
    return [self._to_shared_ip_group(element) for element in elements]
'sharedIpGroupId': group_id, + 'configureServer': str_configure}, + ) + + uri = '/servers/%s/ips/public/%s' % (node_id, ip) + + resp = self.connection.request(uri, + method='PUT', + data=ET.tostring(elm)) + return resp.status == httplib.ACCEPTED + + def ex_unshare_ip(self, node_id, ip): + # @TODO: Remove this if in 0.6 + if isinstance(node_id, Node): + node_id = node_id.id + + uri = '/servers/%s/ips/public/%s' % (node_id, ip) + + resp = self.connection.request(uri, + method='DELETE') + return resp.status == httplib.ACCEPTED + + def ex_list_ip_addresses(self, node_id): + # @TODO: Remove this if in 0.6 + if isinstance(node_id, Node): + node_id = node_id.id + + uri = '/servers/%s/ips' % node_id + resp = self.connection.request(uri, + method='GET') + return self._to_ip_addresses(resp.object) + + def _metadata_to_xml(self, metadata): + if len(metadata) == 0: + return None + + metadata_elm = ET.Element('metadata') + for k, v in metadata.items(): + meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k)}) + meta_elm.text = str(v) + + return metadata_elm + + def _files_to_xml(self, files): + if len(files) == 0: + return None + + personality_elm = ET.Element('personality') + for k, v in files.items(): + file_elm = ET.SubElement(personality_elm, + 'file', + {'path': str(k)}) + file_elm.text = base64.b64encode(v) + + return personality_elm + + def _reboot_node(self, node, reboot_type='SOFT'): + resp = self._node_action(node, ['reboot', ('type', reboot_type)]) + return resp.status == httplib.ACCEPTED + + def _node_action(self, node, body): + if isinstance(body, list): + attr = ' '.join(['%s="%s"' % (item[0], item[1]) + for item in body[1:]]) + body = '<%s xmlns="%s" %s/>' % (body[0], self.XML_NAMESPACE, attr) + uri = '/servers/%s/action' % (node.id) + resp = self.connection.request(uri, method='POST', data=body) + return resp + + def _to_nodes(self, object): + node_elements = findall(object, 'server', self.XML_NAMESPACE) + return [self._to_node(el) for el in 
def _to_size(self, el):
    """
    Convert a <flavor> XML element into a L{NodeSize}.
    """
    size_id = el.get('id')
    return NodeSize(id=size_id,
                    name=el.get('name'),
                    ram=int(el.get('ram')),
                    disk=int(el.get('disk')),
                    # Bandwidth is not exposed by this API version.
                    bandwidth=None,
                    # Price comes from libcloud's static pricing table.
                    price=self._get_size_price(size_id),
                    driver=self.connection.driver)
def ex_save_image(self, node, name):
    """Create an image for node.

    @keyword node: node to use as a base for image
    @param node: L{Node}
    @keyword name: name for new image
    @param name: C{string}
    """
    image_elm = ET.Element('image',
                           {'xmlns': self.XML_NAMESPACE,
                            'name': name,
                            'serverId': node.id})
    response = self.connection.request("/images",
                                       method="POST",
                                       data=ET.tostring(image_elm))
    return self._to_image(response.object)
class OpenStack_1_0_SharedIpGroup(object):
    """
    Shared IP group info.

    # id - group id, normalized to a string
    # name - human readable group name
    # servers - ids of member servers, or None when unknown
    """

    def __init__(self, id, name, servers=None):
        # ids may arrive as ints from parsed XML attributes; keep the
        # public attribute a string for consistent comparisons.
        self.id = str(id)
        self.name = name
        self.servers = servers
def __init__(self, *args, **kwargs):
    """
    Pop the ex_force_* overrides off kwargs and stash them on the
    instance before delegating to the base driver constructor.
    """
    for option in ('ex_force_base_url', 'ex_force_auth_url',
                   'ex_force_auth_version'):
        setattr(self, '_' + option, kwargs.pop(option, None))
    # NOTE: mirrors the original behaviour - the value is always run
    # through str(), so a missing kwarg is stored as the string 'None'.
    self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
                                                None))
    super(OpenStack_1_1_NodeDriver, self).__init__(*args, **kwargs)
+ @keyword ex_metadata: Key/Value metadata to associate with a node + @type ex_metadata: C{dict} + + @keyword ex_files: File Path => File contents to create on + the node + @type ex_files: C{dict} + """ + + server_params = self._create_args_to_params(None, **kwargs) + + resp = self.connection.request("/servers", + method='POST', + data={'server': server_params}) + + return self._to_node(resp.object['server']) + + def _to_images(self, obj, ex_only_active): + images = [] + for image in obj['images']: + if ex_only_active and image.get('status') != 'ACTIVE': + continue + images.append(self._to_image(image)) + + return images + + def _to_image(self, api_image): + return NodeImage( + id=api_image['id'], + name=api_image['name'], + driver=self, + extra=dict( + updated=api_image['updated'], + created=api_image['created'], + status=api_image['status'], + progress=api_image.get('progress'), + metadata=api_image.get('metadata'), + ) + ) + + def _to_nodes(self, obj): + servers = obj['servers'] + return [self._to_node(server) for server in servers] + + def _to_sizes(self, obj): + flavors = obj['flavors'] + return [self._to_size(flavor) for flavor in flavors] + + def _create_args_to_params(self, node, **kwargs): + server_params = { + 'name': kwargs.get('name'), + 'metadata': kwargs.get('ex_metadata', {}), + 'personality': self._files_to_personality(kwargs.get("ex_files", + {})) + } + + if 'name' in kwargs: + server_params['name'] = kwargs.get('name') + else: + server_params['name'] = node.name + + if 'image' in kwargs: + server_params['imageRef'] = kwargs.get('image').id + else: + server_params['imageRef'] = node.extra.get('imageId') + + if 'size' in kwargs: + server_params['flavorRef'] = kwargs.get('size').id + else: + server_params['flavorRef'] = node.extra.get('flavorId') + + return server_params + + def _files_to_personality(self, files): + rv = [] + + for k, v in files.items(): + rv.append({'path': k, 'contents': base64.b64encode(v)}) + + return rv + + def 
def ex_save_image(self, node, name, metadata=None):
    """
    Create an image from a node.

    @keyword node: node to use as a base for the image
    @param node: L{Node}
    @keyword name: name for the new image
    @param name: C{str}
    @keyword metadata: optional key/value metadata to attach
    @param metadata: C{dict}

    @raise NotImplementedError: always - the 1.1 'createImage' action
        is not supported by this driver yet.
    """
    # Not yet implemented for OpenStack 1.1. The original method kept
    # an unreachable draft implementation after the raise (POSTing a
    # 'createImage' action and - per its own TODO - converting the
    # Location header into a NodeImage); the dead code has been removed.
    raise NotImplementedError()
def ex_delete_image(self, image):
    """
    Delete an image.

    @keyword image: the image to be deleted
    @param image: L{NodeImage}

    @raise NotImplementedError: always - image deletion is not
        supported by the OpenStack 1.1 driver yet.
    """
    # Not yet implemented for OpenStack 1.1. The unreachable draft
    # DELETE request that followed the raise in the original has been
    # removed as dead code.
    raise NotImplementedError()
def _get_size_price(self, size_id):
    """
    Look up the price of a flavor in libcloud's static pricing data.

    Returns 0.0 for flavors that have no pricing entry.
    """
    try:
        return get_size_price(driver_type='compute',
                              driver_name=self.api_name,
                              size_id=size_id)
    except KeyError:
        return 0.0
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Opsource Driver +""" +from xml.etree import ElementTree as ET +from base64 import b64encode +import httplib + +from libcloud.compute.base import NodeDriver, Node, NodeAuthPassword +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation +from libcloud.common.types import LibcloudError, InvalidCredsError +from libcloud.common.base import ConnectionUserAndKey, XmlResponse +from libcloud.utils import fixxpath, findtext, findall +from libcloud.compute.types import NodeState, Provider + +# Roadmap / TODO: +# +# 0.1 - Basic functionality: create, delete, start, stop, reboot - servers +# (base OS images only, no customer images suported yet) +# x implement list_nodes() +# x implement create_node() (only support Base OS images, +# no customer images yet) +# x implement reboot() +# x implement destroy_node() +# x implement list_sizes() +# x implement list_images() (only support Base OS images, +# no customer images yet) +# x implement list_locations() +# x implement ex_* extension functions for opsource-specific featurebody =s +# x ex_graceful_shutdown +# x ex_start_node +# x ex_power_off +# x ex_list_networks (needed for create_node()) +# x refactor: switch to using fixxpath() from the vcloud driver for +# dealing with xml namespace tags +# x refactor: move some functionality from 
# XML namespaces for each section of the Opsource cloud API; every
# document the API returns is qualified with one of these.
NAMESPACE_BASE = "http://oec.api.opsource.net/schemas"

ORGANIZATION_NS = "%s/organization" % NAMESPACE_BASE
SERVER_NS = "%s/server" % NAMESPACE_BASE
NETWORK_NS = "%s/network" % NAMESPACE_BASE
DIRECTORY_NS = "%s/directory" % NAMESPACE_BASE
RESET_NS = "%s/reset" % NAMESPACE_BASE
VIP_NS = "%s/vip" % NAMESPACE_BASE
IMAGEIMPORTEXPORT_NS = "%s/imageimportexport" % NAMESPACE_BASE
DATACENTER_NS = "%s/datacenter" % NAMESPACE_BASE
SUPPORT_NS = "%s/support" % NAMESPACE_BASE
GENERAL_NS = "%s/general" % NAMESPACE_BASE
IPPLAN_NS = "%s/ipplan" % NAMESPACE_BASE
WHITELABEL_NS = "%s/whitelabel" % NAMESPACE_BASE
+ driver=OpsourceNodeDriver) + + return self.body + + +class OpsourceAPIException(LibcloudError): + def __init__(self, code, msg, driver): + self.code = code + self.msg = msg + self.driver = driver + + def __str__(self): + return "%s: %s" % (self.code, self.msg) + + def __repr__(self): + return ("" % + (self.code, self.msg)) + + +class OpsourceConnection(ConnectionUserAndKey): + """ + Connection class for the Opsource driver + """ + + host = 'api.opsourcecloud.net' + api_path = '/oec' + api_version = '0.9' + _orgId = None + responseCls = OpsourceResponse + + def add_default_headers(self, headers): + headers['Authorization'] = ('Basic %s' % b64encode('%s:%s' % + (self.user_id, self.key))) + return headers + + def request(self, action, params=None, data='', + headers=None, method='GET'): + action = "%s/%s/%s" % (self.api_path, self.api_version, action) + + return super(OpsourceConnection, self).request( + action=action, + params=params, data=data, + method=method, headers=headers) + + def request_with_orgId(self, action, params=None, data='', + headers=None, method='GET'): + action = "%s/%s" % (self.get_resource_path(), action) + + return super(OpsourceConnection, self).request( + action=action, + params=params, data=data, + method=method, headers=headers) + + def get_resource_path(self): + """ + This method returns a resource path which is necessary for referencing + resources that require a full path instead of just an ID, such as + networks, and customer snapshots. + """ + return ("%s/%s/%s" % (self.api_path, self.api_version, + self._get_orgId())) + + def _get_orgId(self): + """ + Send the /myaccount API request to opsource cloud and parse the + 'orgId' from the XML response object. 
We need the orgId to use most + of the other API functions + """ + if self._orgId == None: + body = self.request('myaccount').object + self._orgId = findtext(body, 'orgId', DIRECTORY_NS) + return self._orgId + + +class OpsourceStatus(object): + """ + Opsource API pending operation status class + action, requestTime, username, numberOfSteps, updateTime, + step.name, step.number, step.percentComplete, failureReason, + """ + def __init__(self, action=None, requestTime=None, userName=None, + numberOfSteps=None, updateTime=None, step_name=None, + step_number=None, step_percentComplete=None, + failureReason=None): + self.action = action + self.requestTime = requestTime + self.userName = userName + self.numberOfSteps = numberOfSteps + self.updateTime = updateTime + self.step_name = step_name + self.step_number = step_number + self.step_percentComplete = step_percentComplete + self.failureReason = failureReason + + def __repr__(self): + return (('') + % (self.id, self.name, self.description, self.location, + self.privateNet, self.multicast)) + + +class OpsourceNodeDriver(NodeDriver): + """ + Opsource node driver. + """ + + connectionCls = OpsourceConnection + name = 'Opsource' + type = Provider.OPSOURCE + features = {"create_node": ["password"]} + + def create_node(self, **kwargs): + """ + Create a new opsource node + + Standard keyword arguments from L{NodeDriver.create_node}: + @keyword name: String with a name for this new node (required) + @type name: str + + @keyword image: OS Image to boot on node. (required) + @type image: L{NodeImage} + + @keyword auth: Initial authentication information for the + node (required) + @type auth: L{NodeAuthPassword} + + Non-standard keyword arguments: + @keyword ex_description: description for this node (required) + @type ex_description: C{str} + + @keyword ex_network: Network to create the node within (required) + @type ex_network: L{OpsourceNetwork} + + @keyword ex_isStarted: Start server after creation? 
default + true (required) + @type ex_isStarted: C{bool} + + @return: The newly created L{Node}. NOTE: Opsource does not provide a + way to determine the ID of the server that was just created, + so the returned L{Node} is not guaranteed to be the same one + that was created. This is only the case when multiple nodes + with the same name exist. + """ + name = kwargs['name'] + image = kwargs['image'] + + # XXX: Node sizes can be adjusted after a node is created, but + # cannot be set at create time because size is part of the + # image definition. + password = None + if 'auth' in kwargs: + auth = kwargs.get('auth') + if isinstance(auth, NodeAuthPassword): + password = auth.password + else: + raise ValueError('auth must be of NodeAuthPassword type') + + ex_description = kwargs.get('ex_description', '') + ex_isStarted = kwargs.get('ex_isStarted', True) + + ex_network = kwargs.get('ex_network') + if not isinstance(ex_network, OpsourceNetwork): + raise ValueError('ex_network must be of OpsourceNetwork type') + vlanResourcePath = "%s/%s" % (self.connection.get_resource_path(), + ex_network.id) + + imageResourcePath = None + if 'resourcePath' in image.extra: + imageResourcePath = image.extra['resourcePath'] + else: + imageResourcePath = "%s/%s" % (self.connection.get_resource_path(), + image.id) + + server_elm = ET.Element('Server', {'xmlns': SERVER_NS}) + ET.SubElement(server_elm, "name").text = name + ET.SubElement(server_elm, "description").text = ex_description + ET.SubElement(server_elm, "vlanResourcePath").text = vlanResourcePath + ET.SubElement(server_elm, "imageResourcePath").text = imageResourcePath + ET.SubElement(server_elm, "administratorPassword").text = password + ET.SubElement(server_elm, "isStarted").text = str(ex_isStarted) + + self.connection.request_with_orgId('server', + method='POST', + data=ET.tostring(server_elm)).object + + # XXX: return the last node in the list that has a matching name. 
this + # is likely but not guaranteed to be the node we just created + # because opsource allows multiple nodes to have the same name + return filter(lambda x: x.name == name, self.list_nodes())[-1] + + def destroy_node(self, node): + body = self.connection.request_with_orgId('server/%s?delete' % + (node.id)).object + + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def reboot_node(self, node): + body = self.connection.request_with_orgId('server/%s?restart' % + (node.id)).object + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def list_nodes(self): + nodes = self._to_nodes(self.connection + .request_with_orgId('server/deployed').object) + nodes.extend(self._to_nodes(self.connection + .request_with_orgId('server/pendingDeploy').object)) + return nodes + + def list_images(self, location=None): + """ + return a list of available images + Currently only returns the default 'base OS images' provided by + opsource. Customer images (snapshots) are not yet supported. + """ + return self._to_base_images(self.connection.request('base/image') + .object) + + def list_sizes(self, location=None): + return [ + NodeSize(id=1, + name="default", + ram=0, + disk=0, + bandwidth=0, + price=0, + driver=self.connection.driver), + ] + + def list_locations(self): + """ + list locations (datacenters) available for instantiating servers and + networks. + """ + return self._to_locations(self.connection + .request_with_orgId('datacenter').object) + + def list_networks(self, location=None): + """ + List networks deployed across all data center locations for your + organization. The response includes the location of each network. 
+ + Returns a list of OpsourceNetwork objects + """ + return self._to_networks(self.connection + .request_with_orgId('networkWithLocation').object) + + def _to_base_images(self, object): + images = [] + for element in object.findall(fixxpath("ServerImage", SERVER_NS)): + images.append(self._to_base_image(element)) + + return images + + def _to_base_image(self, element): + # Eventually we will probably need multiple _to_image() functions + # that parse differently than . + # DeployedImages are customer snapshot images, and ServerImages are + # 'base' images provided by opsource + location_id = findtext(element, 'location', SERVER_NS) + location = self.ex_get_location_by_id(location_id) + + extra = { + 'description': findtext(element, 'description', SERVER_NS), + 'OS_type': findtext(element, 'operatingSystem/type', SERVER_NS), + 'OS_displayName': findtext(element, 'operatingSystem/displayName', + SERVER_NS), + 'cpuCount': findtext(element, 'cpuCount', SERVER_NS), + 'resourcePath': findtext(element, 'resourcePath', SERVER_NS), + 'memory': findtext(element, 'memory', SERVER_NS), + 'osStorage': findtext(element, 'osStorage', SERVER_NS), + 'additionalStorage': findtext(element, 'additionalStorage', + SERVER_NS), + 'created': findtext(element, 'created', SERVER_NS), + 'location': location, + } + + return NodeImage(id=str(findtext(element, 'id', SERVER_NS)), + name=str(findtext(element, 'name', SERVER_NS)), + extra=extra, + driver=self.connection.driver) + + def ex_start_node(self, node): + """ + Powers on an existing deployed server + """ + body = self.connection.request_with_orgId('server/%s?start' % + node.id).object + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def ex_shutdown_graceful(self, node): + """ + This function will attempt to "gracefully" stop a server by + initiating a shutdown sequence within the guest operating system. 
+ A successful response on this function means the system has + successfully passed the request into the operating system. + """ + body = self.connection.request_with_orgId('server/%s?shutdown' % + (node.id)).object + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def ex_power_off(self, node): + """ + This function will abruptly power-off a server. Unlike + ex_shutdown_graceful, success ensures the node will stop but some OS + and application configurations may be adversely affected by the + equivalent of pulling the power plug out of the machine. + """ + body = self.connection.request_with_orgId('server/%s?poweroff' % + node.id).object + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def ex_list_networks(self): + """ + List networks deployed across all data center locations for your + organization. The response includes the location of each network. + + Returns a list of OpsourceNetwork objects + """ + response = self.connection.request_with_orgId('networkWithLocation') \ + .object + return self._to_networks(response) + + def ex_get_location_by_id(self, id): + location = None + if id is not None: + location = filter(lambda x: x.id == id, self.list_locations())[0] + return location + + def _to_networks(self, object): + networks = [] + for element in findall(object, 'network', NETWORK_NS): + networks.append(self._to_network(element)) + + return networks + + def _to_network(self, element): + multicast = False + if findtext(element, 'multicast', NETWORK_NS) == 'true': + multicast = True + + status = self._to_status(element.find(fixxpath('status', NETWORK_NS))) + + location_id = findtext(element, 'location', NETWORK_NS) + location = self.ex_get_location_by_id(location_id) + + return OpsourceNetwork(id=findtext(element, 'id', NETWORK_NS), + name=findtext(element, 'name', NETWORK_NS), + description=findtext(element, 'description', + NETWORK_NS), + location=location, + privateNet=findtext(element, 
'privateNet', + NETWORK_NS), + multicast=multicast, + status=status) + + def _to_locations(self, object): + locations = [] + for element in object.findall(fixxpath('datacenter', DATACENTER_NS)): + locations.append(self._to_location(element)) + + return locations + + def _to_location(self, element): + l = NodeLocation(id=findtext(element, 'location', DATACENTER_NS), + name=findtext(element, 'displayName', DATACENTER_NS), + country=findtext(element, 'country', DATACENTER_NS), + driver=self) + return l + + def _to_nodes(self, object): + node_elements = object.findall(fixxpath('DeployedServer', SERVER_NS)) + node_elements.extend(object.findall(fixxpath('PendingDeployServer', + SERVER_NS))) + return [self._to_node(el) for el in node_elements] + + def _to_node(self, element): + if findtext(element, 'isStarted', SERVER_NS) == 'true': + state = NodeState.RUNNING + else: + state = NodeState.TERMINATED + + status = self._to_status(element.find(fixxpath('status', SERVER_NS))) + + extra = { + 'description': findtext(element, 'description', SERVER_NS), + 'sourceImageId': findtext(element, 'sourceImageId', SERVER_NS), + 'networkId': findtext(element, 'networkId', SERVER_NS), + 'machineName': findtext(element, 'machineName', SERVER_NS), + 'deployedTime': findtext(element, 'deployedTime', SERVER_NS), + 'cpuCount': findtext(element, 'machineSpecification/cpuCount', + SERVER_NS), + 'memoryMb': findtext(element, 'machineSpecification/memoryMb', + SERVER_NS), + 'osStorageGb': findtext(element, + 'machineSpecification/osStorageGb', SERVER_NS), + 'additionalLocalStorageGb': findtext(element, + 'machineSpecification/additionalLocalStorageGb', SERVER_NS), + 'OS_type': findtext(element, + 'machineSpecification/operatingSystem/type', SERVER_NS), + 'OS_displayName': findtext(element, + 'machineSpecification/operatingSystem/displayName', SERVER_NS), + 'status': status, + } + + n = Node(id=findtext(element, 'id', SERVER_NS), + name=findtext(element, 'name', SERVER_NS), + state=state, + 
public_ip="unknown", + private_ip=findtext(element, 'privateIpAddress', SERVER_NS), + driver=self.connection.driver, + extra=extra) + return n + + def _to_status(self, element): + if element == None: + return OpsourceStatus() + s = OpsourceStatus(action=findtext(element, 'action', SERVER_NS), + requestTime=findtext(element, 'requestTime', + SERVER_NS), + userName=findtext(element, 'userName', + SERVER_NS), + numberOfSteps=findtext(element, 'numberOfSteps', + SERVER_NS), + step_name=findtext(element, 'step/name', + SERVER_NS), + step_number=findtext(element, 'step_number', + SERVER_NS), + step_percentComplete=findtext(element, + 'step/percentComplete', SERVER_NS), + failureReason=findtext(element, 'failureReason', + SERVER_NS)) + return s diff --git a/trunk/libcloud/compute/drivers/rackspace.py b/trunk/libcloud/compute/drivers/rackspace.py new file mode 100644 index 0000000000..aba896ea6c --- /dev/null +++ b/trunk/libcloud/compute/drivers/rackspace.py @@ -0,0 +1,66 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Rackspace driver +""" +from libcloud.compute.types import Provider +from libcloud.compute.base import NodeLocation +from libcloud.compute.drivers.openstack import OpenStack_1_0_Connection, OpenStack_1_0_NodeDriver, OpenStack_1_0_Response + +from libcloud.common.rackspace import ( + AUTH_URL_US, AUTH_URL_UK) + + +class RackspaceConnection(OpenStack_1_0_Connection): + """ + Connection class for the Rackspace driver + """ + + responseCls = OpenStack_1_0_Response + auth_url = AUTH_URL_US + XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0' + + +class RackspaceNodeDriver(OpenStack_1_0_NodeDriver): + name = 'Rackspace' + connectionCls = RackspaceConnection + type = Provider.RACKSPACE + api_name = 'rackspace' + + def list_locations(self): + """Lists available locations + + Locations cannot be set or retrieved via the API, but currently + there are two locations, DFW and ORD. + """ + return [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)] + + +class RackspaceUKConnection(RackspaceConnection): + """ + Connection class for the Rackspace UK driver + """ + auth_url = AUTH_URL_UK + + +class RackspaceUKNodeDriver(RackspaceNodeDriver): + """Driver for Rackspace in the UK (London) + """ + + name = 'Rackspace (UK)' + connectionCls = RackspaceUKConnection + + def list_locations(self): + return [NodeLocation(0, 'Rackspace UK London', 'UK', self)] diff --git a/trunk/libcloud/compute/drivers/rimuhosting.py b/trunk/libcloud/compute/drivers/rimuhosting.py new file mode 100644 index 0000000000..ecf1766658 --- /dev/null +++ b/trunk/libcloud/compute/drivers/rimuhosting.py @@ -0,0 +1,310 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +RimuHosting Driver +""" +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.base import ConnectionKey, JsonResponse +from libcloud.common.types import InvalidCredsError +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation +from libcloud.compute.base import NodeImage, NodeAuthPassword + +API_CONTEXT = '/r' +API_HOST = 'rimuhosting.com' + +class RimuHostingException(Exception): + """ + Exception class for RimuHosting driver + """ + + def __str__(self): + return self.args[0] + + def __repr__(self): + return "" % (self.args[0]) + +class RimuHostingResponse(JsonResponse): + def __init__(self, response, connection): + self.body = response.read() + self.status = response.status + self.headers = dict(response.getheaders()) + self.error = response.reason + self.connection = connection + + if self.success(): + self.object = self.parse_body() + + def success(self): + if self.status == 403: + raise InvalidCredsError() + return True + + def parse_body(self): + try: + js = super(RimuHostingResponse, self).parse_body() + if js[js.keys()[0]]['response_type'] == "ERROR": + raise RimuHostingException( + js[js.keys()[0]]['human_readable_message'] + ) + return js[js.keys()[0]] + except KeyError: + raise RimuHostingException('Could not parse body: %s' + % (self.body)) + +class RimuHostingConnection(ConnectionKey): + """ + Connection class for the RimuHosting driver + """ + + api_context = API_CONTEXT + host = API_HOST + port = 443 + responseCls = 
RimuHostingResponse + + def __init__(self, key, secure=True): + # override __init__ so that we can set secure of False for testing + ConnectionKey.__init__(self,key,secure) + + def add_default_headers(self, headers): + # We want JSON back from the server. Could be application/xml + # (but JSON is better). + headers['Accept'] = 'application/json' + # Must encode all data as json, or override this header. + headers['Content-Type'] = 'application/json' + + headers['Authorization'] = 'rimuhosting apikey=%s' % (self.key) + return headers; + + def request(self, action, params=None, data='', headers=None, method='GET'): + if not headers: + headers = {} + if not params: + params = {} + # Override this method to prepend the api_context + return ConnectionKey.request(self, self.api_context + action, + params, data, headers, method) + +class RimuHostingNodeDriver(NodeDriver): + """ + RimuHosting node driver + """ + + type = Provider.RIMUHOSTING + name = 'RimuHosting' + connectionCls = RimuHostingConnection + + def __init__(self, key, host=API_HOST, port=443, + api_context=API_CONTEXT, secure=True): + # Pass in some extra vars so that + self.key = key + self.secure = secure + self.connection = self.connectionCls(key ,secure) + self.connection.host = host + self.connection.api_context = api_context + self.connection.port = port + self.connection.driver = self + self.connection.connect() + + def _order_uri(self, node,resource): + # Returns the order uri with its resourse appended. + return "/orders/%s/%s" % (node.id,resource) + + # TODO: Get the node state. 
+ def _to_node(self, order): + n = Node(id=order['slug'], + name=order['domain_name'], + state=NodeState.RUNNING, + public_ip=( + [order['allocated_ips']['primary_ip']] + + order['allocated_ips']['secondary_ips'] + ), + private_ip=[], + driver=self.connection.driver, + extra={'order_oid': order['order_oid'], + 'monthly_recurring_fee': order.get('billing_info').get('monthly_recurring_fee')}) + return n + + def _to_size(self,plan): + return NodeSize( + id=plan['pricing_plan_code'], + name=plan['pricing_plan_description'], + ram=plan['minimum_memory_mb'], + disk=plan['minimum_disk_gb'], + bandwidth=plan['minimum_data_transfer_allowance_gb'], + price=plan['monthly_recurring_amt']['amt_usd'], + driver=self.connection.driver + ) + + def _to_image(self,image): + return NodeImage(id=image['distro_code'], + name=image['distro_description'], + driver=self.connection.driver) + + def list_sizes(self, location=None): + # Returns a list of sizes (aka plans) + # Get plans. Note this is really just for libcloud. + # We are happy with any size. + if location == None: + location = ''; + else: + location = ";dc_location=%s" % (location.id) + + res = self.connection.request('/pricing-plans;server-type=VPS%s' % (location)).object + return map(lambda x : self._to_size(x), res['pricing_plan_infos']) + + def list_nodes(self): + # Returns a list of Nodes + # Will only include active ones. + res = self.connection.request('/orders;include_inactive=N').object + return map(lambda x : self._to_node(x), res['about_orders']) + + def list_images(self, location=None): + # Get all base images. + # TODO: add other image sources. (Such as a backup of a VPS) + # All Images are available for use at all locations + res = self.connection.request('/distributions').object + return map(lambda x : self._to_image(x), res['distro_infos']) + + def reboot_node(self, node): + # Reboot + # PUT the state of RESTARTING to restart a VPS. 
+ # All data is encoded as JSON + data = {'reboot_request':{'running_state':'RESTARTING'}} + uri = self._order_uri(node,'vps/running-state') + self.connection.request(uri,data=json.dumps(data),method='PUT') + # XXX check that the response was actually successful + return True + + def destroy_node(self, node): + # Shutdown a VPS. + uri = self._order_uri(node,'vps') + self.connection.request(uri,method='DELETE') + # XXX check that the response was actually successful + return True + + def create_node(self, **kwargs): + """Creates a RimuHosting instance + + See L{NodeDriver.create_node} for more keyword args. + + @keyword name: Must be a FQDN. e.g example.com. + @type name: C{string} + + @keyword ex_billing_oid: If not set, a billing method is automatically picked. + @type ex_billing_oid: C{string} + + @keyword ex_host_server_oid: The host server to set the VPS up on. + @type ex_host_server_oid: C{string} + + @keyword ex_vps_order_oid_to_clone: Clone another VPS to use as the image for the new VPS. + @type ex_vps_order_oid_to_clone: C{string} + + @keyword ex_num_ips: Number of IPs to allocate. Defaults to 1. + @type ex_num_ips: C{int} + + @keyword ex_extra_ip_reason: Reason for needing the extra IPs. + @type ex_extra_ip_reason: C{string} + + @keyword ex_memory_mb: Memory to allocate to the VPS. + @type ex_memory_mb: C{int} + + @keyword ex_disk_space_mb: Diskspace to allocate to the VPS. Defaults to 4096 (4GB). + @type ex_disk_space_mb: C{int} + + @keyword ex_disk_space_2_mb: Secondary disk size allocation. Disabled by default. + @type ex_disk_space_2_mb: C{int} + + @keyword ex_control_panel: Control panel to install on the VPS. + @type ex_control_panel: C{string} + """ + # Note we don't do much error checking in this because we + # expect the API to error out if there is a problem. 
+ name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + + data = { + 'instantiation_options':{ + 'domain_name': name, 'distro': image.id + }, + 'pricing_plan_code': size.id, + } + + if kwargs.has_key('ex_control_panel'): + data['instantiation_options']['control_panel'] = kwargs['ex_control_panel'] + + if kwargs.has_key('auth'): + auth = kwargs['auth'] + if not isinstance(auth, NodeAuthPassword): + raise ValueError('auth must be of NodeAuthPassword type') + data['instantiation_options']['password'] = auth.password + + if kwargs.has_key('ex_billing_oid'): + #TODO check for valid oid. + data['billing_oid'] = kwargs['ex_billing_oid'] + + if kwargs.has_key('ex_host_server_oid'): + data['host_server_oid'] = kwargs['ex_host_server_oid'] + + if kwargs.has_key('ex_vps_order_oid_to_clone'): + data['vps_order_oid_to_clone'] = kwargs['ex_vps_order_oid_to_clone'] + + if kwargs.has_key('ex_num_ips') and int(kwargs['ex_num_ips']) > 1: + if not kwargs.has_key('ex_extra_ip_reason'): + raise RimuHostingException('Need an reason for having an extra IP') + else: + if not data.has_key('ip_request'): + data['ip_request'] = {} + data['ip_request']['num_ips'] = int(kwargs['ex_num_ips']) + data['ip_request']['extra_ip_reason'] = kwargs['ex_extra_ip_reason'] + + if kwargs.has_key('ex_memory_mb'): + if not data.has_key('vps_parameters'): + data['vps_parameters'] = {} + data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb'] + + if kwargs.has_key('ex_disk_space_mb'): + if not data.has_key('ex_vps_parameters'): + data['vps_parameters'] = {} + data['vps_parameters']['disk_space_mb'] = kwargs['ex_disk_space_mb'] + + if kwargs.has_key('ex_disk_space_2_mb'): + if not data.has_key('vps_parameters'): + data['vps_parameters'] = {} + data['vps_parameters']['disk_space_2_mb'] = kwargs['ex_disk_space_2_mb'] + + res = self.connection.request( + '/orders/new-vps', + method='POST', + data=json.dumps({"new-vps":data}) + ).object + node = self._to_node(res['about_order']) + 
node.extra['password'] = res['new_order_request']['instantiation_options']['password'] + return node + + def list_locations(self): + return [ + NodeLocation('DCAUCKLAND', "RimuHosting Auckland", 'NZ', self), + NodeLocation('DCDALLAS', "RimuHosting Dallas", 'US', self), + NodeLocation('DCLONDON', "RimuHosting London", 'GB', self), + NodeLocation('DCSYDNEY', "RimuHosting Sydney", 'AU', self), + ] + + features = {"create_node": ["password"]} diff --git a/trunk/libcloud/compute/drivers/serverlove.py b/trunk/libcloud/compute/drivers/serverlove.py new file mode 100644 index 0000000000..4b27c640e7 --- /dev/null +++ b/trunk/libcloud/compute/drivers/serverlove.py @@ -0,0 +1,82 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +ServerLove Driver +""" + +from libcloud.compute.types import Provider +from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver +from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection + + +# API end-points +API_ENDPOINTS = { + 'uk-1': { + 'name': 'United Kingdom, Manchester', + 'country': 'United Kingdom', + 'host': 'api.z1-man.serverlove.com' + } +} + +# Default API end-point for the base connection class. 
+DEFAULT_ENDPOINT = 'uk-1' + +# Retrieved from http://www.serverlove.com/cloud-server-faqs/api-questions/ +STANDARD_DRIVES = { + '679f5f44-0be7-4745-a658-cccd4334c1aa': { + 'uuid': '679f5f44-0be7-4745-a658-cccd4334c1aa', + 'description': 'CentOS 5.5', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '5f2e0e29-2937-42b9-b362-d2d07eddbdeb': { + 'uuid': '5f2e0e29-2937-42b9-b362-d2d07eddbdeb', + 'description': 'Ubuntu Linux 10.04', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '5795b68f-ed26-4639-b41d-c93235062b6b': { + 'uuid': '5795b68f-ed26-4639-b41d-c93235062b6b', + 'description': 'Debian Linux 5', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '41993a02-0b22-4e49-bb47-0aa8975217e4': { + 'uuid': '41993a02-0b22-4e49-bb47-0aa8975217e4', + 'description': 'Windows Server 2008 R2 Standard', + 'size_gunzipped': '15GB', + 'supports_deployment': False, + }, + '85623ca1-9c2a-4398-a771-9a43c347e86b': { + 'uuid': '85623ca1-9c2a-4398-a771-9a43c347e86b', + 'description': 'Windows Web Server 2008 R2', + 'size_gunzipped': '15GB', + 'supports_deployment': False, + } +} + + +class ServerLoveConnection(ElasticStackBaseConnection): + host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + + +class ServerLoveNodeDriver(ElasticStackBaseNodeDriver): + type = Provider.SERVERLOVE + api_name = 'serverlove' + name = 'ServerLove' + connectionCls = ServerLoveConnection + features = {'create_node': ['generates_password']} + _standard_drives = STANDARD_DRIVES diff --git a/trunk/libcloud/compute/drivers/skalicloud.py b/trunk/libcloud/compute/drivers/skalicloud.py new file mode 100644 index 0000000000..791d07729d --- /dev/null +++ b/trunk/libcloud/compute/drivers/skalicloud.py @@ -0,0 +1,82 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +skalicloud Driver +""" + +from libcloud.compute.types import Provider +from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver +from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection + + +# API end-points +API_ENDPOINTS = { + 'my-1': { + 'name': 'Malaysia, Kuala Lumpur', + 'country': 'Malaysia', + 'host': 'api.sdg-my.skalicloud.com' + } +} + +# Default API end-point for the base connection class. 
+DEFAULT_ENDPOINT = 'my-1' + +# Retrieved from http://www.skalicloud.com/cloud-api/ +STANDARD_DRIVES = { + '90aa51f2-15c0-4cff-81ee-e93aa20b9468': { + 'uuid': '90aa51f2-15c0-4cff-81ee-e93aa20b9468', + 'description': 'CentOS 5.5 -64bit', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + 'c144d7a7-e24b-48ab-954b-6b6ec514ed6f': { + 'uuid': 'c144d7a7-e24b-48ab-954b-6b6ec514ed6f', + 'description': 'Debian 5 -64bit', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '3051699a-a536-4220-aeb5-67f2ec101a09': { + 'uuid': '3051699a-a536-4220-aeb5-67f2ec101a09', + 'description': 'Ubuntu Server 10.10 -64bit', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + '11c4c922-5ff8-4094-b06c-eb8ffaec1ea9': { + 'uuid': '11c4c922-5ff8-4094-b06c-eb8ffaec1ea9', + 'description': 'Windows 2008R2 Web Edition', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + '93bf390e-4f46-4252-a8bc-9d6d80e3f955': { + 'uuid': '93bf390e-4f46-4252-a8bc-9d6d80e3f955', + 'description': 'Windows Server 2008R2 Standard', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + } +} + + +class SkaliCloudConnection(ElasticStackBaseConnection): + host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + + +class SkaliCloudNodeDriver(ElasticStackBaseNodeDriver): + type = Provider.SKALICLOUD + api_name = 'skalicloud' + name = 'skalicloud' + connectionCls = SkaliCloudConnection + features = {"create_node": ["generates_password"]} + _standard_drives = STANDARD_DRIVES diff --git a/trunk/libcloud/compute/drivers/slicehost.py b/trunk/libcloud/compute/drivers/slicehost.py new file mode 100644 index 0000000000..d23ae5ef5a --- /dev/null +++ b/trunk/libcloud/compute/drivers/slicehost.py @@ -0,0 +1,232 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Slicehost Driver +""" +import base64 +import socket + +from xml.etree import ElementTree as ET +from xml.parsers.expat import ExpatError + +from libcloud.common.base import ConnectionKey, XmlResponse +from libcloud.compute.types import NodeState, Provider, InvalidCredsError +from libcloud.compute.base import NodeSize, NodeDriver, NodeImage, NodeLocation +from libcloud.compute.base import Node, is_private_subnet + +class SlicehostResponse(XmlResponse): + def parse_error(self): + if self.status == 401: + raise InvalidCredsError(self.body) + + body = super(SlicehostResponse, self).parse_body() + try: + return "; ".join([ err.text + for err in + body.findall('error') ]) + except ExpatError: + return self.body + + +class SlicehostConnection(ConnectionKey): + """ + Connection class for the Slicehost driver + """ + + host = 'api.slicehost.com' + responseCls = SlicehostResponse + + def add_default_headers(self, headers): + headers['Authorization'] = ('Basic %s' + % (base64.b64encode('%s:' % self.key))) + return headers + + +class SlicehostNodeDriver(NodeDriver): + """ + Slicehost node driver + """ + + connectionCls = SlicehostConnection + + type = Provider.SLICEHOST + name = 'Slicehost' + + features = {"create_node": ["generates_password"]} + + NODE_STATE_MAP = { 'active': NodeState.RUNNING, + 'build': NodeState.PENDING, + 'reboot': NodeState.REBOOTING, + 'hard_reboot': NodeState.REBOOTING, + 
'terminated': NodeState.TERMINATED } + + def list_nodes(self): + return self._to_nodes(self.connection.request('/slices.xml').object) + + def list_sizes(self, location=None): + return self._to_sizes(self.connection.request('/flavors.xml').object) + + def list_images(self, location=None): + return self._to_images(self.connection.request('/images.xml').object) + + def list_locations(self): + return [ + NodeLocation(0, 'Slicehost St. Louis (STL-A)', 'US', self), + NodeLocation(0, 'Slicehost St. Louis (STL-B)', 'US', self), + NodeLocation(0, 'Slicehost Dallas-Fort Worth (DFW-1)', 'US', self) + ] + + def create_node(self, **kwargs): + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + uri = '/slices.xml' + + # create a slice obj + root = ET.Element('slice') + el_name = ET.SubElement(root, 'name') + el_name.text = name + flavor_id = ET.SubElement(root, 'flavor-id') + flavor_id.text = str(size.id) + image_id = ET.SubElement(root, 'image-id') + image_id.text = str(image.id) + xml = ET.tostring(root) + + node = self._to_nodes( + self.connection.request( + uri, + method='POST', + data=xml, + headers={'Content-Type': 'application/xml'} + ).object + )[0] + return node + + def reboot_node(self, node): + """Reboot the node by passing in the node object""" + + # 'hard' could bubble up as kwarg depending on how reboot_node + # turns out. Defaulting to soft reboot. 
+ #hard = False + #reboot = self.api.hard_reboot if hard else self.api.reboot + #expected_status = 'hard_reboot' if hard else 'reboot' + + uri = '/slices/%s/reboot.xml' % (node.id) + node = self._to_nodes( + self.connection.request(uri, method='PUT').object + )[0] + return node.state == NodeState.REBOOTING + + def destroy_node(self, node): + """Destroys the node + + Requires 'Allow Slices to be deleted or rebuilt from the API' to be + ticked at https://manage.slicehost.com/api, otherwise returns:: + + You must enable slice deletes in the SliceManager + Permission denied + + """ + uri = '/slices/%s/destroy.xml' % (node.id) + self.connection.request(uri, method='PUT') + return True + + def _to_nodes(self, object): + if object.tag == 'slice': + return [ self._to_node(object) ] + node_elements = object.findall('slice') + return [ self._to_node(el) for el in node_elements ] + + def _to_node(self, element): + + attrs = [ 'name', 'image-id', 'progress', 'id', 'bw-out', 'bw-in', + 'flavor-id', 'status', 'ip-address', 'root-password' ] + + node_attrs = {} + for attr in attrs: + node_attrs[attr] = element.findtext(attr) + + # slicehost does not determine between public and private, so we + # have to figure it out + public_ip = [] + private_ip = [] + + ip_address = element.findtext('ip-address') + if is_private_subnet(ip_address): + private_ip.append(ip_address) + else: + public_ip.append(ip_address) + + for addr in element.findall('addresses/address'): + ip = addr.text + try: + socket.inet_aton(ip) + except socket.error: + # not a valid ip + continue + if is_private_subnet(ip): + private_ip.append(ip) + else: + public_ip.append(ip) + + public_ip = list(set(public_ip)) + + try: + state = self.NODE_STATE_MAP[element.findtext('status')] + except: + state = NodeState.UNKNOWN + + # for consistency with other drivers, we put this in two places. 
+ node_attrs['password'] = node_attrs['root-password'] + extra = {} + for k in node_attrs.keys(): + ek = k.replace("-", "_") + extra[ek] = node_attrs[k] + n = Node(id=element.findtext('id'), + name=element.findtext('name'), + state=state, + public_ip=public_ip, + private_ip=private_ip, + driver=self.connection.driver, + extra=extra) + return n + + def _to_sizes(self, object): + if object.tag == 'flavor': + return [ self._to_size(object) ] + elements = object.findall('flavor') + return [ self._to_size(el) for el in elements ] + + def _to_size(self, element): + s = NodeSize(id=int(element.findtext('id')), + name=str(element.findtext('name')), + ram=int(element.findtext('ram')), + disk=None, # XXX: needs hardcode + bandwidth=None, # XXX: needs hardcode + price=float(element.findtext('price'))/(100*24*30), + driver=self.connection.driver) + return s + + def _to_images(self, object): + if object.tag == 'image': + return [ self._to_image(object) ] + elements = object.findall('image') + return [ self._to_image(el) for el in elements ] + + def _to_image(self, element): + i = NodeImage(id=int(element.findtext('id')), + name=str(element.findtext('name')), + driver=self.connection.driver) + return i diff --git a/trunk/libcloud/compute/drivers/softlayer.py b/trunk/libcloud/compute/drivers/softlayer.py new file mode 100644 index 0000000000..e0153a9000 --- /dev/null +++ b/trunk/libcloud/compute/drivers/softlayer.py @@ -0,0 +1,442 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Softlayer driver +""" + +import time +import xmlrpclib + +import libcloud + +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, NodeImage + +DATACENTERS = { + 'sea01': {'country': 'US'}, + 'wdc01': {'country': 'US'}, + 'dal01': {'country': 'US'} +} + +NODE_STATE_MAP = { + 'RUNNING': NodeState.RUNNING, + 'HALTED': NodeState.TERMINATED, + 'PAUSED': NodeState.TERMINATED, +} + +DEFAULT_PACKAGE = 46 + +SL_IMAGES = [ + {'id': 1684, 'name': 'CentOS 5 - Minimal Install (32 bit)'}, + {'id': 1685, 'name': 'CentOS 5 - Minimal Install (64 bit)'}, + {'id': 1686, 'name': 'CentOS 5 - LAMP Install (32 bit)'}, + {'id': 1687, 'name': 'CentOS 5 - LAMP Install (64 bit)'}, + {'id': 1688, 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (32 bit)'}, + {'id': 1689, 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (64 bit)'}, + {'id': 1690, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (32 bit)'}, + {'id': 1691, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (64 bit)'}, + {'id': 1692, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (32 bit)'}, + {'id': 1693, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (64 bit)'}, + {'id': 1694, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - LAMP Install (32 bit)'}, + {'id': 1695, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - LAMP Install (64 bit)'}, + {'id': 1696, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (32 bit)'}, + {'id': 1697, 'name': 'Debian 
GNU/Linux 5.0 Lenny/Stable - Minimal Install (64 bit)'}, + {'id': 1698, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (32 bit)'}, + {'id': 1699, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (64 bit)'}, + {'id': 1700, 'name': 'Windows Server 2003 Standard SP2 with R2 (32 bit)'}, + {'id': 1701, 'name': 'Windows Server 2003 Standard SP2 with R2 (64 bit)'}, + {'id': 1703, 'name': 'Windows Server 2003 Enterprise SP2 with R2 (64 bit)'}, + {'id': 1705, 'name': 'Windows Server 2008 Standard Edition (64bit)'}, + {'id': 1715, 'name': 'Windows Server 2003 Datacenter SP2 (64 bit)'}, + {'id': 1716, 'name': 'Windows Server 2003 Datacenter SP2 (32 bit)'}, + {'id': 1742, 'name': 'Windows Server 2008 Standard Edition SP2 (32bit)'}, + {'id': 1752, 'name': 'Windows Server 2008 Standard Edition SP2 (64bit)'}, + {'id': 1756, 'name': 'Windows Server 2008 Enterprise Edition SP2 (32bit)'}, + {'id': 1761, 'name': 'Windows Server 2008 Enterprise Edition SP2 (64bit)'}, + {'id': 1766, 'name': 'Windows Server 2008 Datacenter Edition SP2 (32bit)'}, + {'id': 1770, 'name': 'Windows Server 2008 Datacenter Edition SP2 (64bit)'}, + {'id': 1857, 'name': 'Windows Server 2008 R2 Standard Edition (64bit)'}, + {'id': 1860, 'name': 'Windows Server 2008 R2 Enterprise Edition (64bit)'}, + {'id': 1863, 'name': 'Windows Server 2008 R2 Datacenter Edition (64bit)'}, +] + +""" +The following code snippet will print out all available "prices" + mask = { 'items': '' } + res = self.connection.request( + "SoftLayer_Product_Package", + "getObject", + res, + id=46, + object_mask=mask + ) + + from pprint import pprint; pprint(res) +""" +SL_TEMPLATES = { + 'sl1': { + 'imagedata': { + 'name': '2 x 2.0 GHz, 1GB ram, 100GB', + 'ram': 1024, + 'disk': 100, + 'bandwidth': None + }, + 'prices': [ + {'id': 1644}, # 1 GB + {'id': 1639}, # 100 GB (SAN) + {'id': 1963}, # Private 2 x 2.0 GHz Cores + {'id': 21}, # 1 IP Address + {'id': 55}, # Host Ping + {'id': 58}, # Automated Notification + {'id': 1800}, # 
0 GB Bandwidth + {'id': 57}, # Email and Ticket + {'id': 274}, # 1000 Mbps Public & Private Networks + {'id': 905}, # Reboot / Remote Console + {'id': 418}, # Nessus Vulnerability Assessment & Reporting + {'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account + ], + }, + 'sl2': { + 'imagedata': { + 'name': '2 x 2.0 GHz, 4GB ram, 350GB', + 'ram': 4096, + 'disk': 350, + 'bandwidth': None + }, + 'prices': [ + {'id': 1646}, # 4 GB + {'id': 1639}, # 100 GB (SAN) - This is the only available "First Disk" + {'id': 1638}, # 250 GB (SAN) + {'id': 1963}, # Private 2 x 2.0 GHz Cores + {'id': 21}, # 1 IP Address + {'id': 55}, # Host Ping + {'id': 58}, # Automated Notification + {'id': 1800}, # 0 GB Bandwidth + {'id': 57}, # Email and Ticket + {'id': 274}, # 1000 Mbps Public & Private Networks + {'id': 905}, # Reboot / Remote Console + {'id': 418}, # Nessus Vulnerability Assessment & Reporting + {'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account + ], + } +} + +class SoftLayerException(LibcloudError): + """ + Exception class for SoftLayer driver + """ + pass + +class SoftLayerSafeTransport(xmlrpclib.SafeTransport): + pass + +class SoftLayerTransport(xmlrpclib.Transport): + pass + +class SoftLayerProxy(xmlrpclib.ServerProxy): + transportCls = (SoftLayerTransport, SoftLayerSafeTransport) + API_PREFIX = 'https://api.softlayer.com/xmlrpc/v3/' + + def __init__(self, service, user_agent, verbose=0): + cls = self.transportCls[0] + if SoftLayerProxy.API_PREFIX[:8] == "https://": + cls = self.transportCls[1] + t = cls(use_datetime=0) + t.user_agent = user_agent + xmlrpclib.ServerProxy.__init__( + self, + uri="%s/%s" % (SoftLayerProxy.API_PREFIX, service), + transport=t, + verbose=verbose + ) + +class SoftLayerConnection(object): + """ + Connection class for the SoftLayer driver + """ + + proxyCls = SoftLayerProxy + driver = None + + def __init__(self, user, key): + self.user = user + self.key = key + self.ua = [] + + def request(self, service, method, 
*args, **kwargs): + sl = self.proxyCls(service, self._user_agent()) + + headers = {} + headers.update(self._get_auth_headers()) + headers.update(self._get_init_params(service, kwargs.get('id'))) + headers.update(self._get_object_mask(service, kwargs.get('object_mask'))) + params = [{'headers': headers}] + list(args) + + try: + return getattr(sl, method)(*params) + except xmlrpclib.Fault, e: + if e.faultCode == "SoftLayer_Account": + raise InvalidCredsError(e.faultString) + raise SoftLayerException(e) + + def _user_agent(self): + return 'libcloud/%s (%s)%s' % ( + libcloud.__version__, + self.driver.name, + "".join([" (%s)" % x for x in self.ua])) + + def user_agent_append(self, s): + self.ua.append(s) + + def _get_auth_headers(self): + return { + 'authenticate': { + 'username': self.user, + 'apiKey': self.key + } + } + + def _get_init_params(self, service, id): + if id is not None: + return { + '%sInitParameters' % service: {'id': id} + } + else: + return {} + + def _get_object_mask(self, service, mask): + if mask is not None: + return { + '%sObjectMask' % service: {'mask': mask} + } + else: + return {} + +class SoftLayerNodeDriver(NodeDriver): + """ + SoftLayer node driver + + Extra node attributes: + - password: root password + - hourlyRecurringFee: hourly price (if applicable) + - recurringFee : flat rate (if applicable) + - recurringMonths : The number of months in which the recurringFee will be incurred. 
+ """ + connectionCls = SoftLayerConnection + name = 'SoftLayer' + type = Provider.SOFTLAYER + + features = {"create_node": ["generates_password"]} + + def __init__(self, key, secret=None, secure=False): + self.key = key + self.secret = secret + self.connection = self.connectionCls(key, secret) + self.connection.driver = self + + def _to_node(self, host): + try: + password = host['softwareComponents'][0]['passwords'][0]['password'] + except (IndexError, KeyError): + password = None + + hourlyRecurringFee = host.get('billingItem', {}).get('hourlyRecurringFee', 0) + recurringFee = host.get('billingItem', {}).get('recurringFee', 0) + recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0) + + return Node( + id=host['id'], + name=host['hostname'], + state=NODE_STATE_MAP.get( + host['powerState']['keyName'], + NodeState.UNKNOWN + ), + public_ip=[host['primaryIpAddress']], + private_ip=[host['primaryBackendIpAddress']], + driver=self, + extra={ + 'password': password, + 'hourlyRecurringFee': hourlyRecurringFee, + 'recurringFee': recurringFee, + 'recurringMonths': recurringMonths, + } + ) + + def _to_nodes(self, hosts): + return [self._to_node(h) for h in hosts] + + def destroy_node(self, node): + billing_item = self.connection.request( + "SoftLayer_Virtual_Guest", + "getBillingItem", + id=node.id + ) + + if billing_item: + res = self.connection.request( + "SoftLayer_Billing_Item", + "cancelService", + id=billing_item['id'] + ) + return res + else: + return False + + def _get_order_information(self, order_id, timeout=1200, check_interval=5): + mask = { + 'orderTopLevelItems': { + 'billingItem': { + 'resource': { + 'softwareComponents': { + 'passwords': '' + }, + 'powerState': '', + } + }, + } + } + + for i in range(0, timeout, check_interval): + try: + res = self.connection.request( + "SoftLayer_Billing_Order", + "getObject", + id=order_id, + object_mask=mask + ) + item = res['orderTopLevelItems'][0]['billingItem']['resource'] + if 
item['softwareComponents'][0]['passwords']: + return item + + except (KeyError, IndexError): + pass + + time.sleep(check_interval) + + return None + + def create_node(self, **kwargs): + """Create a new SoftLayer node + + See L{NodeDriver.create_node} for more keyword args. + @keyword ex_domain: e.g. libcloud.org + @type ex_domain: C{string} + """ + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + domain = kwargs.get('ex_domain') + location = kwargs['location'] + if domain == None: + if name.find(".") != -1: + domain = name[name.find('.')+1:] + + if domain == None: + # TODO: domain is a required argument for the Sofylayer API, but it + # it shouldn't be. + domain = "exmaple.com" + + res = {'prices': SL_TEMPLATES[size.id]['prices']} + res['packageId'] = DEFAULT_PACKAGE + res['prices'].append({'id': image.id}) # Add OS to order + res['location'] = location.id + res['complexType'] = 'SoftLayer_Container_Product_Order_Virtual_Guest' + res['quantity'] = 1 + res['useHourlyPricing'] = True + res['virtualGuests'] = [ + { + 'hostname': name, + 'domain': domain + } + ] + + res = self.connection.request( + "SoftLayer_Product_Order", + "placeOrder", + res + ) + + order_id = res['orderId'] + raw_node = self._get_order_information(order_id) + + return self._to_node(raw_node) + + def _to_image(self, img): + return NodeImage( + id=img['id'], + name=img['name'], + driver=self.connection.driver + ) + + def list_images(self, location=None): + return [self._to_image(i) for i in SL_IMAGES] + + def _to_size(self, id, size): + return NodeSize( + id=id, + name=size['name'], + ram=size['ram'], + disk=size['disk'], + bandwidth=size['bandwidth'], + price=None, + driver=self.connection.driver, + ) + + def list_sizes(self, location=None): + return [self._to_size(id, s['imagedata']) for id, s in SL_TEMPLATES.iteritems()] + + def _to_loc(self, loc): + return NodeLocation( + id=loc['id'], + name=loc['name'], + country=DATACENTERS[loc['name']]['country'], + driver=self + ) 
+ + def list_locations(self): + res = self.connection.request( + "SoftLayer_Location_Datacenter", + "getDatacenters" + ) + + # checking "in DATACENTERS", because some of the locations returned by getDatacenters are not useable. + return [self._to_loc(l) for l in res if l['name'] in DATACENTERS] + + def list_nodes(self): + mask = { + 'virtualGuests': { + 'powerState': '', + 'softwareComponents': { + 'passwords': '' + }, + 'billingItem': '', + }, + } + res = self.connection.request( + "SoftLayer_Account", + "getVirtualGuests", + object_mask=mask + ) + nodes = self._to_nodes(res) + return nodes + + def reboot_node(self, node): + res = self.connection.request( + "SoftLayer_Virtual_Guest", + "rebootHard", + id=node.id + ) + return res diff --git a/trunk/libcloud/compute/drivers/vcloud.py b/trunk/libcloud/compute/drivers/vcloud.py new file mode 100644 index 0000000000..47eb848ce2 --- /dev/null +++ b/trunk/libcloud/compute/drivers/vcloud.py @@ -0,0 +1,613 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +VMware vCloud driver. 
+""" +import base64 +import httplib +import time + +from urlparse import urlparse +from xml.etree import ElementTree as ET +from xml.parsers.expat import ExpatError + +from libcloud.common.base import XmlResponse, ConnectionUserAndKey +from libcloud.common.types import InvalidCredsError +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver, NodeLocation +from libcloud.compute.base import NodeSize, NodeImage, NodeAuthPassword + +""" +From vcloud api "The VirtualQuantity element defines the number of MB +of memory. This should be either 512 or a multiple of 1024 (1 GB)." +""" +VIRTUAL_MEMORY_VALS = [512] + [1024 * i for i in range(1,9)] + +DEFAULT_TASK_COMPLETION_TIMEOUT = 600 + +def get_url_path(url): + return urlparse(url.strip()).path + +def fixxpath(root, xpath): + """ElementTree wants namespaces in its xpaths, so here we add them.""" + namespace, root_tag = root.tag[1:].split("}", 1) + fixed_xpath = "/".join(["{%s}%s" % (namespace, e) + for e in xpath.split("/")]) + return fixed_xpath + +class InstantiateVAppXML(object): + + def __init__(self, name, template, net_href, cpus, memory, + password=None, row=None, group=None): + self.name = name + self.template = template + self.net_href = net_href + self.cpus = cpus + self.memory = memory + self.password = password + self.row = row + self.group = group + + self._build_xmltree() + + def tostring(self): + return ET.tostring(self.root) + + def _build_xmltree(self): + self.root = self._make_instantiation_root() + + self._add_vapp_template(self.root) + instantionation_params = ET.SubElement(self.root, + "InstantiationParams") + + # product and virtual hardware + self._make_product_section(instantionation_params) + self._make_virtual_hardware(instantionation_params) + + network_config_section = ET.SubElement(instantionation_params, + "NetworkConfigSection") + + network_config = ET.SubElement(network_config_section, + 
"NetworkConfig") + self._add_network_association(network_config) + + def _make_instantiation_root(self): + return ET.Element( + "InstantiateVAppTemplateParams", + {'name': self.name, + 'xml:lang': 'en', + 'xmlns': "http://www.vmware.com/vcloud/v0.8", + 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"} + ) + + def _add_vapp_template(self, parent): + return ET.SubElement( + parent, + "VAppTemplate", + {'href': self.template} + ) + + def _make_product_section(self, parent): + prod_section = ET.SubElement( + parent, + "ProductSection", + {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8", + 'xmlns:ovf': "http://schemas.dmtf.org/ovf/envelope/1"} + ) + + if self.password: + self._add_property(prod_section, 'password', self.password) + + if self.row: + self._add_property(prod_section, 'row', self.row) + + if self.group: + self._add_property(prod_section, 'group', self.group) + + return prod_section + + def _add_property(self, parent, ovfkey, ovfvalue): + return ET.SubElement( + parent, + "Property", + {'xmlns': 'http://schemas.dmtf.org/ovf/envelope/1', + 'ovf:key': ovfkey, + 'ovf:value': ovfvalue} + ) + + def _make_virtual_hardware(self, parent): + vh = ET.SubElement( + parent, + "VirtualHardwareSection", + {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8"} + ) + + self._add_cpu(vh) + self._add_memory(vh) + + return vh + + def _add_cpu(self, parent): + cpu_item = ET.SubElement( + parent, + "Item", + {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"} + ) + self._add_instance_id(cpu_item, '1') + self._add_resource_type(cpu_item, '3') + self._add_virtual_quantity(cpu_item, self.cpus) + + return cpu_item + + def _add_memory(self, parent): + mem_item = ET.SubElement( + parent, + "Item", + {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"} + ) + self._add_instance_id(mem_item, '2') + self._add_resource_type(mem_item, '4') + self._add_virtual_quantity(mem_item, self.memory) + + return mem_item + + def _add_instance_id(self, parent, id): + elm = ET.SubElement( + parent, + 
"InstanceID", + {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'} + ) + elm.text = id + return elm + + def _add_resource_type(self, parent, type): + elm = ET.SubElement( + parent, + "ResourceType", + {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'} + ) + elm.text = type + return elm + + def _add_virtual_quantity(self, parent, amount): + elm = ET.SubElement( + parent, + "VirtualQuantity", + {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'} + ) + elm.text = amount + return elm + + def _add_network_association(self, parent): + return ET.SubElement( + parent, + "NetworkAssociation", + {'href': self.net_href} + ) + +class VCloudResponse(XmlResponse): + + def success(self): + return self.status in (httplib.OK, httplib.CREATED, + httplib.NO_CONTENT, httplib.ACCEPTED) + +class VCloudConnection(ConnectionUserAndKey): + """ + Connection class for the vCloud driver + """ + + responseCls = VCloudResponse + token = None + host = None + + def request(self, *args, **kwargs): + self._get_auth_token() + return super(VCloudConnection, self).request(*args, **kwargs) + + def check_org(self): + # the only way to get our org is by logging in. 
+ self._get_auth_token() + + def _get_auth_headers(self): + """Some providers need different headers than others""" + return { + 'Authorization': + "Basic %s" + % base64.b64encode('%s:%s' % (self.user_id, self.key)), + 'Content-Length': 0 + } + + def _get_auth_token(self): + if not self.token: + conn = self.conn_classes[self.secure](self.host, + self.port) + conn.request(method='POST', url='/api/v0.8/login', + headers=self._get_auth_headers()) + + resp = conn.getresponse() + headers = dict(resp.getheaders()) + body = ET.XML(resp.read()) + + try: + self.token = headers['set-cookie'] + except KeyError: + raise InvalidCredsError() + + self.driver.org = get_url_path( + body.find(fixxpath(body, 'Org')).get('href') + ) + + def add_default_headers(self, headers): + headers['Cookie'] = self.token + return headers + +class VCloudNodeDriver(NodeDriver): + """ + vCloud node driver + """ + + type = Provider.VCLOUD + name = "vCloud" + connectionCls = VCloudConnection + org = None + _vdcs = None + + NODE_STATE_MAP = {'0': NodeState.PENDING, + '1': NodeState.PENDING, + '2': NodeState.PENDING, + '3': NodeState.PENDING, + '4': NodeState.RUNNING} + + @property + def vdcs(self): + if not self._vdcs: + self.connection.check_org() # make sure the org is set. 
+ res = self.connection.request(self.org) + self._vdcs = [ + get_url_path(i.get('href')) + for i + in res.object.findall(fixxpath(res.object, "Link")) + if i.get('type') == 'application/vnd.vmware.vcloud.vdc+xml' + ] + + return self._vdcs + + @property + def networks(self): + networks = [] + for vdc in self.vdcs: + res = self.connection.request(vdc).object + networks.extend( + [network + for network in res.findall( + fixxpath(res, "AvailableNetworks/Network") + )] + ) + + return networks + + def _to_image(self, image): + image = NodeImage(id=image.get('href'), + name=image.get('name'), + driver=self.connection.driver) + return image + + def _to_node(self, name, elm): + state = self.NODE_STATE_MAP[elm.get('status')] + public_ips = [] + private_ips = [] + + # Following code to find private IPs works for Terremark + connections = elm.findall('{http://schemas.dmtf.org/ovf/envelope/1}NetworkConnectionSection/{http://www.vmware.com/vcloud/v0.8}NetworkConnection') + for connection in connections: + ips = [ip.text + for ip + in connection.findall(fixxpath(elm, "IpAddress"))] + if connection.get('Network') == 'Internal': + private_ips.extend(ips) + else: + public_ips.extend(ips) + + node = Node(id=elm.get('href'), + name=name, + state=state, + public_ip=public_ips, + private_ip=private_ips, + driver=self.connection.driver) + + return node + + def _get_catalog_hrefs(self): + res = self.connection.request(self.org) + catalogs = [ + get_url_path(i.get('href')) + for i in res.object.findall(fixxpath(res.object, "Link")) + if i.get('type') == 'application/vnd.vmware.vcloud.catalog+xml' + ] + + return catalogs + + def _wait_for_task_completion(self, task_href, + timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): + start_time = time.time() + res = self.connection.request(task_href) + status = res.object.get('status') + while status != 'success': + if status == 'error': + raise Exception("Error status returned by task %s." 
                            % task_href)
            if status == 'canceled':
                raise Exception("Canceled status returned by task %s."
                                % task_href)
            if (time.time() - start_time >= timeout):
                raise Exception("Timeout while waiting for task %s."
                                % task_href)
            time.sleep(5)
            res = self.connection.request(task_href)
            status = res.object.get('status')

    def destroy_node(self, node):
        """Power off, undeploy and delete the vApp behind C{node}."""
        node_path = get_url_path(node.id)
        # blindly poweroff node, it will throw an exception if already off
        try:
            res = self.connection.request('%s/power/action/poweroff'
                                          % node_path,
                                          method='POST')
            self._wait_for_task_completion(res.object.get('href'))
        except Exception:
            pass

        try:
            res = self.connection.request('%s/action/undeploy' % node_path,
                                          method='POST')
            self._wait_for_task_completion(res.object.get('href'))
        except ExpatError:
            # The undeploy response is malformed XML atm.
            # We can remove this when the providers fix the problem.
            pass
        except Exception:
            # Some vendors don't implement undeploy at all yet,
            # so catch this and move on.
+ pass + + res = self.connection.request(node_path, method='DELETE') + return res.status == 202 + + def reboot_node(self, node): + res = self.connection.request('%s/power/action/reset' + % get_url_path(node.id), + method='POST') + return res.status == 202 or res.status == 204 + + def list_nodes(self): + nodes = [] + for vdc in self.vdcs: + res = self.connection.request(vdc) + elms = res.object.findall(fixxpath( + res.object, "ResourceEntities/ResourceEntity") + ) + vapps = [ + (i.get('name'), get_url_path(i.get('href'))) + for i in elms + if i.get('type') + == 'application/vnd.vmware.vcloud.vApp+xml' + and i.get('name') + ] + + for vapp_name, vapp_href in vapps: + res = self.connection.request( + vapp_href, + headers={ + 'Content-Type': + 'application/vnd.vmware.vcloud.vApp+xml' + } + ) + nodes.append(self._to_node(vapp_name, res.object)) + + return nodes + + def _to_size(self, ram): + ns = NodeSize( + id=None, + name="%s Ram" % ram, + ram=ram, + disk=None, + bandwidth=None, + price=None, + driver=self.connection.driver + ) + return ns + + def list_sizes(self, location=None): + sizes = [self._to_size(i) for i in VIRTUAL_MEMORY_VALS] + return sizes + + def _get_catalogitems_hrefs(self, catalog): + """Given a catalog href returns contained catalog item hrefs""" + res = self.connection.request( + catalog, + headers={ + 'Content-Type': + 'application/vnd.vmware.vcloud.catalog+xml' + } + ).object + + cat_items = res.findall(fixxpath(res, "CatalogItems/CatalogItem")) + cat_item_hrefs = [i.get('href') + for i in cat_items + if i.get('type') == + 'application/vnd.vmware.vcloud.catalogItem+xml'] + + return cat_item_hrefs + + def _get_catalogitem(self, catalog_item): + """Given a catalog item href returns elementree""" + res = self.connection.request( + catalog_item, + headers={ + 'Content-Type': + 'application/vnd.vmware.vcloud.catalogItem+xml' + } + ).object + + return res + + def list_images(self, location=None): + images = [] + for vdc in self.vdcs: + res = 
self.connection.request(vdc).object + res_ents = res.findall(fixxpath( + res, "ResourceEntities/ResourceEntity") + ) + images += [ + self._to_image(i) + for i in res_ents + if i.get('type') == + 'application/vnd.vmware.vcloud.vAppTemplate+xml' + ] + + for catalog in self._get_catalog_hrefs(): + for cat_item in self._get_catalogitems_hrefs(catalog): + res = self._get_catalogitem(cat_item) + res_ents = res.findall(fixxpath(res, 'Entity')) + images += [ + self._to_image(i) + for i in res_ents + if i.get('type') == + 'application/vnd.vmware.vcloud.vAppTemplate+xml' + ] + + return images + + def create_node(self, **kwargs): + """Creates and returns node. + + + See L{NodeDriver.create_node} for more keyword args. + + Non-standard optional keyword arguments: + @keyword ex_network: link to a "Network" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/network/7" + @type ex_network: C{string} + + @keyword ex_vdc: link to a "VDC" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/vdc/1" + @type ex_vdc: C{string} + + @keyword ex_cpus: number of virtual cpus (limit depends on provider) + @type ex_cpus: C{int} + + @keyword row: ???? + @type row: C{????} + + @keyword group: ???? + @type group: C{????} + """ + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + + # Some providers don't require a network link + try: + network = kwargs.get('ex_network', self.networks[0].get('href')) + except IndexError: + network = '' + + password = None + if kwargs.has_key('auth'): + auth = kwargs['auth'] + if isinstance(auth, NodeAuthPassword): + password = auth.password + else: + raise ValueError('auth must be of NodeAuthPassword type') + + instantiate_xml = InstantiateVAppXML( + name=name, + template=image.id, + net_href=network, + cpus=str(kwargs.get('ex_cpus', 1)), + memory=str(size.ram), + password=password, + row=kwargs.get('ex_row', None), + group=kwargs.get('ex_group', None) + ) + + # Instantiate VM and get identifier. 
+ res = self.connection.request( + '%s/action/instantiateVAppTemplate' + % kwargs.get('vdc', self.vdcs[0]), + data=instantiate_xml.tostring(), + method='POST', + headers={ + 'Content-Type': + 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml' + } + ) + vapp_name = res.object.get('name') + vapp_href = get_url_path(res.object.get('href')) + + # Deploy the VM from the identifier. + res = self.connection.request('%s/action/deploy' % vapp_href, + method='POST') + + self._wait_for_task_completion(res.object.get('href')) + + # Power on the VM. + res = self.connection.request('%s/power/action/powerOn' % vapp_href, + method='POST') + + res = self.connection.request(vapp_href) + node = self._to_node(vapp_name, res.object) + + return node + + features = {"create_node": ["password"]} + +class HostingComConnection(VCloudConnection): + """ + vCloud connection subclass for Hosting.com + """ + + host = "vcloud.safesecureweb.com" + + def _get_auth_headers(self): + """hosting.com doesn't follow the standard vCloud authentication API""" + return { + 'Authentication': + base64.b64encode('%s:%s' % (self.user_id, self.key)), + 'Content-Length': 0 + } + +class HostingComDriver(VCloudNodeDriver): + """ + vCloud node driver for Hosting.com + """ + connectionCls = HostingComConnection + +class TerremarkConnection(VCloudConnection): + """ + vCloud connection subclass for Terremark + """ + + host = "services.vcloudexpress.terremark.com" + +class TerremarkDriver(VCloudNodeDriver): + """ + vCloud node driver for Terremark + """ + + connectionCls = TerremarkConnection + + def list_locations(self): + return [NodeLocation(0, "Terremark Texas", 'US', self)] diff --git a/trunk/libcloud/compute/drivers/voxel.py b/trunk/libcloud/compute/drivers/voxel.py new file mode 100644 index 0000000000..390d8187d6 --- /dev/null +++ b/trunk/libcloud/compute/drivers/voxel.py @@ -0,0 +1,307 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Voxel VoxCloud driver +""" +import datetime +import hashlib + +from libcloud.common.base import XmlResponse, ConnectionUserAndKey +from libcloud.common.types import InvalidCredsError +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation + +VOXEL_API_HOST = "api.voxel.net" + +class VoxelResponse(XmlResponse): + + def __init__(self, response, connection): + self.parsed = None + super(VoxelResponse, self).__init__(response=response, + connection=connection) + + def parse_body(self): + if not self.body: + return None + if not self.parsed: + self.parsed = super(VoxelResponse, self).parse_body() + return self.parsed + + def parse_error(self): + err_list = [] + if not self.body: + return None + if not self.parsed: + self.parsed = super(VoxelResponse, self).parse_body() + for err in self.parsed.findall('err'): + code = err.get('code') + err_list.append("(%s) %s" % (code, err.get('msg'))) + # From voxel docs: + # 1: Invalid login or password + # 9: Permission denied: user lacks access rights for this method + if code == "1" or code == "9": + # sucks, but only way to detect + # bad authentication tokens so far + raise 
InvalidCredsError(err_list[-1]) + return "\n".join(err_list) + + def success(self): + if not self.parsed: + self.parsed = super(VoxelResponse, self).parse_body() + stat = self.parsed.get('stat') + if stat != "ok": + return False + return True + +class VoxelConnection(ConnectionUserAndKey): + """ + Connection class for the Voxel driver + """ + + host = VOXEL_API_HOST + responseCls = VoxelResponse + + def add_default_params(self, params): + params["key"] = self.user_id + params["timestamp"] = datetime.datetime.utcnow().isoformat()+"+0000" + + for param in params.keys(): + if params[param] is None: + del params[param] + + keys = params.keys() + keys.sort() + + md5 = hashlib.md5() + md5.update(self.key) + for key in keys: + if params[key]: + if not params[key] is None: + md5.update("%s%s"% (key, params[key])) + else: + md5.update(key) + params['api_sig'] = md5.hexdigest() + return params + +VOXEL_INSTANCE_TYPES = {} +RAM_PER_CPU = 2048 + +NODE_STATE_MAP = { + 'IN_PROGRESS': NodeState.PENDING, + 'QUEUED': NodeState.PENDING, + 'SUCCEEDED': NodeState.RUNNING, + 'shutting-down': NodeState.TERMINATED, + 'terminated': NodeState.TERMINATED, + 'unknown': NodeState.UNKNOWN, +} + +class VoxelNodeDriver(NodeDriver): + """ + Voxel VoxCLOUD node driver + """ + + connectionCls = VoxelConnection + type = Provider.VOXEL + name = 'Voxel VoxCLOUD' + + def _initialize_instance_types(): + for cpus in range(1,14): + if cpus == 1: + name = "Single CPU" + else: + name = "%d CPUs" % cpus + id = "%dcpu" % cpus + ram = cpus * RAM_PER_CPU + + VOXEL_INSTANCE_TYPES[id]= { + 'id': id, + 'name': name, + 'ram': ram, + 'disk': None, + 'bandwidth': None, + 'price': None} + + features = {"create_node": [], + "list_sizes": ["variable_disk"]} + + _initialize_instance_types() + + def list_nodes(self): + params = {"method": "voxel.devices.list"} + result = self.connection.request('/', params=params).object + return self._to_nodes(result) + + def list_sizes(self, location=None): + return [ 
NodeSize(driver=self.connection.driver, **i) + for i in VOXEL_INSTANCE_TYPES.values() ] + + def list_images(self, location=None): + params = {"method": "voxel.images.list"} + result = self.connection.request('/', params=params).object + return self._to_images(result) + + def create_node(self, **kwargs): + """Create Voxel Node + + @keyword name: the name to assign the node (mandatory) + @type name: C{str} + + @keyword image: distribution to deploy + @type image: L{NodeImage} + + @keyword size: the plan size to create (mandatory) + Requires size.disk (GB) to be set manually + @type size: L{NodeSize} + + @keyword location: which datacenter to create the node in + @type location: L{NodeLocation} + + @keyword ex_privateip: Backend IP address to assign to node; + must be chosen from the customer's + private VLAN assignment. + @type ex_privateip: C{str} + + @keyword ex_publicip: Public-facing IP address to assign to node; + must be chosen from the customer's + public VLAN assignment. + @type ex_publicip: C{str} + + @keyword ex_rootpass: Password for root access; generated if unset. + @type ex_rootpass: C{str} + + @keyword ex_consolepass: Password for remote console; + generated if unset. + @type ex_consolepass: C{str} + + @keyword ex_sshuser: Username for SSH access + @type ex_sshuser: C{str} + + @keyword ex_sshpass: Password for SSH access; generated if unset. + @type ex_sshpass: C{str} + + @keyword ex_voxel_access: Allow access Voxel administrative access. + Defaults to False. 
+ @type ex_voxel_access: C{bool} + """ + + # assert that disk > 0 + if not kwargs["size"].disk: + raise ValueError("size.disk must be non-zero") + + # convert voxel_access to string boolean if needed + voxel_access = kwargs.get("ex_voxel_access", None) + if voxel_access is not None: + voxel_access = "true" if voxel_access else "false" + + params = { + 'method': 'voxel.voxcloud.create', + 'hostname': kwargs["name"], + 'disk_size': int(kwargs["size"].disk), + 'facility': kwargs["location"].id, + 'image_id': kwargs["image"].id, + 'processing_cores': kwargs["size"].ram / RAM_PER_CPU, + 'backend_ip': kwargs.get("ex_privateip", None), + 'frontend_ip': kwargs.get("ex_publicip", None), + 'admin_password': kwargs.get("ex_rootpass", None), + 'console_password': kwargs.get("ex_consolepass", None), + 'ssh_username': kwargs.get("ex_sshuser", None), + 'ssh_password': kwargs.get("ex_sshpass", None), + 'voxel_access': voxel_access, + } + + object = self.connection.request('/', params=params).object + + if self._getstatus(object): + return Node( + id = object.findtext("device/id"), + name = kwargs["name"], + state = NODE_STATE_MAP[object.findtext("device/status")], + public_ip = kwargs.get("publicip", None), + private_ip = kwargs.get("privateip", None), + driver = self.connection.driver + ) + else: + return None + + def reboot_node(self, node): + """ + Reboot the node by passing in the node object + """ + params = {'method': 'voxel.devices.power', + 'device_id': node.id, + 'power_action': 'reboot'} + return self._getstatus(self.connection.request('/', params=params).object) + + def destroy_node(self, node): + """ + Destroy node by passing in the node object + """ + params = {'method': 'voxel.voxcloud.delete', + 'device_id': node.id} + return self._getstatus(self.connection.request('/', params=params).object) + + def list_locations(self): + params = {"method": "voxel.voxcloud.facilities.list"} + result = self.connection.request('/', params=params).object + nodes = 
self._to_locations(result) + return nodes + + def _getstatus(self, element): + status = element.attrib["stat"] + return status == "ok" + + + def _to_locations(self, object): + return [NodeLocation(element.attrib["label"], + element.findtext("description"), + element.findtext("description"), + self) + for element in object.findall('facilities/facility')] + + def _to_nodes(self, object): + nodes = [] + for element in object.findall('devices/device'): + if element.findtext("type") == "Virtual Server": + try: + state = self.NODE_STATE_MAP[element.attrib['status']] + except KeyError: + state = NodeState.UNKNOWN + + public_ip = private_ip = None + ipassignments = element.findall("ipassignments/ipassignment") + for ip in ipassignments: + if ip.attrib["type"] =="frontend": + public_ip = ip.text + elif ip.attrib["type"] == "backend": + private_ip = ip.text + + nodes.append(Node(id= element.attrib['id'], + name=element.attrib['label'], + state=state, + public_ip= public_ip, + private_ip= private_ip, + driver=self.connection.driver)) + return nodes + + def _to_images(self, object): + images = [] + for element in object.findall("images/image"): + images.append(NodeImage(id = element.attrib["id"], + name = element.attrib["summary"], + driver = self.connection.driver)) + return images diff --git a/trunk/libcloud/compute/drivers/vpsnet.py b/trunk/libcloud/compute/drivers/vpsnet.py new file mode 100644 index 0000000000..c223dbbea6 --- /dev/null +++ b/trunk/libcloud/compute/drivers/vpsnet.py @@ -0,0 +1,183 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +VPS.net driver +""" +import base64 + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.base import ConnectionUserAndKey, JsonResponse +from libcloud.common.types import InvalidCredsError, MalformedResponseError +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation + +API_HOST = 'api.vps.net' +API_VERSION = 'api10json' + +RAM_PER_NODE = 256 +DISK_PER_NODE = 10 +BANDWIDTH_PER_NODE = 250 + + +class VPSNetResponse(JsonResponse): + + def parse_body(self): + try: + return super(VPSNetResponse, self).parse_body() + except MalformedResponseError: + return self.body + + def success(self): + # vps.net wrongly uses 406 for invalid auth creds + if self.status == 406 or self.status == 403: + raise InvalidCredsError() + return True + + def parse_error(self): + try: + errors = super(VPSNetResponse, self).parse_body()['errors'][0] + except MalformedResponseError: + return self.body + else: + return "\n".join(errors) + +class VPSNetConnection(ConnectionUserAndKey): + """ + Connection class for the VPS.net driver + """ + + host = API_HOST + responseCls = VPSNetResponse + + def add_default_headers(self, headers): + user_b64 = base64.b64encode('%s:%s' % (self.user_id, self.key)) + headers['Authorization'] = 'Basic %s' % (user_b64) + return headers + +class VPSNetNodeDriver(NodeDriver): + """ + VPS.net node driver + """ + + type = Provider.VPSNET + api_name = 'vps_net' + 
name = "vps.net" + connectionCls = VPSNetConnection + + def _to_node(self, vm): + if vm['running']: + state = NodeState.RUNNING + else: + state = NodeState.PENDING + + n = Node(id=vm['id'], + name=vm['label'], + state=state, + public_ip=[vm.get('primary_ip_address', None)], + private_ip=[], + extra={'slices_count':vm['slices_count']}, # Number of nodes consumed by VM + driver=self.connection.driver) + return n + + def _to_image(self, image, cloud): + image = NodeImage(id=image['id'], + name="%s: %s" % (cloud, image['label']), + driver=self.connection.driver) + + return image + + def _to_size(self, num): + size = NodeSize(id=num, + name="%d Node" % (num,), + ram=RAM_PER_NODE * num, + disk=DISK_PER_NODE, + bandwidth=BANDWIDTH_PER_NODE * num, + price=self._get_price_per_node(num) * num, + driver=self.connection.driver) + return size + + def _get_price_per_node(self, num): + single_node_price = self._get_size_price(size_id='1') + return num * single_node_price + + def create_node(self, name, image, size, **kwargs): + """Create a new VPS.net node + + See L{NodeDriver.create_node} for more keyword args. 
+ @keyword ex_backups_enabled: Enable automatic backups + @type ex_backups_enabled: C{bool} + + @keyword ex_fqdn: Fully Qualified domain of the node + @type ex_fqdn: C{string} + """ + headers = {'Content-Type': 'application/json'} + request = {'virtual_machine': + {'label': name, + 'fqdn': kwargs.get('ex_fqdn', ''), + 'system_template_id': image.id, + 'backups_enabled': kwargs.get('ex_backups_enabled', 0), + 'slices_required': size.id}} + + res = self.connection.request('/virtual_machines.%s' % (API_VERSION,), + data=json.dumps(request), + headers=headers, + method='POST') + node = self._to_node(res.object['virtual_machine']) + return node + + def reboot_node(self, node): + res = self.connection.request('/virtual_machines/%s/%s.%s' % + (node.id, 'reboot', API_VERSION), + method="POST") + node = self._to_node(res.object['virtual_machine']) + return True + + def list_sizes(self, location=None): + res = self.connection.request('/nodes.%s' % (API_VERSION,)) + available_nodes = len([size for size in res.object + if size['slice']['virtual_machine_id']]) + sizes = [self._to_size(i) for i in range(1, available_nodes + 1)] + return sizes + + def destroy_node(self, node): + res = self.connection.request('/virtual_machines/%s.%s' + % (node.id, API_VERSION), + method='DELETE') + return res.status == 200 + + def list_nodes(self): + res = self.connection.request('/virtual_machines.%s' % (API_VERSION,)) + return [self._to_node(i['virtual_machine']) for i in res.object] + + def list_images(self, location=None): + res = self.connection.request('/available_clouds.%s' % (API_VERSION,)) + + images = [] + for cloud in res.object: + label = cloud['cloud']['label'] + templates = cloud['cloud']['system_templates'] + images.extend([self._to_image(image, label) + for image in templates]) + + return images + + def list_locations(self): + return [NodeLocation(0, "VPS.net Western US", 'US', self)] diff --git a/trunk/libcloud/compute/providers.py b/trunk/libcloud/compute/providers.py new file 
mode 100644 index 0000000000..9a46c25e55 --- /dev/null +++ b/trunk/libcloud/compute/providers.py @@ -0,0 +1,101 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Provider related utilities +""" + +from libcloud.utils import get_driver as _get_provider_driver +from libcloud.compute.types import Provider + +__all__ = [ + "Provider", + "DRIVERS", + "get_driver"] + +DRIVERS = { + Provider.DUMMY: + ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'), + Provider.EC2_US_EAST: + ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'), + Provider.EC2_EU_WEST: + ('libcloud.compute.drivers.ec2', 'EC2EUNodeDriver'), + Provider.EC2_US_WEST: + ('libcloud.compute.drivers.ec2', 'EC2USWestNodeDriver'), + Provider.EC2_US_WEST_OREGON: + ('libcloud.compute.drivers.ec2', 'EC2USWestOregonNodeDriver'), + Provider.EC2_AP_SOUTHEAST: + ('libcloud.compute.drivers.ec2', 'EC2APSENodeDriver'), + Provider.EC2_AP_NORTHEAST: + ('libcloud.compute.drivers.ec2', 'EC2APNENodeDriver'), + Provider.ECP: + ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'), + Provider.ELASTICHOSTS_UK1: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK1NodeDriver'), + Provider.ELASTICHOSTS_UK2: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK2NodeDriver'), + 
Provider.ELASTICHOSTS_US1: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS1NodeDriver'), + Provider.SKALICLOUD: + ('libcloud.compute.drivers.skalicloud', 'SkaliCloudNodeDriver'), + Provider.SERVERLOVE: + ('libcloud.compute.drivers.serverlove', 'ServerLoveNodeDriver'), + Provider.CLOUDSIGMA: + ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaZrhNodeDriver'), + Provider.GOGRID: + ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'), + Provider.RACKSPACE: + ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'), + Provider.RACKSPACE_UK: + ('libcloud.compute.drivers.rackspace', 'RackspaceUKNodeDriver'), + Provider.SLICEHOST: + ('libcloud.compute.drivers.slicehost', 'SlicehostNodeDriver'), + Provider.VPSNET: + ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'), + Provider.LINODE: + ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'), + Provider.RIMUHOSTING: + ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'), + Provider.VOXEL: + ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'), + Provider.SOFTLAYER: + ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'), + Provider.EUCALYPTUS: + ('libcloud.compute.drivers.ec2', 'EucNodeDriver'), + Provider.IBM: + ('libcloud.compute.drivers.ibm_sbc', 'IBMNodeDriver'), + Provider.OPENNEBULA: + ('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'), + Provider.DREAMHOST: + ('libcloud.compute.drivers.dreamhost', 'DreamhostNodeDriver'), + Provider.BRIGHTBOX: + ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'), + Provider.NIMBUS: + ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'), + Provider.BLUEBOX: + ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'), + Provider.GANDI: + ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'), + Provider.OPSOURCE: + ('libcloud.compute.drivers.opsource', 'OpsourceNodeDriver'), + Provider.OPENSTACK: + ('libcloud.compute.drivers.openstack', 'OpenStackNodeDriver'), + Provider.NINEFOLD: + 
('libcloud.compute.drivers.ninefold', 'NinefoldNodeDriver'), + Provider.TERREMARK: + ('libcloud.compute.drivers.vcloud', 'TerremarkDriver') +} + +def get_driver(provider): + return _get_provider_driver(DRIVERS, provider) diff --git a/trunk/libcloud/compute/ssh.py b/trunk/libcloud/compute/ssh.py new file mode 100644 index 0000000000..0f1b81da5b --- /dev/null +++ b/trunk/libcloud/compute/ssh.py @@ -0,0 +1,200 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Wraps multiple ways to communicate over SSH +""" +have_paramiko = False + +try: + import paramiko + have_paramiko = True +except ImportError: + pass + +# Depending on your version of Paramiko, it may cause a deprecation +# warning on Python 2.6. +# Ref: https://bugs.launchpad.net/paramiko/+bug/392973 + +from os.path import split as psplit + +class BaseSSHClient(object): + """ + Base class representing a connection over SSH/SCP to a remote node. + """ + + def __init__(self, hostname, port=22, username='root', password=None, + key=None, timeout=None): + """ + @type hostname: C{str} + @keyword hostname: Hostname or IP address to connect to. + + @type port: C{int} + @keyword port: TCP port to communicate on, defaults to 22. 
+ + @type username: C{str} + @keyword username: Username to use, defaults to root. + + @type password: C{str} + @keyword password: Password to authenticate with. + + @type key: C{list} + @keyword key: Private SSH keys to authenticate with. + """ + self.hostname = hostname + self.port = port + self.username = username + self.password = password + self.key = key + self.timeout = timeout + + def connect(self): + """ + Connect to the remote node over SSH. + + @return: C{bool} + """ + raise NotImplementedError, \ + 'connect not implemented for this ssh client' + + def put(self, path, contents=None, chmod=None): + """ + Upload a file to the remote node. + + @type path: C{str} + @keyword path: File path on the remote node. + + @type contents: C{str} + @keyword contents: File Contents. + + @type chmod: C{int} + @keyword chmod: chmod file to this after creation. + """ + raise NotImplementedError, \ + 'put not implemented for this ssh client' + + def delete(self, path): + """ + Delete/Unlink a file on the remote node. + + @type path: C{str} + @keyword path: File path on the remote node. + """ + raise NotImplementedError, \ + 'delete not implemented for this ssh client' + + def run(self, cmd): + """ + Run a command on a remote node. + + @type cmd: C{str} + @keyword cmd: Command to run. + + @return C{list} of [stdout, stderr, exit_status] + """ + raise NotImplementedError, \ + 'run not implemented for this ssh client' + + def close(self): + """ + Shutdown connection to the remote node. + """ + raise NotImplementedError, \ + 'close not implemented for this ssh client' + +class ParamikoSSHClient(BaseSSHClient): + """ + A SSH Client powered by Paramiko. 
+ """ + def __init__(self, hostname, port=22, username='root', password=None, + key=None, timeout=None): + super(ParamikoSSHClient, self).__init__(hostname, port, username, + password, key, timeout) + self.client = paramiko.SSHClient() + self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + def connect(self): + conninfo = {'hostname': self.hostname, + 'port': self.port, + 'username': self.username, + 'allow_agent': False, + 'look_for_keys': False} + + if self.password: + conninfo['password'] = self.password + elif self.key: + conninfo['key_filename'] = self.key + else: + raise Exception('must specify either password or key_filename') + + if self.timeout: + conninfo['timeout'] = self.timeout + + self.client.connect(**conninfo) + return True + + def put(self, path, contents=None, chmod=None): + sftp = self.client.open_sftp() + # less than ideal, but we need to mkdir stuff otherwise file() fails + head, tail = psplit(path) + if path[0] == "/": + sftp.chdir("/") + for part in head.split("/"): + if part != "": + try: + sftp.mkdir(part) + except IOError: + # so, there doesn't seem to be a way to + # catch EEXIST consistently *sigh* + pass + sftp.chdir(part) + ak = sftp.file(tail, mode='w') + ak.write(contents) + if chmod is not None: + ak.chmod(chmod) + ak.close() + sftp.close() + + def delete(self, path): + sftp = self.client.open_sftp() + sftp.unlink(path) + sftp.close() + + def run(self, cmd): + # based on exec_command() + bufsize = -1 + t = self.client.get_transport() + chan = t.open_session() + chan.exec_command(cmd) + stdin = chan.makefile('wb', bufsize) + stdout = chan.makefile('rb', bufsize) + stderr = chan.makefile_stderr('rb', bufsize) + #stdin, stdout, stderr = self.client.exec_command(cmd) + stdin.close() + status = chan.recv_exit_status() + so = stdout.read() + se = stderr.read() + return [so, se, status] + + def close(self): + self.client.close() + +class ShellOutSSHClient(BaseSSHClient): + # TODO: write this one + pass + +SSHClient = 
ParamikoSSHClient +if not have_paramiko: + SSHClient = ShellOutSSHClient diff --git a/trunk/libcloud/compute/types.py b/trunk/libcloud/compute/types.py new file mode 100644 index 0000000000..a652423fb4 --- /dev/null +++ b/trunk/libcloud/compute/types.py @@ -0,0 +1,140 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Base types used by other parts of libcloud +""" + +from libcloud.common.types import LibcloudError, MalformedResponseError +from libcloud.common.types import InvalidCredsError, InvalidCredsException +__all__ = [ + "Provider", + "NodeState", + "DeploymentError", + "DeploymentException", + + # @@TR: should the unused imports below be exported? + "LibcloudError", + "MalformedResponseError", + "InvalidCredsError", + "InvalidCredsException" + ] +class Provider(object): + """ + Defines for each of the supported providers + + @cvar DUMMY: Example provider + @cvar EC2_US_EAST: Amazon AWS US N. Virgina + @cvar EC2_US_WEST: Amazon AWS US N. 
class NodeState(object):
    """
    Standard states for a node.

    @cvar RUNNING: Node is running
    @cvar REBOOTING: Node is rebooting
    @cvar TERMINATED: Node is terminated
    @cvar PENDING: Node is pending
    @cvar UNKNOWN: Node state is unknown
    """
    # Enumerated in one unpacking so the ordinal assignment is explicit.
    RUNNING, REBOOTING, TERMINATED, PENDING, UNKNOWN = range(5)
+ + @ivar node: L{Node} on which this exception happened, you might want to call L{Node.destroy} + """ + def __init__(self, node, original_exception=None): + self.node = node + self.value = original_exception + def __str__(self): + return repr(self.value) + +"""Deprecated alias of L{DeploymentException}""" +DeploymentException = DeploymentError diff --git a/trunk/libcloud/data/pricing.json b/trunk/libcloud/data/pricing.json new file mode 100644 index 0000000000..c9eb1e263f --- /dev/null +++ b/trunk/libcloud/data/pricing.json @@ -0,0 +1,170 @@ +{ + "compute": { + "bluebox": { + "1gb": 0.15, + "2gb": 0.25, + "4gb": 0.35, + "8gb": 0.45 + }, + + "rackspace": { + "1": 0.015, + "2": 0.030, + "3": 0.060, + "4": 0.120, + "5": 0.240, + "6": 0.480, + "7": 0.960 + }, + + "dreamhost": { + "minimum": 15, + "maximum": 200, + "default": 115, + "low": 50, + "high": 150 + }, + + "ec2_us_east": { + "t1.micro": 0.02, + "m1.small": 0.085, + "m1.large": 0.34, + "m1.xlarge": 0.68, + "c1.medium": 0.17, + "c1.xlarge": 0.68, + "m2.xlarge": 0.50, + "m2.2xlarge": 1.0, + "m2.4xlarge": 2.0, + "cg1.4xlarge": 2.1, + "cc1.4xlarge": 1.6 + }, + + "ec2_us_west": { + "t1.micro": 0.025, + "m1.small": 0.095, + "m1.large": 0.38, + "m1.xlarge": 0.76, + "c1.medium": 0.19, + "c1.xlarge": 0.76, + "m2.xlarge": 0.57, + "m2.2xlarge": 1.14, + "m2.4xlarge": 2.28 + }, + + "ec2_us_west_oregon": { + "t1.micro": 0.02, + "m1.small": 0.085, + "m1.large": 0.34, + "m1.xlarge": 0.68, + "c1.medium": 0.17, + "c1.xlarge": 0.68, + "m2.xlarge": 0.50, + "m2.2xlarge": 1.0, + "m2.4xlarge": 2.0 + }, + + "ec2_eu_west": { + "t1.micro": 0.025, + "m1.small": 0.095, + "m1.large": 0.38, + "m1.xlarge": 0.76, + "c1.medium": 0.19, + "c1.xlarge": 0.76, + "m2.xlarge": 0.57, + "m2.2xlarge": 1.14, + "m2.4xlarge": 2.28 + }, + + "ec2_ap_southeast": { + "t1.micro": 0.025, + "m1.small": 0.095, + "m1.large": 0.38, + "m1.xlarge": 0.76, + "c1.medium": 0.19, + "c1.xlarge": 0.76, + "m2.xlarge": 0.57, + "m2.2xlarge": 1.14, + "m2.4xlarge": 2.28 + }, + + 
"ec2_ap_northeast": { + "t1.micro": 0.027, + "m1.small": 0.10, + "m1.large": 0.40, + "m1.xlarge": 0.80, + "c1.medium": 0.20, + "c1.xlarge": 0.80, + "m2.xlarge": 0.60, + "m2.2xlarge": 1.20, + "m2.4xlarge": 2.39 + }, + + "nimbus" : { + "m1.small": 0.0, + "m1.large": 0.0, + "m1.xlarge": 0.0 + }, + + "cloudsigma_zrh": { + "micro-regular": 0.0548, + "micro-high-cpu": 0.381, + "standard-small": 0.0796, + "standard-large": 0.381, + "standard-extra-large": 0.762, + "high-memory-extra-large": 0.642, + "high-memory-double-extra-large": 1.383, + "high-cpu-medium": 0.211, + "high-cpu-extra-large": 0.780 + }, + + "elastichosts": { + "small": 0.100, + "medium": 0.223, + "large": 0.378, + "extra-large": 0.579, + "high-cpu-medium": 0.180, + "high-cpu-extra-large": 0.770 + }, + + "skalicloud": { + "small": 0.136, + "medium": 0.301, + "large": 0.505, + "extra-large": 0.654, + "high-cpu-medium": 0.249, + "high-cpu-extra-large": 0.936 + }, + + "serverlove": { + "small": 0.161, + "medium": 0.404, + "large": 0.534, + "extra-large": 0.615, + "high-cpu-medium": 0.291, + "high-cpu-extra-large": 0.776 + }, + + "gogrid": { + "512MB": 0.095, + "1GB": 0.19, + "2GB": 0.38, + "4GB": 0.76, + "8GB": 1.52, + "16GB": 3.04, + "24GB": 4.56 + }, + + "gandi": { + "1": 0.02 + }, + + "vps_net": { + "1": 0.416 + } + }, + + "storage": { + }, + + "updated": 1309019791 +} diff --git a/trunk/libcloud/deployment.py b/trunk/libcloud/deployment.py new file mode 100644 index 0000000000..cbf51c8267 --- /dev/null +++ b/trunk/libcloud/deployment.py @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.deployment import ( # pylint: disable-msg=W0611 + Deployment, + SSHKeyDeployment, + ScriptDeployment, + MultiStepDeployment + ) + +__all__ = [ + "Deployment", + "SSHKeyDeployment", + "ScriptDeployment", + "MultiStepDeployment" + ] + +deprecated_warning(__name__) diff --git a/trunk/libcloud/dns/__init__.py b/trunk/libcloud/dns/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/trunk/libcloud/dns/base.py b/trunk/libcloud/dns/base.py new file mode 100644 index 0000000000..95492c0950 --- /dev/null +++ b/trunk/libcloud/dns/base.py @@ -0,0 +1,301 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + 'Zone', + 'Record', + 'DNSDriver' +] + + +from libcloud.common.base import ConnectionUserAndKey, BaseDriver +from libcloud.dns.types import RecordType + + +class Zone(object): + """ + DNS zone. + """ + + def __init__(self, id, domain, type, ttl, driver, extra=None): + """ + @type id: C{str} + @param id: Zone id. + + @type domain: C{str} + @param domain: The name of the domain. + + @type type: C{string} + @param type: Zone type (master, slave). + + @type ttl: C{int} + @param ttl: Default TTL for records in this zone (in seconds). + + @type driver: C{DNSDriver} + @param driver: DNSDriver instance. + + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + """ + self.id = str(id) if id else None + self.domain = domain + self.type = type + self.ttl = ttl or None + self.driver = driver + self.extra = extra or {} + + def list_records(self): + return self.driver.list_records(zone=self) + + def create_record(self, name, type, data, extra=None): + return self.driver.create_record(name=name, zone=self, type=type, + data=data, extra=extra) + + def update(self, domain=None, type=None, ttl=None, extra=None): + return self.driver.update_zone(zone=self, domain=domain, type=type, + ttl=ttl, extra=extra) + + def delete(self): + return self.driver.delete_zone(zone=self) + + def __repr__(self): + return ('' % + (self.domain, self.ttl, self.driver.name)) + + +class Record(object): + """ + Zone record / resource. + """ + + def __init__(self, id, name, type, data, zone, driver, extra=None): + """ + @type id: C{str} + @param id: Record id + + @type name: C{str} + @param name: Hostname or FQDN. + + @type type: C{RecordType} + @param type: DNS record type (A, AAAA, ...). + + @type data: C{str} + @param data: Data for the record (depends on the record type). + + @type zone: C{Zone} + @param zone: Zone instance. + + @type driver: C{DNSDriver} + @param driver: DNSDriver instance. 
+ + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + """ + self.id = str(id) if id else None + self.name = name + self.type = type + self.data = data + self.zone = zone + self.driver = driver + self.extra = extra or {} + + def update(self, name=None, type=None, data=None, extra=None): + return self.driver.update_record(record=self, name=name, type=type, + data=data, extra=extra) + + def delete(self): + return self.driver.delete_record(record=self) + + def __repr__(self): + return ('' % + (self.zone.id, self.name, RecordType.__repr__(self.type), + self.data, self.driver.name)) + + +class DNSDriver(BaseDriver): + """ + DNS driver. + """ + connectionCls = ConnectionUserAndKey + name = None + + def __init__(self, key, secret=None, secure=True, host=None, port=None): + super(DNSDriver, self).__init__(key=key, secret=secret, secure=secure, + host=host, port=port) + + def list_record_types(self): + """ + Return a list of RecordType objects supported by the provider. + + @return: A list of C{RecordType} instances. + """ + raise NotImplementedError( + 'list_record_types not implemented for this driver') + + def list_zones(self): + """ + Return a list of zones. + + @return: A list of C{Zone} instances. + """ + raise NotImplementedError( + 'list_zones not implemented for this driver') + + def list_records(self, zone): + """ + Return a list of records for the provided zone. + + @type zone: C{Zone} + @param zone: Zone to list records for. + + @return: A list of C{Record} instances. + """ + raise NotImplementedError( + 'list_records not implemented for this driver') + + def get_zone(self, zone_id): + """ + Return a Zone instance. + + @return: C{Zone} instance. + """ + raise NotImplementedError( + 'get_zone not implemented for this driver') + + def get_record(self, zone_id, record_id): + """ + Return a Record instance. + + @return: C{Record} instance. 
+ """ + raise NotImplementedError( + 'get_record not implemented for this driver') + + def create_zone(self, domain, type='master', ttl=None, extra=None): + """ + Create a new zone. + + @type domain: C{string} + @param domain: Zone domain name. + + @type type: C{string} + @param type: Zone type (master / slave). + + @param ttl: C{int} + @param ttl: (optional) TTL for new records. + + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + """ + raise NotImplementedError( + 'create_zone not implemented for this driver') + + def update_zone(self, zone, domain, type='master', ttl=None, extra=None): + """ + Update en existing zone. + + @type zone: C{Zone} + @param zone: Zone to update. + + @type domain: C{string} + @param domain: Zone domain name. + + @type type: C{string} + @param type: Zone type (master / slave). + + @param ttl: C{int} + @param ttl: (optional) TTL for new records. + + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + """ + raise NotImplementedError( + 'update_zone not implemented for this driver') + + def create_record(self, name, zone, type, data, extra=None): + """ + Create a new record. + + @param name: C{string} + @type name: Hostname or FQDN. + + @type zone: C{Zone} + @param zone: Zone where the requested record is created. + + @type type: C{RecordType} + @param type: DNS record type (A, AAAA, ...). + + @type data: C{str} + @param data: Data for the record (depends on the record type). + + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + """ + raise NotImplementedError( + 'create_record not implemented for this driver') + + def update_record(self, record, name, type, data, extra): + """ + Update an existing record. + + @param record: C{Record} + @type record: Record to update. + + @param name: C{string} + @type name: Hostname or FQDN. + + @type type: C{RecordType} + @param type: DNS record type (A, AAAA, ...). 
+ + @type data: C{str} + @param data: Data for the record (depends on the record type). + + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + """ + raise NotImplementedError( + 'update_record not implemented for this driver') + + def delete_zone(self, zone): + """ + Delete a zone. + + Note: This will delete all the records belonging to this zone. + + @param zone: C{Zone} + @type zone: Zone to delete. + """ + raise NotImplementedError( + 'delete_zone not implemented for this driver') + + def delete_record(self, record): + """ + Delete a record. + + @param record: C{Record} + @type record: Record to delete. + """ + raise NotImplementedError( + 'delete_record not implemented for this driver') + + def _string_to_record_type(self, string): + """ + Return a string representation of a DNS record type to a + libcloud RecordType ENUM. + """ + string = string.upper() + record_type = getattr(RecordType, string) + return record_type diff --git a/trunk/libcloud/dns/drivers/__init__.py b/trunk/libcloud/dns/drivers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/trunk/libcloud/dns/drivers/dummy.py b/trunk/libcloud/dns/drivers/dummy.py new file mode 100644 index 0000000000..caf83d2ae7 --- /dev/null +++ b/trunk/libcloud/dns/drivers/dummy.py @@ -0,0 +1,181 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.dns.base import DNSDriver, Zone, Record +from libcloud.dns.types import RecordType +from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.types import RecordAlreadyExistsError + + +class DummyDNSDriver(DNSDriver): + """ + Dummy DNS driver. + + >>> from libcloud.dns.drivers.dummy import DummyDNSDriver + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.name + 'Dummy DNS Provider' + """ + + name = 'Dummy DNS Provider' + + def __init__(self, api_key, api_secret): + self._zones = {} + + def list_record_types(self): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.list_record_types() + [0] + """ + return [RecordType.A] + + def list_zones(self): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.list_zones() + [] + """ + + return [zone['zone'] for zone in self._zones.values()] + + def list_records(self, zone): + return self.get_zone(zone_id=zone.id).records + + def get_zone(self, zone_id): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.get_zone(zone_id='foobar') + ... #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ZoneDoesNotExistError: + """ + + if zone_id not in self._zones: + raise ZoneDoesNotExistError(driver=self, value=None, + zone_id=zone_id) + + return self._zones[zone_id]['zone'] + + def get_record(self, zone_id, record_id): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> driver.get_record(zone_id='doesnotexist', record_id='exists') + ... 
#doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ZoneDoesNotExistError: + """ + + self.get_zone(zone_id=zone_id) + zone_records = self._zones[zone_id]['records'] + + if record_id not in zone_records: + raise RecordDoesNotExistError(record_id=record_id, value=None, + driver=self) + + return zone_records[record_id] + + def create_zone(self, domain, type='master', ttl=None, extra=None): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + >>> zone + + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + ... #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ZoneAlreadyExistsError: + """ + + id = 'id-%s' % (domain) + + if id in self._zones: + raise ZoneAlreadyExistsError(zone_id=id, value=None, driver=self) + + zone = Zone(id=id, domain=domain, type=type, ttl=ttl, extra={}, + driver=self) + self._zones[id] = {'zone': zone, + 'records': {}} + return zone + + def create_record(self, name, zone, type, data, extra=None): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + >>> record = driver.create_record(name='libcloud', zone=zone, + ... type=RecordType.A, data='127.0.0.1') + >>> record #doctest: +ELLIPSIS + + >>> record = driver.create_record(name='libcloud', zone=zone, + ... type=RecordType.A, data='127.0.0.1') + ... 
#doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + RecordAlreadyExistsError: + """ + id = 'id-%s' % (name) + + zone = self.get_zone(zone_id=zone.id) + + if id in self._zones[zone.id]['records']: + raise RecordAlreadyExistsError(record_id=id, value=None, + driver=self) + + record = Record(id=id, name=name, type=type, data=data, extra=extra, + zone=zone, driver=self) + self._zones[zone.id]['records'][id] = record + return record + + def delete_zone(self, zone): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + >>> driver.delete_zone(zone) + True + >>> driver.delete_zone(zone) #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ZoneDoesNotExistError: + """ + self.get_zone(zone_id=zone.id) + + del self._zones[zone.id] + return True + + def delete_record(self, record): + """ + >>> driver = DummyDNSDriver('key', 'secret') + >>> zone = driver.create_zone(domain='apache.org', type='master', + ... ttl=100) + >>> record = driver.create_record(name='libcloud', zone=zone, + ... type=RecordType.A, data='127.0.0.1') + >>> driver.delete_record(record) + True + >>> driver.delete_record(record) #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + RecordDoesNotExistError: + """ + self.get_record(zone_id=record.zone.id, record_id=record.id) + + del self._zones[record.zone.id]['records'][record.id] + return True + + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/trunk/libcloud/dns/drivers/linode.py b/trunk/libcloud/dns/drivers/linode.py new file mode 100644 index 0000000000..4a8e320464 --- /dev/null +++ b/trunk/libcloud/dns/drivers/linode.py @@ -0,0 +1,270 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + 'LinodeDNSDriver' +] + + +from libcloud.utils import merge_valid_keys, get_new_obj +from libcloud.common.linode import (API_ROOT, LinodeException, + LinodeConnection, LinodeResponse, + LINODE_PLAN_IDS) +from libcloud.common.linode import API_HOST, API_ROOT, LinodeException +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + + +VALID_ZONE_EXTRA_PARAMS = ['SOA_Email', 'Refresh_sec', 'Retry_sec', + 'Expire_sec', 'status', 'master_ips'] + +VALID_RECORD_EXTRA_PARAMS = ['Priority', 'Weight', 'Port', 'Protocol', + 'TTL_sec'] + +RECORD_TYPE_MAP = { + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + RecordType.CNAME: 'CNAME', + RecordType.TXT: 'TXT', + RecordType.SRV: 'SRV', +} + + +class LinodeDNSResponse(LinodeResponse): + def _make_excp(self, error): + result = super(LinodeDNSResponse, self)._make_excp(error) + if isinstance(result, LinodeException) and result.code == 5: + context = self.connection.context + + if context['resource'] == 'zone': + result = ZoneDoesNotExistError(value='', + driver=self.connection.driver, + zone_id=context['id']) + + elif context['resource'] == 'record': + result = RecordDoesNotExistError(value='', + driver=self.connection.driver, + record_id=context['id']) + return result + + +class LinodeDNSConnection(LinodeConnection): + 
responseCls = LinodeDNSResponse + + +class LinodeDNSDriver(DNSDriver): + type = Provider.LINODE + name = 'Linode DNS' + connectionCls = LinodeDNSConnection + + def list_record_types(self): + return RECORD_TYPE_MAP.keys() + + def list_zones(self): + params = {'api_action': 'domain.list'} + data = self.connection.request(API_ROOT, params=params).objects[0] + zones = self._to_zones(data) + return zones + + def list_records(self, zone): + params = {'api_action': 'domain.resource.list', 'DOMAINID': zone.id} + + self.connection.set_context(context={'resource': 'zone', + 'id': zone.id}) + data = self.connection.request(API_ROOT, params=params).objects[0] + records = self._to_records(items=data, zone=zone) + return records + + def get_zone(self, zone_id): + params = {'api_action': 'domain.list', 'DomainID': zone_id} + data = self.connection.request(API_ROOT, params=params).objects[0] + zones = self._to_zones(data) + + if len(zones) != 1: + raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id) + + return zones[0] + + def get_record(self, zone_id, record_id): + zone = self.get_zone(zone_id=zone_id) + params = {'api_action': 'domain.resource.list', 'DomainID': zone_id, + 'ResourceID': record_id} + data = self.connection.request(API_ROOT, params=params).objects[0] + records = self._to_records(items=data, zone=zone) + + if len(records) != 1: + raise RecordDoesNotExistError(value='', driver=self, + record_id=record_id) + + return records[0] + + def create_zone(self, domain, type='master', ttl=None, extra=None): + """ + Create a new zone. 
+ + API docs: http://www.linode.com/api/dns/domain.create + """ + params = {'api_action': 'domain.create', 'Type': type, + 'Domain': domain} + + if ttl: + params['TTL_sec'] = ttl + + merged = merge_valid_keys(params=params, + valid_keys=VALID_ZONE_EXTRA_PARAMS, + extra=extra) + data = self.connection.request(API_ROOT, params=params).objects[0] + zone = Zone(id=data['DomainID'], domain=domain, type=type, ttl=ttl, + extra=merged, driver=self) + return zone + + def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): + """ + Update an existing zone. + + API docs: http://www.linode.com/api/dns/domain.update + """ + params = {'api_action': 'domain.update', 'DomainID': zone.id} + + if type: + params['Type'] = type + + if domain: + params['Domain'] = domain + + if ttl: + params['TTL_sec'] = ttl + + merged = merge_valid_keys(params=params, + valid_keys=VALID_ZONE_EXTRA_PARAMS, + extra=extra) + data = self.connection.request(API_ROOT, params=params).objects[0] + updated_zone = get_new_obj(obj=zone, klass=Zone, + attributes={'domain': domain, + 'type': type, 'ttl': ttl, + 'extra': merged}) + return updated_zone + + def create_record(self, name, zone, type, data, extra=None): + """ + Create a new record. + + API docs: http://www.linode.com/api/dns/domain.resource.create + """ + params = {'api_action': 'domain.resource.create', 'DomainID': zone.id, + 'Name': name, 'Target': data, 'Type': RECORD_TYPE_MAP[type]} + merged = merge_valid_keys(params=params, + valid_keys=VALID_RECORD_EXTRA_PARAMS, + extra=extra) + + result = self.connection.request(API_ROOT, params=params).objects[0] + record = Record(id=result['ResourceID'], name=name, type=type, + data=data, extra=merged, zone=zone, driver=self) + return record + + def update_record(self, record, name=None, type=None, data=None, + extra=None): + """ + Update an existing record. 
+ + API docs: http://www.linode.com/api/dns/domain.resource.update + """ + params = {'api_action': 'domain.resource.update', + 'ResourceID': record.id, 'DomainID': record.zone.id} + + if name: + params['Name'] = name + + if data: + params['Target'] = data + + if type: + params['Type'] = RECORD_TYPE_MAP[type] + + merged = merge_valid_keys(params=params, + valid_keys=VALID_RECORD_EXTRA_PARAMS, + extra=extra) + + result = self.connection.request(API_ROOT, params=params).objects[0] + updated_record = get_new_obj(obj=record, klass=Record, + attributes={'name': name, 'data': data, + 'type': type, + 'extra': merged}) + return updated_record + + def delete_zone(self, zone): + params = {'api_action': 'domain.delete', 'DomainID': zone.id} + + self.connection.set_context(context={'resource': 'zone', + 'id': zone.id}) + data = self.connection.request(API_ROOT, params=params).objects[0] + + return 'DomainID' in data + + def delete_record(self, record): + params = {'api_action': 'domain.resource.delete', + 'DomainID': record.zone.id, 'ResourceID': record.id} + + self.connection.set_context(context={'resource': 'record', + 'id': record.id}) + data = self.connection.request(API_ROOT, params=params).objects[0] + + return 'ResourceID' in data + + def _to_zones(self, items): + """ + Convert a list of items to the Zone objects. + """ + zones = [] + + for item in items: + zones.append(self._to_zone(item)) + + return zones + + def _to_zone(self, item): + """ + Build an Zone object from the item dictionary. + """ + extra = {'SOA_Email': item['SOA_EMAIL'], 'status': item['STATUS'], + 'description': item['DESCRIPTION']} + zone = Zone(id=item['DOMAINID'], domain=item['DOMAIN'], + type=item['TYPE'], ttl=item['TTL_SEC'], driver=self, + extra=extra) + return zone + + def _to_records(self, items, zone=None): + """ + Convert a list of items to the Record objects. 
+ """ + records = [] + + for item in items: + records.append(self._to_record(item=item, zone=zone)) + + return records + + def _to_record(self, item, zone=None): + """ + Build a Record object from the item dictionary. + """ + extra = {'protocol': item['PROTOCOL'], 'ttl_sec': item['TTL_SEC'], + 'port': item['PORT'], 'weight': item['WEIGHT']} + type = self._string_to_record_type(item['TYPE']) + record = Record(id=item['RESOURCEID'], name=item['NAME'], type=type, + data=item['TARGET'], zone=zone, driver=self, + extra=extra) + return record diff --git a/trunk/libcloud/dns/drivers/rackspace.py b/trunk/libcloud/dns/drivers/rackspace.py new file mode 100644 index 0000000000..7a166726bd --- /dev/null +++ b/trunk/libcloud/dns/drivers/rackspace.py @@ -0,0 +1,351 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + 'RackspaceUSDNSDriver', + 'RackspaceUKDNSDriver' +] + +import httplib +import copy + +from libcloud.common.base import PollingConnection +from libcloud.common.types import LibcloudError +from libcloud.utils import merge_valid_keys, get_new_obj +from libcloud.common.rackspace import AUTH_URL_US, AUTH_URL_UK +from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection +from libcloud.compute.drivers.openstack import OpenStack_1_1_Response + +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + +VALID_ZONE_EXTRA_PARAMS = ['email', 'comment', 'ns1'] +VALID_RECORD_EXTRA_PARAMS = ['ttl', 'comment'] + +RECORD_TYPE_MAP = { + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + RecordType.CNAME: 'CNAME', + RecordType.MX: 'MX', + RecordType.NS: 'NS', + RecordType.TXT: 'TXT', + RecordType.SRV: 'SRV', +} + + +class RackspaceDNSResponse(OpenStack_1_1_Response): + """ + Rackspace DNS Response class. + """ + + def parse_error(self): + status = int(self.status) + context = self.connection.context + body = self.parse_body() + + if status == httplib.NOT_FOUND: + if context['resource'] == 'zone': + raise ZoneDoesNotExistError(value='', driver=self, + zone_id=context['id']) + elif context['resource'] == 'record': + raise RecordDoesNotExistError(value='', driver=self, + record_id=context['id']) + + if 'code' and 'message' in body: + err = '%s - %s (%s)' % (body['code'], body['message'], + body['details']) + elif 'validationErrors' in body: + errors = [m for m in body['validationErrors']['messages']] + err = 'Validation errors: %s' % ', '.join(errors) + + return err + + +class RackspaceDNSConnection(OpenStack_1_1_Connection, PollingConnection): + """ + Rackspace DNS Connection class. 
+ """ + + responseCls = RackspaceDNSResponse + _url_key = 'dns_url' + XML_NAMESPACE = None + + def get_poll_request_kwargs(self, response, context): + job_id = response.object['jobId'] + kwargs = {'action': '/status/%s' % (job_id), + 'params': {'showDetails': True}} + return kwargs + + def has_completed(self, response): + status = response.object['status'] + if status == 'ERROR': + raise LibcloudError(response.object['error']['message'], + driver=self.driver) + + return status == 'COMPLETED' + + +class RackspaceUSDNSConnection(RackspaceDNSConnection): + auth_url = AUTH_URL_US + + +class RackspaceUKDNSConnection(RackspaceDNSConnection): + auth_url = AUTH_URL_UK + + +class RackspaceDNSDriver(DNSDriver): + def list_record_types(self): + return RECORD_TYPE_MAP.keys() + + def list_zones(self): + response = self.connection.request(action='/domains') + zones = self._to_zones(data=response.object['domains']) + return zones + + def list_records(self, zone): + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + response = self.connection.request(action='/domains/%s' % (zone.id), + params={'showRecord': True}).object + records = self._to_records(data=response['recordsList']['records'], + zone=zone) + return records + + def get_zone(self, zone_id): + self.connection.set_context({'resource': 'zone', 'id': zone_id}) + response = self.connection.request(action='/domains/%s' % (zone_id)) + zone = self._to_zone(data=response.object) + return zone + + def get_record(self, zone_id, record_id): + zone = self.get_zone(zone_id=zone_id) + self.connection.set_context({'resource': 'record', 'id': record_id}) + response = self.connection.request(action='/domains/%s/records/%s' % + (zone_id, record_id)).object + record = self._to_record(data=response, zone=zone) + return record + + def create_zone(self, domain, type='master', ttl=None, extra=None): + extra = extra if extra else {} + + # Email address is required + if not 'email' in extra: + raise ValueError('"email" key must 
be present in extra dictionary') + + payload = {'name': domain, 'emailAddress': extra['email'], + 'recordsList': {'records': []}} + + if ttl: + payload['ttl'] = ttl + + if 'comment' in extra: + payload['comment'] = extra['comment'] + + data = {'domains': [payload]} + response = self.connection.async_request(action='/domains', + method='POST', data=data) + zone = self._to_zone(data=response.object['response']['domains'][0]) + return zone + + def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None): + # Only ttl, comment and email address can be changed + extra = extra if extra else {} + + if domain: + raise LibcloudError('Domain cannot be changed', driver=self) + + data = {} + + if ttl: + data['ttl'] = int(ttl) + + if 'email' in extra: + data['emailAddress'] = extra['email'] + + if 'comment' in extra: + data['comment'] = extra['comment'] + + type = type if type else zone.type + ttl = ttl if ttl else zone.ttl + + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + self.connection.async_request(action='/domains/%s' % (zone.id), + method='PUT', data=data) + merged = merge_valid_keys(params=copy.deepcopy(zone.extra), + valid_keys=VALID_ZONE_EXTRA_PARAMS, + extra=extra) + updated_zone = get_new_obj(obj=zone, klass=Zone, + attributes={'type': type, + 'ttl': ttl, + 'extra': merged}) + return updated_zone + + def create_record(self, name, zone, type, data, extra=None): + # Name must be a FQDN - e.g. 
if domain is "foo.com" then a record + # name is "bar.foo.com" + extra = extra if extra else {} + + name = self._to_full_record_name(domain=zone.domain, name=name) + data = {'name': name, 'type': RECORD_TYPE_MAP[type], 'data': data} + + if 'ttl' in extra: + data['ttl'] = int(extra['ttl']) + + payload = {'records': [data]} + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + response = self.connection.async_request(action='/domains/%s/records' + % (zone.id), data=payload, + method='POST').object + record = self._to_record(data=response['response']['records'][0], + zone=zone) + return record + + def update_record(self, record, name=None, type=None, data=None, + extra=None): + # Only data, ttl, and comment attributes can be modified, but name + # attribute must always be present. + extra = extra if extra else {} + + name = self._to_full_record_name(domain=record.zone.domain, + name=record.name) + payload = {'name': name} + + if data: + payload['data'] = data + + if 'ttl' in extra: + payload['ttl'] = extra['ttl'] + + if 'comment' in extra: + payload['comment'] = extra['comment'] + + type = type if type else record.type + data = data if data else record.data + + self.connection.set_context({'resource': 'record', 'id': record.id}) + self.connection.async_request(action='/domains/%s/records/%s' % + (record.zone.id, record.id), + method='PUT', data=payload) + + merged = merge_valid_keys(params=copy.deepcopy(record.extra), + valid_keys=VALID_RECORD_EXTRA_PARAMS, + extra=extra) + updated_record = get_new_obj(obj=record, klass=Record, + attributes={'type': type, + 'data': data, + 'extra': merged}) + return updated_record + + def delete_zone(self, zone): + self.connection.set_context({'resource': 'zone', 'id': zone.id}) + self.connection.async_request(action='/domains/%s' % (zone.id), + method='DELETE') + return True + + def delete_record(self, record): + self.connection.set_context({'resource': 'record', 'id': record.id}) + 
self.connection.async_request(action='/domains/%s/records/%s' % + (record.zone.id, record.id), + method='DELETE') + return True + + def _to_zones(self, data): + zones = [] + for item in data: + zone = self._to_zone(data=item) + zones.append(zone) + + return zones + + def _to_zone(self, data): + id = data['id'] + domain = data['name'] + type = 'master' + ttl = data.get('ttl', 0) + extra = {} + + if 'emailAddress' in data: + extra['email'] = data['emailAddress'] + + if 'comment' in data: + extra['comment'] = data['comment'] + + zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl), + driver=self, extra=extra) + return zone + + def _to_records(self, data, zone): + records = [] + for item in data: + record = self._to_record(data=item, zone=zone) + records.append(record) + + return records + + def _to_record(self, data, zone): + id = data['id'] + fqdn = data['name'] + name = self._to_partial_record_name(domain=zone.domain, name=fqdn) + type = self._string_to_record_type(data['type']) + record_data = data['data'] + extra = {'fqdn': fqdn} + + if 'ttl' in data: + extra['ttl'] = data['ttl'] + + if 'comment' in data: + extra['comment'] = data['comment'] + + record = Record(id=str(id), name=name, type=type, data=record_data, + zone=zone, driver=self, extra=extra) + return record + + def _to_full_record_name(self, domain, name): + """ + Build a FQDN from a domain and record name. + + @param domain: Domain name. + @type domain: C{str} + + @param name: Record name. + @type name: C{str} + """ + name = '%s.%s' % (name, domain) + return name + + def _to_partial_record_name(self, domain, name): + """ + Strip domain portion from the record name. + + @param domain: Domain name. + @type domain: C{str} + + @param name: Full record name (fqdn). 
+ @type name: C{str} + """ + name = name.replace('.%s' % (domain), '') + return name + + +class RackspaceUSDNSDriver(RackspaceDNSDriver): + name = 'Rackspace DNS (US)' + type = Provider.RACKSPACE_US + connectionCls = RackspaceUSDNSConnection + + +class RackspaceUKDNSDriver(RackspaceDNSDriver): + name = 'Rackspace DNS (UK)' + type = Provider.RACKSPACE_UK + connectionCls = RackspaceUKDNSConnection diff --git a/trunk/libcloud/dns/drivers/zerigo.py b/trunk/libcloud/dns/drivers/zerigo.py new file mode 100644 index 0000000000..c0dcd762f2 --- /dev/null +++ b/trunk/libcloud/dns/drivers/zerigo.py @@ -0,0 +1,451 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
__all__ = [
    'ZerigoDNSDriver'
]


import copy
import base64
import httplib

from xml.etree import ElementTree as ET

from libcloud.utils import fixxpath, findtext, findattr, findall
from libcloud.utils import merge_valid_keys, get_new_obj
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.types import MalformedResponseError, LazyList
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record

API_HOST = 'ns.zerigo.com'
API_VERSION = '1.1'
API_ROOT = '/api/%s/' % (API_VERSION)

VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers']
VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority']

# Number of items per page (maximum limit is 1000)
ITEMS_PER_PAGE = 100

RECORD_TYPE_MAP = {
    RecordType.A: 'A',
    RecordType.AAAA: 'AAAA',
    RecordType.CNAME: 'CNAME',
    RecordType.MX: 'MX',
    RecordType.REDIRECT: 'REDIRECT',
    RecordType.TXT: 'TXT',
    RecordType.SRV: 'SRV',
    RecordType.NAPTR: 'NAPTR',
    RecordType.NS: 'NS',
    RecordType.PTR: 'PTR',
    RecordType.SPF: 'SPF',
}


class ZerigoError(LibcloudError):
    """Error returned by the Zerigo API (non-2xx response with a body)."""

    def __init__(self, code, errors):
        self.code = code
        self.errors = errors or []

    def __str__(self):
        return 'Errors: %s' % (', '.join(self.errors))

    def __repr__(self):
        # FIX: the format string was empty ('' % (...)) which raised a
        # TypeError whenever repr() was called on this exception.
        return ('<ZerigoError response code=%s, errors count=%s>' %
                (self.code, len(self.errors)))


class ZerigoDNSResponse(XmlResponse):
    def success(self):
        return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]

    def parse_error(self):
        """
        Map API error responses to libcloud exceptions.

        Uses the context set via connection.set_context() to decide whether a
        404 refers to a zone or a record.
        """
        status = int(self.status)

        if status == 401:
            if not self.body:
                raise InvalidCredsError(str(self.status) + ': ' + self.error)
            else:
                raise InvalidCredsError(self.body)
        elif status == 404:
            context = self.connection.context
            if context['resource'] == 'zone':
                raise ZoneDoesNotExistError(value='', driver=self,
                                            zone_id=context['id'])
            elif context['resource'] == 'record':
                raise RecordDoesNotExistError(value='', driver=self,
                                              record_id=context['id'])
        elif status != 503:
            try:
                body = ET.XML(self.body)
            # FIX: was a bare "except:" which also swallowed SystemExit /
            # KeyboardInterrupt.
            except Exception:
                raise MalformedResponseError('Failed to parse XML',
                                             body=self.body)

            errors = []
            for error in findall(element=body, xpath='error'):
                errors.append(error.text)

            raise ZerigoError(code=status, errors=errors)

        return self.body


class ZerigoDNSConnection(ConnectionUserAndKey):
    host = API_HOST
    secure = True
    responseCls = ZerigoDNSResponse

    def add_default_headers(self, headers):
        # HTTP basic auth with the account email / API key pair.
        auth_b64 = base64.b64encode('%s:%s' % (self.user_id, self.key))
        headers['Authorization'] = 'Basic %s' % (auth_b64)
        return headers

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        if not headers:
            headers = {}
        if not params:
            params = {}

        if method in ("POST", "PUT"):
            # FIX: previously the whole headers dict was replaced here,
            # silently dropping any caller-supplied headers; only force the
            # Content-Type instead.
            headers['Content-Type'] = 'application/xml; charset=UTF-8'
        return super(ZerigoDNSConnection, self).request(action=action,
                                                        params=params,
                                                        data=data,
                                                        method=method,
                                                        headers=headers)


class ZerigoDNSDriver(DNSDriver):
    type = Provider.ZERIGO
    name = 'Zerigo DNS'
    connectionCls = ZerigoDNSConnection

    def list_record_types(self):
        return RECORD_TYPE_MAP.keys()

    def list_zones(self):
        value_dict = {'type': 'zones'}
        return LazyList(get_more=self._get_more, value_dict=value_dict)

    def list_records(self, zone):
        value_dict = {'type': 'records', 'zone': zone}
        return LazyList(get_more=self._get_more, value_dict=value_dict)

    def get_zone(self, zone_id):
        path = API_ROOT + 'zones/%s.xml' % (zone_id)
        self.connection.set_context({'resource': 'zone', 'id': zone_id})
        data = self.connection.request(path).object
        zone = self._to_zone(elem=data)
        return zone

    def get_record(self, zone_id, record_id):
        # The parent zone is fetched first so a missing zone raises
        # ZoneDoesNotExistError before the record lookup.
        zone = self.get_zone(zone_id=zone_id)
        self.connection.set_context({'resource': 'record', 'id': record_id})
        path = API_ROOT + 'hosts/%s.xml' % (record_id)
        data = self.connection.request(path).object
        record = self._to_record(elem=data, zone=zone)
        return record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        Provider API docs:
        https://www.zerigo.com/docs/apis/dns/1.1/zones/create
        """
        path = API_ROOT + 'zones.xml'
        zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
                                       extra=extra)
        data = self.connection.request(action=path,
                                       data=ET.tostring(zone_elem),
                                       method='POST').object
        zone = self._to_zone(elem=data)
        return zone

    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        """
        Update an existing zone.

        Provider API docs:
        https://www.zerigo.com/docs/apis/dns/1.1/zones/update
        """
        if domain:
            raise LibcloudError('Domain cannot be changed', driver=self)

        path = API_ROOT + 'zones/%s.xml' % (zone.id)
        zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
                                       extra=extra)
        response = self.connection.request(action=path,
                                           data=ET.tostring(zone_elem),
                                           method='PUT')
        assert response.status == httplib.OK

        merged = merge_valid_keys(params=copy.deepcopy(zone.extra),
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        updated_zone = get_new_obj(obj=zone, klass=Zone,
                                   attributes={'type': type,
                                               'ttl': ttl,
                                               'extra': merged})
        return updated_zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        Provider API docs:
        https://www.zerigo.com/docs/apis/dns/1.1/hosts/create
        """
        path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
        record_elem = self._to_record_elem(name=name, type=type, data=data,
                                           extra=extra)
        response = self.connection.request(action=path,
                                           data=ET.tostring(record_elem),
                                           method='POST')
        assert response.status == httplib.CREATED
        record = self._to_record(elem=response.object, zone=zone)
        return record

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        """
        Update an existing record.

        Provider API docs:
        https://www.zerigo.com/docs/apis/dns/1.1/hosts/update
        """
        path = API_ROOT + 'hosts/%s.xml' % (record.id)
        record_elem = self._to_record_elem(name=name, type=type, data=data,
                                           extra=extra)
        response = self.connection.request(action=path,
                                           data=ET.tostring(record_elem),
                                           method='PUT')
        assert response.status == httplib.OK

        merged = merge_valid_keys(params=copy.deepcopy(record.extra),
                                  valid_keys=VALID_RECORD_EXTRA_PARAMS,
                                  extra=extra)
        updated_record = get_new_obj(obj=record, klass=Record,
                                     attributes={'type': type,
                                                 'data': data,
                                                 'extra': merged})
        return updated_record

    def delete_zone(self, zone):
        path = API_ROOT + 'zones/%s.xml' % (zone.id)
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        response = self.connection.request(action=path, method='DELETE')
        return response.status == httplib.OK

    def delete_record(self, record):
        path = API_ROOT + 'hosts/%s.xml' % (record.id)
        self.connection.set_context({'resource': 'record', 'id': record.id})
        response = self.connection.request(action=path, method='DELETE')
        return response.status == httplib.OK

    def ex_get_zone_by_domain(self, domain):
        """
        Retrieve a zone object by the domain name.
        """
        path = API_ROOT + 'zones/%s.xml' % (domain)
        self.connection.set_context({'resource': 'zone', 'id': domain})
        data = self.connection.request(path).object
        zone = self._to_zone(elem=data)
        return zone

    def ex_force_slave_axfr(self, zone):
        """
        Force a zone transfer.
        """
        path = API_ROOT + 'zones/%s/force_slave_axfr.xml' % (zone.id)
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        response = self.connection.request(path, method='POST')
        assert response.status == httplib.ACCEPTED
        return zone

    def _to_zone_elem(self, domain=None, type=None, ttl=None, extra=None):
        # Build the <zone> XML payload; only include elements for the
        # attributes which were actually provided.
        zone_elem = ET.Element('zone', {})

        if domain:
            domain_elem = ET.SubElement(zone_elem, 'domain')
            domain_elem.text = domain

        if type:
            ns_type_elem = ET.SubElement(zone_elem, 'ns-type')

            if type == 'master':
                ns_type_elem.text = 'pri_sec'
            elif type == 'slave':
                if not extra or 'ns1' not in extra:
                    raise LibcloudError('ns1 extra attribute is required ' +
                                        'when zone type is slave',
                                        driver=self)

                ns_type_elem.text = 'sec'
                ns1_elem = ET.SubElement(zone_elem, 'ns1')
                ns1_elem.text = extra['ns1']
            elif type == 'std_master':
                # TODO: Each driver should provide supported zone types
                # Slave name servers are elsewhere
                if not extra or 'slave-nameservers' not in extra:
                    # FIX: message previously read "whenzone" (missing space)
                    raise LibcloudError('slave-nameservers extra ' +
                                        'attribute is required when zone ' +
                                        'type is std_master', driver=self)

                ns_type_elem.text = 'pri'
                slave_nameservers_elem = ET.SubElement(zone_elem,
                                                       'slave-nameservers')
                slave_nameservers_elem.text = extra['slave-nameservers']

        if ttl:
            default_ttl_elem = ET.SubElement(zone_elem, 'default-ttl')
            default_ttl_elem.text = str(ttl)

        if extra and 'tag-list' in extra:
            tags = extra['tag-list']

            tags_elem = ET.SubElement(zone_elem, 'tag-list')
            tags_elem.text = ' '.join(tags)

        return zone_elem

    def _to_record_elem(self, name=None, type=None, data=None, extra=None):
        # Build the <host> XML payload for record create / update calls.
        record_elem = ET.Element('host', {})

        if name:
            name_elem = ET.SubElement(record_elem, 'hostname')
            name_elem.text = name

        if type:
            type_elem = ET.SubElement(record_elem, 'host-type')
            type_elem.text = RECORD_TYPE_MAP[type]

        if data:
            data_elem = ET.SubElement(record_elem, 'data')
            data_elem.text = data

        if extra:
            if 'ttl' in extra:
                ttl_elem = ET.SubElement(record_elem, 'ttl',
                                         {'type': 'integer'})
                ttl_elem.text = str(extra['ttl'])

            if 'priority' in extra:
                # Only MX and SRV records support priority
                priority_elem = ET.SubElement(record_elem, 'priority',
                                              {'type': 'integer'})

                priority_elem.text = str(extra['priority'])

            if 'notes' in extra:
                notes_elem = ET.SubElement(record_elem, 'notes')
                notes_elem.text = extra['notes']

        return record_elem

    def _to_zones(self, elem):
        zones = []

        for item in findall(element=elem, xpath='zone'):
            zone = self._to_zone(elem=item)
            zones.append(zone)

        return zones

    def _to_zone(self, elem):
        id = findtext(element=elem, xpath='id')
        domain = findtext(element=elem, xpath='domain')
        type = findtext(element=elem, xpath='ns-type')
        # Zerigo reports 'pri_sec' / 'pri' for primary zones, 'sec' for
        # slave zones.
        type = 'master' if type.find('pri') == 0 else 'slave'
        ttl = findtext(element=elem, xpath='default-ttl')

        hostmaster = findtext(element=elem, xpath='hostmaster')
        custom_ns = findtext(element=elem, xpath='custom-ns')
        custom_nameservers = findtext(element=elem, xpath='custom-nameservers')
        notes = findtext(element=elem, xpath='notes')
        nx_ttl = findtext(element=elem, xpath='nx-ttl')
        slave_nameservers = findtext(element=elem, xpath='slave-nameservers')
        tags = findtext(element=elem, xpath='tag-list')
        tags = tags.split(' ') if tags else []

        extra = {'hostmaster': hostmaster, 'custom-ns': custom_ns,
                 'custom-nameservers': custom_nameservers, 'notes': notes,
                 'nx-ttl': nx_ttl, 'slave-nameservers': slave_nameservers,
                 'tags': tags}
        zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl),
                    driver=self, extra=extra)
        return zone

    def _to_records(self, elem, zone):
        records = []

        for item in findall(element=elem, xpath='host'):
            record = self._to_record(elem=item, zone=zone)
            records.append(record)

        return records

    def _to_record(self, elem, zone):
        id = findtext(element=elem, xpath='id')
        name = findtext(element=elem, xpath='hostname')
        type = findtext(element=elem, xpath='host-type')
        type = self._string_to_record_type(type)
        data = findtext(element=elem, xpath='data')

        notes = findtext(element=elem, xpath='notes')
        state = findtext(element=elem, xpath='state')
        fqdn = findtext(element=elem, xpath='fqdn')
        priority = findtext(element=elem, xpath='priority')

        extra = {'notes': notes, 'state': state, 'fqdn': fqdn,
                 'priority': priority}

        record = Record(id=id, name=name, type=type, data=data,
                        zone=zone, driver=self, extra=extra)
        return record

    def _get_more(self, last_key, value_dict):
        # Note: last_key in this case really is a "last_page".
        # TODO: Update base driver and change last_key to something more
        # generic - e.g. marker
        params = {}
        params['per_page'] = ITEMS_PER_PAGE
        params['page'] = last_key + 1 if last_key else 1
        transform_func_kwargs = {}

        if value_dict['type'] == 'zones':
            path = API_ROOT + 'zones.xml'
            # FIX: the pagination params were not passed for zones, so every
            # call returned the first page over and over.
            response = self.connection.request(path, params=params)
            transform_func = self._to_zones
        elif value_dict['type'] == 'records':
            zone = value_dict['zone']
            path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
            self.connection.set_context({'resource': 'zone', 'id': zone.id})
            response = self.connection.request(path, params=params)
            transform_func = self._to_records
            transform_func_kwargs['zone'] = value_dict['zone']

        exhausted = False
        result_count = int(response.headers.get('x-query-count', 0))
        transform_func_kwargs['elem'] = response.object

        if (params['page'] * ITEMS_PER_PAGE) >= result_count:
            exhausted = True

        if response.status == httplib.OK:
            items = transform_func(**transform_func_kwargs)
            return items, params['page'], exhausted
        else:
            return [], None, True

# ==== patch: new file trunk/libcloud/dns/providers.py ====
# Licensed to the Apache Software Foundation (ASF) under one
# or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.utils import get_driver as get_provider_driver
from libcloud.dns.types import Provider

# Maps each Provider constant to (module path, driver class name); the
# driver class is imported lazily by get_provider_driver.
DRIVERS = {
    Provider.DUMMY:
        ('libcloud.dns.drivers.dummy', 'DummyDNSDriver'),
    Provider.LINODE:
        ('libcloud.dns.drivers.linode', 'LinodeDNSDriver'),
    Provider.ZERIGO:
        ('libcloud.dns.drivers.zerigo', 'ZerigoDNSDriver'),
    Provider.RACKSPACE_US:
        ('libcloud.dns.drivers.rackspace', 'RackspaceUSDNSDriver'),
    Provider.RACKSPACE_UK:
        ('libcloud.dns.drivers.rackspace', 'RackspaceUKDNSDriver')
}


def get_driver(provider):
    """Return the DNS driver class for the given Provider constant."""
    return get_provider_driver(DRIVERS, provider)

# ==== patch: new file trunk/libcloud/dns/types.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.common.types import LibcloudError

__all__ = [
    'Provider',
    'RecordType',
    'ZoneError',
    'ZoneDoesNotExistError',
    'ZoneAlreadyExistsError',
    'RecordError',
    'RecordDoesNotExistError',
    'RecordAlreadyExistsError'
]


class Provider(object):
    # Constants for the supported DNS providers.
    DUMMY = 0
    LINODE = 1
    ZERIGO = 2
    RACKSPACE_US = 3
    RACKSPACE_UK = 4


class RecordType(object):
    """
    DNS record type.
    """
    A = 0
    AAAA = 1
    MX = 2
    NS = 3
    CNAME = 4
    DNAME = 5
    TXT = 6
    SOA = 8
    SPF = 9
    SRV = 10
    # FIX: PTR was previously assigned twice (first 7, then 11); the first
    # assignment was dead code silently shadowed by this one.  The value 11
    # is kept so existing stored values keep resolving to PTR.
    PTR = 11
    NAPTR = 12
    REDIRECT = 13

    @classmethod
    def __repr__(self, value):
        # Map an integer constant back to its name.  Only integer entries of
        # __dict__ are considered; the class dict also contains methods,
        # __module__, etc. which must not end up in the reverse map.
        reverse = dict((v, k) for k, v in RecordType.__dict__.items()
                       if isinstance(v, int))
        return reverse[value]


class ZoneError(LibcloudError):
    error_type = 'ZoneError'

    def __init__(self, value, driver, zone_id):
        self.zone_id = zone_id
        super(ZoneError, self).__init__(value=value, driver=driver)

    def __str__(self):
        return ('<%s in %s, zone_id=%s, value=%s>' %
                (self.error_type, repr(self.driver),
                 self.zone_id, self.value))


class ZoneDoesNotExistError(ZoneError):
    error_type = 'ZoneDoesNotExistError'


class ZoneAlreadyExistsError(ZoneError):
    error_type = 'ZoneAlreadyExistsError'


class RecordError(LibcloudError):
    error_type = 'RecordError'

    def __init__(self, value, driver, record_id):
        self.record_id = record_id
        super(RecordError, self).__init__(value=value, driver=driver)

    def __str__(self):
        return ('<%s in %s, record_id=%s, value=%s>' %
                (self.error_type, repr(self.driver),
                 self.record_id, self.value))


class RecordDoesNotExistError(RecordError):
    error_type = 'RecordDoesNotExistError'


class RecordAlreadyExistsError(RecordError):
    error_type = 'RecordAlreadyExistsError'

# ==== patch: new file trunk/libcloud/drivers/__init__.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Drivers for working with different providers
"""

__all__ = [
    'brightbox',
    'dummy',
    'ec2',
    'ecp',
    'elastichosts',
    'cloudsigma',
    'gogrid',
    'ibm_sbc',
    'linode',
    'opennebula',
    'rackspace',
    'rimuhosting',
    'slicehost',
    'softlayer',
    'vcloud',
    'voxel',
    'vpsnet'
]

# ==== patch: new file trunk/libcloud/drivers/brightbox.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim: the driver moved to
# libcloud.compute.drivers.brightbox; importing this module re-exports it
# and emits a deprecation warning.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.brightbox import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/cloudsigma.py ====
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.cloudsigma.
from libcloud.utils import deprecated_warning

from libcloud.compute.drivers.cloudsigma import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/dreamhost.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.dreamhost.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.dreamhost import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/dummy.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.dummy.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.dummy import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/ec2.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.ec2.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.ec2 import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/ecp.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.ecp.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.ecp import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/elastichosts.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.elastichosts.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.elastichosts import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/gogrid.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.gogrid.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.gogrid import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/ibm_sbc.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.ibm_sbc.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.ibm_sbc import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/linode.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.linode.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.linode import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/opennebula.py ====
# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad
# Complutense de Madrid (dsa-research.org)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.opennebula.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.opennebula import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/rackspace.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.rackspace.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.rackspace import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/rimuhosting.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.rimuhosting.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.rimuhosting import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/slicehost.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Backwards-compatibility shim for libcloud.compute.drivers.slicehost.
from libcloud.utils import deprecated_warning
from libcloud.compute.drivers.slicehost import *

deprecated_warning(__name__)

# ==== patch: new file trunk/libcloud/drivers/softlayer.py ====
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+ +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.softlayer import * + +deprecated_warning(__name__) diff --git a/trunk/libcloud/drivers/vcloud.py b/trunk/libcloud/drivers/vcloud.py new file mode 100644 index 0000000000..e24af6fc00 --- /dev/null +++ b/trunk/libcloud/drivers/vcloud.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.vcloud import * + +deprecated_warning(__name__) diff --git a/trunk/libcloud/drivers/voxel.py b/trunk/libcloud/drivers/voxel.py new file mode 100644 index 0000000000..3ad05692ca --- /dev/null +++ b/trunk/libcloud/drivers/voxel.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.voxel import * + +deprecated_warning(__name__) diff --git a/trunk/libcloud/drivers/vpsnet.py b/trunk/libcloud/drivers/vpsnet.py new file mode 100644 index 0000000000..747dc1536d --- /dev/null +++ b/trunk/libcloud/drivers/vpsnet.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.vpsnet import * + +deprecated_warning(__name__) diff --git a/trunk/libcloud/httplib_ssl.py b/trunk/libcloud/httplib_ssl.py new file mode 100644 index 0000000000..d7277db018 --- /dev/null +++ b/trunk/libcloud/httplib_ssl.py @@ -0,0 +1,161 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
"""
Subclass for httplib.HTTPSConnection with optional certificate name
verification, depending on libcloud.security settings.
"""
import httplib
import os
import re
import socket
import ssl
import warnings

import libcloud.security


class LibcloudHTTPSConnection(httplib.HTTPSConnection):
    """LibcloudHTTPSConnection

    Subclass of HTTPSConnection which verifies certificate names
    if and only if CA certificates are available.
    """
    verify = False   # whether connect() verifies the server certificate
    ca_cert = None   # path of the CA bundle used for verification (or None)

    def __init__(self, *args, **kwargs):
        """Read the security settings, then defer to HTTPSConnection.

        Accepts the same arguments as httplib.HTTPSConnection.
        """
        self._setup_verify()
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)

    def _setup_verify(self):
        """Decide whether SSL verification is enabled.

        Reads the security module's VERIFY_SSL_CERT and toggles whether
        connect() performs certificate verification or just runs the
        inherited httplib.HTTPSConnection.connect().
        """
        self.verify = libcloud.security.VERIFY_SSL_CERT
        self.strict = libcloud.security.VERIFY_SSL_CERT_STRICT

        if self.verify:
            self._setup_ca_cert()
        else:
            warnings.warn(libcloud.security.VERIFY_SSL_DISABLED_MSG)

    def _setup_ca_cert(self):
        """Locate a CA certificate bundle.

        Searches CA_CERTS_PATH for existing files and uses the first match.
        If none exists: raise in strict mode, otherwise warn and disable
        verification for this connection.
        """
        if not self.verify:
            return

        ca_certs_available = [cert
                              for cert in libcloud.security.CA_CERTS_PATH
                              if os.path.exists(cert)]
        if ca_certs_available:
            # use first available certificate
            self.ca_cert = ca_certs_available[0]
        else:
            if self.strict:
                raise RuntimeError(
                    libcloud.security.CA_CERTS_UNAVAILABLE_ERROR_MSG)
            else:
                # no certificates found; toggle verify to False
                warnings.warn(
                    libcloud.security.CA_CERTS_UNAVAILABLE_WARNING_MSG)
                self.ca_cert = None
                self.verify = False

    def connect(self):
        """Connect, verifying the peer certificate when enabled.

        If verification is off, just call httplib.HTTPSConnection.connect().
        Otherwise open the TCP connection ourselves, wrap it with
        CERT_REQUIRED against the located CA bundle, and check that the
        certificate actually names the host we dialed.

        @raise ssl.SSLError: when the certificate does not match the host.
        """
        if not self.verify:
            return httplib.HTTPSConnection.connect(self)

        # otherwise, create a connection and verify the hostname
        # use socket.create_connection (in 2.6+) if possible
        if getattr(socket, 'create_connection', None):
            sock = socket.create_connection((self.host, self.port),
                                            self.timeout)
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((self.host, self.port))
        self.sock = ssl.wrap_socket(sock,
                                    self.key_file,
                                    self.cert_file,
                                    cert_reqs=ssl.CERT_REQUIRED,
                                    ca_certs=self.ca_cert,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
        cert = self.sock.getpeercert()
        if not self._verify_hostname(self.host, cert):
            raise ssl.SSLError('Failed to verify hostname')

    def _verify_hostname(self, hostname, cert):
        """Verify hostname against peer cert.

        Check both commonName and DNS entries in subjectAltName, using a
        rudimentary glob-to-regex translation to find matches.

        FIX: the patterns are now anchored with ^...$.  Previously an
        unanchored search() let e.g. '*.example.com' accept
        'foo.example.com.evil.org' because the regex matched a substring
        of the hostname.
        FIX: '*' now also matches '-', so wildcard certificates match
        hostnames containing hyphens (the original character class
        [0-9A-Za-z]+ rejected 'my-host.example.com' despite the comment
        claiming dashes were allowed).
        """
        common_name = self._get_common_name(cert)
        alt_names = self._get_subject_alt_names(cert)

        # replace * with alphanumeric and dash
        # replace . with literal .
        # anchor the whole pattern so it must cover the entire hostname
        valid_patterns = [
            re.compile(
                r'^' +
                pattern.replace(
                    r".", r"\."
                ).replace(
                    r"*", r"[0-9A-Za-z-]+"
                ) +
                r'$'
            )
            for pattern
            in (set(common_name) | set(alt_names))
        ]

        return any(
            pattern.search(hostname)
            for pattern in valid_patterns
        )

    def _get_subject_alt_names(self, cert):
        """Get SubjectAltNames.

        Retrieve the DNS entries of the 'subjectAltName' attribute from the
        cert data structure, or [] when absent.
        """
        if 'subjectAltName' not in cert:
            values = []
        else:
            values = [value
                      for field, value in cert['subjectAltName']
                      if field == 'DNS']
        return values

    def _get_common_name(self, cert):
        """Get Common Name.

        Retrieve the 'commonName' values from the cert data structure as a
        list.

        FIX: returns [] (not None) when the cert has no 'subject' field;
        returning None made set(common_name) in _verify_hostname raise
        TypeError.
        """
        if 'subject' not in cert:
            return []
        values = [value[0][1]
                  for value in cert['subject']
                  if value[0][0] == 'commonName']
        return values
+ +""" +Module for working with Load Balancers +""" + +__all__ = [ + 'base', + 'providers', + 'types', + 'drivers' +] + diff --git a/trunk/libcloud/loadbalancer/base.py b/trunk/libcloud/loadbalancer/base.py new file mode 100644 index 0000000000..0f651cdf44 --- /dev/null +++ b/trunk/libcloud/loadbalancer/base.py @@ -0,0 +1,227 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.common.base import ConnectionKey, BaseDriver +from libcloud.common.types import LibcloudError + +__all__ = [ + "Member", + "LoadBalancer", + "Driver", + "Algorithm" + ] + + +class Member(object): + + def __init__(self, id, ip, port): + self.id = str(id) if id else None + self.ip = ip + self.port = port + + def __repr__(self): + return ('' % (self.id, + self.ip, self.port)) + + +class Algorithm(object): + RANDOM = 0 + ROUND_ROBIN = 1 + LEAST_CONNECTIONS = 2 + +DEFAULT_ALGORITHM = Algorithm.ROUND_ROBIN + + +class LoadBalancer(object): + """ + Provide a common interface for handling Load Balancers. 
+ """ + + def __init__(self, id, name, state, ip, port, driver): + self.id = str(id) if id else None + self.name = name + self.state = state + self.ip = ip + self.port = port + self.driver = driver + + def attach_compute_node(self, node): + return self.driver.balancer_attach_compute_node(balancer=self, + node=node) + + def attach_member(self, member): + return self.driver.balancer_attach_member(balancer=self, + member=member) + + def detach_member(self, member): + return self.driver.balancer_detach_member(balancer=self, + member=member) + + def list_members(self): + return self.driver.balancer_list_members(balancer=self) + + def destroy(self): + return self.driver.destroy_balancer(balancer=self) + + def __repr__(self): + return ('' % (self.id, + self.name, self.state)) + + +class Driver(BaseDriver): + """ + A base LBDriver class to derive from + + This class is always subclassed by a specific driver. + + """ + + connectionCls = ConnectionKey + _ALGORITHM_TO_VALUE_MAP = {} + _VALUE_TO_ALGORITHM_MAP = {} + + def __init__(self, key, secret=None, secure=True, host=None, port=None): + super(Driver, self).__init__(key=key, secret=secret, secure=secure, + host=host, port=port) + + def list_protocols(self): + """ + Return a list of supported protocols. + """ + + raise NotImplementedError( + 'list_protocols not implemented for this driver') + + def list_balancers(self): + """ + List all loadbalancers + + @return: C{list} of L{LoadBalancer} objects + + """ + + raise NotImplementedError( + 'list_balancers not implemented for this driver') + + def create_balancer(self, name, port, protocol, algorithm, members): + """ + Create a new load balancer instance + + @keyword name: Name of the new load balancer (required) + @type name: C{str} + @keyword members: C{list} ofL{Member}s to attach to balancer + @type: C{list} of L{Member}s + @keyword protocol: Loadbalancer protocol, defaults to http. 
+ @type: C{str} + @keyword port: Port the load balancer should listen on, defaults to 80 + @type port: C{str} + @keyword algorithm: Load balancing algorithm, defaults to + LBAlgorithm.ROUND_ROBIN + @type algorithm: C{LBAlgorithm} + + """ + + raise NotImplementedError( + 'create_balancer not implemented for this driver') + + def destroy_balancer(self, balancer): + """Destroy a load balancer + + @return: C{bool} True if the destroy was successful, otherwise False + + """ + + raise NotImplementedError( + 'destroy_balancer not implemented for this driver') + + def get_balancer(self, balancer_id): + """ + Return a C{LoadBalancer} object. + + @keyword balancer_id: id of a load balancer you want to fetch + @type balancer_id: C{str} + + @return: C{LoadBalancer} + """ + + raise NotImplementedError( + 'get_balancer not implemented for this driver') + + def balancer_attach_compute_node(self, balancer, node): + """ + Attach a compute node as a member to the load balancer. + + @keyword node: Member to join to the balancer + @type member: C{libcloud.compute.base.Node} + @return {Member} Member after joining the balancer. + """ + + return self.balancer_attach_member(balancer, Member(None, + node.public_ip[0], + balancer.port)) + + def balancer_attach_member(self, balancer, member): + """ + Attach a member to balancer + + @keyword member: Member to join to the balancer + @type member: C{Member} + @return {Member} Member after joining the balancer. 
+ """ + + raise NotImplementedError( + 'balancer_attach_member not implemented for this driver') + + def balancer_detach_member(self, balancer, member): + """ + Detach member from balancer + + @return: C{bool} True if member detach was successful, otherwise False + + """ + + raise NotImplementedError( + 'balancer_detach_member not implemented for this driver') + + def balancer_list_members(self, balancer): + """ + Return list of members attached to balancer + + @return: C{list} of L{Member}s + + """ + + raise NotImplementedError( + 'balancer_list_members not implemented for this driver') + + def _value_to_algorithm(self, value): + """ + Return C{LBAlgorithm} based on the value. + """ + try: + return self._VALUE_TO_ALGORITHM_MAP[value] + except KeyError: + raise LibcloudError(value='Invalid value: %s' % (value), + driver=self) + + def _algorithm_to_value(self, algorithm): + """ + Return value based in the algorithm (C{LBAlgorithm}). + """ + try: + return self._ALGORITHM_TO_VALUE_MAP[algorithm] + except KeyError: + raise LibcloudError(value='Invalid algorithm: %s' % (algorithm), + driver=self) diff --git a/trunk/libcloud/loadbalancer/drivers/__init__.py b/trunk/libcloud/loadbalancer/drivers/__init__.py new file mode 100644 index 0000000000..f4fdb8666a --- /dev/null +++ b/trunk/libcloud/loadbalancer/drivers/__init__.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + 'rackspace', + 'gogrid' +] diff --git a/trunk/libcloud/loadbalancer/drivers/cloudstack.py b/trunk/libcloud/loadbalancer/drivers/cloudstack.py new file mode 100644 index 0000000000..12fb10086d --- /dev/null +++ b/trunk/libcloud/loadbalancer/drivers/cloudstack.py @@ -0,0 +1,123 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from libcloud.common.cloudstack import CloudStackConnection, \
    CloudStackDriverMixIn
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
from libcloud.loadbalancer.types import State, LibcloudLBImmutableError
from libcloud.utils import reverse_dict

class CloudStackLBDriver(CloudStackDriverMixIn, Driver):
    """Driver for CloudStack load balancers.

    All API access goes through the mixin's _sync_request/_async_request
    helpers; this class only maps CloudStack response dicts to the common
    LoadBalancer/Member objects.
    """

    # Key used to look up pricing/API data for this driver.
    api_name = 'cloudstack_lb'

    # CloudStack algorithm string -> common Algorithm constant.
    _VALUE_TO_ALGORITHM_MAP = {
        'roundrobin': Algorithm.ROUND_ROBIN,
        'leastconn': Algorithm.LEAST_CONNECTIONS
    }
    _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)

    # CloudStack rule state -> common State constant; anything else maps
    # to State.UNKNOWN in _to_balancer.
    LB_STATE_MAP = {
        'Active': State.RUNNING,
    }

    def list_protocols(self):
        """We don't actually have any protocol awareness beyond TCP."""
        return [ 'tcp' ]

    def list_balancers(self):
        """Return every load-balancer rule as a L{LoadBalancer}."""
        balancers = self._sync_request('listLoadBalancerRules')
        balancers = balancers.get('loadbalancerrule', [])
        return [self._to_balancer(balancer) for balancer in balancers]

    def get_balancer(self, balancer_id):
        """Return the single L{LoadBalancer} with the given rule id.

        @raise Exception: when no rule with that id exists.
        """
        balancer = self._sync_request('listLoadBalancerRules', id=balancer_id)
        balancer = balancer.get('loadbalancerrule', [])
        if not balancer:
            raise Exception("no such load balancer: " + str(balancer_id))
        return self._to_balancer(balancer[0])

    def create_balancer(self, name, members, protocol='http', port=80,
                        algorithm=DEFAULT_ALGORITHM, location=None,
                        private_port=None):
        """Acquire a public IP, create a balancer rule on it, attach members.

        @keyword location: zone object to create in; defaults to the
                           first zone returned by listZones.
        @keyword private_port: member-side port; defaults to C{port}.
        """
        if location is None:
            # no zone given: fall back to the first available zone
            locations = self._sync_request('listZones')
            location = locations['zone'][0]['id']
        else:
            location = location.id
        if private_port is None:
            private_port = port

        # a rule needs a public IP; acquire one first (async operation)
        result = self._async_request('associateIpAddress', zoneid=location)
        public_ip = result['ipaddress']

        result = self._sync_request('createLoadBalancerRule',
            algorithm=self._ALGORITHM_TO_VALUE_MAP[algorithm],
            name=name,
            privateport=private_port,
            publicport=port,
            publicipid=public_ip['id'],
        )

        balancer = self._to_balancer(result['loadbalancer'])

        for member in members:
            balancer.attach_member(member)

        return balancer

    def destroy_balancer(self, balancer):
        """Delete the rule, then release its public IP.

        NOTE(review): returns None rather than the bool promised by the
        base Driver.destroy_balancer contract — confirm callers ignore
        the return value.
        """
        self._async_request('deleteLoadBalancerRule', id=balancer.id)
        self._async_request('disassociateIpAddress',
                            id=balancer.ex_public_ip_id)

    def balancer_attach_member(self, balancer, member):
        """Assign the member's VM to the rule.

        Mutates member.port to the balancer's private port, since
        CloudStack forwards to that port for every member.
        """
        member.port = balancer.ex_private_port
        self._async_request('assignToLoadBalancerRule', id=balancer.id,
                            virtualmachineids=member.id)
        return True

    def balancer_detach_member(self, balancer, member):
        """Remove the member's VM from the rule."""
        self._async_request('removeFromLoadBalancerRule', id=balancer.id,
                            virtualmachineids=member.id)
        return True

    def balancer_list_members(self, balancer):
        """Return the rule's VM instances as L{Member}s."""
        members = self._sync_request('listLoadBalancerRuleInstances',
                                     id=balancer.id)
        members = members['loadbalancerruleinstance']
        return [self._to_member(m, balancer.ex_private_port) for m in members]

    def _to_balancer(self, obj):
        """Map a CloudStack loadbalancerrule dict to a L{LoadBalancer}.

        The CloudStack-specific private port and public-IP id are stashed
        on ex_* attributes for use by the other methods.
        """
        balancer = LoadBalancer(
            id=obj['id'],
            name=obj['name'],
            state=self.LB_STATE_MAP.get(obj['state'], State.UNKNOWN),
            ip=obj['publicip'],
            port=obj['publicport'],
            driver=self.connection.driver
        )
        balancer.ex_private_port = obj['privateport']
        balancer.ex_public_ip_id = obj['publicipid']
        return balancer

    def _to_member(self, obj, port):
        """Map a rule-instance dict to a L{Member}.

        Uses the address of the VM's first NIC; the port comes from the
        balancer, since the API response does not carry it.
        """
        return Member(
            id=obj['id'],
            ip=obj['nic'][0]['ipaddress'],
            port=port
        )
import time
import httplib

try:
    import simplejson as json
except ImportError:
    import json

from libcloud.common.types import LibcloudError
from libcloud.utils import reverse_dict
from libcloud.common.gogrid import GoGridConnection, GoGridResponse, BaseGoGridDriver
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
from libcloud.loadbalancer.types import State, LibcloudLBImmutableError

class GoGridLBResponse(GoGridResponse):
    """GoGrid LB response that turns one opaque 500 into a useful error."""
    def success(self):
        if self.status == httplib.INTERNAL_SERVER_ERROR:
            # Hack, but at least this error message is more useful than
            # "unexpected server error"
            body = json.loads(self.body)
            if body['method'] == '/grid/loadbalancer/add' and \
                len(body['list']) >= 1 and \
                body['list'][0]['message'].find('unexpected server error') != -1:
                raise LibcloudError(value='You mostly likely tried to add a ' +
                                    'member with an IP address not assigned ' +
                                    'to your account', driver=self)
        return super(GoGridLBResponse, self).success()

class GoGridLBConnection(GoGridConnection):
    """
    Connection class for the GoGrid load-balancer driver.
    """
    responseCls = GoGridLBResponse

class GoGridLBDriver(BaseGoGridDriver, Driver):
    """Driver for GoGrid load balancers (REST API under /api/grid/loadbalancer)."""
    connectionCls = GoGridLBConnection
    api_name = 'gogrid_lb'
    name = 'GoGrid LB'

    # GoGrid state name -> common State constant.
    LB_STATE_MAP = { 'On': State.RUNNING,
                     'Unknown': State.UNKNOWN }
    # GoGrid algorithm string <-> common Algorithm constant.
    _VALUE_TO_ALGORITHM_MAP = {
        'round robin': Algorithm.ROUND_ROBIN,
        'least connect': Algorithm.LEAST_CONNECTIONS
    }
    _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)

    def list_protocols(self):
        # GoGrid only supports http
        return [ 'http' ]

    def list_balancers(self):
        """Return all balancers on the account."""
        return self._to_balancers(
                self.connection.request('/api/grid/loadbalancer/list').object)

    def ex_create_balancer_nowait(self, name, members, protocol='http', port=80,
                                  algorithm=DEFAULT_ALGORITHM):
        """Submit a create request without waiting for the balancer id.

        NOTE(review): the protocol argument is accepted but never sent;
        GoGrid is HTTP-only (see list_protocols).
        """
        algorithm = self._algorithm_to_value(algorithm)

        params = {'name': name,
                  'loadbalancer.type': algorithm,
                  'virtualip.ip': self._get_first_ip(),
                  'virtualip.port': port}
        params.update(self._members_to_params(members))

        resp = self.connection.request('/api/grid/loadbalancer/add',
                method='GET',
                params=params)
        return self._to_balancers(resp.object)[0]

    def create_balancer(self, name, members, protocol='http', port=80,
                        algorithm=DEFAULT_ALGORITHM):
        """Create a balancer and poll (up to 20 min, every 30 s) for its id.

        @raise Exception: when the id never appears within the timeout.
        """
        balancer = self.ex_create_balancer_nowait(name, members, protocol,
                                                  port, algorithm)

        timeout = 60 * 20      # give GoGrid up to 20 minutes
        waittime = 0
        interval = 2 * 15      # poll every 30 seconds

        if balancer.id is not None:
            return balancer
        else:
            while waittime < timeout:
                balancers = self.list_balancers()

                # match by name, since we don't know the id yet
                for i in balancers:
                    if i.name == balancer.name and i.id is not None:
                        return i

                waittime += interval
                time.sleep(interval)

        raise Exception('Failed to get id')

    def destroy_balancer(self, balancer):
        """Delete the balancer; True when the API answered 200.

        @raise LibcloudLBImmutableError: when GoGrid reports a pending
        update on the balancer (it cannot be deleted until that settles).
        """
        try:
            resp = self.connection.request('/api/grid/loadbalancer/delete',
                    method='POST', params={'id': balancer.id})
        except Exception, err:
            # GoGrid signals "busy" via this message in the error body
            if "Update request for LoadBalancer" in str(err):
                raise LibcloudLBImmutableError("Cannot delete immutable object",
                        GoGridLBDriver)
            else:
                raise

        return resp.status == 200

    def get_balancer(self, **kwargs):
        """Fetch a balancer by ex_balancer_name or, failing that, balancer_id."""
        params = {}

        try:
            params['name'] = kwargs['ex_balancer_name']
        except KeyError:
            balancer_id = kwargs['balancer_id']
            params['id'] = balancer_id

        resp = self.connection.request('/api/grid/loadbalancer/get',
                params=params)

        return self._to_balancers(resp.object)[0]

    def balancer_attach_member(self, balancer, member):
        """Attach by re-submitting the full member list including the new one.

        Returns the attached member as reported back by the API (matched
        by IP).
        """
        members = self.balancer_list_members(balancer)
        members.append(member)

        params = {"id": balancer.id}

        params.update(self._members_to_params(members))

        resp = self._update_balancer(params)

        return [ m for m in
                self._to_members(resp.object["list"][0]["realiplist"])
                if m.ip == member.ip ][0]

    def balancer_detach_member(self, balancer, member):
        """Detach by re-submitting the member list without the given member."""
        members = self.balancer_list_members(balancer)

        remaining_members = [n for n in members if n.id != member.id]

        params = {"id": balancer.id}
        params.update(self._members_to_params(remaining_members))

        resp = self._update_balancer(params)

        return resp.status == 200

    def balancer_list_members(self, balancer):
        """Return the balancer's real-IP list as L{Member}s."""
        resp = self.connection.request('/api/grid/loadbalancer/get',
                params={'id': balancer.id})
        return self._to_members(resp.object["list"][0]["realiplist"])

    def _update_balancer(self, params):
        """POST an edit request, translating GoGrid's 'busy' error.

        @raise LibcloudLBImmutableError: when an update is already pending.
        @raise LibcloudError: for any other API failure.
        """
        try:
            return self.connection.request('/api/grid/loadbalancer/edit',
                    method='POST',
                    params=params)
        except Exception, err:
            if "Update already pending" in str(err):
                raise LibcloudLBImmutableError("Balancer is immutable", GoGridLBDriver)

            raise LibcloudError(value='Exception: %s' % str(err), driver=self)

    def _members_to_params(self, members):
        """
        Helper method to convert list of L{Member} objects
        to GET params (realiplist.N.ip / realiplist.N.port).

        """

        params = {}

        i = 0
        for member in members:
            params["realiplist.%s.ip" % i] = member.ip
            params["realiplist.%s.port" % i] = member.port
            i += 1

        return params

    def _to_balancers(self, object):
        """Map a GoGrid 'list' response to L{LoadBalancer}s."""
        return [ self._to_balancer(el) for el in object["list"] ]

    def _to_balancer(self, el):
        """Map one GoGrid balancer dict to a L{LoadBalancer}."""
        lb = LoadBalancer(id=el.get("id"),
                name=el["name"],
                state=self.LB_STATE_MAP.get(
                    el["state"]["name"], State.UNKNOWN),
                ip=el["virtualip"]["ip"]["ip"],
                port=el["virtualip"]["port"],
                driver=self.connection.driver)
        return lb

    def _to_members(self, object):
        """Map a GoGrid realiplist to L{Member}s."""
        return [ self._to_member(el) for el in object ]

    def _to_member(self, el):
        """Map one realiplist entry to a L{Member}."""
        member = Member(id=el["ip"]["id"],
                ip=el["ip"]["ip"],
                port=el["port"])
        return member
+ +from libcloud.loadbalancer.providers import Provider + +from libcloud.loadbalancer.drivers.cloudstack import CloudStackLBDriver + +class NinefoldLBDriver(CloudStackLBDriver): + "Driver for load balancers on Ninefold's Compute platform." + + host = 'api.ninefold.com' + path = '/compute/v1.0/' + + type = Provider.NINEFOLD + name = 'Ninefold LB' diff --git a/trunk/libcloud/loadbalancer/drivers/rackspace.py b/trunk/libcloud/loadbalancer/drivers/rackspace.py new file mode 100644 index 0000000000..5900e08793 --- /dev/null +++ b/trunk/libcloud/loadbalancer/drivers/rackspace.py @@ -0,0 +1,186 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os

try:
    import simplejson as json
except ImportError:
    import json

from libcloud.utils import reverse_dict
from libcloud.common.base import JsonResponse
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
from libcloud.loadbalancer.types import State
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.common.rackspace import (
    AUTH_URL_US, AUTH_URL_UK)


class RackspaceResponse(JsonResponse):
    """JSON response which tolerates an empty body (e.g. on DELETE)."""

    def parse_body(self):
        if not self.body:
            return None
        return super(RackspaceResponse, self).parse_body()

    def success(self):
        # Any 2xx status counts as success.
        return 200 <= int(self.status) <= 299


class RackspaceConnection(OpenStackBaseConnection):
    """Connection to the Rackspace load-balancer API (US endpoint)."""

    responseCls = RackspaceResponse
    auth_url = AUTH_URL_US
    _url_key = "lb_url"

    def __init__(self, user_id, key, secure=True):
        super(RackspaceConnection, self).__init__(user_id, key, secure)
        self.api_version = 'v1.0'
        self.accept_format = 'application/json'

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        headers = headers or {}
        params = params or {}

        if method in ('POST', 'PUT'):
            headers['Content-Type'] = 'application/json'
        if method == 'GET':
            # Random query parameter to defeat intermediary caches.
            # NOTE(review): 'cache-busing' looks like a typo of
            # 'cache-busting', but the key is sent to the server as-is,
            # so it is deliberately left unchanged. .encode('hex') is
            # Python 2 only — assumed fine for this code base.
            params['cache-busing'] = os.urandom(8).encode('hex')

        return super(RackspaceConnection, self).request(
            action=action, params=params, data=data, method=method,
            headers=headers)


class RackspaceUKConnection(RackspaceConnection):
    """Connection to the Rackspace load-balancer API (UK endpoint)."""

    auth_url = AUTH_URL_UK


class RackspaceLBDriver(Driver):
    """Rackspace Cloud Load Balancers driver."""

    connectionCls = RackspaceConnection
    api_name = 'rackspace_lb'
    name = 'Rackspace LB'

    LB_STATE_MAP = {
        'ACTIVE': State.RUNNING,
        'BUILD': State.PENDING,
    }
    _VALUE_TO_ALGORITHM_MAP = {
        'RANDOM': Algorithm.RANDOM,
        'ROUND_ROBIN': Algorithm.ROUND_ROBIN,
        'LEAST_CONNECTIONS': Algorithm.LEAST_CONNECTIONS,
    }
    _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)

    def list_protocols(self):
        response = self.connection.request('/loadbalancers/protocols')
        return self._to_protocols(response.object)

    def list_balancers(self):
        response = self.connection.request('/loadbalancers')
        return self._to_balancers(response.object)

    def create_balancer(self, name, members, protocol='http',
                        port=80, algorithm=DEFAULT_ALGORITHM):
        algorithm = self._algorithm_to_value(algorithm)

        nodes = [{"address": member.ip,
                  "port": member.port,
                  "condition": "ENABLED"} for member in members]
        create_body = {"loadBalancer": {
            "name": name,
            "port": port,
            "algorithm": algorithm,
            "protocol": protocol.upper(),
            "virtualIps": [{"type": "PUBLIC"}],
            "nodes": nodes,
        }}

        response = self.connection.request('/loadbalancers',
                                           method='POST',
                                           data=json.dumps(create_body))
        return self._to_balancer(response.object["loadBalancer"])

    def destroy_balancer(self, balancer):
        response = self.connection.request('/loadbalancers/%s' %
                                           (balancer.id),
                                           method='DELETE')
        return response.status == 202

    def get_balancer(self, balancer_id):
        response = self.connection.request('/loadbalancers/%s' %
                                           (balancer_id))
        return self._to_balancer(response.object["loadBalancer"])

    def balancer_attach_member(self, balancer, member):
        attach_body = {"nodes": [{"port": member.port,
                                  "address": member.ip,
                                  "condition": "ENABLED"}]}

        response = self.connection.request(
            '/loadbalancers/%s/nodes' % (balancer.id),
            method='POST', data=json.dumps(attach_body))
        return self._to_members(response.object)[0]

    def balancer_detach_member(self, balancer, member):
        # A load balancer always needs at least one member; the last one
        # cannot be detached — only disabled, or the balancer destroyed.
        response = self.connection.request(
            '/loadbalancers/%s/nodes/%s' % (balancer.id, member.id),
            method='DELETE')
        return response.status == 202

    def balancer_list_members(self, balancer):
        response = self.connection.request(
            '/loadbalancers/%s/nodes' % (balancer.id))
        return self._to_members(response.object)

    def _to_protocols(self, data):
        return [entry['name'].lower() for entry in data["protocols"]]

    def _to_balancers(self, data):
        return [self._to_balancer(entry) for entry in data["loadBalancers"]]

    def _to_balancer(self, el):
        return LoadBalancer(id=el["id"],
                            name=el["name"],
                            state=self.LB_STATE_MAP.get(el["status"],
                                                        State.UNKNOWN),
                            ip=el["virtualIps"][0]["address"],
                            port=el["port"],
                            driver=self.connection.driver)

    def _to_members(self, data):
        return [self._to_member(entry) for entry in data["nodes"]]

    def _to_member(self, el):
        return Member(id=el["id"],
                      ip=el["address"],
                      port=el["port"])


class RackspaceUKLBDriver(RackspaceLBDriver):
    """Rackspace UK load-balancer driver (UK auth endpoint)."""

    connectionCls = RackspaceUKConnection

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import get_driver as get_provider_driver +from libcloud.loadbalancer.types import Provider + +__all__ = [ + "Provider", + "DRIVERS", + "get_driver", + ] + +DRIVERS = { + Provider.RACKSPACE_US: + ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'), + Provider.RACKSPACE_UK: + ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceUKLBDriver'), + Provider.GOGRID: + ('libcloud.loadbalancer.drivers.gogrid', 'GoGridLBDriver'), + Provider.NINEFOLD: + ('libcloud.loadbalancer.drivers.ninefold', 'NinefoldLBDriver'), +} + +def get_driver(provider): + return get_provider_driver(DRIVERS, provider) diff --git a/trunk/libcloud/loadbalancer/types.py b/trunk/libcloud/loadbalancer/types.py new file mode 100644 index 0000000000..a0bc20df3b --- /dev/null +++ b/trunk/libcloud/loadbalancer/types.py @@ -0,0 +1,51 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + "Provider", + "State", + "LibcloudLBError", + "LibcloudLBImmutableError", + ] + +from libcloud.common.types import LibcloudError + + +class LibcloudLBError(LibcloudError): + pass + + +class LibcloudLBImmutableError(LibcloudLBError): + pass + + +class Provider(object): + RACKSPACE_US = 0 + GOGRID = 1 + NINEFOLD = 2 + RACKSPACE_UK = 3 + + +class State(object): + """ + Standard states for a loadbalancer + + @cvar RUNNING: loadbalancer is running and ready to use + @cvar UNKNOWN: loabalancer state is unknown + """ + + RUNNING = 0 + PENDING = 1 + UNKNOWN = 2 diff --git a/trunk/libcloud/pricing.py b/trunk/libcloud/pricing.py new file mode 100644 index 0000000000..a47157d460 --- /dev/null +++ b/trunk/libcloud/pricing.py @@ -0,0 +1,139 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import with_statement +""" +A class which handles loading the pricing files. 
+""" + +try: + import simplejson as json +except ImportError: + import json + +import os.path +from os.path import join as pjoin + +PRICING_FILE_PATH = 'data/pricing.json' + +PRICING_DATA = {} + +VALID_PRICING_DRIVER_TYPES = [ 'compute', 'storage' ] + +def clear_pricing_data(): + PRICING_DATA.clear() + PRICING_DATA.update({ + 'compute': {}, + 'storage': {}, + }) +clear_pricing_data() + +def get_pricing_file_path(file_path=None): + pricing_directory = os.path.dirname(os.path.abspath(__file__)) + pricing_file_path = pjoin(pricing_directory, PRICING_FILE_PATH) + + return pricing_file_path + +def get_pricing(driver_type, driver_name, pricing_file_path=None): + """ + Return pricing for the provided driver. + + @type driver_type: C{str} + @param driver_type: Driver type ('compute' or 'storage') + + @type driver_name: C{str} + @param driver_name: Driver name + + @rtype: C{dict} + @return: Dictionary with pricing where a key name is size ID and + the value is a price. + """ + if not driver_type in VALID_PRICING_DRIVER_TYPES: + raise AttributeError('Invalid driver type: %s', driver_type) + + if driver_name in PRICING_DATA[driver_type]: + return PRICING_DATA[driver_type][driver_name] + + if not pricing_file_path: + pricing_file_path = get_pricing_file_path(file_path=pricing_file_path) + + with open(pricing_file_path) as fp: + content = fp.read() + + pricing_data = json.loads(content) + size_pricing = pricing_data[driver_type][driver_name] + + for driver_type in VALID_PRICING_DRIVER_TYPES: + pricing = pricing_data.get(driver_type, None) + if pricing: + PRICING_DATA[driver_type] = pricing + + return size_pricing + +def set_pricing(driver_type, driver_name, pricing): + """ + Populate the driver pricing dictionary. + + @type driver_type: C{str} + @param driver_type: Driver type ('compute' or 'storage') + + @type driver_name: C{str} + @param driver_name: Driver name + + @type pricing: C{dict} + @param pricing: Dictionary where a key is a size ID and a value is a price. 
+ """ + + PRICING_DATA[driver_type][driver_name] = pricing + +def get_size_price(driver_type, driver_name, size_id): + """ + Return price for the provided size. + + @type driver_type: C{str} + @param driver_type: Driver type ('compute' or 'storage') + + @type driver_name: C{str} + @param driver_name: Driver name + + @type size_id: C{int/str} + @param size_id: Unique size ID (can be an integer or a string - depends on + the driver) + + @rtype: C{int} + @return: Size price. + """ + pricing = get_pricing(driver_type=driver_type, driver_name=driver_name) + price = float(pricing[size_id]) + return price + +def invalidate_pricing_cache(): + """ + Invalidate the cache for all the drivers. + """ + PRICING_DATA['compute'] = {} + PRICING_DATA['storage'] = {} + +def invalidate_module_pricing_cache(driver_type, driver_name): + """ + Invalidate the cache for the specified driver. + + @type driver_type: C{str} + @param driver_type: Driver type ('compute' or 'storage') + + @type driver_name: C{str} + @param driver_name: Driver name + """ + if driver_name in PRICING_DATA[driver_type]: + del PRICING_DATA[driver_type][driver_name] diff --git a/trunk/libcloud/providers.py b/trunk/libcloud/providers.py new file mode 100644 index 0000000000..e27a31fdf7 --- /dev/null +++ b/trunk/libcloud/providers.py @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.providers import ( + DRIVERS, + Provider, + get_driver, + ) +__all__ = [ + "DRIVERS", + "Provider", + "get_driver", + ] +deprecated_warning(__name__) diff --git a/trunk/libcloud/security.py b/trunk/libcloud/security.py new file mode 100644 index 0000000000..c6dacf327a --- /dev/null +++ b/trunk/libcloud/security.py @@ -0,0 +1,59 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Security (SSL) Settings + +Usage: + import libcloud.security + libcloud.security.VERIFY_SSL_CERT = True + + # optional + libcloud.security.CA_CERTS_PATH.append("/path/to/cacert.txt") +""" + +VERIFY_SSL_CERT = True +VERIFY_SSL_CERT_STRICT = True + +# File containing one or more PEM-encoded CA certificates +# concatenated together +CA_CERTS_PATH = [ + # centos/fedora: openssl + '/etc/pki/tls/certs/ca-bundle.crt', + + # debian/ubuntu/arch/gentoo: ca-certificates + '/etc/ssl/certs/ca-certificates.crt', + + # freebsd: ca_root_nss + '/usr/local/share/certs/ca-root-nss.crt', + + # macports: curl-ca-bundle + '/opt/local/share/curl/curl-ca-bundle.crt', +] + +CA_CERTS_UNAVAILABLE_WARNING_MSG = ( + 'Warning: No CA Certificates were found in CA_CERTS_PATH. ' + 'Toggling VERIFY_SSL_CERT to False.' +) + +CA_CERTS_UNAVAILABLE_ERROR_MSG = ( + 'No CA Certificates were found in CA_CERTS_PATH. ' +) + +VERIFY_SSL_DISABLED_MSG = ( + 'SSL certificate verification is disabled, this can pose a ' + 'security risk. For more information how to enable the SSL ' + 'certificate verification, please visit the libcloud ' + 'documentation.' +) diff --git a/trunk/libcloud/ssh.py b/trunk/libcloud/ssh.py new file mode 100644 index 0000000000..8901d7cab4 --- /dev/null +++ b/trunk/libcloud/ssh.py @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.ssh import ( + BaseSSHClient, + ParamikoSSHClient, + ShellOutSSHClient, + SSHClient, + have_paramiko) + +__all__ = [ + "BaseSSHClient", + "ParamikoSSHClient", + "ShellOutSSHClient", + "SSHClient", + "have_paramiko"] +deprecated_warning(__name__) diff --git a/trunk/libcloud/storage/__init__.py b/trunk/libcloud/storage/__init__.py new file mode 100644 index 0000000000..f73ddf07a7 --- /dev/null +++ b/trunk/libcloud/storage/__init__.py @@ -0,0 +1,3 @@ +""" +Module for working with Storage +""" diff --git a/trunk/libcloud/storage/base.py b/trunk/libcloud/storage/base.py new file mode 100644 index 0000000000..0bacff4ff7 --- /dev/null +++ b/trunk/libcloud/storage/base.py @@ -0,0 +1,729 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Provides base classes for working with storage +""" + +# Backward compatibility for Python 2.5 +from __future__ import with_statement + +import httplib +import os.path # pylint: disable-msg=W0404 +import hashlib +from os.path import join as pjoin + +from libcloud import utils +from libcloud.common.types import LibcloudError +from libcloud.common.base import ConnectionUserAndKey, BaseDriver +from libcloud.storage.types import ObjectDoesNotExistError + +CHUNK_SIZE = 8096 + +class Object(object): + """ + Represents an object (BLOB). + """ + + def __init__(self, name, size, hash, extra, meta_data, container, + driver): + """ + @type name: C{str} + @param name: Object name (must be unique per container). + + @type size: C{int} + @param size: Object size in bytes. + + @type hash: C{string} + @param hash Object hash. + + @type container: C{Container} + @param container: Object container. + + @type extra: C{dict} + @param extra: Extra attributes. + + @type meta_data: C{dict} + @param meta_data: Optional object meta data. + + @type driver: C{StorageDriver} + @param driver: StorageDriver instance. 
+ """ + + self.name = name + self.size = size + self.hash = hash + self.container = container + self.extra = extra or {} + self.meta_data = meta_data or {} + self.driver = driver + + def get_cdn_url(self): + return self.driver.get_object_cdn_url(obj=self) + + def enable_cdn(self): + return self.driver.enable_object_cdn(obj=self) + + def download(self, destination_path, overwrite_existing=False, + delete_on_failure=True): + return self.driver.download_object(self, destination_path, + overwrite_existing, + delete_on_failure) + + def as_stream(self, chunk_size=None): + return self.driver.download_object_as_stream(self, chunk_size) + + def delete(self): + return self.driver.delete_object(self) + + def __repr__(self): + return ('' % + (self.name, self.size, self.hash, self.driver.name)) + +class Container(object): + """ + Represents a container (bucket) which can hold multiple objects. + """ + + def __init__(self, name, extra, driver): + """ + @type name: C{str} + @param name: Container name (must be unique). + + @type extra: C{dict} + @param extra: Extra attributes. + + @type driver: C{StorageDriver} + @param driver: StorageDriver instance. 
+ """ + + self.name = name + self.extra = extra or {} + self.driver = driver + + def list_objects(self): + return self.driver.list_container_objects(container=self) + + def get_cdn_url(self): + return self.driver.get_container_cdn_url(container=self) + + def enable_cdn(self): + return self.driver.enable_container_cdn(container=self) + + def get_object(self, object_name): + return self.driver.get_object(container_name=self.name, + object_name=object_name) + + def upload_object(self, file_path, object_name, extra=None): + return self.driver.upload_object( + file_path, self, object_name, extra) + + def upload_object_via_stream(self, iterator, object_name, extra=None): + return self.driver.upload_object_via_stream( + iterator, self, object_name, extra) + + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): + return self.driver.download_object(obj, destination_path) + + def download_object_as_stream(self, obj, chunk_size=None): + return self.driver.download_object_as_stream(obj, chunk_size) + + def delete_object(self, obj): + return self.driver.delete_object(obj) + + def delete(self): + return self.driver.delete_container(self) + + def __repr__(self): + return ('' + % (self.name, self.driver.name)) + +class StorageDriver(BaseDriver): + """ + A base StorageDriver to derive from. + """ + + connectionCls = ConnectionUserAndKey + name = None + hash_type = 'md5' + supports_chunked_encoding = False + + def __init__(self, key, secret=None, secure=True, host=None, port=None): + super(StorageDriver, self).__init__(key=key, secret=secret, secure=secure, + host=host, port=port) + + def list_containters(self): + """ + Return a list of containers. + + @return: A list of Container instances. + """ + raise NotImplementedError( + 'list_containers not implemented for this driver') + + def list_container_objects(self, container): + """ + Return a list of objects for the given container. 
+ + @type container: C{Container} + @param container: Container instance + + @return: A list of Object instances. + """ + raise NotImplementedError( + 'list_objects not implemented for this driver') + + def get_container(self, container_name): + """ + Return a container instance. + + @type container_name: C{str} + @param container_name: Container name. + + @return: C{Container} instance. + """ + raise NotImplementedError( + 'get_object not implemented for this driver') + + def get_container_cdn_url(self, container): + """ + Return a container CDN URL. + + @type container: C{Container} + @param container: Container instance + + @return: A CDN URL for this container. + """ + raise NotImplementedError( + 'get_container_cdn_url not implemented for this driver') + + def get_object(self, container_name, object_name): + """ + Return an object instance. + + @type container_name: C{str} + @param container_name: Container name. + + @type object_name: C{str} + @param object_name: Object name. + + @return: C{Object} instance. + """ + raise NotImplementedError( + 'get_object not implemented for this driver') + + def get_object_cdn_url(self, obj): + """ + Return a container CDN URL. + + @type obj: C{Object} + @param obj: Object instance + + @return: A CDN URL for this object. + """ + raise NotImplementedError( + 'get_object_cdn_url not implemented for this driver') + + def enable_container_cdn(self, container): + raise NotImplementedError( + 'enable_container_cdn not implemented for this driver') + + def enable_object_cdn(self, obj): + raise NotImplementedError( + 'enable_object_cdn not implemented for this driver') + + def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): + """ + Download an object to the specified destination path. + + @type obj: C{Object} + @param obj: Object instance. + + @type destination_path: C{str} + @param destination_path: Full path to a file or a directory where the + incoming file will be saved. 
+ + @type overwrite_existing: C{bool} + @param overwrite_existing: True to overwrite an existing file, defaults to False. + + @type delete_on_failure: C{bool} + @param delete_on_failure: True to delete a partially downloaded file if + the download was not successful (hash mismatch / file size). + + @rtype: C{bool} + @return: True if an object has been successfully downloaded, False + otherwise. + """ + raise NotImplementedError( + 'download_object not implemented for this driver') + + def download_object_as_stream(self, obj, chunk_size=None): + """ + Return a generator which yields object data. + + @type obj: C{Object} + @param obj: Object instance + + @type chunk_size: C{int} + @param chunk_size: Optional chunk size (in bytes). + """ + raise NotImplementedError( + 'download_object_as_stream not implemented for this driver') + + def upload_object(self, file_path, container, object_name, extra=None, + verify_hash=True): + """ + Upload an object currently located on a disk. + + @type file_path: C{str} + @param file_path: Path to the object on disk. + + @type container: C{Container} + @param container: Destination container. + + @type object_name: C{str} + @param object_name: Object name. + + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + """ + raise NotImplementedError( + 'upload_object not implemented for this driver') + + def upload_object_via_stream(self, iterator, container, + object_name, + extra=None): + """ + Upload an object using an iterator. + + If a provider supports it, chunked transfer encoding is used and you + don't need to know in advance the amount of data to be uploaded. + + Otherwise if a provider doesn't support it, iterator will be exhausted + so a total size for data to be uploaded can be determined. + + Note: Exhausting the iterator means that the whole data must be buffered + in memory which might result in memory exhausting when uploading a very + large object. 
+ + If a file is located on a disk you are advised to use upload_object + function which uses fs.stat function to determine the file size and it + doesn't need to buffer whole object in the memory. + + @type iterator: C{object} + @param iterator: An object which implements the iterator interface. + + @type container: C{Container} + @param container: Destination container. + + @type object_name: C{str} + @param object_name: Object name. + + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + + Note: This dictionary must contain a 'content_type' key which represents + a content type of the stored object. + """ + raise NotImplementedError( + 'upload_object_via_stream not implemented for this driver') + + def delete_object(self, obj): + """ + Delete an object. + + @type obj: C{Object} + @param obj: Object instance. + + @return: C{bool} True on success. + """ + raise NotImplementedError( + 'delete_object not implemented for this driver') + + def create_container(self, container_name): + """ + Create a new container. + + @type container_name: C{str} + @param container_name: Container name. + + @return: C{Container} instance on success. + """ + raise NotImplementedError( + 'create_container not implemented for this driver') + + def delete_container(self, container): + """ + Delete a container. + + @type container: C{Container} + @param container: Container instance + + @rtype: C{bool} + @return: True on success, False otherwise. + """ + raise NotImplementedError( + 'delete_container not implemented for this driver') + + def _get_object(self, obj, callback, callback_kwargs, response, + success_status_code=None): + """ + Call passed callback and start transfer of the object' + + @type obj: C{Object} + @param obj: Object instance. 
+ + @type callback: C{Function} + @param callback: Function which is called with the passed callback_kwargs + + @type callback_kwargs: C{dict} + @param callback_kwargs: Keyword arguments which are passed to the callback. + + @typed response: C{Response} + @param response: Response instance. + + @type success_status_code: C{int} + @param success_status_code: Status code which represents a successful + transfer (defaults to httplib.OK) + + @rtype: C{bool} + @return: True on success, False otherwise. + """ + success_status_code = success_status_code or httplib.OK + + if response.status == success_status_code: + return callback(**callback_kwargs) + elif response.status == httplib.NOT_FOUND: + raise ObjectDoesNotExistError(object_name=obj.name, + value='', driver=self) + + raise LibcloudError(value='Unexpected status code: %s' % + (response.status), + driver=self) + + def _save_object(self, response, obj, destination_path, + overwrite_existing=False, delete_on_failure=True, + chunk_size=None): + """ + Save object to the provided path. + + @type response: C{RawResponse} + @param response: RawResponse instance. + + @type obj: C{Object} + @param obj: Object instance. + + @type destination_path: C{Str} + @param destination_path: Destination directory. + + @type delete_on_failure: C{bool} + @param delete_on_failure: True to delete partially downloaded object if + the download fails. + @type overwrite_existing: C{bool} + @param overwrite_existing: True to overwrite a local path if it already + exists. + + @type chunk_size: C{int} + @param chunk_size: Optional chunk size (defaults to L{libcloud.storage.base.CHUNK_SIZE}, 8kb) + + @rtype: C{bool} + @return: True on success, False otherwise. 
+ """ + + chunk_size = chunk_size or CHUNK_SIZE + + base_name = os.path.basename(destination_path) + + if not base_name and not os.path.exists(destination_path): + raise LibcloudError( + value='Path %s does not exist' % (destination_path), + driver=self) + + if not base_name: + file_path = pjoin(destination_path, obj.name) + else: + file_path = destination_path + + if os.path.exists(file_path) and not overwrite_existing: + raise LibcloudError( + value='File %s already exists, but ' % (file_path) + + 'overwrite_existing=False', + driver=self) + + stream = utils.read_in_chunks(response, chunk_size) + + try: + data_read = stream.next() + except StopIteration: + # Empty response? + return False + + bytes_transferred = 0 + + with open(file_path, 'wb') as file_handle: + while len(data_read) > 0: + file_handle.write(data_read) + bytes_transferred += len(data_read) + + try: + data_read = stream.next() + except StopIteration: + data_read = '' + + if int(obj.size) != int(bytes_transferred): + # Transfer failed, support retry? + if delete_on_failure: + try: + os.unlink(file_path) + except Exception: + pass + + return False + + return True + + def _upload_object(self, object_name, content_type, upload_func, + upload_func_kwargs, request_path, request_method='PUT', + headers=None, file_path=None, iterator=None): + """ + Helper function for setting common request headers and calling the + passed in callback which uploads an object. 
+ """ + headers = headers or {} + + if file_path and not os.path.exists(file_path): + raise OSError('File %s does not exist' % (file_path)) + + if iterator is not None and not hasattr(iterator, 'next'): + raise AttributeError('iterator object must implement next() ' + + 'method.') + + if not content_type: + if file_path: + name = file_path + else: + name = object_name + content_type, _ = utils.guess_file_mime_type(name) + + if not content_type: + raise AttributeError( + 'File content-type could not be guessed and' + + ' no content_type value provided') + + file_size = None + + if iterator: + if self.supports_chunked_encoding: + headers['Transfer-Encoding'] = 'chunked' + upload_func_kwargs['chunked'] = True + else: + # Chunked transfer encoding is not supported. Need to buffer all + # the data in memory so we can determine file size. + iterator = utils.read_in_chunks(iterator=iterator) + data = utils.exhaust_iterator(iterator=iterator) + + file_size = len(data) + upload_func_kwargs['data'] = data + else: + file_size = os.path.getsize(file_path) + upload_func_kwargs['chunked'] = False + + if file_size: + headers['Content-Length'] = file_size + + headers['Content-Type'] = content_type + response = self.connection.request(request_path, + method=request_method, data=None, + headers=headers, raw=True) + + upload_func_kwargs['response'] = response + success, data_hash, bytes_transferred = upload_func(**upload_func_kwargs) + + if not success: + raise LibcloudError(value='Object upload failed, Perhaps a timeout?', + driver=self) + + result_dict = { 'response': response, 'data_hash': data_hash, + 'bytes_transferred': bytes_transferred } + return result_dict + + def _upload_data(self, response, data, calculate_hash=True): + """ + Upload data stored in a string. + + @type response: C{RawResponse} + @param response: RawResponse object. + + @type data: C{str} + @param data: Data to upload. 
+ + @type calculate_hash: C{boolean} + @param calculate_hash: True to calculate hash of the transfered data. + (defauls to True). + + @rtype: C{tuple} + @return: First item is a boolean indicator of success, second + one is the uploaded data MD5 hash and the third one + is the number of transferred bytes. + """ + bytes_transferred = 0 + data_hash = None + + if calculate_hash: + data_hash = self._get_hash_function() + data_hash.update(data) + + try: + response.connection.connection.send(data) + except Exception: + # TODO: let this exception propagate + # Timeout, etc. + return False, None, bytes_transferred + + bytes_transferred = len(data) + + if calculate_hash: + data_hash = data_hash.hexdigest() + + return True, data_hash, bytes_transferred + + def _stream_data(self, response, iterator, chunked=False, + calculate_hash=True, chunk_size=None): + """ + Stream a data over an http connection. + + @type response: C{RawResponse} + @param response: RawResponse object. + + @type iterator: C{} + @param response: An object which implements an iterator interface + or a File like object with read method. + + @type chunked: C{boolean} + @param chunked: True if the chunked transfer encoding should be used + (defauls to False). + + @type calculate_hash: C{boolean} + @param calculate_hash: True to calculate hash of the transfered data. + (defauls to True). + + @type chunk_size: C{int} + @param chunk_size: Optional chunk size (defaults to CHUNK_SIZE) + + @rtype: C{tuple} + @return: First item is a boolean indicator of success, second + one is the uploaded data MD5 hash and the third one + is the number of transferred bytes. 
+ """ + + chunk_size = chunk_size or CHUNK_SIZE + + data_hash = None + if calculate_hash: + data_hash = self._get_hash_function() + + generator = utils.read_in_chunks(iterator, chunk_size) + + bytes_transferred = 0 + try: + chunk = generator.next() + except StopIteration: + # Special case when StopIteration is thrown on the first iteration - + # create a 0-byte long object + chunk = '' + if chunked: + response.connection.connection.send('%X\r\n' % + (len(chunk))) + response.connection.connection.send(chunk) + response.connection.connection.send('\r\n') + response.connection.connection.send('0\r\n\r\n') + else: + response.connection.connection.send(chunk) + return True, data_hash.hexdigest(), bytes_transferred + + while len(chunk) > 0: + try: + if chunked: + response.connection.connection.send('%X\r\n' % + (len(chunk))) + response.connection.connection.send(chunk) + response.connection.connection.send('\r\n') + else: + response.connection.connection.send(chunk) + except Exception: + # TODO: let this exception propagate + # Timeout, etc. + return False, None, bytes_transferred + + bytes_transferred += len(chunk) + if calculate_hash: + data_hash.update(chunk) + + try: + chunk = generator.next() + except StopIteration: + chunk = '' + + if chunked: + response.connection.connection.send('0\r\n\r\n') + + if calculate_hash: + data_hash = data_hash.hexdigest() + + return True, data_hash, bytes_transferred + + def _upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + """ + Upload a file to the server. + + @type response: C{RawResponse} + @param response: RawResponse object. + + @type file_path: C{str} + @param file_path: Path to a local file. + + @type iterator: C{} + @param response: An object which implements an iterator interface (File + object, etc.) + + @rtype: C{tuple} + @return: First item is a boolean indicator of success, second + one is the uploaded data MD5 hash and the third one + is the number of transferred bytes. 
+ """ + with open (file_path, 'rb') as file_handle: + success, data_hash, bytes_transferred = ( + self._stream_data( + response=response, + iterator=iter(file_handle), + chunked=chunked, + calculate_hash=calculate_hash)) + + return success, data_hash, bytes_transferred + + def _get_hash_function(self): + """ + Return instantiated hash function for the hash type supported by + the provider. + """ + try: + func = getattr(hashlib, self.hash_type)() + except AttributeError: + raise RuntimeError('Invalid or unsupported hash type: %s' % + (self.hash_type)) + + return func diff --git a/trunk/libcloud/storage/drivers/__init__.py b/trunk/libcloud/storage/drivers/__init__.py new file mode 100644 index 0000000000..fe8b04f388 --- /dev/null +++ b/trunk/libcloud/storage/drivers/__init__.py @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Drivers for working with different providers +""" + +__all__ = [ + 'dummy', + 'cloudfiles' +] diff --git a/trunk/libcloud/storage/drivers/atmos.py b/trunk/libcloud/storage/drivers/atmos.py new file mode 100644 index 0000000000..08ffc2c44a --- /dev/null +++ b/trunk/libcloud/storage/drivers/atmos.py @@ -0,0 +1,414 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 +import hashlib +import hmac +import httplib +import time +import urllib +import urlparse + +from xml.etree import ElementTree + +from libcloud import utils +from libcloud.common.base import ConnectionUserAndKey, XmlResponse +from libcloud.common.types import LazyList + +from libcloud.storage.base import Object, Container, StorageDriver, CHUNK_SIZE +from libcloud.storage.types import ContainerAlreadyExistsError, \ + ContainerDoesNotExistError, \ + ContainerIsNotEmptyError, \ + ObjectDoesNotExistError + +def collapse(s): + return ' '.join([x for x in s.split(' ') if x]) + +class AtmosError(Exception): + def __init__(self, code, message): + self.code = code + self.message = message + +class AtmosResponse(XmlResponse): + def success(self): + return self.status in (httplib.OK, httplib.CREATED, httplib.NO_CONTENT, + httplib.PARTIAL_CONTENT) + + def parse_error(self): + tree = self.parse_body() + + if tree is None: + return None + + code = int(tree.find('Code').text) + message = tree.find('Message').text + raise AtmosError(code, message) + +class AtmosConnection(ConnectionUserAndKey): + responseCls = AtmosResponse + + def add_default_headers(self, headers): + headers['x-emc-uid'] = self.user_id + headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', + time.gmtime()) + headers['x-emc-date'] = headers['Date'] + + if 'Content-Type' not in headers: + headers['Content-Type'] = 'application/octet-stream' + if 'Accept' not in headers: + headers['Accept'] = '*/*' + + return headers + + def pre_connect_hook(self, params, headers): + headers['x-emc-signature'] = self._calculate_signature(params, headers) + + return params, headers + + def _calculate_signature(self, params, headers): + pathstring = self.action + if pathstring.startswith(self.driver.path): + pathstring = pathstring[len(self.driver.path):] + if params: + if type(params) is dict: + params = params.items() + pathstring += '?' 
+ urllib.urlencode(params) + pathstring = pathstring.lower() + + xhdrs = [(k, v) for k, v in headers.items() if k.startswith('x-emc-')] + xhdrs.sort(key=lambda x: x[0]) + + signature = [ + self.method, + headers.get('Content-Type', ''), + headers.get('Range', ''), + headers.get('Date', ''), + pathstring, + ] + signature.extend([k + ':' + collapse(v) for k, v in xhdrs]) + signature = '\n'.join(signature) + key = base64.b64decode(self.key) + signature = hmac.new(key, signature, hashlib.sha1).digest() + return base64.b64encode(signature) + +class AtmosDriver(StorageDriver): + connectionCls = AtmosConnection + + host = None + path = None + api_name = 'atmos' + supports_chunked_encoding = True + + DEFAULT_CDN_TTL = 60 * 60 * 24 * 7 # 1 week + + def __init__(self, key, secret=None, secure=True, host=None, port=None): + host = host or self.host + super(AtmosDriver, self).__init__(key, secret, secure, host, port) + + def list_containers(self): + result = self.connection.request(self._namespace_path('')) + entries = self._list_objects(result.object, object_type='directory') + containers = [] + for entry in entries: + extra = { + 'object_id': entry['id'] + } + containers.append(Container(entry['name'], extra, self)) + return containers + + def get_container(self, container_name): + path = self._namespace_path(container_name + '/?metadata/system') + try: + result = self.connection.request(path) + except AtmosError, e: + if e.code != 1003: + raise + raise ContainerDoesNotExistError(e, self, container_name) + meta = self._emc_meta(result) + extra = { + 'object_id': meta['objectid'] + } + return Container(container_name, extra, self) + + def create_container(self, container_name): + path = self._namespace_path(container_name + '/') + try: + result = self.connection.request(path, method='POST') + except AtmosError, e: + if e.code != 1016: + raise + raise ContainerAlreadyExistsError(e, self, container_name) + return self.get_container(container_name) + + def delete_container(self, 
container): + try: + self.connection.request(self._namespace_path(container.name + '/'), + method='DELETE') + except AtmosError, e: + if e.code == 1003: + raise ContainerDoesNotExistError(e, self, container.name) + elif e.code == 1023: + raise ContainerIsNotEmptyError(e, self, container.name) + return True + + def get_object(self, container_name, object_name): + container = self.get_container(container_name) + path = container_name + '/' + object_name + path = self._namespace_path(path) + + try: + result = self.connection.request(path + '?metadata/system') + system_meta = self._emc_meta(result) + + result = self.connection.request(path + '?metadata/user') + user_meta = self._emc_meta(result) + except AtmosError, e: + if e.code != 1003: + raise + raise ObjectDoesNotExistError(e, self, object_name) + + last_modified = time.strptime(system_meta['mtime'], + '%Y-%m-%dT%H:%M:%SZ') + last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT', + last_modified) + extra = { + 'object_id': system_meta['objectid'], + 'last_modified': last_modified + } + data_hash = user_meta.pop('md5', '') + return Object(object_name, int(system_meta['size']), data_hash, extra, + user_meta, container, self) + + def upload_object(self, file_path, container, object_name, extra=None, + verify_hash=True): + upload_func = self._upload_file + upload_func_kwargs = { 'file_path': file_path } + method = 'PUT' + + extra = extra or {} + request_path = container.name + '/' + object_name + request_path = self._namespace_path(request_path) + content_type = extra.get('content_type', None) + + try: + self.connection.request(request_path + '?metadata/system') + except AtmosError, e: + if e.code != 1003: + raise + method = 'POST' + + result_dict = self._upload_object(object_name=object_name, + content_type=content_type, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + request_path=request_path, + request_method=method, + headers={}, file_path=file_path) + + response = 
result_dict['response'].response + bytes_transferred = result_dict['bytes_transferred'] + + if extra is None: + meta_data = {} + else: + meta_data = extra.get('meta_data', {}) + meta_data['md5'] = result_dict['data_hash'] + user_meta = ', '.join([k + '=' + str(v) for k, v in meta_data.items()]) + self.connection.request(request_path + '?metadata/user', method='POST', + headers={'x-emc-meta': user_meta}) + result = self.connection.request(request_path + '?metadata/system') + meta = self._emc_meta(result) + del meta_data['md5'] + extra = { + 'object_id': meta['objectid'], + 'meta_data': meta_data, + } + + return Object(object_name, bytes_transferred, result_dict['data_hash'], + extra, meta_data, container, self) + + def upload_object_via_stream(self, iterator, container, object_name, + extra=None): + if isinstance(iterator, file): + iterator = iter(iterator) + + data_hash = hashlib.md5() + generator = utils.read_in_chunks(iterator, CHUNK_SIZE, True) + bytes_transferred = 0 + try: + chunk = generator.next() + except StopIteration: + chunk = '' + + path = self._namespace_path(container.name + '/' + object_name) + + while True: + end = bytes_transferred + len(chunk) - 1 + data_hash.update(chunk) + headers = { + 'x-emc-meta': 'md5=' + data_hash.hexdigest(), + } + if len(chunk) > 0: + headers['Range'] = 'Bytes=%d-%d' % (bytes_transferred, end) + result = self.connection.request(path, method='PUT', data=chunk, + headers=headers) + bytes_transferred += len(chunk) + + try: + chunk = generator.next() + except StopIteration: + break + if len(chunk) == 0: + break + + data_hash = data_hash.hexdigest() + + if extra is None: + meta_data = {} + else: + meta_data = extra.get('meta_data', {}) + meta_data['md5'] = data_hash + user_meta = ', '.join([k + '=' + str(v) for k, v in meta_data.items()]) + self.connection.request(path + '?metadata/user', method='POST', + headers={'x-emc-meta': user_meta}) + + result = self.connection.request(path + '?metadata/system') + + meta = 
self._emc_meta(result) + extra = { + 'object_id': meta['objectid'], + 'meta_data': meta_data, + } + + return Object(object_name, bytes_transferred, data_hash, extra, + meta_data, container, self) + + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): + path = self._namespace_path(obj.container.name + '/' + obj.name) + response = self.connection.request(path, method='GET', raw=True) + + return self._get_object(obj=obj, callback=self._save_object, + response=response, + callback_kwargs={ + 'obj': obj, + 'response': response.response, + 'destination_path': destination_path, + 'overwrite_existing': overwrite_existing, + 'delete_on_failure': delete_on_failure + }, + success_status_code=httplib.OK) + + def download_object_as_stream(self, obj, chunk_size=None): + path = self._namespace_path(obj.container.name + '/' + obj.name) + response = self.connection.request(path, method='GET', raw=True) + + return self._get_object(obj=obj, callback=utils.read_in_chunks, + response=response, + callback_kwargs={ + 'iterator': response.response, + 'chunk_size': chunk_size + }, + success_status_code=httplib.OK) + + def delete_object(self, obj): + path = self._namespace_path(obj.container.name + '/' + obj.name) + try: + self.connection.request(path, method='DELETE') + except AtmosError, e: + if e.code != 1003: + raise + raise ObjectDoesNotExistError(e, self, obj.name) + return True + + def list_container_objects(self, container): + value_dict = {'container': container} + return LazyList(get_more=self._get_more, value_dict=value_dict) + + def enable_object_cdn(self, obj): + return True + + def get_object_cdn_url(self, obj, expiry=None, use_object=False): + if use_object: + path = '/rest/objects' + obj.meta_data['object_id'] + else: + path = '/rest/namespace/' + obj.container.name + '/' + obj.name + + if self.secure: + protocol = 'https' + else: + protocol = 'http' + + expiry = str(expiry or int(time.time()) + self.DEFAULT_CDN_TTL) + 
params = [ + ('uid', self.key), + ('expires', expiry), + ] + params.append(('signature', self._cdn_signature(path, params))) + + params = urllib.urlencode(params) + path = self.path + path + return urlparse.urlunparse((protocol, self.host, path, '', params, '')) + + def _cdn_signature(self, path, params): + key = base64.b64decode(self.secret) + signature = '\n'.join(['GET', path.lower(), self.key, expiry]) + signature = hmac.new(key, signature, hashlib.sha1).digest() + + return base64.b64encode(signature) + + def _list_objects(self, tree, object_type=None): + listing = tree.find(self._emc_tag('DirectoryList')) + entries = [] + for entry in listing.findall(self._emc_tag('DirectoryEntry')): + file_type = entry.find(self._emc_tag('FileType')).text + if object_type is not None and object_type != file_type: + continue + entries.append({ + 'id': entry.find(self._emc_tag('ObjectID')).text, + 'type': file_type, + 'name': entry.find(self._emc_tag('Filename')).text + }) + return entries + + def _namespace_path(self, path): + return self.path + '/rest/namespace/' + path + + def _object_path(self, object_id): + return self.path + '/rest/objects/' + object_id + + @staticmethod + def _emc_tag(tag): + return '{http://www.emc.com/cos/}' + tag + + def _emc_meta(self, response): + meta = response.headers.get('x-emc-meta', '') + if len(meta) == 0: + return {} + meta = meta.split(', ') + return dict([x.split('=', 1) for x in meta]) + + def _get_more(self, last_key, value_dict): + container = value_dict['container'] + headers = {'x-emc-include-meta': '1'} + path = self._namespace_path(container.name + '/') + result = self.connection.request(path, headers=headers) + entries = self._list_objects(result.object, object_type='regular') + objects = [] + for entry in entries: + metadata = {'object_id': entry['id']} + objects.append(Object(entry['name'], 0, '', {}, metadata, container, + self)) + return objects, None, True diff --git a/trunk/libcloud/storage/drivers/cloudfiles.py 
b/trunk/libcloud/storage/drivers/cloudfiles.py new file mode 100644 index 0000000000..df857108bd --- /dev/null +++ b/trunk/libcloud/storage/drivers/cloudfiles.py @@ -0,0 +1,522 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import httplib +import urllib + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.utils import read_in_chunks +from libcloud.common.types import MalformedResponseError, LibcloudError +from libcloud.common.base import Response, RawResponse + +from libcloud.storage.providers import Provider +from libcloud.storage.base import Object, Container, StorageDriver +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.common.types import LazyList +from libcloud.common.openstack import OpenStackBaseConnection + +from libcloud.common.rackspace import ( + AUTH_URL_US, AUTH_URL_UK) + +CDN_HOST = 'cdn.clouddrive.com' +API_VERSION = 'v1.0' + + +class 
CloudFilesResponse(Response): + + valid_response_codes = [ httplib.NOT_FOUND, httplib.CONFLICT ] + + def success(self): + i = int(self.status) + return i >= 200 and i <= 299 or i in self.valid_response_codes + + def parse_body(self): + if not self.body: + return None + + if 'content-type' in self.headers: + key = 'content-type' + elif 'Content-Type' in self.headers: + key = 'Content-Type' + else: + raise LibcloudError('Missing content-type header') + + content_type = self.headers[key] + if content_type.find(';') != -1: + content_type = content_type.split(';')[0] + + if content_type == 'application/json': + try: + data = json.loads(self.body) + except: + raise MalformedResponseError('Failed to parse JSON', + body=self.body, + driver=CloudFilesStorageDriver) + elif content_type == 'text/plain': + data = self.body + else: + data = self.body + + return data + +class CloudFilesRawResponse(CloudFilesResponse, RawResponse): + pass + +class CloudFilesConnection(OpenStackBaseConnection): + """ + Base connection class for the Cloudfiles driver. 
+ """ + + auth_url = AUTH_URL_US + responseCls = CloudFilesResponse + rawResponseCls = CloudFilesRawResponse + _url_key = "storage_url" + + def __init__(self, user_id, key, secure=True): + super(CloudFilesConnection, self).__init__(user_id, key, secure=secure) + self.api_version = API_VERSION + self.accept_format = 'application/json' + + def request(self, action, params=None, data='', headers=None, method='GET', + raw=False, cdn_request=False): + if not headers: + headers = {} + if not params: + params = {} + + if cdn_request: + host = self._get_host(url_key='cdn_management_url') + else: + host = None + + params['format'] = 'json' + + if method in [ 'POST', 'PUT' ]: + headers.update({'Content-Type': 'application/json; charset=UTF-8'}) + + return super(CloudFilesConnection, self).request( + action=action, + params=params, data=data, + method=method, headers=headers, + raw=raw) + + +class CloudFilesUSConnection(CloudFilesConnection): + """ + Connection class for the Cloudfiles US endpoint. + """ + + auth_url = AUTH_URL_US + + +class CloudFilesUKConnection(CloudFilesConnection): + """ + Connection class for the Cloudfiles UK endpoint. + """ + + auth_url = AUTH_URL_UK + + +class CloudFilesStorageDriver(StorageDriver): + """ + Base CloudFiles driver. + + You should never create an instance of this class directly but use US/US + class. 
+ """ + name = 'CloudFiles' + + connectionCls = CloudFilesConnection + hash_type = 'md5' + supports_chunked_encoding = True + + def list_containers(self): + response = self.connection.request('') + + if response.status == httplib.NO_CONTENT: + return [] + elif response.status == httplib.OK: + return self._to_container_list(json.loads(response.body)) + + raise LibcloudError('Unexpected status code: %s' % (response.status)) + + def list_container_objects(self, container): + value_dict = { 'container': container } + return LazyList(get_more=self._get_more, value_dict=value_dict) + + def get_container(self, container_name): + response = self.connection.request('/%s' % (container_name), + method='HEAD') + + if response.status == httplib.NO_CONTENT: + container = self._headers_to_container( + container_name, response.headers) + return container + elif response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError(None, self, container_name) + + raise LibcloudError('Unexpected status code: %s' % (response.status)) + + def get_object(self, container_name, object_name): + container = self.get_container(container_name) + response = self.connection.request('/%s/%s' % (container_name, + object_name), + method='HEAD') + if response.status in [ httplib.OK, httplib.NO_CONTENT ]: + obj = self._headers_to_object( + object_name, container, response.headers) + return obj + elif response.status == httplib.NOT_FOUND: + raise ObjectDoesNotExistError(None, self, object_name) + + raise LibcloudError('Unexpected status code: %s' % (response.status)) + + def get_container_cdn_url(self, container): + container_name = container.name + response = self.connection.request('/%s' % (container_name), + method='HEAD', + cdn_request=True) + + if response.status == httplib.NO_CONTENT: + cdn_url = response.headers['x-cdn-uri'] + return cdn_url + elif response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError(value='', + container_name=container_name, + driver=self) + + raise 
LibcloudError('Unexpected status code: %s' % (response.status)) + + def get_object_cdn_url(self, obj): + container_cdn_url = self.get_container_cdn_url(container=obj.container) + return '%s/%s' % (container_cdn_url, obj.name) + + def enable_container_cdn(self, container): + container_name = container.name + response = self.connection.request('/%s' % (container_name), + method='PUT', + cdn_request=True) + + if response.status in [ httplib.CREATED, httplib.ACCEPTED ]: + return True + + return False + + def create_container(self, container_name): + container_name = self._clean_container_name(container_name) + response = self.connection.request( + '/%s' % (container_name), method='PUT') + + if response.status == httplib.CREATED: + # Accepted mean that container is not yet created but it will be + # eventually + extra = { 'object_count': 0 } + container = Container(name=container_name, extra=extra, driver=self) + + return container + elif response.status == httplib.ACCEPTED: + error = ContainerAlreadyExistsError(None, self, container_name) + raise error + + raise LibcloudError('Unexpected status code: %s' % (response.status)) + + def delete_container(self, container): + name = self._clean_container_name(container.name) + + # Only empty container can be deleted + response = self.connection.request('/%s' % (name), method='DELETE') + + if response.status == httplib.NO_CONTENT: + return True + elif response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError(value='', + container_name=name, driver=self) + elif response.status == httplib.CONFLICT: + # @TODO: Add "delete_all_objects" parameter? 
+ raise ContainerIsNotEmptyError(value='', + container_name=name, driver=self) + + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): + container_name = obj.container.name + object_name = obj.name + response = self.connection.request('/%s/%s' % (container_name, + object_name), + method='GET', raw=True) + + return self._get_object(obj=obj, callback=self._save_object, + response=response, + callback_kwargs={'obj': obj, + 'response': response.response, + 'destination_path': destination_path, + 'overwrite_existing': overwrite_existing, + 'delete_on_failure': delete_on_failure}, + success_status_code=httplib.OK) + + def download_object_as_stream(self, obj, chunk_size=None): + container_name = obj.container.name + object_name = obj.name + response = self.connection.request('/%s/%s' % (container_name, + object_name), + method='GET', raw=True) + + return self._get_object(obj=obj, callback=read_in_chunks, + response=response, + callback_kwargs={ 'iterator': response.response, + 'chunk_size': chunk_size}, + success_status_code=httplib.OK) + + def upload_object(self, file_path, container, object_name, extra=None, + verify_hash=True): + """ + Upload an object. + + Note: This will override file with a same name if it already exists. 
+ """ + upload_func = self._upload_file + upload_func_kwargs = { 'file_path': file_path } + + return self._put_object(container=container, object_name=object_name, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + extra=extra, file_path=file_path, + verify_hash=verify_hash) + + def upload_object_via_stream(self, iterator, + container, object_name, extra=None): + if isinstance(iterator, file): + iterator = iter(iterator) + + upload_func = self._stream_data + upload_func_kwargs = { 'iterator': iterator } + + return self._put_object(container=container, object_name=object_name, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + extra=extra, iterator=iterator) + + def delete_object(self, obj): + container_name = self._clean_container_name(obj.container.name) + object_name = self._clean_object_name(obj.name) + + response = self.connection.request( + '/%s/%s' % (container_name, object_name), method='DELETE') + + if response.status == httplib.NO_CONTENT: + return True + elif response.status == httplib.NOT_FOUND: + raise ObjectDoesNotExistError(value='', object_name=object_name, + driver=self) + + raise LibcloudError('Unexpected status code: %s' % (response.status)) + + def ex_get_meta_data(self): + response = self.connection.request('', method='HEAD') + + if response.status == httplib.NO_CONTENT: + container_count = response.headers.get( + 'x-account-container-count', 'unknown') + object_count = response.headers.get( + 'x-account-object-count', 'unknown') + bytes_used = response.headers.get( + 'x-account-bytes-used', 'unknown') + + return { 'container_count': int(container_count), + 'object_count': int(object_count), + 'bytes_used': int(bytes_used) } + + raise LibcloudError('Unexpected status code: %s' % (response.status)) + + def _get_more(self, last_key, value_dict): + container = value_dict['container'] + params = {} + + if last_key: + params['marker'] = last_key + + response = self.connection.request('/%s' % (container.name), + 
params=params) + + if response.status == httplib.NO_CONTENT: + # Empty or inexistent container + return [], None, True + elif response.status == httplib.OK: + objects = self._to_object_list(json.loads(response.body), container) + + # TODO: Is this really needed? + if len(objects) == 0: + return [], None, True + + return objects, objects[-1].name, False + + raise LibcloudError('Unexpected status code: %s' % (response.status)) + + def _put_object(self, container, object_name, upload_func, + upload_func_kwargs, extra=None, file_path=None, + iterator=None, verify_hash=True): + extra = extra or {} + container_name_cleaned = self._clean_container_name(container.name) + object_name_cleaned = self._clean_object_name(object_name) + content_type = extra.get('content_type', None) + meta_data = extra.get('meta_data', None) + + headers = {} + if meta_data: + for key, value in meta_data.iteritems(): + key = 'X-Object-Meta-%s' % (key) + headers[key] = value + + request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned) + result_dict = self._upload_object(object_name=object_name, + content_type=content_type, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + request_path=request_path, + request_method='PUT', + headers=headers, file_path=file_path, + iterator=iterator) + + response = result_dict['response'].response + bytes_transferred = result_dict['bytes_transferred'] + server_hash = result_dict['response'].headers.get('etag', None) + + if response.status == httplib.EXPECTATION_FAILED: + raise LibcloudError(value='Missing content-type header', + driver=self) + elif verify_hash and not server_hash: + raise LibcloudError(value='Server didn\'t return etag', + driver=self) + elif (verify_hash and result_dict['data_hash'] != server_hash): + raise ObjectHashMismatchError( + value=('MD5 hash checksum does not match (expected=%s, ' + + 'actual=%s)') % (result_dict['data_hash'], server_hash), + object_name=object_name, driver=self) + elif response.status == 
httplib.CREATED: + obj = Object( + name=object_name, size=bytes_transferred, hash=server_hash, + extra=None, meta_data=meta_data, container=container, + driver=self) + + return obj + else: + # @TODO: Add test case for this condition (probably 411) + raise LibcloudError('status_code=%s' % (response.status), + driver=self) + + def _clean_container_name(self, name): + """ + Clean container name. + """ + if name.startswith('/'): + name = name[1:] + name = urllib.quote(name) + + if name.find('/') != -1: + raise InvalidContainerNameError(value='Container name cannot' + ' contain slashes', + container_name=name, driver=self) + + if len(name) > 256: + raise InvalidContainerNameError(value='Container name cannot be' + ' longer than 256 bytes', + container_name=name, driver=self) + + + return name + + def _clean_object_name(self, name): + name = urllib.quote(name) + return name + + def _to_container_list(self, response): + # @TODO: Handle more then 10k containers - use "lazy list"? + containers = [] + + for container in response: + extra = { 'object_count': int(container['count']), + 'size': int(container['bytes'])} + containers.append(Container(name=container['name'], extra=extra, + driver=self)) + + return containers + + def _to_object_list(self, response, container): + objects = [] + + for obj in response: + name = obj['name'] + size = int(obj['bytes']) + hash = obj['hash'] + extra = { 'content_type': obj['content_type'], + 'last_modified': obj['last_modified'] } + objects.append(Object( + name=name, size=size, hash=hash, extra=extra, + meta_data=None, container=container, driver=self)) + + return objects + + def _headers_to_container(self, name, headers): + size = int(headers.get('x-container-bytes-used', 0)) + object_count = int(headers.get('x-container-object-count', 0)) + + extra = { 'object_count': object_count, + 'size': size } + container = Container(name=name, extra=extra, driver=self) + return container + + def _headers_to_object(self, name, container, headers): 
+ size = int(headers.pop('content-length', 0)) + last_modified = headers.pop('last-modified', None) + etag = headers.pop('etag', None) + content_type = headers.pop('content-type', None) + + meta_data = {} + for key, value in headers.iteritems(): + if key.find('x-object-meta-') != -1: + key = key.replace('x-object-meta-', '') + meta_data[key] = value + + extra = { 'content_type': content_type, 'last_modified': last_modified } + + obj = Object(name=name, size=size, hash=etag, extra=extra, + meta_data=meta_data, container=container, driver=self) + return obj + +class CloudFilesUSStorageDriver(CloudFilesStorageDriver): + """ + Cloudfiles storage driver for the US endpoint. + """ + + type = Provider.CLOUDFILES_US + name = 'CloudFiles (US)' + connectionCls = CloudFilesUSConnection + +class CloudFilesUKStorageDriver(CloudFilesStorageDriver): + """ + Cloudfiles storage driver for the UK endpoint. + """ + + type = Provider.CLOUDFILES_UK + name = 'CloudFiles (UK)' + connectionCls = CloudFilesUKConnection diff --git a/trunk/libcloud/storage/drivers/dummy.py b/trunk/libcloud/storage/drivers/dummy.py new file mode 100644 index 0000000000..274225d0b9 --- /dev/null +++ b/trunk/libcloud/storage/drivers/dummy.py @@ -0,0 +1,410 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os.path +import random +import hashlib + +from libcloud.common.types import LibcloudError + +from libcloud.storage.base import Object, Container, StorageDriver +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import ObjectDoesNotExistError + + +class DummyFileObject(file): + def __init__(self, yield_count=5, chunk_len=10): + self._yield_count = yield_count + self._chunk_len = chunk_len + + def read(self, size): + i = 0 + + while i < self._yield_count: + yield self._get_chunk(self._chunk_len) + i += 1 + + raise StopIteration + + def _get_chunk(self, chunk_len): + chunk = [str(x) for x in random.randint(97, 120)] + return chunk + + def __len__(self): + return self._yield_count * self._chunk_len + +class DummyIterator(object): + def __init__(self, data=None): + self.hash = hashlib.md5() + self._data = data or [] + self._current_item = 0 + + def get_md5_hash(self): + return self.hash.hexdigest() + + def next(self): + if self._current_item == len(self._data): + raise StopIteration + + value = self._data[self._current_item] + self.hash.update(value) + self._current_item += 1 + return value + +class DummyStorageDriver(StorageDriver): + """ + Dummy Storage driver. 
+ + >>> from libcloud.storage.drivers.dummy import DummyStorageDriver + >>> driver = DummyStorageDriver('key', 'secret') + >>> container = driver.create_container(container_name='test container') + >>> container + + >>> container.name + 'test container' + >>> container.extra['object_count'] + 0 + """ + + name = 'Dummy Storage Provider' + + def __init__(self, api_key, api_secret): + self._containers = {} + + def get_meta_data(self): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> driver.get_meta_data() + {'object_count': 0, 'container_count': 0, 'bytes_used': 0} + >>> container = driver.create_container(container_name='test container 1') + >>> container = driver.create_container(container_name='test container 2') + >>> obj = container.upload_object_via_stream( + ... object_name='test object', iterator=DummyFileObject(5, 10), extra={}) + >>> driver.get_meta_data() + {'object_count': 1, 'container_count': 2, 'bytes_used': 50} + """ + + container_count = len(self._containers) + object_count = sum([ len(self._containers[container]['objects']) for + container in self._containers ]) + + bytes_used = 0 + for container in self._containers: + objects = self._containers[container]['objects'] + for _, obj in objects.iteritems(): + bytes_used += obj.size + + return { 'container_count': int(container_count), + 'object_count': int(object_count), + 'bytes_used': int(bytes_used) } + + def list_containers(self): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> driver.list_containers() + [] + >>> container = driver.create_container(container_name='test container 1') + >>> container + + >>> container.name + 'test container 1' + >>> container = driver.create_container(container_name='test container 2') + >>> container + + >>> container = driver.create_container( + ... 
container_name='test container 2') #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ContainerAlreadyExistsError: + >>> container_list=driver.list_containers() + >>> sorted([container.name for container in container_list]) + ['test container 1', 'test container 2'] + """ + + return [container['container'] for container in + self._containers.values()] + + def list_container_objects(self, container): + container = self.get_container(container.name) + + return container.objects + + def get_container(self, container_name): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ContainerDoesNotExistError: + >>> container = driver.create_container(container_name='test container 1') + >>> container + + >>> container.name + 'test container 1' + >>> driver.get_container('test container 1') + + """ + + if container_name not in self._containers: + raise ContainerDoesNotExistError(driver=self, value=None, + container_name=container_name) + + return self._containers[container_name]['container'] + + def get_container_cdn_url(self, container): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ContainerDoesNotExistError: + >>> container = driver.create_container(container_name='test container 1') + >>> container + + >>> container.name + 'test container 1' + >>> container.get_cdn_url() + 'http://www.test.com/container/test_container_1' + """ + + if container.name not in self._containers: + raise ContainerDoesNotExistError(driver=self, value=None, + container_name=container.name) + + return self._containers[container.name]['cdn_url'] + + def get_object(self, container_name, object_name): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> driver.get_object('unknown', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL + 
Traceback (most recent call last): + ContainerDoesNotExistError: + >>> container = driver.create_container(container_name='test container 1') + >>> container + + >>> driver.get_object( + ... 'test container 1', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ObjectDoesNotExistError: + >>> obj = container.upload_object_via_stream(object_name='test object', + ... iterator=DummyFileObject(5, 10), extra={}) + >>> obj + + """ + + self.get_container(container_name) + container_objects = self._containers[container_name]['objects'] + if object_name not in container_objects: + raise ObjectDoesNotExistError(object_name=object_name, value=None, + driver=self) + + return container_objects[object_name] + + def get_object_cdn_url(self, obj): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> container = driver.create_container(container_name='test container 1') + >>> container + + >>> obj = container.upload_object_via_stream(object_name='test object 5', + ... iterator=DummyFileObject(5, 10), extra={}) + >>> obj + + >>> obj.get_cdn_url() + 'http://www.test.com/object/test_object_5' + """ + + container_name = obj.container.name + container_objects = self._containers[container_name]['objects'] + if obj.name not in container_objects: + raise ObjectDoesNotExistError(object_name=obj.name, value=None, + driver=self) + + return container_objects[obj.name].meta_data['cdn_url'] + + + def create_container(self, container_name): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> container = driver.create_container(container_name='test container 1') + >>> container + + >>> container = driver.create_container( + ... 
container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ContainerAlreadyExistsError: + """ + + if container_name in self._containers: + raise ContainerAlreadyExistsError(container_name=container_name, + value=None, driver=self) + + extra = { 'object_count': 0 } + container = Container(name=container_name, extra=extra, driver=self) + + self._containers[container_name] = { 'container': container, + 'objects': {}, + 'cdn_url': + 'http://www.test.com/container/%s' % + (container_name.replace(' ', '_')) + } + return container + + def delete_container(self, container): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> container = Container(name = 'test container', + ... extra={'object_count': 0}, driver=driver) + >>> driver.delete_container(container=container)#doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ContainerDoesNotExistError: + >>> container = driver.create_container( + ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + >>> len(driver._containers) + 1 + >>> driver.delete_container(container=container) + True + >>> len(driver._containers) + 0 + >>> container = driver.create_container( + ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + >>> obj = container.upload_object_via_stream( + ... 
object_name='test object', iterator=DummyFileObject(5, 10), extra={}) + >>> driver.delete_container(container=container)#doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ContainerIsNotEmptyError: + """ + + container_name = container.name + if container_name not in self._containers: + raise ContainerDoesNotExistError(container_name=container_name, + value=None, driver=self) + + container = self._containers[container_name] + if len(container['objects']) > 0: + raise ContainerIsNotEmptyError(container_name=container_name, + value=None, driver=self) + + del self._containers[container_name] + return True + + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): + kwargs_dict = {'obj': obj, + 'response': DummyFileObject(), + 'destination_path': destination_path, + 'overwrite_existing': overwrite_existing, + 'delete_on_failure': delete_on_failure} + + return self._save_object(**kwargs_dict) + + def download_object_as_stream(self, obj, chunk_size=None): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> container = driver.create_container( + ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + >>> obj = container.upload_object_via_stream(object_name='test object', + ... iterator=DummyFileObject(5, 10), extra={}) + >>> stream = container.download_object_as_stream(obj) + >>> stream #doctest: +ELLIPSIS + ' at 0x...> + """ + + return DummyFileObject() + + def upload_object(self, file_path, container, object_name, extra=None, + file_hash=None): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> container = driver.create_container(container_name='test container 1') + >>> container.upload_object(file_path='/tmp/inexistent.file', + ... 
object_name='test') #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + LibcloudError: + >>> file_path = path = os.path.abspath(__file__) + >>> file_size = os.path.getsize(file_path) + >>> obj = container.upload_object(file_path=file_path, object_name='test') + >>> obj #doctest: +ELLIPSIS + + >>> obj.size == file_size + True + """ + + if not os.path.exists(file_path): + raise LibcloudError(value='File %s does not exist' % (file_path), + driver=self) + + size = os.path.getsize(file_path) + return self._add_object(container=container, object_name=object_name, + size=size, extra=extra) + + def upload_object_via_stream(self, iterator, container, + object_name, extra=None): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> container = driver.create_container( + ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + >>> obj = container.upload_object_via_stream( + ... object_name='test object', iterator=DummyFileObject(5, 10), extra={}) + >>> obj #doctest: +ELLIPSIS + + """ + + size = len(iterator) + return self._add_object(container=container, object_name=object_name, + size=size, extra=extra) + + def delete_object(self, obj): + """ + >>> driver = DummyStorageDriver('key', 'secret') + >>> container = driver.create_container( + ... container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL + >>> obj = container.upload_object_via_stream(object_name='test object', + ... iterator=DummyFileObject(5, 10), extra={}) + >>> obj #doctest: +ELLIPSIS + + >>> container.delete_object(obj=obj) + True + >>> obj = Object(name='test object 2', + ... size=1000, hash=None, extra=None, + ... 
meta_data=None, container=container,driver=None) + >>> container.delete_object(obj=obj) #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ObjectDoesNotExistError: + """ + + container_name = obj.container.name + object_name = obj.name + obj = self.get_object(container_name=container_name, + object_name=object_name) + + del self._containers[container_name]['objects'][object_name] + return True + + def _add_object(self, container, object_name, size, extra=None): + container = self.get_container(container.name) + + extra = extra or {} + meta_data = extra.get('meta_data', {}) + meta_data.update({'cdn_url': 'http://www.test.com/object/%s' % + (object_name.replace(' ', '_'))}) + obj = Object(name=object_name, size=size, extra=extra, hash=None, + meta_data=meta_data, container=container, driver=self) + + self._containers[container.name]['objects'][object_name] = obj + return obj + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/trunk/libcloud/storage/drivers/google_storage.py b/trunk/libcloud/storage/drivers/google_storage.py new file mode 100644 index 0000000000..d0d7af602a --- /dev/null +++ b/trunk/libcloud/storage/drivers/google_storage.py @@ -0,0 +1,135 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import time +import httplib +import urllib +import copy +import base64 +import hmac + +from hashlib import sha1 +from email.utils import formatdate + +from libcloud.common.base import ConnectionUserAndKey + +from libcloud.storage.drivers.s3 import S3StorageDriver, S3Response +from libcloud.storage.drivers.s3 import S3RawResponse + +SIGNATURE_IDENTIFIER = 'GOOG1' + +# Docs are a lie. Actual namespace returned is different that the one listed in +# the docs. +AUTH_HOST = 'commondatastorage.googleapis.com' +API_VERSION = '2006-03-01' +NAMESPACE = 'http://doc.s3.amazonaws.com/%s' % (API_VERSION) + + +class GoogleStorageConnection(ConnectionUserAndKey): + """ + Repersents a single connection to the Google storage API endpoint. + """ + + host = AUTH_HOST + responseCls = S3Response + rawResponseCls = S3RawResponse + + def add_default_headers(self, headers): + date = formatdate(usegmt=True) + headers['Date'] = date + return headers + + def pre_connect_hook(self, params, headers): + signature = self._get_aws_auth_param(method=self.method, + headers=headers, + params=params, + expires=None, + secret_key=self.key, + path=self.action) + headers['Authorization'] = '%s %s:%s' % (SIGNATURE_IDENTIFIER, + self.user_id, signature) + return params, headers + + def _get_aws_auth_param(self, method, headers, params, expires, + secret_key, path='/'): + # TODO: Refactor and re-use in S3 driver + """ + Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID, + UTF-8-Encoding-Of( StringToSign ) ) ) ); + + StringToSign = HTTP-VERB + "\n" + + Content-MD5 + "\n" + + Content-Type + "\n" + + Date + "\n" + + CanonicalizedHeaders + + CanonicalizedResource; + """ + special_header_keys = ['content-md5', 'content-type', 'date'] + special_header_values = {} + extension_header_values = {} + + headers_copy = copy.deepcopy(headers) + for key, value in headers_copy.iteritems(): + if 
key.lower() in special_header_keys: + if key.lower() == 'date': + value = value.strip() + else: + value = value.lower().strip() + special_header_values[key.lower()] = value + elif key.lower().startswith('x-goog-'): + extension_header_values[key.lower()] = value.strip() + + if not 'content-md5' in special_header_values: + special_header_values['content-md5'] = '' + + if not 'content-type' in special_header_values: + special_header_values['content-type'] = '' + + keys_sorted = special_header_values.keys() + keys_sorted.sort() + + buf = [method] + for key in keys_sorted: + value = special_header_values[key] + buf.append(value) + string_to_sign = '\n'.join(buf) + + keys_sorted = extension_header_values.keys() + keys_sorted.sort() + + extension_header_string = [] + for key in keys_sorted: + value = extension_header_values[key] + extension_header_string.append('%s:%s' % (key, value)) + extension_header_string = '\n'.join(extension_header_string) + + values_to_sign = [] + for value in [string_to_sign, extension_header_string, path]: + if value: + values_to_sign.append(value) + + string_to_sign = '\n'.join(values_to_sign) + b64_hmac = base64.b64encode( + hmac.new(secret_key, string_to_sign, digestmod=sha1).digest() + ) + return b64_hmac + + +class GoogleStorageDriver(S3StorageDriver): + name = 'Google Storage' + connectionCls = GoogleStorageConnection + hash_type = 'md5' + namespace = NAMESPACE + supports_chunked_encoding = False diff --git a/trunk/libcloud/storage/drivers/ninefold.py b/trunk/libcloud/storage/drivers/ninefold.py new file mode 100644 index 0000000000..f7ee3f2708 --- /dev/null +++ b/trunk/libcloud/storage/drivers/ninefold.py @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.storage.providers import Provider +from libcloud.storage.drivers.atmos import AtmosDriver + +class NinefoldStorageDriver(AtmosDriver): + host = 'api.ninefold.com' + path = '/storage/v1.0' + + type = Provider.NINEFOLD + name = 'Ninefold' diff --git a/trunk/libcloud/storage/drivers/s3.py b/trunk/libcloud/storage/drivers/s3.py new file mode 100644 index 0000000000..ad00282f0c --- /dev/null +++ b/trunk/libcloud/storage/drivers/s3.py @@ -0,0 +1,512 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import time +import httplib +import urllib +import copy +import base64 +import hmac + +from hashlib import sha1 +from xml.etree.ElementTree import Element, SubElement, tostring + +from libcloud.utils import fixxpath, findtext, in_development_warning +from libcloud.utils import read_in_chunks +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.common.base import ConnectionUserAndKey, RawResponse +from libcloud.common.aws import AWSBaseResponse + +from libcloud.storage.base import Object, Container, StorageDriver +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError +from libcloud.common.types import LazyList + +# How long before the token expires +EXPIRATION_SECONDS = 15 * 60 + +S3_US_STANDARD_HOST = 's3.amazonaws.com' +S3_US_WEST_HOST = 's3-us-west-1.amazonaws.com' +S3_US_WEST_OREGON_HOST = 's3-us-west-2.amazonaws.com' +S3_EU_WEST_HOST = 's3-eu-west-1.amazonaws.com' +S3_AP_SOUTHEAST_HOST = 's3-ap-southeast-1.amazonaws.com' +S3_AP_NORTHEAST_HOST = 's3-ap-northeast-1.amazonaws.com' + +API_VERSION = '2006-03-01' +NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION) + + +class S3Response(AWSBaseResponse): + + valid_response_codes = [ httplib.NOT_FOUND, httplib.CONFLICT, + httplib.BAD_REQUEST ] + + def success(self): + i = int(self.status) + return i >= 200 and i <= 299 or i in self.valid_response_codes + + def parse_error(self): + if self.status in [ httplib.UNAUTHORIZED, httplib.FORBIDDEN ]: + raise InvalidCredsError(self.body) + elif self.status == httplib.MOVED_PERMANENTLY: + raise LibcloudError('This bucket is located in a different ' + + 'region. Please use the correct driver.', + driver=S3StorageDriver) + raise LibcloudError('Unknown error. 
Status code: %d' % (self.status), + driver=S3StorageDriver) + +class S3RawResponse(S3Response, RawResponse): + pass + +class S3Connection(ConnectionUserAndKey): + """ + Repersents a single connection to the EC2 Endpoint + """ + + host = 's3.amazonaws.com' + responseCls = S3Response + rawResponseCls = S3RawResponse + + def add_default_params(self, params): + expires = str(int(time.time()) + EXPIRATION_SECONDS) + params['AWSAccessKeyId'] = self.user_id + params['Expires'] = expires + return params + + def pre_connect_hook(self, params, headers): + params['Signature'] = self._get_aws_auth_param(method=self.method, + headers=headers, + params=params, + expires=params['Expires'], + secret_key=self.key, + path=self.action) + return params, headers + + def _get_aws_auth_param(self, method, headers, params, expires, + secret_key, path='/'): + """ + Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ) ); + + StringToSign = HTTP-VERB + "\n" + + Content-MD5 + "\n" + + Content-Type + "\n" + + Expires + "\n" + + CanonicalizedAmzHeaders + + CanonicalizedResource; + """ + special_header_keys = [ 'content-md5', 'content-type', 'date' ] + special_header_values = { 'date': '' } + amz_header_values = {} + + headers_copy = copy.deepcopy(headers) + for key, value in headers_copy.iteritems(): + if key.lower() in special_header_keys: + special_header_values[key.lower()] = value.lower().strip() + elif key.lower().startswith('x-amz-'): + amz_header_values[key.lower()] = value.strip() + + if not special_header_values.has_key('content-md5'): + special_header_values['content-md5'] = '' + + if not special_header_values.has_key('content-type'): + special_header_values['content-type'] = '' + + if expires: + special_header_values['date'] = str(expires) + + keys_sorted = special_header_values.keys() + keys_sorted.sort() + + buf = [ method ] + for key in keys_sorted: + value = special_header_values[key] + buf.append(value) + string_to_sign = 
'\n'.join(buf) + + keys_sorted = amz_header_values.keys() + keys_sorted.sort() + + amz_header_string = [] + for key in keys_sorted: + value = amz_header_values[key] + amz_header_string.append('%s:%s' % (key, value)) + amz_header_string = '\n'.join(amz_header_string) + + values_to_sign = [] + for value in [ string_to_sign, amz_header_string, path]: + if value: + values_to_sign.append(value) + + string_to_sign = '\n'.join(values_to_sign) + b64_hmac = base64.b64encode( + hmac.new(secret_key, string_to_sign, digestmod=sha1).digest() + ) + return b64_hmac + +class S3StorageDriver(StorageDriver): + name = 'Amazon S3 (standard)' + connectionCls = S3Connection + hash_type = 'md5' + supports_chunked_encoding = False + ex_location_name = '' + namespace = NAMESPACE + + def list_containers(self): + response = self.connection.request('/') + if response.status == httplib.OK: + containers = self._to_containers(obj=response.object, + xpath='Buckets/Bucket') + return containers + + raise LibcloudError('Unexpected status code: %s' % (response.status), + driver=self) + + def list_container_objects(self, container): + value_dict = { 'container': container } + return LazyList(get_more=self._get_more, value_dict=value_dict) + + def get_container(self, container_name): + # This is very inefficient, but afaik it's the only way to do it + containers = self.list_containers() + + try: + container = [ c for c in containers if c.name == container_name ][0] + except IndexError: + raise ContainerDoesNotExistError(value=None, driver=self, + container_name=container_name) + + return container + + def get_object(self, container_name, object_name): + # TODO: Figure out what is going on when the object or container does not exist + # - it seems that Amazon just keeps the connection open and doesn't return a + # response. 
+ container = self.get_container(container_name=container_name) + response = self.connection.request('/%s/%s' % (container_name, + object_name), + method='HEAD') + if response.status == httplib.OK: + obj = self._headers_to_object(object_name=object_name, + container=container, + headers=response.headers) + return obj + + raise ObjectDoesNotExistError(value=None, driver=self, + object_name=object_name) + + def create_container(self, container_name): + if self.ex_location_name: + root = Element('CreateBucketConfiguration') + child = SubElement(root, 'LocationConstraint') + child.text = self.ex_location_name + data = tostring(root) + else: + data = '' + + response = self.connection.request('/%s' % (container_name), + data=data, + method='PUT') + + if response.status == httplib.OK: + container = Container(name=container_name, extra=None, driver=self) + return container + elif response.status == httplib.CONFLICT: + raise InvalidContainerNameError(value='Container with this name ' + + 'already exists. 
The name must be unique among ' + 'all the containers in the system', + container_name=container_name, driver=self) + elif response.status == httplib.BAD_REQUEST: + raise InvalidContainerNameError(value='Container name contains ' + + 'invalid characters.', + container_name=container_name, + driver=self) + + raise LibcloudError('Unexpected status code: %s' % (response.status), + driver=self) + + def delete_container(self, container): + # Note: All the objects in the container must be deleted first + response = self.connection.request('/%s' % (container.name), + method='DELETE') + if response.status == httplib.NO_CONTENT: + return True + elif response.status == httplib.CONFLICT: + raise ContainerIsNotEmptyError(value='Container must be empty' + + ' before it can be deleted.', + container_name=container.name, + driver=self) + elif response.status == httplib.NOT_FOUND: + raise ContainerDoesNotExistError(value=None, + driver=self, + container_name=container.name) + + return False + + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): + container_name = self._clean_object_name(obj.container.name) + object_name = self._clean_object_name(obj.name) + + response = self.connection.request('/%s/%s' % (container_name, + object_name), + method='GET', + raw=True) + + return self._get_object(obj=obj, callback=self._save_object, + response=response, + callback_kwargs={'obj': obj, + 'response': response.response, + 'destination_path': destination_path, + 'overwrite_existing': overwrite_existing, + 'delete_on_failure': delete_on_failure}, + success_status_code=httplib.OK) + + def download_object_as_stream(self, obj, chunk_size=None): + container_name = self._clean_object_name(obj.container.name) + object_name = self._clean_object_name(obj.name) + response = self.connection.request('/%s/%s' % (container_name, + object_name), + method='GET', raw=True) + + return self._get_object(obj=obj, callback=read_in_chunks, + response=response, + 
callback_kwargs={ 'iterator': response.response, + 'chunk_size': chunk_size}, + success_status_code=httplib.OK) + + def upload_object(self, file_path, container, object_name, extra=None, + verify_hash=True, ex_storage_class=None): + upload_func = self._upload_file + upload_func_kwargs = { 'file_path': file_path } + + return self._put_object(container=container, object_name=object_name, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + extra=extra, file_path=file_path, + verify_hash=verify_hash, + storage_class=ex_storage_class) + + def upload_object_via_stream(self, iterator, container, object_name, + extra=None, ex_storage_class=None): + # Amazon S3 does not support chunked transfer encoding so the whole data + # is read into memory before uploading the object. + upload_func = self._upload_data + upload_func_kwargs = {} + + return self._put_object(container=container, object_name=object_name, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + extra=extra, iterator=iterator, + verify_hash=False, + storage_class=ex_storage_class) + + def delete_object(self, obj): + object_name = self._clean_object_name(name=obj.name) + response = self.connection.request('/%s/%s' % (obj.container.name, + object_name), + method='DELETE') + if response.status == httplib.NO_CONTENT: + return True + elif response.status == httplib.NOT_FOUND: + raise ObjectDoesNotExistError(value=None, driver=self, + object_name=obj.name) + + return False + + def _clean_object_name(self, name): + name = urllib.quote(name) + return name + + def _get_more(self, last_key, value_dict): + container = value_dict['container'] + params = {} + + if last_key: + params['marker'] = last_key + + response = self.connection.request('/%s' % (container.name), + params=params) + + if response.status == httplib.OK: + objects = self._to_objs(obj=response.object, + xpath='Contents', container=container) + is_truncated = response.object.findtext(fixxpath(xpath='IsTruncated', + 
namespace=self.namespace)).lower() + exhausted = (is_truncated == 'false') + + if (len(objects) > 0): + last_key = objects[-1].name + else: + last_key = None + return objects, last_key, exhausted + + raise LibcloudError('Unexpected status code: %s' % (response.status), + driver=self) + + def _put_object(self, container, object_name, upload_func, + upload_func_kwargs, extra=None, file_path=None, + iterator=None, verify_hash=True, storage_class=None): + headers = {} + extra = extra or {} + storage_class = storage_class or 'standard' + if storage_class not in ['standard', 'reduced_redundancy']: + raise ValueError('Invalid storage class value: %s' % (storage_class)) + + headers['x-amz-storage-class'] = storage_class.upper() + + container_name_cleaned = container.name + object_name_cleaned = self._clean_object_name(object_name) + content_type = extra.get('content_type', None) + meta_data = extra.get('meta_data', None) + + if meta_data: + for key, value in meta_data.iteritems(): + key = 'x-amz-meta-%s' % (key) + headers[key] = value + + request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned) + # TODO: Let the underlying exceptions bubble up and capture the SIGPIPE + # here. 
+ # SIGPIPE is thrown if the provided container does not exist or the user + # does not have correct permission + result_dict = self._upload_object(object_name=object_name, + content_type=content_type, + upload_func=upload_func, + upload_func_kwargs=upload_func_kwargs, + request_path=request_path, + request_method='PUT', + headers=headers, file_path=file_path, + iterator=iterator) + + response = result_dict['response'] + bytes_transferred = result_dict['bytes_transferred'] + headers = response.headers + response = response.response + server_hash = headers['etag'].replace('"', '') + + if (verify_hash and result_dict['data_hash'] != server_hash): + raise ObjectHashMismatchError( + value='MD5 hash checksum does not match', + object_name=object_name, driver=self) + elif response.status == httplib.OK: + obj = Object( + name=object_name, size=bytes_transferred, hash=server_hash, + extra=None, meta_data=meta_data, container=container, + driver=self) + + return obj + else: + raise LibcloudError('Unexpected status code, status_code=%s' % (response.status), + driver=self) + + def _to_containers(self, obj, xpath): + return [ self._to_container(element) for element in \ + obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))] + + def _to_objs(self, obj, xpath, container): + return [ self._to_obj(element, container) for element in \ + obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))] + + def _to_container(self, element): + extra = { + 'creation_date': findtext(element=element, xpath='CreationDate', + namespace=self.namespace) + } + + container = Container( + name=findtext(element=element, xpath='Name', + namespace=self.namespace), + extra=extra, + driver=self + ) + + return container + + def _headers_to_object(self, object_name, container, headers): + meta_data = { 'content_type': headers['content-type'] } + hash = headers['etag'].replace('"', '') + + obj = Object(name=object_name, size=headers['content-length'], + hash=hash, extra=None, + 
meta_data=meta_data, + container=container, + driver=self) + return obj + + def _to_obj(self, element, container): + owner_id = findtext(element=element, xpath='Owner/ID', + namespace=self.namespace) + owner_display_name = findtext(element=element, + xpath='Owner/DisplayName', + namespace=self.namespace) + meta_data = { 'owner': { 'id': owner_id, + 'display_name':owner_display_name }} + + obj = Object(name=findtext(element=element, xpath='Key', + namespace=self.namespace), + size=int(findtext(element=element, xpath='Size', + namespace=self.namespace)), + hash=findtext(element=element, xpath='ETag', + namespace=self.namespace).replace('"', ''), + extra=None, + meta_data=meta_data, + container=container, + driver=self + ) + + return obj + +class S3USWestConnection(S3Connection): + host = S3_US_WEST_HOST + +class S3USWestStorageDriver(S3StorageDriver): + name = 'Amazon S3 (us-west-1)' + connectionCls = S3USWestConnection + ex_location_name = 'us-west-1' + +class S3USWestOregonConnection(S3Connection): + host = S3_US_WEST_OREGON_HOST + +class S3USWestOregonStorageDriver(S3StorageDriver): + name = 'Amazon S3 (us-west-2)' + connectionCls = S3USWestOregonConnection + ex_location_name = 'us-west-2' + +class S3EUWestConnection(S3Connection): + host = S3_EU_WEST_HOST + +class S3EUWestStorageDriver(S3StorageDriver): + name = 'Amazon S3 (eu-west-1)' + connectionCls = S3EUWestConnection + ex_location_name = 'EU' + +class S3APSEConnection(S3Connection): + host = S3_AP_SOUTHEAST_HOST + +class S3APSEStorageDriver(S3StorageDriver): + name = 'Amazon S3 (ap-southeast-1)' + connectionCls = S3APSEConnection + ex_location_name = 'ap-southeast-1' + +class S3APNEConnection(S3Connection): + host = S3_AP_NORTHEAST_HOST + +class S3APNEStorageDriver(S3StorageDriver): + name = 'Amazon S3 (ap-northeast-1)' + connectionCls = S3APNEConnection + ex_location_name = 'ap-northeast-1' diff --git a/trunk/libcloud/storage/providers.py b/trunk/libcloud/storage/providers.py new file mode 100644 index 
0000000000..3b46e24bf4 --- /dev/null +++ b/trunk/libcloud/storage/providers.py @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import get_driver as get_provider_driver +from libcloud.storage.types import Provider + +DRIVERS = { + Provider.DUMMY: + ('libcloud.storage.drivers.dummy', 'DummyStorageDriver'), + Provider.CLOUDFILES_US: + ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUSStorageDriver'), + Provider.CLOUDFILES_UK: + ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUKStorageDriver'), + Provider.S3: + ('libcloud.storage.drivers.s3', 'S3StorageDriver'), + Provider.S3_US_WEST: + ('libcloud.storage.drivers.s3', 'S3USWestStorageDriver'), + Provider.S3_US_WEST_OREGON: + ('libcloud.storage.drivers.s3', 'S3USWestOregonStorageDriver'), + Provider.S3_EU_WEST: + ('libcloud.storage.drivers.s3', 'S3EUWestStorageDriver'), + Provider.S3_AP_SOUTHEAST: + ('libcloud.storage.drivers.s3', 'S3APSEStorageDriver'), + Provider.S3_AP_NORTHEAST: + ('libcloud.storage.drivers.s3', 'S3APNEStorageDriver'), + Provider.NINEFOLD: + ('libcloud.storage.drivers.ninefold', 'NinefoldStorageDriver'), + Provider.GOOGLE_STORAGE: + ('libcloud.storage.drivers.google_storage', 'GoogleStorageDriver') +} + +def 
get_driver(provider): + return get_provider_driver(DRIVERS, provider) diff --git a/trunk/libcloud/storage/types.py b/trunk/libcloud/storage/types.py new file mode 100644 index 0000000000..fd1300ae43 --- /dev/null +++ b/trunk/libcloud/storage/types.py @@ -0,0 +1,95 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.common.types import LibcloudError + +__all__ = ['Provider', + 'ContainerError', + 'ObjectError', + 'ContainerAlreadyExistsError', + 'ContainerDoesNotExistError', + 'ContainerIsNotEmptyError', + 'ObjectDoesNotExistError', + 'ObjectHashMismatchError', + 'InvalidContainerNameError'] + +class Provider(object): + """ + Defines for each of the supported providers + + @cvar DUMMY: Example provider + @cvar CLOUDFILES_US: CloudFiles US + @cvar CLOUDFILES_UK: CloudFiles UK + @cvar S3: Amazon S3 US + @cvar S3_US_WEST: Amazon S3 US West (Northern California) + @cvar S3_EU_WEST: Amazon S3 EU West (Ireland) + @cvar S3_AP_SOUTHEAST_HOST: Amazon S3 Asia South East (Singapore) + @cvar S3_AP_NORTHEAST_HOST: Amazon S3 Asia South East (Tokyo) + @cvar NINEFOLD: Ninefold + @cvar GOOGLE_STORAGE Google Storage + @cvar: S3_US_WEST_OREGON: Amazon S3 US West 2 (Oregon) + """ + DUMMY = 0 + CLOUDFILES_US = 1 + CLOUDFILES_UK = 2 + S3 = 3 + S3_US_WEST = 4 + S3_EU_WEST = 5 + S3_AP_SOUTHEAST = 6 + S3_AP_NORTHEAST = 7 + NINEFOLD = 8 + GOOGLE_STORAGE = 9 + S3_US_WEST_OREGON = 10 + +class ContainerError(LibcloudError): + error_type = 'ContainerError' + + def __init__(self, value, driver, container_name): + self.container_name = container_name + super(ContainerError, self).__init__(value=value, driver=driver) + + def __str__(self): + return ('<%s in %s, container=%s, value=%s>' % + (self.error_type, repr(self.driver), + self.container_name, self.value)) + +class ObjectError(LibcloudError): + error_type = 'ContainerError' + + def __init__(self, value, driver, object_name): + self.object_name = object_name + super(ObjectError, self).__init__(value=value, driver=driver) + + def __str__(self): + return '<%s in %s, value=%s, object = %s>' % (self.error_type, repr(self.driver), + self.value, self.object_name) + +class ContainerAlreadyExistsError(ContainerError): + error_type = 'ContainerAlreadyExistsError' + +class ContainerDoesNotExistError(ContainerError): + error_type = 
'ContainerDoesNotExistError' + +class ContainerIsNotEmptyError(ContainerError): + error_type = 'ContainerIsNotEmptyError' + +class ObjectDoesNotExistError(ObjectError): + error_type = 'ObjectDoesNotExistError' + +class ObjectHashMismatchError(ObjectError): + error_type = 'ObjectHashMismatchError' + +class InvalidContainerNameError(ContainerError): + error_type = 'InvalidContainerNameError' diff --git a/trunk/libcloud/types.py b/trunk/libcloud/types.py new file mode 100644 index 0000000000..18aebe4e3b --- /dev/null +++ b/trunk/libcloud/types.py @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from libcloud.common.types import LibcloudError, MalformedResponseError
from libcloud.common.types import InvalidCredsError, InvalidCredsException
from libcloud.compute.types import Provider, NodeState, DeploymentError
from libcloud.compute.types import DeploymentException

from libcloud.utils import deprecated_warning

# Names re-exported for backwards compatibility with the pre-0.5 layout.
__all__ = ["LibcloudError", "MalformedResponseError",
           "InvalidCredsError", "InvalidCredsException",
           "Provider", "NodeState", "DeploymentError",
           "DeploymentException"
           ]

# Importing this module at all means the caller is using a deprecated path.
deprecated_warning(__name__)


# ---------------------------------------------------------------------------
# new file: trunk/libcloud/utils.py
# ---------------------------------------------------------------------------

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import mimetypes
import warnings

try:
    from httplib import HTTPResponse            # Python 2
except ImportError:                             # pragma: no cover
    from http.client import HTTPResponse        # Python 3

SHOW_DEPRECATION_WARNING = True
SHOW_IN_DEVELOPMENT_WARNING = True
OLD_API_REMOVE_VERSION = '0.7.0'
CHUNK_SIZE = 8096

# Types treated as "file-like" by read_in_chunks.  The 'file' builtin does
# not exist on Python 3.
try:
    _FILE_TYPES = (file, HTTPResponse)
except NameError:                               # pragma: no cover
    _FILE_TYPES = (HTTPResponse,)


def read_in_chunks(iterator, chunk_size=None, fill_size=False):
    """
    Return a generator which yields data in chunks.

    @type iterator: C{Iterator}
    @param iterator: An object which implements an iterator interface
                     or a File like object with read method.

    @type chunk_size: C{int}
    @param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)

    @type fill_size: C{bool}
    @param fill_size: If True, make sure chunks are chunk_size in length
                      (except for last chunk).
    """
    chunk_size = chunk_size or CHUNK_SIZE

    if isinstance(iterator, _FILE_TYPES):
        # File-like object (including httplib.HTTPResponse): read in blocks.
        get_data = iterator.read
        args = (chunk_size, )
    else:
        # Plain iterator: pull one item per call.  The next() builtin works
        # with both Python 2 (.next) and Python 3 (.__next__) iterators.
        source = iter(iterator)

        def get_data():
            return next(source)
        args = ()

    data = ''
    empty = False

    while not empty or len(data) > 0:
        if not empty:
            try:
                chunk = str(get_data(*args))
                if len(chunk) > 0:
                    data += chunk
                else:
                    empty = True
            except StopIteration:
                empty = True

        if len(data) == 0:
            # Nothing left to yield; terminate the generator.  (The old
            # 'raise StopIteration' here turns into a RuntimeError under
            # PEP 479 / Python 3.7+.)
            return

        if fill_size:
            if empty or len(data) >= chunk_size:
                yield data[:chunk_size]
                data = data[chunk_size:]
        else:
            yield data
            data = ''


def exhaust_iterator(iterator):
    """
    Exhaust an iterator and return all data returned by it.

    @type iterator: C{Iterator}
    @param iterator: An object which implements an iterator interface
                     or a File like object with read method.

    @rtype C{str}
    @return Data returned by the iterator.
    """
    chunks = []

    while True:
        try:
            chunk = str(next(iterator))
        except StopIteration:
            break

        if len(chunk) == 0:
            # Historical behaviour: an empty chunk terminates iteration even
            # if the iterator could yield more items afterwards.
            break

        chunks.append(chunk)

    return ''.join(chunks)


def guess_file_mime_type(file_path):
    """Return a (mimetype, encoding) guess based on the file name."""
    filename = os.path.basename(file_path)
    (mimetype, encoding) = mimetypes.guess_type(filename)
    return mimetype, encoding


def deprecated_warning(module):
    """Emit a DeprecationWarning for an old-style module path."""
    if SHOW_DEPRECATION_WARNING:
        warnings.warn('This path has been deprecated and the module'
                      ' is now available at "libcloud.compute.%s".'
                      ' This path will be fully removed in libcloud %s.' %
                      (module, OLD_API_REMOVE_VERSION),
                      category=DeprecationWarning)


def in_development_warning(module):
    """Emit a FutureWarning for a module which is still in development."""
    if SHOW_IN_DEVELOPMENT_WARNING:
        # Grammar fix in the user-facing message ("your are" -> "you are").
        warnings.warn('The module %s is in development and you are advised '
                      'against using it in production.' % (module),
                      category=FutureWarning)


def str2dicts(data):
    """
    Create a list of dictionaries from a whitespace and newline delimited
    text.

    For example, this:
    cpu 1100
    ram 640

    cpu 2200
    ram 1024

    becomes:
    [{'cpu': '1100', 'ram': '640'}, {'cpu': '2200', 'ram': '1024'}]
    """
    list_data = []
    list_data.append({})
    d = list_data[-1]

    lines = data.split('\n')
    for line in lines:
        line = line.strip()

        if not line:
            # A blank line starts a new record.
            d = {}
            list_data.append(d)
            d = list_data[-1]
            continue

        whitespace = line.find(' ')

        if whitespace == -1:
            # Bug fix: str.find() returns -1 (not 0) when no separator is
            # present; the old 'if not whitespace' test never skipped such
            # lines and produced bogus key/value pairs.
            continue

        key = line[0:whitespace]
        value = line[whitespace + 1:]
        d.update({key: value})

    list_data = [value for value in list_data if value != {}]
    return list_data


def str2list(data):
    """
    Create a list of values from a whitespace and newline delimited text
    (keys are ignored).

    For example, this:
    ip 1.2.3.4
    ip 1.2.3.5
    ip 1.2.3.6

    becomes:
    ['1.2.3.4', '1.2.3.5', '1.2.3.6']
    """
    list_data = []

    for line in data.split('\n'):
        line = line.strip()

        if not line:
            continue

        try:
            splitted = line.split(' ')
            # key = splitted[0]
            value = splitted[1]
        except Exception:
            # Lines without a value part are silently skipped.
            continue

        list_data.append(value)

    return list_data


def dict2str(data):
    """
    Create a string with a whitespace and newline delimited text from a
    dictionary.

    For example, this:
    {'cpu': '1100', 'ram': '640', 'smp': 'auto'}

    becomes:
    cpu 1100
    ram 640
    smp auto

    cpu 2200
    ram 1024
    """
    result = ''
    for k in data:
        if data[k] is not None:
            result += '%s %s\n' % (str(k), str(data[k]))
        else:
            result += '%s\n' % str(k)

    return result


def fixxpath(xpath, namespace=None):
    # ElementTree wants namespaces in its xpaths, so here we add them.
    if not namespace:
        return xpath

    return '/'.join(['{%s}%s' % (namespace, e) for e in xpath.split('/')])


def findtext(element, xpath, namespace=None):
    """Namespace-aware wrapper around Element.findtext."""
    return element.findtext(fixxpath(xpath=xpath, namespace=namespace))


def findattr(element, xpath, namespace=None):
    # NOTE(review): this is currently identical to findtext (it reads
    # element text, not an XML attribute) — presumably fine for current
    # callers; confirm before changing.
    return element.findtext(fixxpath(xpath=xpath, namespace=namespace))


def findall(element, xpath, namespace=None):
    """Namespace-aware wrapper around Element.findall."""
    return element.findall(fixxpath(xpath=xpath, namespace=namespace))


def reverse_dict(dictionary):
    """Return a new dict with keys and values swapped."""
    # items() instead of iteritems() keeps this Python 3 compatible with the
    # same result on Python 2.
    return dict([(value, key) for key, value in dictionary.items()])


def get_driver(drivers, provider):
    """
    Get a driver.

    @param drivers: Dictionary containing valid providers.
    @param provider: Id of provider to get driver
    @type provider: L{libcloud.types.Provider}
    """
    if provider in drivers:
        mod_name, driver_name = drivers[provider]
        _mod = __import__(mod_name, globals(), locals(), [driver_name])
        return getattr(_mod, driver_name)

    raise AttributeError('Provider %s does not exist' % (provider))


def merge_valid_keys(params, valid_keys, extra):
    """
    Merge valid keys from extra into params dictionary and return
    dictionary with keys which have been merged.

    Note: params is modified in place.
    """
    merged = {}
    if not extra:
        return merged

    for key in valid_keys:
        if key in extra:
            params[key] = extra[key]
            merged[key] = extra[key]

    return merged


def get_new_obj(obj, klass, attributes):
    """
    Pass attributes from the existing object 'obj' and attributes
    dictionary to a 'klass' constructor.
    Attributes from 'attributes' dictionary are only passed to the
    constructor if they are not None.
    """
    kwargs = {}
    for key, value in obj.__dict__.items():
        # Copy containers so the new object does not share mutable state
        # with the old one.
        if isinstance(value, dict):
            kwargs[key] = value.copy()
        elif isinstance(value, (tuple, list)):
            kwargs[key] = value[:]
        else:
            kwargs[key] = value

    for key, value in attributes.items():
        if value is None:
            continue

        if isinstance(value, dict):
            # Merge dict attributes key by key, again skipping None values.
            kwargs_value = kwargs.get(key, {})
            for inner_key, inner_value in value.items():
                if inner_value is None:
                    continue

                kwargs_value[inner_key] = inner_value
            kwargs[key] = kwargs_value
        else:
            kwargs[key] = value

    return klass(**kwargs)


# ---------------------------------------------------------------------------
# new file: trunk/setup.py
# ---------------------------------------------------------------------------

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import doctest

from distutils.core import setup
from distutils.core import Command
from unittest import TextTestRunner, TestLoader
from glob import glob
from subprocess import call
from os.path import splitext, basename, join as pjoin

import libcloud.utils
# setup.py touches modules through deprecated paths; silence the warnings.
libcloud.utils.SHOW_DEPRECATION_WARNING = False


HTML_VIEWSOURCE_BASE = 'https://svn.apache.org/viewvc/libcloud/trunk'
PROJECT_BASE_DIR = 'http://libcloud.apache.org'
# Directories (relative to the repo root) scanned for test_*.py files.
TEST_PATHS = ['test', 'test/common', 'test/compute', 'test/storage',
              'test/loadbalancer', 'test/dns']
# Modules whose doctests are added to the 'test' command's suite.
DOC_TEST_MODULES = ['libcloud.compute.drivers.dummy',
                    'libcloud.storage.drivers.dummy',
                    'libcloud.dns.drivers.dummy']


def read_version_string():
    """Return libcloud.__version__ without permanently changing sys.path."""
    sys.path.insert(0, pjoin(os.getcwd()))
    from libcloud import __version__
    version = __version__
    sys.path.pop(0)
    return version


class TestCommand(Command):
    """'setup.py test': run the unit test suite plus selected doctests."""
    description = "run test suite"
    user_options = []

    def initialize_options(self):
        THIS_DIR = os.path.abspath(os.path.split(__file__)[0])
        sys.path.insert(0, THIS_DIR)
        for test_path in TEST_PATHS:
            sys.path.insert(0, pjoin(THIS_DIR, test_path))
        self._dir = os.getcwd()

    def finalize_options(self):
        pass

    def run(self):
        try:
            import mock
            mock  # silence pyflakes
        except ImportError:
            # Single-argument print() is valid on both Python 2 and 3.
            # (Also fixes the garbled "mock is library is needed" wording.)
            print('Missing "mock" library. The mock library is needed '
                  'to run the tests. You can install it using pip: '
                  'pip install mock')
            sys.exit(1)

        status = self._run_tests()
        sys.exit(status)

    def _run_tests(self):
        # Tests read credentials from test/secrets.py, which is not under
        # version control.
        secrets = pjoin(self._dir, 'test', 'secrets.py')
        if not os.path.isfile(secrets):
            print("Missing %s" % (secrets))
            print("Maybe you forgot to copy it from -dist:")
            print("  cp test/secrets.py-dist test/secrets.py")
            sys.exit(1)

        pre_python26 = (sys.version_info[0] == 2
                        and sys.version_info[1] < 6)
        if pre_python26:
            missing = []
            # test for dependencies
            try:
                import simplejson
                simplejson  # silence pyflakes
            except ImportError:
                missing.append("simplejson")

            try:
                import ssl
                ssl  # silence pyflakes
            except ImportError:
                missing.append("ssl")

            if missing:
                print("Missing dependencies: %s" % ", ".join(missing))
                sys.exit(1)

        # Convert file paths into dotted module names for TestLoader.
        testfiles = []
        for test_path in TEST_PATHS:
            for t in glob(pjoin(self._dir, test_path, 'test_*.py')):
                testfiles.append('.'.join(
                    [test_path.replace('/', '.'),
                     splitext(basename(t))[0]]))

        tests = TestLoader().loadTestsFromNames(testfiles)

        for test_module in DOC_TEST_MODULES:
            tests.addTests(doctest.DocTestSuite(test_module))

        t = TextTestRunner(verbosity=2)
        res = t.run(tests)
        return not res.wasSuccessful()


class Pep8Command(Command):
    """'setup.py pep8': run the pep8 style checker over the code base."""
    description = "run pep8 script"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            import pep8
            pep8  # silence pyflakes
        except ImportError:
            print('Missing "pep8" library. You can install it using pip: '
                  'pip install pep8')
            sys.exit(1)

        cwd = os.getcwd()
        retcode = call(('pep8 %s/libcloud/ %s/test/' %
                        (cwd, cwd)).split(' '))
        sys.exit(retcode)


class ApiDocsCommand(Command):
    """'setup.py apidocs': generate API documentation with pydoctor."""
    description = "generate API documentation"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        os.system(
            'pydoctor'
            ' --add-package=libcloud'
            ' --project-name=libcloud'
            ' --make-html'
            ' --html-viewsource-base="%s"'
            ' --project-base-dir=`pwd`'
            ' --project-url="%s"'
            % (HTML_VIEWSOURCE_BASE, PROJECT_BASE_DIR)
        )


class CoverageCommand(Command):
    """'setup.py coverage': run the tests under coverage, emit HTML report."""
    description = "run test suite and generate coverage report"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        import coverage
        cov = coverage.coverage(config_file='.coveragerc')
        cov.start()

        tc = TestCommand(self.distribution)
        tc._run_tests()

        cov.stop()
        cov.save()
        cov.html_report()


# pre-2.6 will need the ssl PyPI package
pre_python26 = (sys.version_info[0] == 2 and sys.version_info[1] < 6)

setup(
    name='apache-libcloud',
    version=read_version_string(),
    description='A unified interface into many cloud server providers',
    author='Apache Software Foundation',
    author_email='dev@libcloud.apache.org',
    # Conditional expression instead of indexing a tuple with a bool.
    requires=(['ssl', 'simplejson'] if pre_python26 else []),
    packages=[
        'libcloud',
        'libcloud.common',
        'libcloud.compute',
        'libcloud.compute.drivers',
        'libcloud.storage',
        'libcloud.storage.drivers',
        'libcloud.drivers',
        'libcloud.loadbalancer',
        'libcloud.loadbalancer.drivers',
        'libcloud.dns',
        'libcloud.dns.drivers'
    ],
    package_dir={
        'libcloud': 'libcloud',
    },
    package_data={
        'libcloud': ['data/*.json']
    },
    license='Apache License (2.0)',
    url='http://libcloud.apache.org/',
    cmdclass={
        'test': TestCommand,
        'pep8': Pep8Command,
        'apidocs': ApiDocsCommand,
        'coverage': CoverageCommand
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
)


# ---------------------------------------------------------------------------
# new file: trunk/test/__init__.py
# ---------------------------------------------------------------------------

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import random
import unittest

from cStringIO import StringIO
from urllib2 import urlparse
from cgi import parse_qs

# Default headers for XML responses returned by the mock HTTP classes.
XML_HEADERS = {'content-type': 'application/xml'}


class LibcloudTestCase(unittest.TestCase):
    """TestCase which records which mock URLs and methods were exercised."""

    def __init__(self, *args, **kwargs):
        self._visited_urls = []
        self._executed_mock_methods = []
        super(LibcloudTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        self._visited_urls = []
        self._executed_mock_methods = []

    def _add_visited_url(self, url):
        self._visited_urls.append(url)

    def _add_executed_mock_method(self, method_name):
        self._executed_mock_methods.append(method_name)

    def assertExecutedMethodCount(self, expected):
        actual = len(self._executed_mock_methods)
        self.assertEqual(actual, expected,
                         'expected %d, but %d mock methods were executed'
                         % (expected, actual))


class multipleresponse(object):
    """
    A decorator that allows MockHttp objects to return multi responses
    """
    count = 0
    func = None

    def __init__(self, f):
        self.func = f

    def __call__(self, *args, **kwargs):
        # NOTE(review): passes the wrapped function's class as the first
        # argument and never resets count — odd, but preserved as-is since
        # existing tests rely on this decorator.
        ret = self.func(self.func.__class__, *args, **kwargs)
        response = ret[self.count]
        self.count = self.count + 1
        return response


class MockResponse(object):
    """
    A mock HTTPResponse
    """
    headers = {}
    body = StringIO()
    status = 0
    reason = ''
    version = 11

    def __init__(self, status, body, headers=None, reason=None):
        self.status = status
        self.body = StringIO(body)
        self.headers = headers or self.headers
        self.reason = reason or self.reason

    def read(self, *args, **kwargs):
        return self.body.read(*args, **kwargs)

    def getheader(self, name, *args, **kwargs):
        return self.headers.get(name, *args, **kwargs)

    def getheaders(self):
        return self.headers.items()

    def msg(self):
        # Bug fix: 'raise NotImplemented' raises a TypeError because
        # NotImplemented is not an exception class; NotImplementedError
        # is what was meant.
        raise NotImplementedError


class BaseMockHttpObject(object):
    """Shared helper which maps a request path to a mock method name."""

    def _get_method_name(self, type, use_param, qs, path):
        # '/foo/bar.json' -> '_foo_bar_json' (plus optional type/param bits).
        meth_name = path.replace('/', '_').replace('.', '_').replace('-', '_')
        if type:
            # Consistency fix: use the 'type'/'use_param' arguments instead
            # of self.type/self.use_param.  All existing callers pass
            # type=self.type and use_param=self.use_param (or False), so
            # behaviour is unchanged, but the parameters now actually matter.
            meth_name = '%s_%s' % (meth_name, type)
        if use_param:
            param = qs[use_param][0].replace('.', '_').replace('-', '_')
            meth_name = '%s_%s' % (meth_name, param)
        return meth_name


class MockHttp(BaseMockHttpObject):
    """
    A mock HTTP client/server suitable for testing purposes. This replaces
    `HTTPConnection` by implementing its API and returning a mock response.

    Define methods by request path, replacing slashes (/) with underscores (_).
    Each of these mock methods should return a tuple of:

    (int status, str body, dict headers, str reason)

    >>> mock = MockHttp('localhost', 8080)
    >>> mock.request('GET', '/example/')
    >>> response = mock.getresponse()
    >>> response.body.read()
    'Hello World!'
    >>> response.status
    200
    >>> response.getheaders()
    [('X-Foo', 'libcloud')]
    >>> MockHttp.type = 'fail'
    >>> mock.request('GET', '/example/')
    >>> response = mock.getresponse()
    >>> response.body.read()
    'Oh Noes!'
    >>> response.status
    403
    >>> response.getheaders()
    [('X-Foo', 'fail')]

    """
    responseCls = MockResponse
    host = None
    port = None
    response = None

    type = None
    use_param = None  # will use this param to namespace the request function

    test = None  # TestCase instance which is using this mock

    def __init__(self, host, port, *args, **kwargs):
        self.host = host
        self.port = port

    def request(self, method, url, body=None, headers=None, raw=False):
        # Find a method we can use for this request
        parsed = urlparse.urlparse(url)
        scheme, netloc, path, params, query, fragment = parsed
        qs = parse_qs(query)
        if path.endswith('/'):
            path = path[:-1]
        meth_name = self._get_method_name(type=self.type,
                                          use_param=self.use_param,
                                          qs=qs, path=path)
        meth = getattr(self, meth_name)

        if self.test and isinstance(self.test, LibcloudTestCase):
            self.test._add_visited_url(url=url)
            self.test._add_executed_mock_method(method_name=meth_name)

        status, body, headers, reason = meth(method, url, body, headers)
        self.response = self.responseCls(status, body, headers, reason)

    def getresponse(self):
        return self.response

    def connect(self):
        """
        Can't think of anything to mock here.
        """
        pass

    def close(self):
        pass

    # Mock request/response example
    def _example(self, method, url, body, headers):
        """
        Return a simple message and header, regardless of input.
        """
        return (httplib.OK, 'Hello World!', {'X-Foo': 'libcloud'},
                httplib.responses[httplib.OK])

    def _example_fail(self, method, url, body, headers):
        return (httplib.FORBIDDEN, 'Oh Noes!', {'X-Foo': 'fail'},
                httplib.responses[httplib.FORBIDDEN])


class MockHttpTestCase(MockHttp, unittest.TestCase):
    # Same as the MockHttp class, but you can also use assertions in the
    # classes which inherit from this one.
    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self)

        if kwargs.get('host', None) and kwargs.get('port', None):
            MockHttp.__init__(self, *args, **kwargs)

    def runTest(self):
        pass


class StorageMockHttp(MockHttp):
    # No-op implementations of the raw-upload interface used by the storage
    # drivers.
    def putrequest(self, method, action):
        pass

    def putheader(self, key, value):
        pass

    def endheaders(self):
        pass

    def send(self, data):
        pass


class MockRawResponse(BaseMockHttpObject):
    """
    Mock RawResponse object suitable for testing.
    """

    type = None
    responseCls = MockResponse

    def __init__(self, connection):
        super(MockRawResponse, self).__init__()
        self._data = []
        self._current_item = 0

        self._status = None
        self._response = None
        self._headers = None
        self._reason = None
        self.connection = connection

    def next(self):
        if self._current_item == len(self._data):
            raise StopIteration

        value = self._data[self._current_item]
        self._current_item += 1
        return value

    def _generate_random_data(self, size):
        # Build a list of decimal-digit strings totalling at least 'size'
        # characters.
        data = []
        current_size = 0
        while current_size < size:
            value = str(random.randint(0, 9))
            value_size = len(value)
            data.append(value)
            current_size += value_size

        return data

    @property
    def response(self):
        return self._get_response_if_not_availale()

    @property
    def status(self):
        self._get_response_if_not_availale()
        return self._status

    @property
    def headers(self):
        self._get_response_if_not_availale()
        return self._headers

    @property
    def reason(self):
        self._get_response_if_not_availale()
        return self._reason

    def _get_response_if_not_availale(self):
        # NOTE(review): name typo ('availale') kept so external callers do
        # not break.  Also note the lazy-build branch returns 'self' rather
        # than self._response — presumably intentional so the first
        # '.response' access yields this raw-response object; confirm before
        # changing.
        if not self._response:
            meth_name = self._get_method_name(type=self.type,
                                              use_param=False, qs=None,
                                              path=self.connection.action)
            meth = getattr(self, meth_name)
            result = meth(self.connection.method, None, None, None)
            self._status, self._body, self._headers, self._reason = result
            self._response = self.responseCls(self._status, self._body,
                                              self._headers, self._reason)
            return self
        return self._response


if __name__ == "__main__":
    import doctest
    doctest.testmod()


# ---------------------------------------------------------------------------
# new file: trunk/test/common/__init__.py (truncated in this view)
# ---------------------------------------------------------------------------

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.
import httplib
import sys
import unittest
import urlparse

try:
    import simplejson as json
except ImportError:
    import json

try:
    parse_qsl = urlparse.parse_qsl
except AttributeError:
    import cgi
    parse_qsl = cgi.parse_qsl

from libcloud.common.cloudstack import CloudStackConnection, CloudStackResponse
from libcloud.common.types import MalformedResponseError

from test import MockHttpTestCase

# Shared between test_async_request_delayed and CloudStackMockHttp below:
# the number of polls the mock reports a job as still pending before it
# finally succeeds.
async_delay = 0


class CloudStackMockDriver(object):
    # Minimal stand-in for a CloudStack driver; only the attributes the
    # connection reads are provided.
    host = 'nonexistant.'
    path = '/path'
    async_poll_frequency = 0

    name = 'fake'

    async_delay = 0


class CloudStackCommonTest(unittest.TestCase):
    def setUp(self):
        CloudStackConnection.conn_classes = (None, CloudStackMockHttp)
        self.connection = CloudStackConnection('apikey', 'secret',
                                               host=CloudStackMockDriver.host)
        self.driver = self.connection.driver = CloudStackMockDriver()

    def test_sync_request_bad_response(self):
        # A body not wrapped in '<command>response' must surface as
        # MalformedResponseError.
        self.driver.path = '/bad/response'
        try:
            self.connection._sync_request('fake')
        except MalformedResponseError:
            return
        self.fail('MalformedResponseError was not raised')

    def test_sync_request(self):
        self.driver.path = '/sync'
        self.connection._sync_request('fake')

    def test_async_request_successful(self):
        self.driver.path = '/async/success'
        result = self.connection._async_request('fake')
        self.assertEqual(result, {'fake': 'result'})

    def test_async_request_unsuccessful(self):
        self.driver.path = '/async/fail'
        try:
            self.connection._async_request('fake')
        except Exception:
            # Any exception type is acceptable here; we only require that
            # a failed job does not complete successfully. (Was a bare
            # "except:", which also swallowed SystemExit and
            # KeyboardInterrupt.)
            return
        self.fail('Failed async job did not raise an exception')

    def test_async_request_delayed(self):
        global async_delay
        self.driver.path = '/async/delayed'
        async_delay = 2
        self.connection._async_request('fake')
        # The mock decrements async_delay on every "still pending" poll;
        # reaching zero proves the connection kept polling to completion.
        self.assertEqual(async_delay, 0)

    def test_signature_algorithm(self):
        # Known-good signatures computed with the 'fnord'/'abracadabra'
        # credentials; the third case ('George+Ringo') exercises correct
        # URL quoting of '+'.
        cases = [
            (
                {
                    'command': 'listVirtualMachines'
                }, 'z/a9Y7J52u48VpqIgiwaGUMCso0='
            ), (
                {
                    'command': 'deployVirtualMachine',
                    'name': 'fred',
                    'displayname': 'George',
                    'serviceofferingid': 5,
                    'templateid': 17,
                    'zoneid': 23,
                    'networkids': 42
                }, 'gHTo7mYmadZ+zluKHzlEKb1i/QU='
            ), (
                {
                    'command': 'deployVirtualMachine',
                    'name': 'fred',
                    'displayname': 'George+Ringo',
                    'serviceofferingid': 5,
                    'templateid': 17,
                    'zoneid': 23,
                    'networkids': 42
                }, 'tAgfrreI1ZvWlWLClD3gu4+aKv4='
            )
        ]

        connection = CloudStackConnection('fnord', 'abracadabra')
        for case in cases:
            params = connection.add_default_params(case[0])
            self.assertEqual(connection._make_signature(params), case[1])


class CloudStackMockHttp(MockHttpTestCase):
    def _response(self, status, result, response):
        # NOTE(review): the raw result dict doubles as the headers element
        # of the mock tuple; existing assertions depend on this shape.
        return (status, json.dumps(result), result, response)

    def _check_request(self, url):
        """
        Assert the standard CloudStack query parameters are present and
        return the parsed query dict.
        """
        url = urlparse.urlparse(url)
        query = dict(parse_qsl(url.query))

        self.assertTrue('apiKey' in query)
        self.assertTrue('command' in query)
        self.assertTrue('response' in query)
        self.assertTrue('signature' in query)

        self.assertTrue(query['response'] == 'json')

        return query

    def _bad_response(self, method, url, body, headers):
        # Deliberately not wrapped in a '<command>response' key; the
        # connection must reject this body as malformed.
        self._check_request(url)
        result = {'success': True}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])

    def _sync(self, method, url, body, headers):
        query = self._check_request(url)
        result = {query['command'].lower() + 'response': {}}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])

    def _async_success(self, method, url, body, headers):
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            # jobstatus 1 == job finished successfully
            result = {
                query['command'].lower() + 'response': {
                    'jobstatus': 1,
                    'jobresult': {'fake': 'result'}
                }
            }
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])

    def _async_fail(self, method, url, body, headers):
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            # jobstatus 2 == job failed
            result = {
                query['command'].lower() + 'response': {
                    'jobstatus': 2,
                    'jobresult': {'fake': 'failresult'}
                }
            }
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])

    def _async_delayed(self, method, url, body, headers):
        global async_delay

        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            if async_delay == 0:
                result = {
                    query['command'].lower() + 'response': {
                        'jobstatus': 1,
                        'jobresult': {'fake': 'result'}
                    }
                }
            else:
                # Still pending: report jobstatus 0 and count down so the
                # caller has to poll again.
                result = {
                    query['command'].lower() + 'response': {
                        'jobstatus': 0,
                    }
                }
                async_delay -= 1
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result,
                              httplib.responses[httplib.OK])


if __name__ == '__main__':
    sys.exit(unittest.main())
from libcloud.compute.base import Node, NodeImage, NodeLocation
from libcloud.pricing import get_pricing


class TestCaseMixin(object):
    """
    Generic assertions shared by the compute driver test cases. The mixing
    TestCase must provide ``self.driver`` and may override the two class
    flags below to opt out of location/pricing checks.
    """
    should_list_locations = True
    should_have_pricing = False

    def test_list_nodes_response(self):
        nodes = self.driver.list_nodes()
        self.assertTrue(isinstance(nodes, list))
        for node in nodes:
            self.assertTrue(isinstance(node, Node))

    def test_list_sizes_response(self):
        sizes = self.driver.list_sizes()
        self.assertTrue(isinstance(sizes, list))
        # Previously only sizes[0] was inspected (raising a bare
        # IndexError on an empty list); assert non-empty and validate
        # every returned size instead.
        self.assertTrue(len(sizes) > 0)
        for size in sizes:
            # Check that size values are ints or None
            self.assertTrue(size.ram is None or isinstance(size.ram, int))
            self.assertTrue(size.disk is None or isinstance(size.disk, int))
            self.assertTrue(size.bandwidth is None or
                            isinstance(size.bandwidth, int))

    def test_list_images_response(self):
        images = self.driver.list_images()
        self.assertTrue(isinstance(images, list))
        for image in images:
            self.assertTrue(isinstance(image, NodeImage))

    def test_list_locations_response(self):
        if not self.should_list_locations:
            return None

        locations = self.driver.list_locations()
        self.assertTrue(isinstance(locations, list))
        for dc in locations:
            self.assertTrue(isinstance(dc, NodeLocation))

    def test_create_node_response(self):
        # should return a node object
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        node = self.driver.create_node(name='node-name',
                                       image=image,
                                       size=size)
        self.assertTrue(isinstance(node, Node))

    def test_destroy_node_response(self):
        # should return a boolean (the original comment claimed a node
        # object, but the assertion below checks for bool)
        node = self.driver.list_nodes()[0]
        ret = self.driver.destroy_node(node)
        self.assertTrue(isinstance(ret, bool))

    def test_reboot_node_response(self):
        # should return a boolean
        node = self.driver.list_nodes()[0]
        ret = self.driver.reboot_node(node)
        self.assertTrue(isinstance(ret, bool))

    def test_get_pricing_success(self):
        if not self.should_have_pricing:
            return None

        driver_type = 'compute'
        try:
            get_pricing(driver_type=driver_type,
                        driver_name=self.driver.api_name)
        except KeyError:
            self.fail("No {driver_type!r} pricing info for {driver}.".format(
                driver=self.driver.__class__.__name__,
                driver_type=driver_type,
            ))


if __name__ == "__main__":
    import doctest
    doctest.testmod()
"2009/04/20 15:46:34 -0700"}, {"public": true, "id": "b181033f-aea7-4e6c-8bb4-11169775c0f8", "description": "Ubuntu 9.04 64bit", "created": "2010/01/26 11:31:19 -0800"}, {"public": true, "id": "b5371c5a-9da2-43ee-a745-99a4723f624c", "description": "ArchLinux 2009.08 64bit", "created": "2010/02/13 18:07:01 -0800"}, {"public": true, "id": "a00baa8f-b5d0-4815-8238-b471c4c4bf72", "description": "Ubuntu 9.10 64bit", "created": "2010/02/17 22:06:21 -0800"}, {"public": true, "id": "03807e08-a13d-44e4-b011-ebec7ef2c928", "description": "Ubuntu 10.04 LTS 64bit", "created": "2010/05/04 14:43:30 -0700"}, {"public": true, "id": "8b60e6de-7cbc-4c8e-b7df-5e2f9c4ffd6b", "description": "Ubuntu 10.04 LTS 32bit", "created": "2010/05/04 14:43:30 -0700"}] diff --git a/trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json b/trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json new file mode 100644 index 0000000000..9db716d7d1 --- /dev/null +++ b/trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json @@ -0,0 +1 @@ +{"ips": [{"address": "67.214.214.212"}], "memory": 1073741824, "template": "centos", "id": "99df878c-6e5c-4945-a635-d94da9fd3146", "storage": 21474836480, "hostname": "apitest.c44905.c44905.blueboxgrid.com", "description": "1 GB RAM + 20 GB Disk", "cpu": 0.5, "status": "running", "product": {"cost": 0.15, "id": "94fd37a7-2606-47f7-84d5-9000deda52ae", "description": "Block 1GB Virtual Server"}} diff --git a/trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json b/trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json new file mode 100644 index 0000000000..934a00f420 --- /dev/null +++ b/trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json @@ -0,0 +1 @@ +{"text":"Block destroyed."} diff --git 
a/trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json b/trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json new file mode 100644 index 0000000000..2ea54d2232 --- /dev/null +++ b/trunk/test/compute/fixtures/bluebox/api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json @@ -0,0 +1 @@ +{ "status": "ok", "text": "Reboot initiated." } diff --git a/trunk/test/compute/fixtures/bluebox/api_blocks_json.json b/trunk/test/compute/fixtures/bluebox/api_blocks_json.json new file mode 100644 index 0000000000..eb7d35ff6e --- /dev/null +++ b/trunk/test/compute/fixtures/bluebox/api_blocks_json.json @@ -0,0 +1 @@ +[{"ips":[{"address":"67.214.214.212"}],"memory":1073741824,"id":"99df878c-6e5c-4945-a635-d94da9fd3146","storage":21474836480,"hostname":"foo.apitest.blueboxgrid.com","description":"1 GB RAM + 20 GB Disk","cpu":0.5,"status":"running"}] diff --git a/trunk/test/compute/fixtures/bluebox/api_blocks_json_post.json b/trunk/test/compute/fixtures/bluebox/api_blocks_json_post.json new file mode 100644 index 0000000000..4452f7185b --- /dev/null +++ b/trunk/test/compute/fixtures/bluebox/api_blocks_json_post.json @@ -0,0 +1 @@ +{"ips":[{"address":"67.214.214.212"}],"memory":1073741824,"id":"99df878c-6e5c-4945-a635-d94da9fd3146","storage":21474836480,"hostname":"foo.apitest.blueboxgrid.com","description":"1 GB RAM + 20 GB Disk","cpu":0.5,"status":"queued", "product": {"cost": 0.15, "id": "94fd37a7-2606-47f7-84d5-9000deda52ae", "description": "Block 1GB Virtual Server"}} diff --git a/trunk/test/compute/fixtures/brightbox/create_server.json b/trunk/test/compute/fixtures/brightbox/create_server.json new file mode 100644 index 0000000000..e4b31226ba --- /dev/null +++ b/trunk/test/compute/fixtures/brightbox/create_server.json @@ -0,0 +1,62 @@ +{"id": "srv-3a97e", + "url": "servers/(server_id)", + "name": "My web server", + "status": "active", + "hostname": "srv-3a97e.gb1.brightbox.com", 
+ "created_at": "", + "deleted_at": "", + "started_at": "", + "account": + {"id": "acc-3jd8s", + "url": "accounts/(account_id)", + "name": "Brightbox Systems Ltd.", + "status": "verified", + "ram_limit": 20480, + "ram_used": 2048, + "limits_cloudips": 5}, + "image": + {"id": "img-9vxqi", + "url": "images/(image_id)", + "name": "Brightbox Lucid 32", + "status": "available", + "description": "Jeremy's debian ec2 image", + "source": "jeremy_debian-32_ec2", + "source_type": "upload", + "arch": "32-bit", + "created_at": "", + "owner": "acc-bright"}, + "server_type": + {"id": "typ-a97e6", + "url": "server_types/(server_type_id)", + "handle": "nano", + "name": "Brightbox Nano", + "status": "", + "cores": 2, + "ram": 2048, + "disk_size": ""}, + "zone": + {"id": "zon-8ja0a", + "url": "zones/(zone_id)", + "handle": "gb1-a"}, + "snapshots": + [{"id": "img-9vxqi", + "url": "images/(image_id)", + "name": "Brightbox Lucid 32", + "status": "available", + "description": "Jeremy's debian ec2 image", + "source": "jeremy_debian-32_ec2", + "source_type": "upload", + "arch": "32-bit", + "created_at": "", + "owner": "acc-bright"}], + "cloud_ips": + [{"id": "cip-ja8ub", + "url": "cloud_ips/(cloud_ip_id)", + "public_ip": "109.107.42.129", + "status": "mapped", + "reverse_dns": "cip-109-107-42-129.gb1.brightbox.com"}], + "interfaces": + [{"id": "int-mc3a9", + "url": "interfaces/(interface_id)", + "mac_address": "02:24:19:6e:18:36", + "ipv4_address": "10.110.24.54"}]} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/brightbox/list_images.json b/trunk/test/compute/fixtures/brightbox/list_images.json new file mode 100644 index 0000000000..9239569202 --- /dev/null +++ b/trunk/test/compute/fixtures/brightbox/list_images.json @@ -0,0 +1,21 @@ +[{"id": "img-9vxqi", + "url": "images/(image_id)", + "name": "Brightbox Lucid 32", + "status": "available", + "description": "Jeremy's debian ec2 image", + "source": "jeremy_debian-32_ec2", + "source_type": "upload", + "arch": "32-bit", 
+ "created_at": "", + "owner": "acc-bright", + "ancestor": + {"id": "img-9vxqi", + "url": "images/(image_id)", + "name": "Brightbox Lucid 32", + "status": "available", + "description": "Jeremy's debian ec2 image", + "source": "jeremy_debian-32_ec2", + "source_type": "upload", + "arch": "32-bit", + "created_at": "", + "owner": "acc-bright"}}] diff --git a/trunk/test/compute/fixtures/brightbox/list_server_types.json b/trunk/test/compute/fixtures/brightbox/list_server_types.json new file mode 100644 index 0000000000..63484613d9 --- /dev/null +++ b/trunk/test/compute/fixtures/brightbox/list_server_types.json @@ -0,0 +1,8 @@ +[{"id": "typ-4nssg", + "url": "server_types/typ-4nssg", + "handle": "nano", + "name": "Brightbox Nano Instance", + "status": "", + "cores": 1, + "ram": 512, + "disk_size": 10240}] diff --git a/trunk/test/compute/fixtures/brightbox/list_servers.json b/trunk/test/compute/fixtures/brightbox/list_servers.json new file mode 100644 index 0000000000..776a7f5f9f --- /dev/null +++ b/trunk/test/compute/fixtures/brightbox/list_servers.json @@ -0,0 +1,62 @@ +[{"id": "srv-3a97e", + "url": "servers/(server_id)", + "name": "My web server", + "status": "active", + "hostname": "srv-3a97e.gb1.brightbox.com", + "created_at": "", + "deleted_at": "", + "started_at": "", + "account": + {"id": "acc-3jd8s", + "url": "accounts/(account_id)", + "name": "Brightbox Systems Ltd.", + "status": "verified", + "ram_limit": 20480, + "ram_used": 2048, + "limits_cloudips": 5}, + "image": + {"id": "img-9vxqi", + "url": "images/(image_id)", + "name": "Brightbox Lucid 32", + "status": "available", + "description": "Jeremy's debian ec2 image", + "source": "jeremy_debian-32_ec2", + "source_type": "upload", + "arch": "32-bit", + "created_at": "", + "owner": "acc-bright"}, + "server_type": + {"id": "typ-a97e6", + "url": "server_types/(server_type_id)", + "handle": "nano", + "name": "Brightbox Nano", + "status": "", + "cores": 2, + "ram": 2048, + "disk_size": ""}, + "zone": + {"id": 
"zon-8ja0a", + "url": "zones/(zone_id)", + "handle": "gb1-a"}, + "snapshots": + [{"id": "img-9vxqi", + "url": "images/(image_id)", + "name": "Brightbox Lucid 32", + "status": "available", + "description": "Jeremy's debian ec2 image", + "source": "jeremy_debian-32_ec2", + "source_type": "upload", + "arch": "32-bit", + "created_at": "", + "owner": "acc-bright"}], + "cloud_ips": + [{"id": "cip-ja8ub", + "url": "cloud_ips/(cloud_ip_id)", + "public_ip": "109.107.42.129", + "status": "mapped", + "reverse_dns": "cip-109-107-42-129.gb1.brightbox.com"}], + "interfaces": + [{"id": "int-mc3a9", + "url": "interfaces/(interface_id)", + "mac_address": "02:24:19:6e:18:36", + "ipv4_address": "10.110.24.54"}]}] diff --git a/trunk/test/compute/fixtures/brightbox/list_zones.json b/trunk/test/compute/fixtures/brightbox/list_zones.json new file mode 100644 index 0000000000..4bf54ce92a --- /dev/null +++ b/trunk/test/compute/fixtures/brightbox/list_zones.json @@ -0,0 +1,3 @@ +[{"id": "zon-8ja0a", + "url": "zones/(zone_id)", + "handle": "gb1-a"}] diff --git a/trunk/test/compute/fixtures/brightbox/token.json b/trunk/test/compute/fixtures/brightbox/token.json new file mode 100644 index 0000000000..0c0f12ba06 --- /dev/null +++ b/trunk/test/compute/fixtures/brightbox/token.json @@ -0,0 +1 @@ +{"access_token":"k1bjflpsaj8wnrbrwzad0eqo36nxiha", "expires_in": 3600} diff --git a/trunk/test/compute/fixtures/cloudsigma/drives_clone.txt b/trunk/test/compute/fixtures/cloudsigma/drives_clone.txt new file mode 100644 index 0000000000..d18ae9e4e1 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudsigma/drives_clone.txt @@ -0,0 +1,19 @@ +status active +use dbserver,general +name SQL Server Standard 2008 R2 - Windows Server Standard 2008 R2 - 64bit English pub clone +bits 64 +url http://www.microsoft.com/sqlserver/2008/en/us/ +read:bytes 4096 +description Please refer to the install notes for a full guide to initial configuration. 
+write:bytes 21474840576 +drive a814def5-1789-49a0-bf88-7abe7bb1682a +install_notes ***You must update the default Administrator password for Windows Server Standard 2008 and the Super Administrator password (sa) for SQL Server Standard 2008***\n\nPre-installed Windows Server 2008 Standard R2 64bit English on 15/07/2010\n=========================================================================\n\n1. Minimum Hardware Requirements\n--------------------------------\n\nThe recommended minimum hardware requirements for the use of SQL Server Standard 2008 R2 with Windows Server Standard 2008 R2 as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/library/ms143506.aspx\n\n\n2. Update your administrator password\n-------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n3. Expanding your drive\n-----------------------\n\nThe system is fully installed, but you will need to extend the\ndisk partition to cover the whole of your drive. To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n4. 
Enabling Remote Access\n-------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection according to your Security Configuration\n\n\n5. Pinging Service\n------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules"\n\n\nSQL Server 2008 R2 on 15/07/2010\n================================\n\n1. Change the Super Administrator Password (sa). \n--------------------------------------------------------------------\n\nThe default password has been set to "CloudSigma1"\n\na) Open "Microsoft SQL Server Management Studio"\n\nb) Connect to the Server Using "Windows Indentificaiton"\n\nc) From the Object Explorer select "Security" then "Longins"\n\nd) Right-click on sa and select "Properties"\n\ne) Enter the new password into "Password" and "Confirm Password" and press "OK"\n\n\n2. 
The following features were installed:\n-----------------------------------------------------\n\na) Main features\n\n-Database Engine Services\n-SQL Server Replication\n-Full-Text Search\n-Analysis Services\n-Reporting Services\n\nb) Shared Features\n\n-Business Intelligengce Development Studio\n-Client Tools Connectivity\n-Integration Services\n-Clinet Tools Backwards Compatibility\n-Clinet Tools SDK\n-SQL Server Books Online\n-Mangement Tools - Basic\n-Management Tools - Complete\n-SQL Client Connectivity SDK\n-Microsoft Sync Framework\n\n3 The following services were configured:\n--------------------------------------------------------\n\n\nService: SQL Server Agent\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Manual\n\nService: SQL Server Database Engine\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Automatic\n\nService: SQL Server Analysis Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Reporting Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Integration Services 10.1\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n \nService: SQL Full-text filter Daemon Lanuch\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nService: SQL Server Browser\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nFor detailed server installation configuration refer to the following installation log files on the system:\nC:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100716_162426\Summary_WIN-K0F21FV1C1V_20100716_162426.txt\n +volume 000431a5-46d9-4a67-9c03-3c3402a41992 +host 00043e69-ac57-45b1-8692-75db24064fb9 +os windows +user 93b34fd9-7986-4b25-8bfd-98a50383605d +read:requests 1 +licenses msft_p73_04837 msft_tfa_00009 +type disk +write:requests 5242881 +size 21474836480 diff --git a/trunk/test/compute/fixtures/cloudsigma/drives_info.txt b/trunk/test/compute/fixtures/cloudsigma/drives_info.txt new file mode 100644 index 
0000000000..e9cc833bf0 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudsigma/drives_info.txt @@ -0,0 +1,39 @@ +status active +use general +name test node +bits 64 +url http://www.centos.org/ +read:bytes 4096 +description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. +write:bytes 21474840576 +os linux +drive 3d18db4b-f9bd-4313-b034-12ae181efa88 +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. 
Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. 
We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1 +free true +type disk +write:requests 5242881 +size 53687091200 + +status active +use general +name test node +bits 64 +url http://www.centos.org/ +read:bytes 4096 +description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. +write:bytes 21474840576 +os linux +drive 3d18db4b-f9bd-4313-b034-12ae181efa99 +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. 
\n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. 
You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1 +free true +type disk +write:requests 5242881 +size 103687091200 diff --git a/trunk/test/compute/fixtures/cloudsigma/drives_single_info.txt b/trunk/test/compute/fixtures/cloudsigma/drives_single_info.txt new file mode 100644 index 0000000000..6efe9aaea7 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudsigma/drives_single_info.txt @@ -0,0 +1,19 @@ +status active +use general +name test node +bits 64 +url http://www.centos.org/ +read:bytes 4096 +description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. 
+write:bytes 21474840576 +os linux +drive d18119ce_7afa_474a_9242_e0384b160220 +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. 
You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. 
+encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1 +free true +type disk +write:requests 5242881 +size 53687091200 diff --git a/trunk/test/compute/fixtures/cloudsigma/drives_standard_info.txt b/trunk/test/compute/fixtures/cloudsigma/drives_standard_info.txt new file mode 100644 index 0000000000..4a024114eb --- /dev/null +++ b/trunk/test/compute/fixtures/cloudsigma/drives_standard_info.txt @@ -0,0 +1,1735 @@ +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Yoper is a multipurpose high performance operating system which has been carefully optimised for PC's with either 686 or higher processor types. The binaries that come with Yoper have been built from scratch using the original sources combined with the best features of major distros, measuring up to the demanding proliferation of network communications and more intensive digital multimedia, graphics and audio capabilities which are ushering in a new era of business productivity enabled by a new generation of sophisticated microprocessors, and business application tools. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 102401 +name Yoper 2010 Linux with XFCE Install CD +url http://yoper-linux.org/ +read:bytes 4096 +claim:type shared +drive 7e3e7628-d1e6-47c6-858d-7b54aac5c916 +write:bytes 419434496 +read:requests 1 +os linux + +type cdrom +size 2621440000 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Ultimate Edition, first released in December 2006, is a fork of Ubuntu. The goal of the project is to create a complete, seamlessly integrated, visually stimulating, and easy-to-install operating system. Single-button upgrade is one of several special characteristics of this distribution. Other main features include custom desktop and theme with 3D effects, support for a wide range of networking options, including WiFi and Bluetooth, and integration of many extra applications and package repositories. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 640001 +name Ultimate 2.6 Linux 64bit Install CD +url http://ultimateedition.info/ +read:bytes 440279040 +claim:type shared +drive 526ed5cb-6fbe-46fb-a064-7707c844d774 +write:bytes 2621444096 +read:requests 107490 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description FreeBSD is a UN*X-like operating system for the i386, IA-64, PC-98, Alpha/AXP, and UltraSPARC platforms based on U.C. Berkeley's "4.4BSD-Lite" release, with some "4.4BSD-Lite2" enhancements. It is also based indirectly on William Jolitz's port of U.C. Berkeley's "Net/2" to the i386, known as "386BSD", though very little of the 386BSD code remains. FreeBSD is used by companies, Internet Service Providers, researchers, computer professionals, students and home users all over the world in their work, education and recreation. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 168961 +name FreeBSD 8.0 Linux 64bit Install CD +url http://www.freebsd.org/ +read:bytes 479866880 +claim:type shared +drive 95380e4c-4f69-432d-be2b-1965a282bdb9 +write:bytes 692064256 +read:requests 117155 +os other + +type cdrom +size 218103808 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description The Fedora 13 x86_64 (64bit) network installation CD +favourite true +install_notes The Fedora 13 network installaton cd will install, through the network, the latest Fedora packages; since it includes the "updates" repo.\n\nThe minimal install option offers great ground to build on top of a very nice base. This configuration is recommended for most servers.\n\nBuild your own and share them wth us! +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 53249 +name Fedora 13 Linux x86 64bit netinst Install CD +url http://fedoraproject.org/ +read:bytes 1444963840 +claim:type shared +drive 14b1e97f-5bba-4cf1-aec4-7b7b573826c2 +write:bytes 218107904 +read:requests 352119 +os linux + +type cdrom +size 452984832 +use security +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description The Untangle Lite package offers a collection of free, open-source software applications to run on the Untangle Server. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. 
You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server.\n +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 110593 +name Untangle 7.4 Linux 64bit Install CD +url http://www.untangle.com/ +read:bytes 4096 +claim:type shared +drive 06c39099-9f75-40f4-b2e1-6012c87f3579 +write:bytes 452988928 +read:requests 1 +os linux + +type cdrom +size 138412032 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type preinstalled +status active +description Puppy Linux is extraordinarily small, yet quite full featured. Puppy boots into a 64MB ramdisk, and that's it, the whole caboodle runs in RAM. Unlike live CD distributions that have to keep pulling stuff off the CD, Puppy in its entirety loads into RAM. This means that all applications start in the blink of an eye and respond to user input instantly. Puppy Linux has the ability to boot off a flash card or any USB memory device, CDROM, Zip disk or LS/120/240 Superdisk, floppy disks, internal hard drive. It can even use a multisession formatted CD-R/DVD-R to save everything back to the CD/DVD with no hard drive required at all! +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 33793 +name Puppy 5.1 Linux Install CD +url www.puppylinux.org +read:bytes 276828160 +claim:type shared +drive 60111502-6ff3-43e1-9485-5be775f81657 +write:bytes 138416128 +read:requests 67585 +os linux + +type cdrom +size 171966464 +use router,general,networking +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type preinstalled +status active +description Vyatta project is a Linux-based router and firewall offering a free community edition and two commercial editions with support. Vyatta has changed the networking world by developing the first commercially supported, open-source router & firewall solution. Vyatta combines the features, performance and reliability of an enterprise router & firewall with the cost savings, flexibility and security of open source. +favourite true +install_notes \nCD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 41985 +name Vyatta 6.1 Live CD +url www.yyatta.com +read:bytes 687869952 +claim:type shared +drive 8159ab9b-9703-48f6-a206-ac26efe8fdc2 +write:bytes 171970560 +read:requests 167937 +os linux + +type cdrom +size 721420288 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description ZenLive Linux is a live cd derived from Zenwalk distribution. Zenwalk Linux (formerly Minislack) is a Slackware-based Linux distribution with focus on Internet applications, multimedia and programming tools. ZenLive Linux LiveCD is a complete system with software for Internet browsing, mail, chat, multimedia and office, as well as for programming in C, Perl, Python and Ruby. The main objectives of Zenwalk Linux are to be simple and fast, provide one application per task, be a complete development and desktop environment and to be small enough to fit on a 400MB ISO image. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 176129 +name Zenlive 6.4 Linux Install CD +url http://www.zenwalk.org/ +read:bytes 721424384 +claim:type shared +drive fcc2aa68-24ce-438e-8386-1d4e66336155 +write:bytes 721424384 +read:requests 176129 +os linux + +type cdrom +claimed 00059836-5512-4ce2-bf66-4daab2d994e4:guest:2e82c87e-61a1-443c-bc81-5c3167df5c11:ide:0:0 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:3234b1fc-415f-4019-ada1-27781aea8750:ide:0:0 +size 4198498304 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description CentOS as a group is a community of open source contributors and users. Typical CentOS users are organisations and individuals that do not need strong commercial support in order to achieve successful operation. CentOS is 100% compatible rebuild of the Red Hat Enterprise Linux, in full compliance with Red Hat's redistribution requirements. CentOS is for people who need an enterprise class operating system stability without the cost of certification and support. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 1025025 +name CentOS 5.5 Linux 32bit Install DVD +url http://www.centos.org +read:bytes 16706375680 +claim:type shared +drive 6e0e2282-c29a-4d19-97e6-7ddb7cdf0dd2 +write:bytes 4198502400 +read:requests 4078705 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd,livecd +status active +description Ubuntu is a complete desktop Linux operating system, freely available with both community and professional support. The Ubuntu community is built on the ideas enshrined in the Ubuntu Manifesto: that software should be available free of charge, that software tools should be usable by people in their local language and despite any disabilities, and that people should have the freedom to customise and alter their software in whatever way they see fit."Ubuntu&quot; is an ancient African word, meaning"humanity to others&quot;. The Ubuntu distribution brings the spirit of Ubuntu to the software world. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 179201 +name Ubuntu 10.04 Linux 32bit Install CD +url http://www.ubuntu.com/ +read:bytes 1298436608 +claim:type shared +drive 0e305bb9-f512-4d4a-894c-4a733cae570f +write:bytes 734007296 +read:requests 295036 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd,livecd +status active +description Chakra is a user-friendly and powerful distribution and live CD based on Arch Linux. It features a graphical installer, automatic hardware detection and configuration, the latest KDE desktop, and a variety of tools and extras. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 204801 +name Chakra Alpha 5 Linux 64bit Install and Live CD +url http://chakra-project.org/ +read:bytes 185200640 +claim:type shared +drive c0856590-c2b1-4725-9448-bba7c74d35dc +write:bytes 838864896 +read:requests 45215 +os linux + +type cdrom +claimed 00043e69-ac57-45b1-8692-75db24064fb9:guest:4c014a4e-615e-489e-b22a-bf966bce83d7:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:0a9d4833-fc5f-4825-9626-5a3e6555d329:ide:0:0 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:71d09667-fd6b-491a-949f-6a7ab9c70907:ide:0:0 0008d252-5102-43a0-82c6-18e8e2dd2bff:guest:c8264872-67a1-4452-a736-8dc6ef9eb07d:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:6efe92c3-0126-4ddb-9140-73706c804c3b:ide:0:0 000932a7-c74f-4de3-bfc4-227435f78998:guest:158c515f-1649-44f0-895c-f0de39575a1c:ide:0:0 00079b57-1b29-4a89-a8d0-1d648fc20804:guest:7d62f26e-2062-469e-846a-b926dffb00b1:ide:0:0 +size 4697620480 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description - +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 1146881 +name Debian Linux 5.0 Install CD +url http://www.debian.org/ +read:bytes 4612921344 +claim:type shared +drive 794a068d-228c-4758-81f0-e1bc955a6cce +write:bytes 4697624576 +read:requests 985768 +os linux + +type cdrom +size 2751463424 +use dev,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type livecd +status active +description Scientific Linux is a recompiled Red Hat Enterprise Linux put together by various labs and universities around the world. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC.\n +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 674125 +name Scientific Linux 5.5 64bit Live CD +url https://www.scientificlinux.org/ +read:bytes 10903552 +claim:type shared +drive 7aa74ca3-4c64-4b08-9972-eddeb38a650d +write:bytes 2761216000 +read:requests 2662 +os linux + +type cdrom +size 612368384 +use networking,other +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Nexenta OS is a free and open source operating system combining the OpenSolaris kernel with GNU application userland. Nexenta OS runs on Intel/AMD 32-/64-bit hardware and is distributed as a single installable CD. 
Upgrades and binary packages not included on the CD can be installed from Nexenta OS repository using Advanced Packaging Tool. In addition, source based software components can be downloaded from network repositories available at Debian/GNU Linux and Ubuntu Linux. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 149357 +name NexentaStor 3.0.3 Linux 32bit/64bit Install CD +url http://www.nexenta.org/ +read:bytes 2822144 +claim:type shared +drive 2c3369a5-22eb-4462-8137-35a62b7a93cf +write:bytes 611766272 +read:requests 689 +os other + +type cdrom +size 301989888 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Zenwalk Linux (formerly Minislack) is a Slackware-based GNU/Linux operating system with a goal of being slim and fast by using only one application per task and with focus on graphical desktop and multimedia usage. Zenwalk features the latest Linux technology along with a complete programming environment and libraries to provide an ideal platform for application programmers. Zenwalk's modular approach also provides a simple way to convert Zenwalk Linux into a finely-tuned modern server (e.g. 
LAMP, messaging, file sharing). +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 73342 +name Zenwalk Core 6.4 Install CD +url http://yoper-linux.org/ +read:bytes 1576960 +claim:type shared +drive 3d58f1c6-9ec4-4963-917e-9917d39e5003 +write:bytes 300408832 +read:requests 385 +os linux + +type cdrom +size 67108864 +use general,security +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type preinstalled +status active +description IPFire is a linux-distribution that focusses on easy setup, good handling and a high niveau of security. It is operable via an intuitive webinterface, which offers a lot of playground for beginners and even experienced administrators. IPFire is maintained by experienced developers, who are really concerned about security and regulary updates to keep it secure. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. 
Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 16385 +name IPFire 2.7 Core 40 Linux 32bit Install CD +url http://www.ipfire.org/ +read:bytes 4096 +claim:type shared +drive 231aa9af-f2ef-407c-9374-76a1215b94d3 +write:bytes 67112960 +read:requests 1 +os linux + +type cdrom +size 734003200 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Legacy OS (formerly TEENpup Linux) is a distribution based on Puppy Linux. Although the original concept was to create a flavour of Puppy Linux with more applications and a more appealing desktop aimed at teenage users, Legacy OS has now grown to become a general purpose distribution. It comes with a large number of applications, browser plugins and media codecs as standard software. Despite these enhancements Legacy OS is still perfectly suitable for installation on older and low-resource computers, as well as modern hardware. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 179201 +name Legacy OS Linux 32bit Install CD +url http://pupweb.org/wikka/TeenPup +read:bytes 4096 +claim:type shared +drive 39f24226-dc6c-40e2-abc8-e8f2da976671 +write:bytes 734007296 +read:requests 1 +os linux + +type cdrom +size 209715200 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Yohsuke Ooi has announced the release of Momonga Linux 7, a Japanese community distribution loosely modelled on Fedora. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 51201 +name Momonga 7 Linux 64bit Net Install CD +url http://www.momonga-linux.org/ +read:bytes 4096 +claim:type shared +drive f424888b-e66e-43f4-99c1-2991a5b82894 +write:bytes 209719296 +read:requests 1 +os linux + +type cdrom +size 713031680 +use general,security,systemrecovery +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type livecd +status active +description CAINE (Computer Aided INvestigative Environment) is an Ubuntu-based GNU/Linux live distribution created as a project of Digital Forensics for Interdepartmental Centre for Research on Security (CRIS), supported by the University of Modena and Reggio Emilia in Italy. The CAINE forensic framework contains a collection of tools wrapped up into a user-friendly environment. It introduces novel features - it aims to fill the interoperability gap across different forensic tools, it provides a homogeneous GUI that guides digital investigators during the acquisition and analysis of electronic evidence, and it offers a semi-automatic process for the documentation and report compilation. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 174081 +name Caine 2.0 Linux 32bit Live CD +url http://www.caine-live.net/ +read:bytes 4096 +claim:type shared +drive 9768a0d1-e90c-44eb-8da7-06bca057cb93 +write:bytes 713035776 +read:requests 1 +os linux + +type cdrom +size 708837376 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Kongoni GNU/Linux is a Slackware-based, desktop-oriented GNU/Linux distribution and live CD. Its main features include a graphical installer, a Kongoni Integrated Setup System (KISS), and an easy-to-use Ports Installation GUI (PIG). The distribution's package management borrows its main concepts from BSD ports, with an intuitive graphical package installer that compiles and installs programs from source code on the user's system. Kongoni, which means gnu (also known as wildebeest) in Shona, includes only software that complies with Free Software Foundation's definition of software freedom. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 173057 +name Kongoni 1.12.3 Linux 32bit Live CD +url http://www.kongoni.org/ +read:bytes 4096 +claim:type shared +drive 6ac51b9d-a1db-44fc-b325-30bdefd0dd0a +write:bytes 708841472 +read:requests 1 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type livecd +status active +description Debris Linux is a minimalist, desktop-oriented distribution and live CD based on Ubuntu. It includes the GNOME desktop and a small set of popular desktop applications, such as GNOME Office, Firefox web browser, Pidgin instant messenger, and ufw firewall manager. Debris Linux ships with a custom kernel, a custom system installer called DebI, and a script that makes it easy to save and restore any customisations made while in live mode. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Debris 2.0 Linux Live CD +url http://debrislinux.org/ +read:bytes 0 +claim:type shared +drive 258e1026-36bf-4368-ba7c-52836de4f757 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 1887436800 +use systemrecovery,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type livecd +status active +description Toorox is a Linux Live-DVD based on Gentoo that starts as a bootable media using KNOPPIX technology. While the system is booting, all necessary drivers will be included automatically (lshwd). Toorox is only using the memory and an existing swap partition at runtime, so your hard disks won't be touched by default. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. 
You will be able to complete the software installation via VNC and start using your new server.\n +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Toorox 05.2010 Linux 64bit Live CD +url http://toorox.de/ +read:bytes 0 +claim:type shared +drive 8fa3bc29-47e8-496a-89c6-02872a0d2642 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 2516582400 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description FreeBSD is a UN*X-like operating system for the i386, IA-64, PC-98, Alpha/AXP, and UltraSPARC platforms based on U.C. Berkeley's"4.4BSD-Lite&quot; release, with some"4.4BSD-Lite2&quot; enhancements. It is also based indirectly on William Jolitz's port of U.C. Berkeley's"Net/2&quot; to the i386, known as"386BSD&quot;, though very little of the 386BSD code remains. FreeBSD is used by companies, Internet Service Providers, researchers, computer professionals, students and home users all over the world in their work, education and recreation. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name FreeBSD 7.3 Linux 64bit Install CD +url http://www.freebsd.org/ +read:bytes 13836288 +claim:type shared +drive 92444414-dc65-451d-9018-2b1ab8db4ceb +write:bytes 0 +read:requests 3378 +os other + +type cdrom +size 1073741824 +use systemrecovery,security +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd,livecd +status active +description KANOTIX is a Linux distribution based on the latest stable release of Debian GNU/Linux. It is built on top of a latest kernel which is carefully patched with fixes and drivers for most modern hardware. Although it can be used as a live CD, it also includes a graphical installer for hard disk installation. The user-friendly nature of the distribution is further enhanced by a custom-built control centre and specialist scripts. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Kanotix 4-2.6 Linux with KDE 64bit Install and Live CD +url http://www.kanotix.com/ +read:bytes 232169472 +claim:type shared +drive c7c33c07-5e28-42c8-9800-eb40e2aef287 +write:bytes 0 +read:requests 56682 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description TinyMe is a Unity Linux-based mini-distribution. It exists to ease installation of Unity Linux on older computers, to provide a minimal installation for developers, and to deliver a fast Linux installation for where only the bare essentials are needed. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name TinyMe 2010 Linux Install CD +url http://tinymelinux.com/ +read:bytes 0 +claim:type shared +drive 87b3f98c-c95c-454d-a002-bef63f5bbc1a +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description moonOS is a complete, Ubuntu-based distribution featuring the LXDE and Enlightenment 17 desktop managers and imaginative, original artwork. A project created and designed by Cambodian artist Chanrithy Thim, moonOS is intended as an operating system for any desktop, laptop or virtual machine. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name MoonOS 3 Linux 32bit Install CD +url http://www.moonos.org/ +read:bytes 0 +claim:type shared +drive d2651d5b-3760-41be-a8b0-6fe5ca208825 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description - +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Yoper 2010 Linux with KDE3 Install CD +url http://yoper-linux.org/ +read:bytes 0 +claim:type shared +drive 50e0ca32-c04a-47e3-be37-1cd6f0ad9ff8 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Easy Peasy is an Ubuntu based operating system for netbooks. It's optimized for netbooks and favors the best software available by delivering Firefox with Flash and Java, Skype, Google Picasa, Songbird etc. out of the box. 
+favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name EasyPeasy 1.6 Linux Install CD +url http://www.geteasypeasy.com/ +read:bytes 195153920 +claim:type shared +drive daac6531-8f59-4c96-baa0-6545350d5a5e +write:bytes 0 +read:requests 47645 +os linux + +type cdrom +size 1572864000 +use email,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Calculate Linux is a Gentoo-based family of three distinguished distributions. Calculate Directory Server (CDS) is a solution that supports Windows and Linux clients via LDAP + SAMBA, providing proxy, mail and Jabbers servers with streamlined user management. Calculate Linux Desktop (CLD) is a workstation and client distribution with KDE, GNOME or Xfce desktop that includes a wizard to configure a connection to Calculate Directory Server. Calculate Linux Scratch (CLS) is live CD with a build framework for creating a custom distribution. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. 
Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Calculate 10.4 Linux 64bit Install CD +url http://www.calculate-linux.org/ +read:bytes 16932864 +claim:type shared +drive 20f5b0dd-5c63-40aa-97b8-5b34e5107a25 +write:bytes 0 +read:requests 4134 +os linux + +type cdrom +size 734003200 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description KahelOS is a Linux distribution based on Arch Linux. Its desktop edition comes with pre-configured GNOME as the default desktop environment, GNOME Office productivity suite, Epiphany web browser, GIMP image manipulation program, and other popular GTK+ and GNOME applications. Like Arch Linux, KahelOS maintains a rolling-release model of updating software packages using its parent's repositories. A server edition is also available. Both the desktop and server editions come in the form of installation CDs with text-based installers, but no live media. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. 
You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name KahelOS 05-01-2010 Linux 64bit Install CD +url http://www.kahelos.org/ +read:bytes 0 +claim:type shared +drive 1ddaedbf-ceb8-43b5-a587-e9e635d97f50 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 524288000 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd,livecd +status active +description PCLinuxOS is a user-friendly, Mandriva-based Linux distribution with out-of-the-box support for many popular graphics and sound cards, as well as other peripheral devices. The bootable live CD provides an easy-to-use graphical installer and the distribution sports a wide range of popular applications for the typical desktop user, including browser plugins and full multimedia playback. The intuitive system configuration tools include Synaptic for package management, Addlocale to add support to many languages, Getopenoffice to install the latest OpenOffice.org, and Mylivecd to create a customised live CD. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. 
Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name PCLinuxOS 2010.1 Linux with KDE Install and Live CD +url http://www.pc-os.org/ +read:bytes 0 +claim:type shared +drive 3e0f427e-10eb-4277-bc3b-48f054908a09 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 524288000 +use multimedia,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type livecd +status active +description Peppermint was designed for enhanced mobility, efficiency and ease of use. While other operating systems are taking 10 minutes to load, you are already connected, communicating and getting things done. And, unlike other operating systems, Peppermint is ready to use out of the box. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. 
You will be able to start using your new server via VNC.\n +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Peppermint 1.0 Linux Live CD +url http://peppermintos.com/ +read:bytes 0 +claim:type shared +drive 92ffa2f6-f663-49d9-98ec-dc0b474369c4 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 419430400 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type livecd +status active +description PureOS and PureOSlight are GNU/Linux live CDs based on Debian's testing repository. These are desktop distributions that can be used as live media (CD or USB) or as full-featured operating systems installed on a hard disk. PureOS is a 700 MB live CD with KDE, Iceweasel, Icedove, OpenOffice.org, Songbird, VLC and K3B. PureOSlight is a small 300 MB live CD with Xfce, Iceweasel, Icedove, AbiWord, Gnumeric and Exaile. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. 
You will be able to start using your new server via VNC.\n +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name PureOS 2.0 Linux Live CD +url http://www.pureos.org/ +read:bytes 100663296 +claim:type shared +drive ed6421b5-41c2-4ba3-a3c9-7c330d36e5b3 +write:bytes 0 +read:requests 24576 +os linux + +type cdrom +size 104857600 +use dev,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description ReactOS® is a free, modern operating system based on the design of Windows® XP/2003. Written completely from scratch, it aims to follow the Windows-NT® architecture designed by Microsoft from the hardware level right through to the application level. This is not a Linux based system, and shares none of the unix architecture. The main goal of the ReactOS project is to provide an operating system which is binary compatible with Windows. This will allow your Windows applications and drivers to run as they would on your Windows system. Additionally, the look and feel of the Windows operating system is used, such that people accustomed to the familiar user interface of Windows® would find using ReactOS straightforward. The ultimate goal of ReactOS is to allow you to remove Windows® and install ReactOS without the end user noticing the change. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name ReactOS 0.3.11 Alpha Install CD +url http://www.reactos.org/ +read:bytes 0 +claim:type shared +drive 327fd7dd-a2ca-4437-b87e-7610fccc3202 +write:bytes 0 +read:requests 0 +os other + +type cdrom +size 1887436800 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Ubuntu Studio is a variant of Ubuntu aimed at the GNU/Linux audio, video and graphic enthusiast as well as professional. The distribution provides a collection of open-source applications available for multimedia creation. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Ubuntu Studio 10.04 Linux 32bit Install CD +url http://www.ubuntu.com/ +read:bytes 499675136 +claim:type shared +drive c6a368d1-cae6-43d9-8af6-b42142aed4b9 +write:bytes 0 +read:requests 121991 +os linux + +type cdrom +size 1073741824 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +bits 32 +drive_type livecd +status active +description Vector Linux is a small, fast, Intel based Linux operating system for PC style computers. The creators of Vector Linux had a single credo: keep it simple, keep it small and let the end user decide what their operating system is going to be. What has evolved from this concept is perhaps the best little Linux operating system available anywhere. For the casual computer user you have a lightening fast desktop with graphical programs to handle your daily activities from web surfing, sending and receiving email, chatting on ICQ or IRC to running an ftp server. The power user will be pleased because all the tools are there to compile their own programs, use the system as a server or perhaps the gateway for their home or office computer network. Administrators will be equally as pleased because the small size and memory requirements of the operating system can be deployed on older machines maybe long forgotten. 
+favourite true +free true +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Vector 6.0 Linux with KDE Live CD +url http://www.vectorlinux.com/ +read:bytes 0 +claim:type shared +drive 0aa0b75d-ce40-4877-9882-8a81443911fe +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 713031680 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Greenie Linux is an Ubuntu-based distribution customised for Slovak and Czech users. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Greenie 7 Linux 32bit Install CD +url http://www.greenie.sk/ +read:bytes 0 +claim:type shared +drive bdddc973-e84f-4cbc-a2c9-a9fce73bc462 +write:bytes 0 +read:requests 0 +os linux + +type cdrom +size 67108864 +use networking,gateway +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd,livecd +status active +description pfSense is a free, open source customized distribution of FreeBSD tailored for use as a firewall and router. 
In addition to being a powerful, flexible firewalling and routing platform, it includes a long list of related features and a package system allowing further expandability without adding bloat and potential security vulnerabilities to the base distribution. pfSense is a popular project with more than 1 million downloads since its inception, and proven in countless installations ranging from small home networks protecting a PC and an Xbox to large corporations, universities and other organizations protecting thousands of network devices.  +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name PfSense 1.2.3 Linux 32bit Live and Install CD +url http://www.pfsense.org +read:bytes 68657152 +claim:type shared +drive db46ea0d-26f3-4cd0-8a55-54da2af10363 +write:bytes 0 +read:requests 16762 +os linux + +type cdrom +size 46137344 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type installcd +status active +description Mandrake Linux was created in 1998 with the goal of making Linux easier to use for everyone. 
At that time, Linux was already well-known as a powerful and stable operating system that demanded strong technical knowledge and extensive use of the "command line"; MandrakeSoft saw this as an opportunity to integrate the best graphical desktop environments and contribute its own graphical configuration utilities and quickly became famous for setting the standard in ease-of-use and functionality. Mandriva Linux, formerly known as Mandrakelinux, is a friendly Linux Operating System which specializes in ease-of-use for both servers and the home/office. It is freely available in many languages throughout the world. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Mandriva Spring 2010 Linux 64bit Net Install CD +url http://mandriva.com/ +read:bytes 19488768 +claim:type shared +drive 857456e4-e16c-4a6f-9bfc-f5be3e58bde5 +write:bytes 0 +read:requests 4758 +os linux + +type cdrom +size 1606418432 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description UHU-Linux is the leading distribution of Linux in Hungary. 
It is primarily intended for Hungarian users, thus special care is taken to support the Hungarian language as much as possible. Ease of installation and sane default settings both help new users of Linux and make veterans feel comfortable. Usability as the main goal involves having all the cutting-edge yet stable releases of Open Source packages, with dpkg as the package manager. Development is completely open and everyone is invited to join. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 392193 +name Uhu Linux 2.2 32bit Install CD +url http://uhulinux.hu/ +read:bytes 354873344 +claim:type shared +drive 9d99705b-818a-49f8-8c77-0cd4a42cdea6 +write:bytes 1606422528 +read:requests 86639 +os linux + +type cdrom +size 734003200 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description Chakra, a fast, user-friendly and extremely powerful Live CD and/or distrolet based on the award winning KDE Software Compilation and on the GNU/Linux distribution for connoisseurs: Arch Linux. Currently in alpha stage, it features a graphical installer, automatic hardware configuration, and of course some more tools and extras. 
+favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. +volume 00106cda-0e17-40c8-a576-b516f0eb67bc +host 00109617-2c6b-424b-9cfa-5b572c17bafe +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 179201 +name Chakra 2.0 Linux Live CD +url http://www.chakra-project.org/ +read:bytes 4096 +claim:type shared +drive fdfa8104-05fb-4210-aba5-fe78c4e6ee8c +write:bytes 734007296 +read:requests 1 +os linux + +type cdrom +size 662700032 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 32 +drive_type installcd +status active +description UberStudent ("uber" meaning "productive" in Latin) is an Ubuntu-based distribution on a DVD designed for learning and teaching academic computing at the higher education and advanced secondary levels. UberStudent comes with software for everyday computing tasks, plus a core set of programs and features designed to teach and make easier the tasks and habits common to high-performing students of all academic disciplines. Lifelong learners, as well as any sort of knowledge worker, will equally benefit. UberStudent is supported by a free Moodle virtual learning environment. +favourite true +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. 
Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0013fc75-b170-4d62-abaf-804b8fc466cc +host 001318df-35c6-439f-8e72-8d57c36ca86b +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 161793 +name UberStudent 1.0 Linux with LXDE 32bit Install CD +url http://www.uberstudent.org/ +read:bytes 4096 +claim:type shared +drive 854a9706-fb14-4868-80df-53d712f1531a +write:bytes 662704128 +read:requests 1 +os linux + +type disk +size 3221225472 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +description This is a pre-installed ready Fedora system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. The Fedora Project is an openly-developed project designed by Red Hat, open for general participation, led by a meritocracy, following a set of project objectives. The goal of The Fedora Project is to work with the Linux community to build a complete, general purpose operating system exclusively from open source software. Development will be done in a public forum. The project will produce time-based releases of Fedora about 2-3 times a year, with a public release schedule. The Red Hat engineering team will continue to participate in building Fedora and will invite and encourage more outside participation than in past releases. By using this more open process, we hope to provide an operating system more in line with the ideals of free software and more appealing to the open source community. 
+favourite true +install_notes ***You must update the default root/superuser password for Fedora 13 on first login.***\n\nPre-installed Fedora 13 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Complete the personalisation of your new server\n---------------------------------------------------------------------\nUpon first start-up you should be presented with the welcome screen after the initial boot process has completed. You will now have the opportunity to personalise your system installation. \na) Click the 'forward' button to get started.\n\nb) You will now see the license information for this system. Fedora 13 has an open source GNU license. Assuming this is acceptable you should click the 'forward' button again.\n\nc) You can now create your own user account, enter your name and set the password. Please note:\n\nTHIS IS NOT THE ADMINISTRATIVE ACCOUNT. YOU SHOULD RESET THE ROOT/SUPERUSER PASSWORD AS OUTLINED IN STEP 4 BELOW AFTER COMPLETING STEP 3.\n\nd) After clicking forward again you will have the opportunity to set the time servers that will set your servers time. 
You can just leave the default values unless you have some specific needs. Once you are happy please click the 'forward' button.\n\ne) Finally you have the option to submit your hardware profile to the Fedora Project to help with their development. This is entirely your personal choice. Either way once you are ready click the 'finish' button.\n\n4. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: cloudsigma\nThe default root/superuser password is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\nsu root\n\nWhen prompted use the password 'cloudsigma' (don't type the quote marks). The prompt should change to show you now using the root/superuser account.\n\nc) Now type the command:\n\npasswd\n\nd) When prompted enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\ne) CoudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\nf) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n5. Expanding your drive\n--------------------------------\n\nThe system is fully installed and designed to expand to fill the initial size of the drive. After first login you should find that the system has used the full disk size as its partition.\n\n\n6. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n7. 
Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 0003ca60-6b03-4da9-a409-84d8d7afa738 +host 00031836-a624-4b22-bc7d-41ff8977087b +user 00000000-0000-0000-0000-000000000001 +autoexpanding true +write:requests 786433 +name Fedora 13 Linux 64bit Preinstalled System +url http://fedoraproject.org/ +read:bytes 40962080768 +claim:type shared +drive d18119ce-7afa-474a-9242-e0384b160220 +write:bytes 3221229568 +read:requests 10000508 +os linux + +type disk +size 4294967296 +use dbserver,webserver,email,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +favourite true +install_notes ***You must update the default root/superuser password for Debian 5.0 on first login.***\n\nPre-installed Debian 5.0 64bit Linux on 02/08/2010\n========================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n------------------------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. 
\n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n-------------------------------------------------------\n\nThe default accounta are: root and cloudsigma\nThe default passwords for both accounts is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'cloudsigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. 
You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 0003ca60-6b03-4da9-a409-84d8d7afa738 +host 00031836-a624-4b22-bc7d-41ff8977087b +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 1048577 +name Debian 5.0 Preinstalled without X +url www.debian.org +read:bytes 35180666880 +claim:type shared +drive fd49670e-17e8-4b0e-b03e-d6a65c138445 +write:bytes 4294971392 +read:requests 8589030 +os linux + +type disk +size 21474836480 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +description This is a pre-installed ready CentOS system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. +favourite true +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux on 24/07/2010\n============================================================\n\n1. 
Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. 
There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 000431a5-46d9-4a67-9c03-3c3402a41992 +host 00043e69-ac57-45b1-8692-75db24064fb9 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 5242881 +name CentOS 5.5 Linux 64bit Preinstalled System +url http://www.centos.org/ +read:bytes 251925499904 +claim:type shared +drive 1ea7dead-9d52-4e79-9a9b-435db7cc972c +write:bytes 21474840576 +read:requests 61505249 +os linux + +type disk +size 2684354560 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +description This is a pre-installed ready Ubuntu system that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. 
Ubuntu Linux is a complete desktop Linux operating system, freely available with both community and professional support. The Ubuntu community is built on the ideas enshrined in the Ubuntu Manifesto: that software should be available free of charge, that software tools should be usable by people in their local language and despite any disabilities, and that people should have the freedom to customise and alter their software in whatever way they see fit. "Ubuntu" is an ancient African word, meaning "humanity to others". The Ubuntu Linux distribution brings the spirit of Ubuntu to the software world. +favourite true +install_notes ***You must update the default root/superuser password for Ubuntu 10.04 on first login.***\n\nPre-installed Ubuntu 10.04 64bit Linux on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. 
Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: cloudsigma\nThe default root/superuser password is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\nsudo su\n\nWhen prompted use the password 'cloudsigma' (don't type the quote marks). The prompt should change to show you now using the root/superuser account.\n\nc) Now type the command:\n\npasswd\n\nd) When prompted enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\ne) CoudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\nf) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed and designed to expand to fill the initial size of the drive. After first login you should find that the system has used the full disk size as its partition.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. 
We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 00059deb-640a-464e-9509-6a3ec6cfd549 +host 00059836-5512-4ce2-bf66-4daab2d994e4 +user 00000000-0000-0000-0000-000000000001 +autoexpanding true +write:requests 655361 +name Ubuntu Linux 10.04 Desktop 64bit Preinstalled System +url http://www.ubuntu.com/ +read:bytes 24617140224 +claim:type shared +drive 99a75966-209f-41d5-817c-7a3916354540 +write:bytes 2684358656 +read:requests 6010044 +os linux + +type disk +size 8589934592 +use dbserver,webserver,email,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +favourite true +install_notes ***You must update the default root/superuser password for Ubuntu 10.04 on first login.***\n\nPre-installed Ubuntu 10.04 64bit Linux on 01/09/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. 
\n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: cloudsigma\nThe default root/superuser password is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\nsudo su\n\nWhen prompted use the password 'cloudsigma' (don't type the quote marks). The prompt should change to show you now using the root/superuser account.\n\nc) Now type the command:\n\npasswd\n\nd) When prompted enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\ne) CoudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\nf) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed and designed to expand to fill the initial size of the drive. After first login you should find that the system has used the full disk size as its partition.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. 
You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 000848ef-af56-4fe4-b724-5338a9fa6aa6 +host 0008d252-5102-43a0-82c6-18e8e2dd2bff +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 2097153 +name Ubuntu 10.04 Server Edition Linux 64bit +url http://www.ubuntu.com/server +read:bytes 71391387648 +claim:type shared +drive 0b060e09-d98b-44cc-95a4-7e3a22ba1b53 +write:bytes 8589938688 +read:requests 17429538 +os linux + +type disk +size 21474836480 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +description This is a pre-installed ready CentOS system including AppFirst monitoring software that can be deployed to your account instantly. Please refer to the install notes for a full guide to initial configuration. CentOS is an Enterprise-class Linux Distribution derived from sources freely provided to the public by a prominent North American Enterprise Linux vendor. CentOS conforms fully with the upstream vendors redistribution policy and aims to be 100% binary compatible. 
+favourite true +install_notes ***You must update the default root/superuser password for CentOS 5.5 on first login.***\n\nPre-installed CentOS 5.5 64bit Linux with AppFirst Monitoring on 24/07/2010\n============================================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n----------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. Update your root/superuser password\n----------------------------------------------------\n\nThe default account is: root\nThe default root/superuser password is set to: CloudSigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\ne) When prompted enter the password 'CloudSigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nf) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ng) Please note your root/superuser password is different from your VNC password. 
You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs.\n\n\n6. AppFirst\n-------------------------\nThis disk image includes AppFirst monitoring software already installed. This software is able to provide in-depth server and application performance feedback. In order to take advantage of this software you need to have an AppFirst account.\n\nFull details of AppFirst's services including a 14-day free trial are available at http://www.appfirst.com . 
+volume 00106cda-0e17-40c8-a576-b516f0eb67bc +host 00109617-2c6b-424b-9cfa-5b572c17bafe +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 5242881 +name CentOS 5.5 Linux 64bit Preinstalled System with AppFirst Monitoring +read:bytes 838707331072 +claim:type shared +drive c157e1eb-aa9c-4dd7-80b8-6fd4a238f2a9 +write:bytes 21474840576 +read:requests 204762532 +os linux + +type disk +size 8589934592 +use dbserver,webserver,email,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free true +bits 64 +drive_type preinstalled +status active +favourite true +install_notes ***You must update the default root/superuser password for Debian 5.0 on first login.***\n\nPre-installed Debian 5.0 64bit Linux on 02/08/2010\n========================================\n\n1. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements based on user feedback are:\n\nCPU: 1GHz\nRAM: 1GHz\n\nWe recommend specifying a larger size for a better user experience.\n\n\n2. Access your server for the first time using VNC\n------------------------------------------------------------------\nInitially you will need to access your server via VNC to complete the setup. \n\na) Start up your new server.\n\nb) Please use one of the recommended VNC clients, you will find a full list by following the 'Connect to your server via VNC now' link on general tab of the server detail window.\n\nb) Use the IP address and VNC password as provided on the general tab of the server detail window.\n\nc) After a few seconds you should see your server appear.\n\n\n3. 
Update your root/superuser password\n-------------------------------------------------------\n\nThe default accounta are: root and cloudsigma\nThe default passwords for both accounts is set to: cloudsigma .\n\nPlease CHANGE this IMMEDIATELY after first connecting with VNC.\n\na) Open a terminal or use the command line.\n\nb) Type the command:\n\npasswd\n\nc) When prompted enter the password 'cloudsigma' (no quotes should be typed) again and press return. You will then be prompted to enter a new password. We recommend a password of at least 8 characters including upper case and lower case letters as well as numbers.\n\nd) CloudSigma does not have access to your server account passwords. It is imperative that you securely store and manage your own passwords for your servers.\n\ne) Please note your root/superuser password is different from your VNC password. You can change your VNC password from the web console at any time.\n\n\n4. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive if you chose a disk size greater than 20GB. There are a number of tools which you can use to achieve this included GParted, the CD of which is also available in the drives library.\n\n\n4. Enabling Remote Access\n--------------------------------------\n\nVNC access is always enabled in our cloud allowing emergency recovery. We recommend using SSH to access your server for command line actions. By default this is turned off initially for security reasons. You will need to enable and install SSH if you wish to use it for access moving forward.\n\n\n5. Networking\n-------------------\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. 
We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs. +volume 001118bb-dbdb-4ab0-b7db-d4cceb160098 +host 00115b1d-6fe9-40b2-a013-426a6a584ff7 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 2097153 +name Debian 5.0 Preinstalled +url www.debian.org +read:bytes 71179878400 +claim:type shared +drive 9b732c4e-32a3-4369-b5f7-9a0325195baa +write:bytes 8589938688 +read:requests 17377900 +os linux + +type cdrom +claimed 0002c6df-a1d2-4d1d-96f0-f95405a28183:guest:7055acf3-8d9a-4a99-a24f-dda1aaf37944:ide:0:0 00115b1d-6fe9-40b2-a013-426a6a584ff7:guest:0a486768-08c1-419d-ad9c-1c8143df3496:ide:0:0 +size 2248146944 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +bits 64 +email drives@elastichosts.com +drive_type installcd +status active +description - +favourite false +free false +volume 0007aee7-bd5b-4551-9d8f-a958051235a9 +host 00079b57-1b29-4a89-a8d0-1d648fc20804 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Windows Web Server 2008 Trial Install CD +url http://www.microsoft.com +read:bytes 55097581056 +claim:type shared +drive 7aead6d3-c3e6-4940-85c7-f5ee61f6ef2b +write:bytes 0 +read:requests 22364695 +os windows + +type cdrom +claimed 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:6100e29c-708d-4a5b-821b-6a9faa3ba013:ide:0:1 00031836-a624-4b22-bc7d-41ff8977087b:guest:fcde7569-e034-452c-9909-7c485f5d168f:ide:0:0 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:1ac4641e-aa67-47f2-a77d-e9c5982d68b2:ide:0:0 0012c12d-72b1-4dfc-ae0f-aeab09881545:guest:300989f8-da5c-42a6-91f8-97e87b85b748:ide:0:1 
00016115-af87-452b-a3bf-3affc8a7d934:guest:f679b4ba-a4de-4254-90d1-27396aac8712:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:65e59c8b-579b-4977-b60c-b3b7eb404026:ide:0:0 00016115-af87-452b-a3bf-3affc8a7d934:guest:76eaf2fe-554a-4d3f-a3ef-a1214e878793:ide:0:0 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:030cfdda-9c6c-4622-a68c-2e3588fbe828:ide:0:0 00109617-2c6b-424b-9cfa-5b572c17bafe:guest:64a5375a-31cc-414f-9e14-006b5c39b51f:ide:0:0 00059836-5512-4ce2-bf66-4daab2d994e4:guest:83da4fb5-037f-4985-a0f6-f696fa7ff727:ide:0:0 0014dbec-e818-4cf4-b467-d4f0dad10246:guest:90f4a2d3-9b76-4444-a1b2-72bbd06fe3e2:ide:0:0 0002c6df-a1d2-4d1d-96f0-f95405a28183:guest:cbb4ecc9-654f-4410-aeb4-b9ca602faa01:ide:0:0 0008d252-5102-43a0-82c6-18e8e2dd2bff:guest:e7ea14b2-aaa0-48b4-b1ac-7c8351c2edf4:ide:0:0 001318df-35c6-439f-8e72-8d57c36ca86b:guest:67f96fa3-8d41-4f8b-8199-4111617d3150:ide:0:1 000663ee-9fb6-4461-90f6-01327a4aff07:guest:245dd0b0-18eb-4e24-b219-9549bafdea87:ide:0:0 000663ee-9fb6-4461-90f6-01327a4aff07:guest:b52e106f-f14c-4312-8597-bcfedf4b0e70:ide:0:0 +size 2663383040 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free false +bits 64 +email drives@elastichosts.com +drive_type installcd +status active +description - +favourite false +install_notes pass:123456 +volume 0007aee7-bd5b-4551-9d8f-a958051235a9 +host 00079b57-1b29-4a89-a8d0-1d648fc20804 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Windows Server 2008 Trial Install CD +url http://www.microsoft.com/windowsserver2008/en/us/default.aspx +read:bytes 78315713024 +claim:type shared +drive f89af28e-ff00-4fc9-a7ed-22e7fa5a88db +write:bytes 0 +read:requests 32289210 +os windows + +status active +name Gentoo Install Minimal amd64 20100408 +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 437561856 +write:bytes 119558144 +claim:type shared +drive 73162606-78ca-4b0a-8f7a-70aa70563d90 +free none +volume 
00018aab-c080-4ed3-b52f-459933d34ec9 +host 00016115-af87-452b-a3bf-3affc8a7d934 +os linux +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 79760 +claimed 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:d74c8d2b-a169-486c-adbd-89ca50dccafa:ide:0:1 +type cdrom +write:requests 29189 +size 209715200 + +status active +name Peppermint Ice Linux 32bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 1986560 +description Peppermint OS is an Ubuntu-based Linux distribution that aims to be lightning fast and easy on system resources. By employing Mozilla's Prism technology Peppermint integrates seamlessly with Cloud and web-based applications. The distribution's other features include automatic updates, easy step-by-step installation, sleek and user-friendly interface, and increased mobility by integrating directly with Cloud-based applications. +write:bytes 437698560 +claim:type shared +drive f9d92afc-27ff-4139-84c7-ac6655e6f6f1 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00018aab-c080-4ed3-b52f-459933d34ec9 +host 00016115-af87-452b-a3bf-3affc8a7d934 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 485 +free none +type cdrom +write:requests 106860 +size 436207616 + +status active +name Super Gamer Linux 32bit and 64bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 137039872 +description SuperGamer is a live DVD based on VectorLinux, intended to showcase gaming on Linux. The distribution is optimised for a gaming computer environment, with some tweaks to help speed up running from the live DVD. Extra games are added along with some demo editions of proprietary games. All games are native Linux games, but users wishing to run Windows games may install WINE or a related emulator, such as Cedega. +write:bytes 8446324736 +claim:type shared +drive d72701b2-01b9-4ac3-9afa-d0afdb6bcf2f +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00018aab-c080-4ed3-b52f-459933d34ec9 +host 00016115-af87-452b-a3bf-3affc8a7d934 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 33457 +free none +type cdrom +write:requests 2062091 +size 8413773824 + +status active +name ZeroShell 1.3 Linux Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 856064 +description Zeroshell is a Linux distribution for servers and embedded devices aimed at providing the main network services a LAN requires. It is available in the form of Live CD or Compact Flash image and you can configure and administer it using your web browser. +write:bytes 153247744 +claim:type shared +drive 44358ce4-0f30-4e48-86d1-e93330961a8a +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC.\n +volume 00023324-4c49-4567-a017-c85c8a6b8313 +host 0002c6df-a1d2-4d1d-96f0-f95405a28183 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 209 +free none +type cdrom +write:requests 37414 +size 155189248 + +status active +name Astaro Security Gateway Firewall Server 8.0 Linux Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 365871104 +description Astaro offers an integrated software solution that provides superior performance in an all-in-one firewall. Its hardened operating system, stateful packet inspection, content filtering (virus & surf protection), application proxies and IPsec based VPN provides a powerful solution to today's security issues. 
It is designed to maximize your networks security without compromising its performance enabling telecommuters, branch offices, customers and suppliers to safely share critical business information. Our proprietary user interface, WebAdmin allows ease of use and manageability of all open source firewall components, as well as the Up2Date service via the Internet. It is easy to install with all components on one CD achieving simple implementation and integration to existing network environments. +write:bytes 369696768 +claim:type shared +drive 916b0e39-b234-407b-89ab-e8108f05726f +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00023324-4c49-4567-a017-c85c8a6b8313 +host 0002c6df-a1d2-4d1d-96f0-f95405a28183 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 89324 +claimed 000096ce-ff07-413d-912a-aa1a33963802:guest:20911753-98a6-4951-af34-89e157452c84:ide:0:0 00115b1d-6fe9-40b2-a013-426a6a584ff7:guest:75a96f35-c3fd-492a-a48b-34dcd10987d6:ide:0:0 +free none +type cdrom +write:requests 90258 +size 369098752 + +status active +name Chakra 0.2.2 Linux 64bit Install and Live CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 5451776 +description Chakra GNU/Linux is a user-friendly and powerful distribution and live CD based on Arch Linux. 
It features a graphical installer, automatic hardware detection and configuration, the latest KDE desktop, and a variety of tools and extras. +write:bytes 724774912 +claim:type shared +drive 0e8c8ac2-f791-4764-a964-c6d2679ae49a +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend using it as an installation CD and not just a live CD.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 0000acbf-fa0a-44a6-8018-2f106f96a45f +host 000096ce-ff07-413d-912a-aa1a33963802 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1331 +free none +type cdrom +write:requests 176947 +size 721420288 + +status active +name Clonezilla Live 1.2.6 64bit +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 876544 +description Clonezilla Live is a Debian-based live CD containing Clonezilla, a partition and disk cloning software similar to Norton Ghost. It saves and restores only used blocks in hard drive. With Clonezilla, one can clone a 5 GB system to 40 clients in about 10 minutes. +write:bytes 134045696 +claim:type shared +drive cec8330f-59c7-4e20-9577-54df28d598e7 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n3. Start the server.\n4. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n5. You will be able to start using your new server via VNC. +volume 0000acbf-fa0a-44a6-8018-2f106f96a45f +host 000096ce-ff07-413d-912a-aa1a33963802 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 214 +free none +type cdrom +write:requests 32726 +size 134217728 + +status active +name Absolute Linux 13.1.2 Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 93573632 +description Absolute Linux is a light-weight modification of Slackware Linux. It includes several utilities that make configuration and maintenance easier and it has many common desktop and Internet applications installed and configured with tight integration of menus, applications and MIME types. Absolute Linux uses IceWM and ROX for its window and file managers. +write:bytes 728211456 +claim:type shared +drive b745638c-87ff-4836-8623-e48e67286494 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 000431a5-46d9-4a67-9c03-3c3402a41992 +host 00043e69-ac57-45b1-8692-75db24064fb9 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 39232 +free none +type cdrom +write:requests 177786 +size 725614592 + +status active +name Sabayon_Linux_5.4_amd64_K.iso +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 12877824 +write:bytes 2160496640 +claim:type shared +drive 75119285-7c20-43f4-9d3b-e6af3f1823e3 +free none +volume 000431a5-46d9-4a67-9c03-3c3402a41992 +host 00043e69-ac57-45b1-8692-75db24064fb9 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 3144 +type cdrom +write:requests 527465 +size 2151677952 + +status active +name FreeBSD 8.1 Linux 64bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 60035072 +description About FreeBSD FreeBSD is a UN*X-like operating system for the i386, IA-64, PC-98, Alpha/AXP, and UltraSPARC platforms based on U.C. Berkeley's "4.4BSD-Lite" release, with some "4.4BSD-Lite2" enhancements. It is also based indirectly on William Jolitz's port of U.C. Berkeley's "Net/2" to the i386, known as "386BSD", though very little of the 386BSD code remains. FreeBSD is used by companies, Internet Service Providers, researchers, computer professionals, students and home users all over the world in their work, education and recreation. +write:bytes 2315309056 +claim:type shared +drive fb940d5b-b9a0-4f9c-8cb7-94c3378d1676 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. 
You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00059deb-640a-464e-9509-6a3ec6cfd549 +host 00059836-5512-4ce2-bf66-4daab2d994e4 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 14657 +free none +type cdrom +write:requests 565261 +size 2306867200 + +status active +name BackTrack 4 Release 1 Linux Live CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 4008857600 +description A SLAX-based live CD with a comprehensive collection of security and forensics tools BackTrack 4 R1, a Linux-based penetration testing arsenal for security professionals. +write:bytes 2023919616 +claim:type shared +drive ef152c9c-1460-44f5-b192-8e0524909709 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. 
+volume 00059deb-640a-464e-9509-6a3ec6cfd549 +host 00059836-5512-4ce2-bf66-4daab2d994e4 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 978725 +free none +type cdrom +write:requests 494121 +size 2017460224 + +status active +name Vector 6.0 Linux 32bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 3035136 +description VECTORLINUX is a small, fast, Intel Linux operating system based on one of the original Linux distributions, Slackware. The enormously popular Slackware is the true "Unix" of Linux distributions and is used by major corporations, universities and home users alike. It's popularity stems from the fact that it is a robust, versatile and almost unbreakable system. Slackware has been traditionally known to be about as user friendly as a coiled rattlesnake and that's where Vector Linux comes into play. We have produced a bloat free, easy to install, configure and maintain Slackware based system that is second to none. +write:bytes 729059328 +claim:type shared +drive c2a757b9-dfd0-432c-bb29-b380b4dd6fb6 +free none +volume 00059deb-640a-464e-9509-6a3ec6cfd549 +host 00059836-5512-4ce2-bf66-4daab2d994e4 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 741 +type cdrom +write:requests 177993 +size 729808896 + +status active +name PCBSD 8.1 Linux 64bit Install CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 15228928 +description PC-BSD has as its goals to be an easy-to-install-and-use desktop operating system, based on FreeBSD. To accomplish this, it currently has a graphical installation, which will enable even UNIX novices to easily install and get it running. It will also come with KDE pre-built, so that the desktop can be used immediately. Currently in development is a graphical software installation program, which will make installing pre-built software as easy as other popular operating systems. 
+write:bytes 3794726912 +claim:type shared +drive 802fbcab-2723-469c-b775-6fdeb21287da +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 3718 +free none +type cdrom +write:requests 926447 +size 3783262208 + +status active +name nst-2.13.0.x86_64.iso +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 7503872 +write:bytes 1436717056 +claim:type shared +drive 9d04c648-712d-4076-bd99-70088d85fe01 +free none +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 1832 +type cdrom +write:requests 350761 +size 1430257664 + +status active +name Peppermint-Ice-10012010.iso +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 2613248 +write:bytes 452710400 +claim:type shared +drive 2e79eeee-b4ad-4dcf-a072-86dcede6af1b +free none +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 638 +type cdrom +write:requests 110525 +size 452984832 + +status active +name Sabayon_Linux_5.4_amd64_K.iso +readers 
ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 14082048 +write:bytes 2161713152 +claim:type shared +drive 07e2a6df-8389-4130-a003-edacc19a9ee3 +free none +volume 00065289-b9c8-4548-8d83-e1891f831f51 +host 000663ee-9fb6-4461-90f6-01327a4aff07 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 3438 +type cdrom +write:requests 527762 +size 2151677952 + +type cdrom +claimed 00031836-a624-4b22-bc7d-41ff8977087b:guest:ffe02269-b653-47ad-ab21-a02805b24904:ide:0:0 000096ce-ff07-413d-912a-aa1a33963802:guest:1f378a18-1b59-40e7-8e9a-7f81d7eda6b8:ide:0:0 00079b57-1b29-4a89-a8d0-1d648fc20804:guest:8c13b69d-6d11-4151-975b-a2f084c7ada7:ide:0:0 00166b98-6431-40ad-94b0-244881ff87d5:guest:1705b116-aac2-449a-b0de-3dd4ab7e765f:ide:0:0 000932a7-c74f-4de3-bfc4-227435f78998:guest:30d887ee-ed96-4c32-a1a8-5ab49abd2a7e:ide:0:1 00154bbf-adf4-475d-9fe6-71c1e987d1bd:guest:bcea8695-baeb-476e-8089-475ce8948646:ide:0:1 0002c6df-a1d2-4d1d-96f0-f95405a28183:guest:df1368af-05a3-4ad5-8017-54be3ea70232:ide:0:0 00109617-2c6b-424b-9cfa-5b572c17bafe:guest:3569d646-7ae5-410f-b66e-64bba1381cba:ide:0:0 +size 2663383040 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +bits 64 +drive_type installcd +status active +description - +favourite false +free false +volume 0009c669-9ea6-4825-b788-b40902bb1902 +host 000932a7-c74f-4de3-bfc4-227435f78998 +encryption:cipher aes-xts-plain +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 0 +name Windows Server 2008 Trial Install CD +url http://www.microsoft.com/windowsserver2008/en/us/default.aspx +read:bytes 5261708288 +claim:type shared +drive 7e23b099-dd35-446b-8d90-2953643b664f +write:bytes 0 +read:requests 1883649 +os windows + +status active +name Unity Linux 64bit Install and Live CD +readers ffffffff-ffff-ffff-ffff-ffffffffffff +favourite none +read:bytes 147034112 +description The community-oriented Unity Linux is a minimalist distribution and live CD based on Mandriva Linux. 
The project's main goal is to create a base operating system from which more complete, user-oriented distribution can easily be built - either by other distribution projects or by the users themselves. Unity Linux uses Openbox as the default window manager. Its package management is handled via Smart and RPM 5 which can download and install additional software packages from the project's online repository. +write:bytes 290488320 +claim:type shared +drive d235dada-407c-4105-b4ef-636eb7604404 +install_notes CD Installation instructions:\n1. Add this CD to a server that you wish to install this software onto.\n2. Make sure there is a hard drive also attached to the same server on which this software can be installed if you intend to use this as an installation and not live CD only.\n3. Make sure that this CD is selected as the boot drive. You can do this from the 'Drives' tab on the main server detail window.\n4. Start the server.\n5. Connect to the server using VNC. You will find the IP address and password on the 'Summary' tab of the main server detail window and in the server list view.\n6. You will be able to complete the software installation via VNC and start using your new server. +volume 00106cda-0e17-40c8-a576-b516f0eb67bc +host 00109617-2c6b-424b-9cfa-5b572c17bafe +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +read:requests 35897 +free none +type cdrom +write:requests 70920 +size 289406976 + +type disk +licenses msft_p73_04837 msft_tfa_00009 +size 21474836480 +use dbserver,general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free false +bits 64 +drive_type preinstalled +status active +description Please refer to the install notes for a full guide to initial configuration. 
+favourite false +install_notes ***You must update the default Administrator password for Windows Server Standard 2008 and the Super Administrator password (sa) for SQL Server Standard 2008***\n\nPre-installed Windows Server 2008 Standard R2 64bit English on 15/07/2010\n=========================================================================\n\n1. Minimum Hardware Requirements\n--------------------------------\n\nThe recommended minimum hardware requirements for the use of SQL Server Standard 2008 R2 with Windows Server Standard 2008 R2 as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/library/ms143506.aspx\n\n\n2. Update your administrator password\n-------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n3. Expanding your drive\n-----------------------\n\nThe system is fully installed, but you will need to extend the\ndisk partition to cover the whole of your drive. To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n4. 
Enabling Remote Access\n-------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection according to your Security Configuration\n\n\n5. Pinging Service\n------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules"\n\n\nSQL Server 2008 R2 on 15/07/2010\n================================\n\n1. Change the Super Administrator Password (sa). \n--------------------------------------------------------------------\n\nThe default password has been set to "CloudSigma1"\n\na) Open "Microsoft SQL Server Management Studio"\n\nb) Connect to the Server Using "Windows Indentificaiton"\n\nc) From the Object Explorer select "Security" then "Longins"\n\nd) Right-click on sa and select "Properties"\n\ne) Enter the new password into "Password" and "Confirm Password" and press "OK"\n\n\n2. 
The following features were installed:\n-----------------------------------------------------\n\na) Main features\n\n-Database Engine Services\n-SQL Server Replication\n-Full-Text Search\n-Analysis Services\n-Reporting Services\n\nb) Shared Features\n\n-Business Intelligengce Development Studio\n-Client Tools Connectivity\n-Integration Services\n-Clinet Tools Backwards Compatibility\n-Clinet Tools SDK\n-SQL Server Books Online\n-Mangement Tools - Basic\n-Management Tools - Complete\n-SQL Client Connectivity SDK\n-Microsoft Sync Framework\n\n3 The following services were configured:\n--------------------------------------------------------\n\n\nService: SQL Server Agent\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Manual\n\nService: SQL Server Database Engine\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Automatic\n\nService: SQL Server Analysis Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Reporting Services\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n\nService: SQL Server Integration Services 10.1\nUser: NT AUTHORITY\NETWORK SERVICE\nStartup Type: Disabled\n \nService: SQL Full-text filter Daemon Lanuch\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nService: SQL Server Browser\nUser: NT AUTHORITY\LOCALSERVICE\nStartup Type: Disabled\n\nFor detailed server installation configuration refer to the following installation log files on the system:\nC:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log\20100716_162426\Summary_WIN-K0F21FV1C1V_20100716_162426.txt\n +volume 00023324-4c49-4567-a017-c85c8a6b8313 +host 0002c6df-a1d2-4d1d-96f0-f95405a28183 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 5242881 +name SQL Server Standard 2008 R2 - Windows Server Standard 2008 R2 - 64bit English pub +url http://www.microsoft.com/sqlserver/2008/en/us/ +read:bytes 49172439040 +claim:type shared +drive 7b013f8c-dd4c-4701-b1ca-936506dc37ca +write:bytes 
21474840576 +read:requests 12004990 +os windows + +type disk +licenses msft_lwa_00135 +size 13958643712 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free false +bits 64 +drive_type preinstalled +status active +description Please refer to the install notes for a full guide to initial configuration. +favourite false +install_notes ***You must update the default Administrator password for Windows Web Server 2008***\n\nPre-installed Windows Server 2008 Web R2 64bit English on 24/07/2010\n============================================================\n\n1. Connecting to your server via VNC\n--------------------------------------------------\n\na) Having installed a compatible VNC client, open a VNC connection to your server.\n\nb) Enter your IP address and VNC password as displayed on your Server Summary Window.\n\nc) Start to configure your server.\n\n\n2. Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/windowsserver/cc196364.aspx\n\nWe recommend specifying a higher level of RAM for a better user experience.\n\n\n3. Update your administrator password\n----------------------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" from the "Start" menu and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n4. Configuring your Networking\n------------------------------------------\n\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. 
Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs.\n\n\n5. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive. To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n6. Enabling Remote Access\n--------------------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection type according to your Security Configuration\n\n\n7. 
Pinging Service\n-------------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules" +volume 00120946-d7a4-486e-867e-8348bebe0b95 +host 0012c12d-72b1-4dfc-ae0f-aeab09881545 +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 3407873 +name Windows Server Web 2008 R2 64bit English +url http://www.microsoft.com/windowsserver2008/en/us/default.aspx +read:bytes 145252270080 +claim:type shared +drive 71697799-c611-41b9-93be-f79152aefbe5 +write:bytes 13958647808 +read:requests 35461980 +os windows + +type disk +licenses msft_p73_04837 +size 13958643712 +use general +readers ffffffff-ffff-ffff-ffff-ffffffffffff +free false +bits 64 +drive_type preinstalled +status active +description Please refer to the install notes for a full guide to initial configuration. +favourite false +install_notes ***You must update the default Administrator password for Windows Server Standard 2008***\n\nPre-installed Windows Server 2008 Standard R2 64bit English on 24/07/2010\n============================================================\n\n1. Connecting to your server via VNC\n--------------------------------------------------\n\na) Having installed a compatible VNC client, open a VNC connection to your server.\n\nb) Enter your IP address and VNC password as displayed on your Server Summary Window.\n\nc) Start to configure your server.\n\n\n2. 
Minimum Hardware Requirements\n--------------------------------------------------\n\nThe recommended minimum hardware requirements as publishes by Microsoft can be found through the following link:\n\nhttp://msdn.microsoft.com/en-us/windowsserver/cc196364.aspx\n\nWe recommend specifying a higher level of RAM for a better user experience.\n\n\n3. Update your administrator password\n----------------------------------------------------\n\nThe default administrator password is set to: CloudSigma1\n\nPlease CHANGE this IMMEDIATELY after first logging on.\n\na) Open the "Control Panel" from the "Start" menu and select "User Accounts"\n\nb) Select "Change your Windows Password"\n\nc) The Administrator user icon will be shown on the right, select again "Change your Password", and on this screen update your details accordingly\n\n\n4. Configuring your Networking\n------------------------------------------\n\na) IP networking can be left as DHCP. Even if you are using a static IP address with this server our management layer will apply the same IP address via DHCP. Using DHCP ensures you will have no issues receiving the correct networking settings. We recommend using DHCP initially if only to receive all the correct networking settings.\n\nb) CloudSigma employs an open networking infrastructure. All ports are open for inbound and outbound traffic with the exception of port 25 (SMTP) which is blocked for trial users.\n\nIt is important that you secure access to your server according to your needs.\n\n\n5. Expanding your drive\n--------------------------------\n\nThe system is fully installed, but you will need to extend the disk partition to cover the whole of your drive. 
To do this:\n\na) Open the "Computer Management" tool from "Administrative Tools" on the "Start" menu.\n\nb) Select "Storage" then "Disk Management" in the left hand pane\n\nc) Right-click on the 12.90 GB NTFS primary partition, and select "Extend Volume"\n\nd) Enter the amount of disk space that you want to increase the Volume size by (the default will be the maximum available)\n\nYou will need to repeat this procedure if you ever resize this drive in the future.\n\n\n6. Enabling Remote Access\n--------------------------------------\n\nAfter logging in to VNC for the first time to configure your new Windows server, we recommend that if you are logging in from a Windows Desktop that you enable Remote Desktop for better access performance. To do this, follow these instructions:\n\na) Select "Start" | "Applications" | "Control Panel"\n\nb) Select "System and Security"\n\nc) Under "System" select "Allow Remote Access"\n\nd) Select the corresponding connection type according to your Security Configuration\n\n\n7. 
Pinging Service\n-------------------------\n\nThe Pinging service has been turned on by default, if you wish to disable it please follow these instructions:\n\na) Select the "Windows Firewall with Advanced Security" tool from "Administrative Tools" on the "Start" menu.\n\nb) On the left hand pane, select "Inbound Rules"\n\nc) On the middle pane, select the rules "File and Printer Sharing (Echo Request - ICMPv4-In)" and "File and Printer Sharing (Echo Request - ICMPv6-In)"\n\nd) From the right-hand pane, select "Disable Rules" +volume 0013fc75-b170-4d62-abaf-804b8fc466cc +host 001318df-35c6-439f-8e72-8d57c36ca86b +user 00000000-0000-0000-0000-000000000001 +autoexpanding false +write:requests 3407873 +name Windows Server Standard 2008 R2 64bit English +url http://www.microsoft.com/windowsserver2008/en/us/default.aspx +read:bytes 257073537024 +claim:type shared +drive 0611be3f-0607-4b3c-8bad-a0af392d928a +write:bytes 13958647808 +read:requests 62762094 +os windows diff --git a/trunk/test/compute/fixtures/cloudsigma/resources_ip_create.txt b/trunk/test/compute/fixtures/cloudsigma/resources_ip_create.txt new file mode 100644 index 0000000000..89db213e3d --- /dev/null +++ b/trunk/test/compute/fixtures/cloudsigma/resources_ip_create.txt @@ -0,0 +1,13 @@ +resource 1.2.3.4 +netmask 255.255.255.0 +nameserver 91.203.56.1 +user f2e19d5c-eaa1-44e5-94aa-dc194594bd7b +type ip +gateway 91.203.56.1 + +resource 1.2.3.5 +netmask 255.255.255.0 +nameserver 91.203.56.1 +user f2e19d5c-eaa1-44e5-94aa-dc194594bd7b +type ip +gateway 91.203.56.1 diff --git a/trunk/test/compute/fixtures/cloudsigma/resources_ip_list.txt b/trunk/test/compute/fixtures/cloudsigma/resources_ip_list.txt new file mode 100644 index 0000000000..73435077e0 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudsigma/resources_ip_list.txt @@ -0,0 +1,3 @@ +ip 1.2.3.4 +ip 1.2.3.5 +ip 1.2.3.6 diff --git a/trunk/test/compute/fixtures/cloudsigma/servers_create.txt b/trunk/test/compute/fixtures/cloudsigma/servers_create.txt new 
file mode 100644 index 0000000000..95b8e14369 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudsigma/servers_create.txt @@ -0,0 +1,26 @@ +ide:0:0:write:requests 466 +rx 760681 +vnc:password testpass +ide:0:0 f0202f1c-0b4f-4cfc-8ae3-e30951d09ef0 +ide:0:0:read:requests 7467 +ide:0:0:read:bytes 165395968 +vnc:ip 178.22.66.28 +tx:packets 32 +tx 2568 +boot ide:0:0 +smp 1 +started 1286568422 +nic:0:model virtio +status active +mem 640 +rx:packets 12662 +user 93b34fd9-7986-4b25-8bfd-98a50383605d +ide:0:0:media disk +name cloudsigma node +persistent true +nic:0:block tcp/22 tcp/23 tcp/25 tcp/43594 tcp/43595 +server 62fe7cde-4fb9-4c63-bd8c-e757930066a0 +nic:0:dhcp 1.2.3.4 +nic:1:dhcp 1.2.3.5 +ide:0:0:write:bytes 7358464 +cpu 1100 diff --git a/trunk/test/compute/fixtures/cloudsigma/servers_info.txt b/trunk/test/compute/fixtures/cloudsigma/servers_info.txt new file mode 100644 index 0000000000..95b8e14369 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudsigma/servers_info.txt @@ -0,0 +1,26 @@ +ide:0:0:write:requests 466 +rx 760681 +vnc:password testpass +ide:0:0 f0202f1c-0b4f-4cfc-8ae3-e30951d09ef0 +ide:0:0:read:requests 7467 +ide:0:0:read:bytes 165395968 +vnc:ip 178.22.66.28 +tx:packets 32 +tx 2568 +boot ide:0:0 +smp 1 +started 1286568422 +nic:0:model virtio +status active +mem 640 +rx:packets 12662 +user 93b34fd9-7986-4b25-8bfd-98a50383605d +ide:0:0:media disk +name cloudsigma node +persistent true +nic:0:block tcp/22 tcp/23 tcp/25 tcp/43594 tcp/43595 +server 62fe7cde-4fb9-4c63-bd8c-e757930066a0 +nic:0:dhcp 1.2.3.4 +nic:1:dhcp 1.2.3.5 +ide:0:0:write:bytes 7358464 +cpu 1100 diff --git a/trunk/test/compute/fixtures/cloudsigma/servers_set.txt b/trunk/test/compute/fixtures/cloudsigma/servers_set.txt new file mode 100644 index 0000000000..095a88c3e9 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudsigma/servers_set.txt @@ -0,0 +1,26 @@ +ide:0:0:write:requests 466 +rx 760681 +vnc:password testpass +ide:0:0 f0202f1c-0b4f-4cfc-8ae3-e30951d09ef0 +ide:0:0:read:requests 
7467 +ide:0:0:read:bytes 165395968 +vnc:ip 178.22.66.28 +tx:packets 32 +tx 2568 +boot ide:0:0 +smp 2 +started 1286568422 +nic:0:model virtio +status active +mem 640 +rx:packets 12662 +user 93b34fd9-7986-4b25-8bfd-98a50383605d +ide:0:0:media disk +name cloudsigma node +persistent true +nic:0:block tcp/22 tcp/23 tcp/25 tcp/43594 tcp/43595 +server 62fe7cde-4fb9-4c63-bd8c-e757930066a0 +nic:0:dhcp 1.2.3.4 +nic:1:dhcp 1.2.3.5 +ide:0:0:write:bytes 7358464 +cpu 1100 diff --git a/trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json b/trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json new file mode 100644 index 0000000000..5fbec216b1 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_default.json @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":17164,"id":2602} } diff --git a/trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json b/trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json new file mode 100644 index 0000000000..2162fe1a0a --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail.json @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"errorcode" : 431, "errortext" : "Unable to find service offering: 104"} } diff --git a/trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json b/trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json new file mode 100644 index 0000000000..05790d2f42 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/deployVirtualMachine_deployfail2.json @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":17177,"id":2602} } diff --git a/trunk/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json b/trunk/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json new file mode 100644 index 0000000000..dfd664f9ea --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/destroyVirtualMachine_default.json @@ -0,0 +1 @@ 
+{ "destroyvirtualmachineresponse" : {"jobid":17166} } diff --git a/trunk/test/compute/fixtures/cloudstack/listNetworks_default.json b/trunk/test/compute/fixtures/cloudstack/listNetworks_default.json new file mode 100644 index 0000000000..270134783f --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listNetworks_default.json @@ -0,0 +1 @@ +{ "listnetworksresponse" : { "network" : [ {"id":860,"name":"Virtual Network","displaytext":"A dedicated virtualized network for your account. The broadcast domain is contained within a VLAN and all public network access is routed out by a virtual router.","broadcastdomaintype":"Vlan","traffictype":"Guest","zoneid":1,"networkofferingid":6,"networkofferingname":"DefaultVirtualizedNetworkOffering","networkofferingdisplaytext":"Virtual Vlan","networkofferingavailability":"Required","isshared":false,"issystem":false,"state":"Implemented","related":860,"broadcasturi":"vlan://1459","dns1":"1.1.1.1","dns2":"1.1.1.2","type":"Virtual","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","isdefault":true,"service":[{"name":"Gateway"},{"name":"Firewall","capability":[{"name":"MultipleIps","value":"true"},{"name":"TrafficStatistics","value":"per public ip"},{"name":"StaticNat","value":"true"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedSourceNatTypes","value":"per account"}]},{"name":"UserData"},{"name":"Dns"},{"name":"Dhcp"},{"name":"Lb","capability":[{"name":"TrafficStatistics","value":"per public ip"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedLbAlgorithms","value":"roundrobin,leastconn"}]}],"networkdomain":"cs363local","securitygroupenabled":false} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/listNetworks_deployfail.json b/trunk/test/compute/fixtures/cloudstack/listNetworks_deployfail.json new file mode 100644 index 0000000000..270134783f --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listNetworks_deployfail.json @@ -0,0 +1 @@ +{ 
"listnetworksresponse" : { "network" : [ {"id":860,"name":"Virtual Network","displaytext":"A dedicated virtualized network for your account. The broadcast domain is contained within a VLAN and all public network access is routed out by a virtual router.","broadcastdomaintype":"Vlan","traffictype":"Guest","zoneid":1,"networkofferingid":6,"networkofferingname":"DefaultVirtualizedNetworkOffering","networkofferingdisplaytext":"Virtual Vlan","networkofferingavailability":"Required","isshared":false,"issystem":false,"state":"Implemented","related":860,"broadcasturi":"vlan://1459","dns1":"1.1.1.1","dns2":"1.1.1.2","type":"Virtual","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","isdefault":true,"service":[{"name":"Gateway"},{"name":"Firewall","capability":[{"name":"MultipleIps","value":"true"},{"name":"TrafficStatistics","value":"per public ip"},{"name":"StaticNat","value":"true"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedSourceNatTypes","value":"per account"}]},{"name":"UserData"},{"name":"Dns"},{"name":"Dhcp"},{"name":"Lb","capability":[{"name":"TrafficStatistics","value":"per public ip"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedLbAlgorithms","value":"roundrobin,leastconn"}]}],"networkdomain":"cs363local","securitygroupenabled":false} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json b/trunk/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json new file mode 100644 index 0000000000..270134783f --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listNetworks_deployfail2.json @@ -0,0 +1 @@ +{ "listnetworksresponse" : { "network" : [ {"id":860,"name":"Virtual Network","displaytext":"A dedicated virtualized network for your account. 
The broadcast domain is contained within a VLAN and all public network access is routed out by a virtual router.","broadcastdomaintype":"Vlan","traffictype":"Guest","zoneid":1,"networkofferingid":6,"networkofferingname":"DefaultVirtualizedNetworkOffering","networkofferingdisplaytext":"Virtual Vlan","networkofferingavailability":"Required","isshared":false,"issystem":false,"state":"Implemented","related":860,"broadcasturi":"vlan://1459","dns1":"1.1.1.1","dns2":"1.1.1.2","type":"Virtual","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","isdefault":true,"service":[{"name":"Gateway"},{"name":"Firewall","capability":[{"name":"MultipleIps","value":"true"},{"name":"TrafficStatistics","value":"per public ip"},{"name":"StaticNat","value":"true"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedSourceNatTypes","value":"per account"}]},{"name":"UserData"},{"name":"Dns"},{"name":"Dhcp"},{"name":"Lb","capability":[{"name":"TrafficStatistics","value":"per public ip"},{"name":"SupportedProtocols","value":"tcp,udp"},{"name":"SupportedLbAlgorithms","value":"roundrobin,leastconn"}]}],"networkdomain":"cs363local","securitygroupenabled":false} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json b/trunk/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json new file mode 100644 index 0000000000..b9b8527df5 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listPublicIpAddresses_default.json @@ -0,0 +1 @@ +{ "listpublicipaddressesresponse" : { "publicipaddress" : [ {"id":34000,"ipaddress":"1.1.1.49","allocated":"2011-06-23T05:20:39+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, 
{"id":33999,"ipaddress":"1.1.1.48","allocated":"2011-06-23T05:20:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33998,"ipaddress":"1.1.1.47","allocated":"2011-06-23T05:20:30+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33970,"ipaddress":"1.1.1.19","allocated":"2011-06-20T04:08:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":true,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/listServiceOfferings_default.json b/trunk/test/compute/fixtures/cloudstack/listServiceOfferings_default.json new file mode 100644 index 0000000000..a56d8906ba --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listServiceOfferings_default.json @@ -0,0 +1 @@ +{ "listserviceofferingsresponse" : { "serviceoffering" : [ {"id":105,"name":"Compute Micro PRD","displaytext":"1CPU, 384MB, 80GB HDD","cpunumber":1,"cpuspeed":1200,"memory":384,"created":"2011-06-01T03:38:05+0000","storagetype":"shared","offerha":false,"domainid":14,"domain":"AA000062"}, {"id":70,"name":"Compute XLarge PRD","displaytext":"8CPU, 13.6GB RAM, 160GB Storage","cpunumber":8,"cpuspeed":1200,"memory":13928,"created":"2011-02-08T07:06:19+0000","storagetype":"shared","offerha":true,"domainid":14,"domain":"AA000062"}, {"id":69,"name":"Compute Large PRD","displaytext":"4CPU, 6.8GB RAM, 160GB 
Storage","cpunumber":4,"cpuspeed":1200,"memory":6964,"created":"2011-02-08T07:05:47+0000","storagetype":"shared","offerha":true,"domainid":14,"domain":"AA000062"}, {"id":68,"name":"Compute Medium PRD","displaytext":"2CPU, 3.4GB RAM, 160GB Storage","cpunumber":2,"cpuspeed":1200,"memory":3484,"created":"2011-02-08T07:05:03+0000","storagetype":"shared","offerha":true,"domainid":14,"domain":"AA000062"}, {"id":67,"name":"Compute Small PRD","displaytext":"1CPU, 1.7GB RAM, 160GB Storage","cpunumber":1,"cpuspeed":1200,"memory":1744,"created":"2011-02-08T07:03:44+0000","storagetype":"shared","offerha":true,"domainid":14,"domain":"AA000062"} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/listTemplates_default.json b/trunk/test/compute/fixtures/cloudstack/listTemplates_default.json new file mode 100644 index 0000000000..b7f99007ec --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listTemplates_default.json @@ -0,0 +1 @@ +{ "listtemplatesresponse" : { "template" : [ {"id":576,"name":"ESX[beta] Ubuntu 10.04.2 CHEF Small \\ Micro Optimised","displaytext":"ESX[beta] Ubuntu 10.04.2 CHEF Small \\ Micro Optimised","ispublic":true,"created":"2011-06-01T01:25:12+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":126,"ostypename":"Ubuntu 10.04 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":702743552,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":443,"name":"XEN Basic Windows Svr 2008 R2 x64 R2.1","displaytext":"XEN Basic Windows Svr 2008 R2 x64 R2.1","ispublic":true,"created":"2011-03-25T01:29:46+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":54,"ostypename":"Windows Server 2008 R2 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171798691840,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, 
{"id":474,"name":"XEN Basic Windows Svr 2003 SP2 STD","displaytext":"XEN Basic Windows Svr 2003 SP2 STD","ispublic":true,"created":"2011-04-07T10:38:45+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":89,"ostypename":"Windows Server 2003 Standard Edition(32-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171798691840,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":444,"name":"ESX[beta] Windows 2003 x32 R2.0","displaytext":"ESX[beta] Windows 2003 x32 R2.0","ispublic":true,"created":"2011-03-25T01:34:00+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":89,"ostypename":"Windows Server 2003 Standard Edition(32-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":876909056,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":447,"name":"ESX[beta] Windows 2008 x32 R2.0","displaytext":"ESX[beta] Windows 2008 x32 R2.0","ispublic":true,"created":"2011-03-25T01:45:23+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":52,"ostypename":"Windows Server 2008 (32-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":3391547904,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":462,"name":"ESX[beta] Centos 5.5 x64 R2.0","displaytext":"ESX[beta] Centos 5.5 x64 R2.0","ispublic":true,"created":"2011-03-28T05:06:36+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":2263178240,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":425,"name":"XEN Windows 2008 x32 R2.0","displaytext":"XEN Windows 2008 x32 
R2.0","ispublic":true,"created":"2011-03-22T03:22:21+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":52,"ostypename":"Windows Server 2008 (32-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171798691840,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":461,"name":"ESX[beta] Basic Windows 2008 R2 x64","displaytext":"ESX[beta] Basic Windows 2008 R2 x64","ispublic":true,"created":"2011-03-26T22:48:48+0000","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":true,"crossZones":false,"ostypeid":54,"ostypename":"Windows Server 2008 R2 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":3230146048,"templatetype":"USER","hypervisor":"VMware","domain":"ROOT","domainid":1,"isextractable":false}, {"id":575,"name":"Xen Ubuntu 10.04.2 CHEF Small \\ Micro Optimised","displaytext":"Xen Ubuntu 10.04.2 CHEF Small \\ Micro Optimised","ispublic":true,"created":"2011-06-01T01:06:21+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":85899345920,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":481,"name":"XEN Centos 5.4 x64 R2.0","displaytext":"XEN Centos 5.4 x64 R2.0","ispublic":true,"created":"2011-04-14T01:43:49+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171966464000,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":421,"name":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","displaytext":"XEN Basic Ubuntu 10.04 Server x64 PV 
r2.0","ispublic":true,"created":"2011-03-22T02:54:06+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":167772160000,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":423,"name":"XEN Basic Centos 5.5 x64 PV r2.2","displaytext":"XEN Basic Centos 5.5 x64 PV r2.2","ispublic":true,"created":"2011-03-22T02:59:31+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":167772160000,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false}, {"id":422,"name":"XEN OpenSUSE x64 11.4 R2.0","displaytext":"XEN OpenSUSE x64 11.4 R2.0","ispublic":true,"created":"2011-03-22T02:58:25+0000","isready":true,"passwordenabled":false,"format":"VHD","isfeatured":true,"crossZones":false,"ostypeid":12,"ostypename":"CentOS 5.3 (64-bit)","account":"admin","zoneid":1,"zonename":"Sydney","size":171966464000,"templatetype":"USER","hypervisor":"XenServer","domain":"ROOT","domainid":1,"isextractable":false} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/listVirtualMachines_default.json b/trunk/test/compute/fixtures/cloudstack/listVirtualMachines_default.json new file mode 100644 index 0000000000..d55c436b2e --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listVirtualMachines_default.json @@ -0,0 +1 @@ +{ "listvirtualmachinesresponse" : { "virtualmachine" : [ {"id":2600,"name":"test","displayname":"test","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:06:42+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 
10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"1.78%","networkkbsread":2,"networkkbswrite":2,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3891,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.2.1","ipaddress":"1.1.1.116","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}, {"id":2601,"name":"test","displayname":"test","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:09:44+0000","state":"Starting","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"jobid":17147,"jobstatus":0,"nic":[{"id":3892,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.2.1","ipaddress":"1.1.1.203","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/listZones_default.json b/trunk/test/compute/fixtures/cloudstack/listZones_default.json new file mode 100644 index 0000000000..0316936e45 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listZones_default.json @@ -0,0 +1 @@ +{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/listZones_deployfail.json b/trunk/test/compute/fixtures/cloudstack/listZones_deployfail.json new file mode 100644 index 0000000000..0316936e45 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listZones_deployfail.json @@ -0,0 +1 @@ +{ 
"listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/listZones_deployfail2.json b/trunk/test/compute/fixtures/cloudstack/listZones_deployfail2.json new file mode 100644 index 0000000000..0316936e45 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/listZones_deployfail2.json @@ -0,0 +1 @@ +{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff --git a/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json b/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json new file mode 100644 index 0000000000..31fd39e97c --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17164.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17164,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff --git a/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json b/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json new file mode 100644 index 0000000000..694d7d032e --- /dev/null +++ 
b/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17165.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17165,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff --git a/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json b/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json new file mode 100644 index 0000000000..5245af2d3d --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17166.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17166,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":2602,"name":"fred","displayname":"fred","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-23T05:48:31+0000","state":"Destroyed","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro 
PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.13%","networkkbsread":2,"networkkbswrite":1,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3893,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.1.2","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}}} } diff --git a/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json b/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json new file mode 100644 index 0000000000..7b743c54c8 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/queryAsyncJobResult_17177.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17177,"jobstatus":2} } diff --git a/trunk/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json b/trunk/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json new file mode 100644 index 0000000000..07cdba36a1 --- /dev/null +++ b/trunk/test/compute/fixtures/cloudstack/rebootVirtualMachine_default.json @@ -0,0 +1 @@ +{ "rebootvirtualmachineresponse" : {"jobid":17165} } diff --git a/trunk/test/compute/fixtures/ec2/create_tags.xml b/trunk/test/compute/fixtures/ec2/create_tags.xml new file mode 100644 index 0000000000..0419dcb716 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/create_tags.xml @@ -0,0 +1,4 @@ + + b001068a-ca0d-4f05-b622-28fe984f44be + true + diff --git a/trunk/test/compute/fixtures/ec2/delete_tags.xml b/trunk/test/compute/fixtures/ec2/delete_tags.xml new file mode 100644 index 0000000000..f3ea960c91 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/delete_tags.xml @@ -0,0 +1,4 @@ + + 7a297da7-3ecb-4156-8bcb-3be73896cc14 + true + diff --git a/trunk/test/compute/fixtures/ec2/describe_addresses.xml b/trunk/test/compute/fixtures/ec2/describe_addresses.xml new file mode 100644 index 0000000000..cdbf64b24e --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/describe_addresses.xml @@ -0,0 +1,9 @@ + + 
59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + 1.2.3.4 + i-4382922a + + + diff --git a/trunk/test/compute/fixtures/ec2/describe_addresses_multi.xml b/trunk/test/compute/fixtures/ec2/describe_addresses_multi.xml new file mode 100644 index 0000000000..9f183233df --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/describe_addresses_multi.xml @@ -0,0 +1,17 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + 1.2.3.4 + i-4382922a + + + 1.2.3.6 + i-4382922b + + + 1.2.3.5 + i-4382922b + + + diff --git a/trunk/test/compute/fixtures/ec2/describe_addresses_single.xml b/trunk/test/compute/fixtures/ec2/describe_addresses_single.xml new file mode 100644 index 0000000000..68f9cf8cb9 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/describe_addresses_single.xml @@ -0,0 +1,9 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + 1.2.3.4 + i-4382922a + + + diff --git a/trunk/test/compute/fixtures/ec2/describe_availability_zones.xml b/trunk/test/compute/fixtures/ec2/describe_availability_zones.xml new file mode 100644 index 0000000000..dc77d152a2 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/describe_availability_zones.xml @@ -0,0 +1,17 @@ + + cc0dfb29-efef-451c-974f-341b3edfb28f + + + eu-west-1a + available + eu-west-1 + + + + eu-west-1b + available + eu-west-1 + + + + diff --git a/trunk/test/compute/fixtures/ec2/describe_images.xml b/trunk/test/compute/fixtures/ec2/describe_images.xml new file mode 100644 index 0000000000..34db6abab4 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/describe_images.xml @@ -0,0 +1,16 @@ + + + + ami-be3adfd7 + ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml + available + 206029621532 + false + i386 + machine + aki-4438dd2d + ari-4538dd2c + + + + diff --git a/trunk/test/compute/fixtures/ec2/describe_instances.xml b/trunk/test/compute/fixtures/ec2/describe_instances.xml new file mode 100644 index 0000000000..0fe77bed25 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/describe_instances.xml @@ -0,0 +1,72 @@ + + 
56d0fffa-8819-4658-bdd7-548f143a86d2 + + + r-07adf66e + 822272953071 + + + default + + + + + i-4382922a + ami-0d57b264 + + 0 + pending + + + + + 1.2.3.5 + 1.2.3.5 + 0 + + m1.small + 2009-08-07T05:47:04.000Z + + us-east-1a + + + disabled + + + + i-8474834a + ami-0f234b234 + + 0 + pending + + + + + 1.2.3.5 + 1.2.3.5 + 0 + + m1.micro + 2009-08-07T05:47:04.000Z + + us-west-1a + + + disabled + + + + user_key0 + user_val0 + + + user_key1 + user_val1 + + + + + + + diff --git a/trunk/test/compute/fixtures/ec2/describe_instances_with_tags.xml b/trunk/test/compute/fixtures/ec2/describe_instances_with_tags.xml new file mode 100644 index 0000000000..61ab584c34 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/describe_instances_with_tags.xml @@ -0,0 +1,53 @@ + + 56d0fffa-8819-4658-bdd7-548f143a86d2 + + + r-07adf66e + 822272953071 + + + default + + + + + i-8474834a + ami-0f234b234 + + 0 + pending + + + + + 1.2.3.5 + 1.2.3.5 + 0 + + m1.micro + 2009-08-07T05:47:04.000Z + + us-west-1a + + + disabled + + + + Name + foobar1 + + + user_key1 + user_val1 + + + user_key2 + user_val2 + + + + + + + diff --git a/trunk/test/compute/fixtures/ec2/describe_tags.xml b/trunk/test/compute/fixtures/ec2/describe_tags.xml new file mode 100644 index 0000000000..37216d9456 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/describe_tags.xml @@ -0,0 +1,23 @@ + + fa7e0e44-df5e-49a0-98d7-5d4d19a29f95 + + + i-4382922a + instance + tag + test one + + + i-4382922a + instance + owner + libcloud + + + i-4382922a + instance + stack + Production + + + diff --git a/trunk/test/compute/fixtures/ec2/modify_instance_attribute.xml b/trunk/test/compute/fixtures/ec2/modify_instance_attribute.xml new file mode 100644 index 0000000000..333c76515f --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/modify_instance_attribute.xml @@ -0,0 +1,4 @@ + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + diff --git a/trunk/test/compute/fixtures/ec2/reboot_instances.xml b/trunk/test/compute/fixtures/ec2/reboot_instances.xml new 
file mode 100644 index 0000000000..1019d1b6f9 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/reboot_instances.xml @@ -0,0 +1,4 @@ + + 76dabb7a-fb39-4ed1-b5e0-31a4a0fdf5c0 + true + diff --git a/trunk/test/compute/fixtures/ec2/run_instances.xml b/trunk/test/compute/fixtures/ec2/run_instances.xml new file mode 100644 index 0000000000..df14e59c3c --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/run_instances.xml @@ -0,0 +1,31 @@ + + r-47a5402e + AIDADH4IGTRXXKCD + + + default + + + + + i-2ba64342 + ami-be3adfd7 + + 0 + pending + + + + example-key-name + 0 + m1.small + 2007-08-07T11:51:50.000Z + + us-east-1b + + + true + + + + diff --git a/trunk/test/compute/fixtures/ec2/run_instances_idem.xml b/trunk/test/compute/fixtures/ec2/run_instances_idem.xml new file mode 100644 index 0000000000..255b24021d --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/run_instances_idem.xml @@ -0,0 +1,32 @@ + + r-47a5402e + AIDADH4IGTRXXKCD + + + default + + + + + i-2ba64342 + ami-be3adfd7 + + 0 + pending + + + + example-key-name + 0 + m1.small + 2007-08-07T11:51:50.000Z + + us-east-1b + + + true + + testclienttoken + + + diff --git a/trunk/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml b/trunk/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml new file mode 100644 index 0000000000..9b704e7b90 --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/run_instances_idem_mismatch.xml @@ -0,0 +1,12 @@ + + + + + IdempotentParameterMismatch + + Arguments on this idempotent request are inconsistent with arguments used in previous request(s). 
+ + + + 5dabd361-d2e0-4f79-937d-4b2852a3b719 + diff --git a/trunk/test/compute/fixtures/ec2/terminate_instances.xml b/trunk/test/compute/fixtures/ec2/terminate_instances.xml new file mode 100644 index 0000000000..346dbb09dd --- /dev/null +++ b/trunk/test/compute/fixtures/ec2/terminate_instances.xml @@ -0,0 +1,16 @@ + + fa63083d-e0f7-4933-b31a-f266643bdee8 + + + i-4382922a + + 32 + shutting-down + + + 16 + running + + + + diff --git a/trunk/test/compute/fixtures/ecp/htemplate_list.json b/trunk/test/compute/fixtures/ecp/htemplate_list.json new file mode 100644 index 0000000000..cffa9e3b78 --- /dev/null +++ b/trunk/test/compute/fixtures/ecp/htemplate_list.json @@ -0,0 +1,9 @@ +{"templates": [ + +{"uuid": "1", "hypervisor_name": "kvm-hvm", "cpus": 1, "memory": 512, "arch": "i686", "id": 1, "name": "Small"}, + +{"uuid": "2", "hypervisor_name": "kvm-hvm", "cpus": 2, "memory": 1024, "arch": "i686", "id": 2, "name": "Medium"}, + +{"uuid": "3", "hypervisor_name": "kvm-hvm", "cpus": 3, "memory": 2048, "arch": "x86_64", "id": 3, "name": "Large"} + +], "errno": 0, "message": "Success"} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ecp/network_list.json b/trunk/test/compute/fixtures/ecp/network_list.json new file mode 100644 index 0000000000..c588ab5b63 --- /dev/null +++ b/trunk/test/compute/fixtures/ecp/network_list.json @@ -0,0 +1 @@ +{"errno": 0, "message": "Success", "networks": [{"uuid": "1", "vlan_id": null, "name": "Default"}]} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ecp/ptemplate_list.json b/trunk/test/compute/fixtures/ecp/ptemplate_list.json new file mode 100644 index 0000000000..1f45c47c4f --- /dev/null +++ b/trunk/test/compute/fixtures/ecp/ptemplate_list.json @@ -0,0 +1,6 @@ +{"errno": 0, "message": "Success", "packages": [ + +{"os": "unknown", "description": "AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2", "storage": 20480, "uuid": "1", "name": "centos54"}, + +{"os": "unknown", 
"description": "AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2", "storage": 20480, "uuid": "2", "name": "centos54 two"} +]} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ecp/vm_1_action_delete.json b/trunk/test/compute/fixtures/ecp/vm_1_action_delete.json new file mode 100644 index 0000000000..abe1a6d51c --- /dev/null +++ b/trunk/test/compute/fixtures/ecp/vm_1_action_delete.json @@ -0,0 +1 @@ +{"errno": 0, "message": "Success"} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ecp/vm_1_action_start.json b/trunk/test/compute/fixtures/ecp/vm_1_action_start.json new file mode 100644 index 0000000000..6dd8dacbad --- /dev/null +++ b/trunk/test/compute/fixtures/ecp/vm_1_action_start.json @@ -0,0 +1,3 @@ +{"errno": 0, "message": "Success", "vm": +{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "unkown", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.12", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} +} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ecp/vm_1_action_stop.json b/trunk/test/compute/fixtures/ecp/vm_1_action_stop.json new file mode 100644 index 0000000000..78838b6b38 --- /dev/null +++ b/trunk/test/compute/fixtures/ecp/vm_1_action_stop.json @@ -0,0 +1,3 @@ +{"errno": 0, "message": "Success", "vm": +{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "unkown", "trusted": null, "os": 
"unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} +} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ecp/vm_1_get.json b/trunk/test/compute/fixtures/ecp/vm_1_get.json new file mode 100644 index 0000000000..8bb5614851 --- /dev/null +++ b/trunk/test/compute/fixtures/ecp/vm_1_get.json @@ -0,0 +1,3 @@ +{"errno": 0, "message": "Success", "vm": +{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "off", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} +} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ecp/vm_list.json b/trunk/test/compute/fixtures/ecp/vm_list.json new file mode 100644 index 0000000000..48436a85d4 --- /dev/null +++ b/trunk/test/compute/fixtures/ecp/vm_list.json @@ -0,0 +1,10 @@ +{"errno": 0, "message": "Success", "vms": +[ +{"vnc_enabled": true, "uuid": 1, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "running", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"}, + +{"vnc_enabled": true, "uuid": 2, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:72:b4:71:21", "network_name": "Default", "uuid": "c76edd61-2dfd-11df-84ca-0015174e564c", "network": 
"fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5902", "name": "dummy-2", "state": "running", "trusted": null, "os": "unknown", "vnc_password": "zoiZW31T", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"}, + +{"vnc_enabled": true, "uuid": 3, "tags": [], "ip_address": "42.78.124.75", "interfaces": [{"ip": "42.78.124.75", "mac": "00:16:e9:d6:40:c6", "network_name": "Default", "uuid": "479b9823-2ded-11df-94e8-0015174e564c", "network": "fc38963c-a9fa-11de-8c4b-001b63a56c51"}], "vnc_port": "5900", "name": "dummy-1", "state": "stopped", "trusted": null, "os": "unknown", "vnc_password": "jBs5UT00", "vnc_ip_address": "192.168.1.111", "hardware_profile_uuid": "bcaff710-2914-11de-836c-001a929face2"} + +] +} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ecp/vm_put.json b/trunk/test/compute/fixtures/ecp/vm_put.json new file mode 100644 index 0000000000..55f01bfe6f --- /dev/null +++ b/trunk/test/compute/fixtures/ecp/vm_put.json @@ -0,0 +1 @@ +{"errno": 0, "message": "Success", "txid": "fc38963c-a9fa-11de-8c4b-001baaa56c51", "machine_id": "1234"} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/elastichosts/drives_create.json b/trunk/test/compute/fixtures/elastichosts/drives_create.json new file mode 100644 index 0000000000..659ea4151c --- /dev/null +++ b/trunk/test/compute/fixtures/elastichosts/drives_create.json @@ -0,0 +1,12 @@ +{ + "drive": "0012e24a-6eae-4279-9912-3432f698cec8", + "encryption:cipher": "aes-xts-plain", + "name": "test drive", + "read:bytes": "4096", + "read:requests": "1", + "size": 10737418240, + "status": "active", + "user": "2164ce57-591c-43ee-ade5-e2fe0ee13c3e", + "write:bytes": "4096", + "write:requests": "1" +} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/elastichosts/drives_info.json b/trunk/test/compute/fixtures/elastichosts/drives_info.json new file mode 100644 index 0000000000..659ea4151c --- /dev/null +++ 
b/trunk/test/compute/fixtures/elastichosts/drives_info.json @@ -0,0 +1,12 @@ +{ + "drive": "0012e24a-6eae-4279-9912-3432f698cec8", + "encryption:cipher": "aes-xts-plain", + "name": "test drive", + "read:bytes": "4096", + "read:requests": "1", + "size": 10737418240, + "status": "active", + "user": "2164ce57-591c-43ee-ade5-e2fe0ee13c3e", + "write:bytes": "4096", + "write:requests": "1" +} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/elastichosts/servers_create.json b/trunk/test/compute/fixtures/elastichosts/servers_create.json new file mode 100644 index 0000000000..3a17f9645a --- /dev/null +++ b/trunk/test/compute/fixtures/elastichosts/servers_create.json @@ -0,0 +1,25 @@ +{ + "boot": "ide:0:0", + "cpu": 2000, + "ide:0:0": "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3", + "ide:0:0:read:bytes": "299696128", + "ide:0:0:read:requests": "73168", + "ide:0:0:write:bytes": "321044480", + "ide:0:0:write:requests": "78380", + "mem": 1024, + "name": "test api node", + "nic:0:block": "tcp/21 tcp/22 tcp/23 tcp/25", + "nic:0:dhcp": ["1.2.3.4", "1.2.3.5"], + "nic:0:model": "virtio", + "rx": 679560, + "rx:packets": 644, + "server": "b605ca90-c3e6-4cee-85f8-a8ebdf8f9903", + "smp": 1, + "started": 1280723696, + "status": "active", + "tx": 21271, + "tx:packets": "251", + "user": "2164ce57-591a-43ee-ade5-e2fe0ee13c3f", + "vnc:ip": "216.151.208.174", + "vnc:password": "testvncpass" +} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/elastichosts/servers_info.json b/trunk/test/compute/fixtures/elastichosts/servers_info.json new file mode 100644 index 0000000000..72b6b48bef --- /dev/null +++ b/trunk/test/compute/fixtures/elastichosts/servers_info.json @@ -0,0 +1,27 @@ +[ + { + "boot": "ide:0:0", + "cpu": 2000, + "ide:0:0": "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3", + "ide:0:0:read:bytes": "299696128", + "ide:0:0:read:requests": "73168", + "ide:0:0:write:bytes": "321044480", + "ide:0:0:write:requests": "78380", + "mem": 1024, + "name": "test api node", + 
"nic:0:block": "tcp/21 tcp/22 tcp/23 tcp/25", + "nic:0:dhcp": ["1.2.3.4", "1.2.3.5"], + "nic:0:model": "virtio", + "rx": 679560, + "rx:packets": 644, + "server": "b605ca90-c3e6-4cee-85f8-a8ebdf8f9903", + "smp": 1, + "started": 1280723696, + "status": "active", + "tx": 21271, + "tx:packets": "251", + "user": "2164ce57-591a-43ee-ade5-e2fe0ee13c3f", + "vnc:ip": "216.151.208.174", + "vnc:password": "testvncpass" + } +] \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/account_info.xml b/trunk/test/compute/fixtures/gandi/account_info.xml new file mode 100644 index 0000000000..58499fbc7c --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/account_info.xml @@ -0,0 +1,317 @@ + + + + + + +handle +AB3917-GANDI + + +products + + + +errors_for_updating + +product_name_does_not_match +no_action_on_free_product + + + +can_release +1 + + +date_end + + + +product_name +shares_fixed + + +autorenew + + + +errors_for_removing + + + + +errors_for_releasing + +no_action_on_free_product +not_available_resource + + + +is_in_redemption + + + +errors_for_autorenewing + +no_action_on_free_product + + + +duration +1y + + +date_created +20101028T12:38:17 + + +quantity +12 + + +errors_for_renewing + +no_action_on_free_product + + + +id +11153 + + +redemption +7 + + + + +errors_for_updating + +no_action_on_free_product + + + +can_release +0 + + +date_end + + + +product_name +ips + + +autorenew + + + +errors_for_removing + + + + +errors_for_releasing + +no_action_on_free_product +db_can_not_release + + + +is_in_redemption + + + +errors_for_autorenewing + +no_action_on_free_product + + + +duration +1m + + +date_created +20110124T11:42:35 + + +quantity +4 + + +errors_for_renewing + +no_action_on_free_product + + + +id +11196 + + +redemption +7 + + + + + +share_definition + + +servers +1 + + +bandwidth +5120.0 + + +memory +256 + + +cores +0.25 + + +slots +0.66666666666666663 + + +disk +8192 + + + + +fullname +Aymeric Barantal + + +id +58757 + + +resources + + +available + 
+ +shares +12 + + +servers +8 + + +ips +4 + + +bandwidth +51200.0 + + +memory +2560 + + +cores +3.0 + + +slots +4.0 + + +disk +89088 + + + + +granted + + +shares +12 + + +servers +12 + + +ips +8 + + +bandwidth +61440 + + +memory +3072 + + +cores +5.0 + + +slots +8.0 + + +disk +98304 + + + + +used + + +servers +4 + + +ips +4 + + +bandwidth +10240.0 + + +memory +512 + + +cores +2.0 + + +slots +4 + + +disk +9216 + + + + +expired + + + + + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/datacenter_list.xml b/trunk/test/compute/fixtures/gandi/datacenter_list.xml new file mode 100644 index 0000000000..9d8cc688af --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/datacenter_list.xml @@ -0,0 +1,53 @@ + + + + + + + + + + + country + France + + + iso + FR + + + id + 1 + + + name + Equinix Paris + + + + + + + country + United States of America + + + iso + US + + + id + 2 + + + name + Level3 Baltimore + + + + + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/disk_attach.xml b/trunk/test/compute/fixtures/gandi/disk_attach.xml new file mode 100644 index 0000000000..88e1c2a7fa --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/disk_attach.xml @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110921T12:57:05 + + +vm_id +250133 + + +date_start + + + +disk_id +34918 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T12:57:05 + + +type +disk_attach + + +id +657982 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/disk_create_from.xml b/trunk/test/compute/fixtures/gandi/disk_create_from.xml new file mode 100644 index 0000000000..4d34d0e19c --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/disk_create_from.xml @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110921T14:20:56 + + +vm_id + + + +date_start + + + +disk_id +35288 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T14:20:56 + 
+ +type +disk_create + + +id +657985 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/disk_detach.xml b/trunk/test/compute/fixtures/gandi/disk_detach.xml new file mode 100644 index 0000000000..388ad6d899 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/disk_detach.xml @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110921T12:57:35 + + +vm_id +250133 + + +date_start + + + +disk_id +34918 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T12:57:35 + + +type +disk_detach + + +id +657983 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/disk_list.xml b/trunk/test/compute/fixtures/gandi/disk_list.xml new file mode 100644 index 0000000000..5af7613f10 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/disk_list.xml @@ -0,0 +1,200 @@ + + + + + + + +datacenter_id +1 + + +name +disk_libcloud2 + + +kernel_version +2.6.32 + + +can_snapshot +0 + + +visibility +private + + +label +Debian 5 + + +vms_id + + + + +source +23351 + + +state +created + + +is_boot_disk +0 + + +date_updated +20101116T10:51:59 + + +date_created +20101028T13:52:38 + + +type +data + + +id +34918 + + +size +3072 + + + + +datacenter_id +1 + + +name +test1 + + +kernel_version +2.6.32 + + +can_snapshot + + + +visibility +private + + +label +Debian 5 + + +vms_id + +250133 + + + +source +23351 + + +state +created + + +is_boot_disk +1 + + +date_updated +20110120T15:02:01 + + +date_created +20110120T14:57:55 + + +type +data + + +id +34951 + + +size +3072 + + + + +datacenter_id +1 + + +name +test_disk + + +kernel_version +2.6.32 + + +can_snapshot +1 + + +visibility +private + + +label +Debian 5 + + +vms_id + +250288 + + + +source +23351 + + +state +created + + +is_boot_disk +1 + + +date_updated +20110325T16:31:11 + + +date_created +20110324T17:14:06 + + +type +data + + +id +35170 + + +size +3072 + + + + + + \ No newline at end of file diff --git 
a/trunk/test/compute/fixtures/gandi/disk_update.xml b/trunk/test/compute/fixtures/gandi/disk_update.xml new file mode 100644 index 0000000000..9dcd73e479 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/disk_update.xml @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110921T14:23:10 + + +vm_id + + + +date_start + + + +disk_id +34951 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T14:23:10 + + +type +disk_update + + +id +657987 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/iface_attach.xml b/trunk/test/compute/fixtures/gandi/iface_attach.xml new file mode 100644 index 0000000000..33856c1ed4 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/iface_attach.xml @@ -0,0 +1,53 @@ + + + + + + +iface_id +7857 + + +date_updated +20110921T12:49:35 + + +vm_id +250133 + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T12:49:35 + + +type +iface_attach + + +id +657980 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/iface_detach.xml b/trunk/test/compute/fixtures/gandi/iface_detach.xml new file mode 100644 index 0000000000..f5080d9013 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/iface_detach.xml @@ -0,0 +1,53 @@ + + + + + + +iface_id +7857 + + +date_updated +20110921T12:53:29 + + +vm_id +250133 + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110921T12:53:29 + + +type +iface_detach + + +id +657981 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/iface_list.xml b/trunk/test/compute/fixtures/gandi/iface_list.xml new file mode 100644 index 0000000000..ba530a6711 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/iface_list.xml @@ -0,0 +1,99 @@ + + + + + + + +date_updated +20110120T14:58:44 + + +vm_id +250133 + + +bandwidth +5120.0 + + +datacenter_id +1 + + +state +used + + +num +0 + + 
+ips_id + +9256 +9294 + + + +date_created +20110120T14:57:55 + + +type +public + + +id +7857 + + + + +date_updated +20110324T17:14:16 + + +vm_id +250288 + + +bandwidth +5192.0 + + +datacenter_id +1 + + +state +used + + +num +0 + + +ips_id + +9298 +9508 + + + +date_created +20110324T17:14:06 + + +type +public + + +id +8019 + + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/image_list_dc0.xml b/trunk/test/compute/fixtures/gandi/image_list_dc0.xml new file mode 100644 index 0000000000..ee68292ceb --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/image_list_dc0.xml @@ -0,0 +1,493 @@ + + + + + + + + + + + date_updated + 20100928T10:41:38 + + + disk_id + 34198 + + + label + GandiOS + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 2 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11233 + + + label + Mandriva 2008.0 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 3 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11235 + + + label + Centos 5 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 4 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11236 + + + label + Fedora Core 7 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 5 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11237 + + + label + Open SUSE 10.3 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 6 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11238 + + + label + Debian 4 + + + datacenter_id + 1 + + + visibility + all + + + os_arch 
+ x86-32 + + + date_created + 20070101T00:00:00 + + + author_id + 248842 + + + id + 7 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11239 + + + label + Fedora Core 8 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20080101T00:00:00 + + + author_id + 248842 + + + id + 8 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11240 + + + label + Open SUSE 11.0 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20080101T00:00:00 + + + author_id + 248842 + + + id + 9 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11241 + + + label + Mandriva 2008.1 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20080101T00:00:00 + + + author_id + 248842 + + + id + 10 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 11242 + + + label + Ubuntu 8.04 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20080101T00:00:00 + + + author_id + 248842 + + + id + 11 + + + + + + + date_updated + 20100922T11:56:05 + + + disk_id + 23351 + + + label + Debian 5 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20090101T00:00:00 + + + author_id + 248842 + + + id + 12 + + + + + + + date_updated + 20100811T16:30:06 + + + disk_id + 23352 + + + label + Ubuntu 9.04 + + + datacenter_id + 1 + + + visibility + all + + + os_arch + x86-32 + + + date_created + 20090101T00:00:00 + + + author_id + 248842 + + + id + 13 + + + + + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/ip_list.xml b/trunk/test/compute/fixtures/gandi/ip_list.xml new file mode 100644 index 0000000000..74cce54587 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/ip_list.xml @@ -0,0 +1,261 @@ + + + + + + + +reverse +xvm-6-186.ghst.net + + +iface_id +7857 + + +date_updated +20110120T14:57:55 + + +ip +10.5.6.186 + + +datacenter_id +1 + + +state +created + 
+ +num +0 + + +version +4 + + +date_created +20101028T12:49:11 + + +id +9256 + + + + +reverse +xvm6-fe37-9f7b.ghst.net + + +iface_id +7857 + + +date_updated +20110120T14:58:44 + + +ip +2001:4b98:dc0:543:216:3eff:fe37:9f7b + + +datacenter_id +1 + + +state +created + + +num +1 + + +version +6 + + +date_created +20110120T14:58:44 + + +id +9294 + + + + +reverse +xvm-6-179.ghst.net + + +iface_id +7861 + + +date_updated +20110124T15:53:44 + + +ip +10.5.6.179 + + +datacenter_id +1 + + +state +created + + +num +0 + + +version +4 + + +date_created +20110124T11:43:17 + + +id +9298 + + + + +reverse +xvm6-fea8-3724.ghst.net + + +iface_id +7861 + + +date_updated +20110124T15:54:44 + + +ip +2001:4b98:dc0:543:216:3eff:fea8:3724 + + +datacenter_id +1 + + +state +created + + +num +1 + + +version +6 + + +date_created +20110124T15:54:44 + + +id +9301 + + + + +reverse + + + +iface_id + + + +date_updated +20110217T17:39:39 + + +ip + + + +datacenter_id +1 + + +state +being_created + + +num + + + +version +4 + + +date_created +20110217T17:39:39 + + +id +9323 + + + + +reverse +xvm-6-26.ghst.net + + +iface_id + + + +date_updated +20110225T11:59:55 + + +ip +10.5.6.26 + + +datacenter_id +1 + + +state +created + + +num +0 + + +version +4 + + +date_created +20110224T16:46:33 + + +id +9332 + + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/operation_info.xml b/trunk/test/compute/fixtures/gandi/operation_info.xml new file mode 100644 index 0000000000..33163438a0 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/operation_info.xml @@ -0,0 +1,45 @@ + + + + + + +date_updated +20110324T15:49:50 + + +last_error + + + +date_start + + + +source +AB3917-GANDI + + +step +DONE + + +eta +39 + + +date_created +20110324T15:49:32 + + +type +vm_delete + + +id +637366 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/vm_create_from.xml b/trunk/test/compute/fixtures/gandi/vm_create_from.xml new file mode 100644 index 
0000000000..eda889a3c3 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/vm_create_from.xml @@ -0,0 +1,147 @@ + + + + + + + +iface_id + + + +date_updated +20110324T17:14:06 + + +type +disk_create + + +date_start + + + +disk_id +35170 + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110324T17:14:06 + + +vm_id + + + +id +637370 + + + + +iface_id +8019 + + +date_updated +20110324T17:14:06 + + +vm_id + + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id +9298 + + +date_created +20110324T17:14:06 + + +type +iface_create + + +id +637371 + + + + +iface_id + + + +date_updated +20110324T17:14:07 + + +type +vm_create + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110324T17:14:07 + + +vm_id +250288 + + +id +637372 + + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/vm_delete.xml b/trunk/test/compute/fixtures/gandi/vm_delete.xml new file mode 100644 index 0000000000..a731b41032 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/vm_delete.xml @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110324T15:49:32 + + +vm_id +250136 + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110324T15:49:32 + + +type +vm_delete + + +id +637366 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/vm_info.xml b/trunk/test/compute/fixtures/gandi/vm_info.xml new file mode 100644 index 0000000000..5b01c94ced --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/vm_info.xml @@ -0,0 +1,330 @@ + + + + + + +memory +256 + + +hostname +test2 + + +console +0 + + +description + + + +triggers + + + + +date_updated +20110120T15:25:07 + + +disks + + + +datacenter_id +1 + + +name +test2 + + +kernel_version +2.6.32 + + +can_snapshot + + + +kernel_cmdline + + +root +/dev/xvda1 + + +ro +1 + + +console +xvc0 + + +nosep +1 + + + + +visibility +private + 
+ +label +Debian 5 + + +vms_id + +250133 + + + +source +23351 + + +state +running + + +is_boot_disk +1 + + +date_updated +20110120T15:02:01 + + +date_created +20110120T14:57:55 + + +type +data + + +id +34951 + + +size +3072 + + + + + +disks_id + +34951 + + + +datacenter_id +1 + + +state +running + + +flex_shares +0 + + +ai_active +0 + + +vm_max_memory +2048 + + +ifaces + + + +date_updated +20110120T14:58:44 + + +vm_id +250133 + + +bandwidth +5120.0 + + +datacenter_id +1 + + +ips + + + +reverse +xvm-6-186.ghst.net + + +iface_id +7857 + + +date_updated +20110120T14:57:55 + + +ip +10.5.6.186 + + +datacenter_id +1 + + +state +created + + +num +0 + + +version +4 + + +date_created +20101028T12:49:11 + + +id +9256 + + + + +reverse +xvm6-fe37-9f7b.ghst.net + + +iface_id +7857 + + +date_updated +20110120T14:58:44 + + +ip +2001:4b98:dc0:543:216:3eff:fe37:9f7b + + +datacenter_id +1 + + +state +created + + +num +1 + + +version +6 + + +date_created +20110120T14:58:44 + + +id +9294 + + + + + +state +used + + +num +0 + + +ips_id + +9256 +9294 + + + +date_created +20110120T14:57:55 + + +type +public + + +id +7857 + + + + + +cores +1 + + +ifaces_id + +7857 + + + +graph_urls + + +vcpu + +http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vcpu&device_number=0 + + + +vdi + +http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vdi&device_number=0 + + + +vif + +http://graph.dev.hosting.gandi.net:8080//?key=88a6b2a04f21c3b9c055d73310ee37ea47fe25c7&vm_id=379&dc_id=1&stats_target=vif&device_number=0 + + + + + +date_created +20110120T14:57:55 + + +id +250133 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/vm_list.xml b/trunk/test/compute/fixtures/gandi/vm_list.xml new file mode 100644 index 0000000000..d0f40729ca --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/vm_list.xml @@ -0,0 +1,141 @@ + + + + + + + +memory +256 + 
+ +console +0 + + +description + + + +date_updated +20110120T15:25:07 + + +hostname +test1 + + +disks_id + +34951 + + + +datacenter_id +1 + + +state +running + + +flex_shares +0 + + +ai_active +0 + + +vm_max_memory +2048 + + +cores +1 + + +ifaces_id + +7857 + + + +date_created +20110120T14:57:55 + + +id +250133 + + + + +memory +256 + + +console +0 + + +description + + + +date_updated +20110225T12:09:31 + + +hostname +test2 + + +disks_id + +34954 + + + +datacenter_id +1 + + +state +halted + + +flex_shares +0 + + +ai_active +0 + + +vm_max_memory +2048 + + +cores +1 + + +ifaces_id + +7861 + + + +date_created +20110124T15:53:44 + + +id +250136 + + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/vm_reboot.xml b/trunk/test/compute/fixtures/gandi/vm_reboot.xml new file mode 100644 index 0000000000..4a8aab2fa1 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/vm_reboot.xml @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110325T13:18:27 + + +vm_id +250133 + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110325T13:18:27 + + +type +vm_reboot + + +id +637398 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gandi/vm_stop.xml b/trunk/test/compute/fixtures/gandi/vm_stop.xml new file mode 100644 index 0000000000..67461fb291 --- /dev/null +++ b/trunk/test/compute/fixtures/gandi/vm_stop.xml @@ -0,0 +1,53 @@ + + + + + + +iface_id + + + +date_updated +20110325T13:19:52 + + +vm_id +250133 + + +date_start + + + +disk_id + + + +source +AB3917-GANDI + + +step +WAIT + + +ip_id + + + +date_created +20110325T13:19:52 + + +type +vm_stop + + +id +637399 + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/gogrid/image_list.json b/trunk/test/compute/fixtures/gogrid/image_list.json new file mode 100644 index 0000000000..761a14988b --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/image_list.json @@ -0,0 +1,180 @@ +{ + 
"list": [ + { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + { + "billingtokens": [ + { + "id": 47, + "name": "CentOS 5.3 64bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (64-bit) w/ None", + "friendlyName": "CentOS 5.3 (64-bit) w/ None", + "id": 1532, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-939ef909-84b8-4a2f-ad56-02ccd7da05ff.img", + "name": "centos5.3_64_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (64-bit)", + "id": 17, + "name": "CentOS 5.3 (64-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789076417 + }, + { + "billingtokens": [ + { + "id": 48, + "name": "RHEL 5.4 32bit", + "price": 0 + } + ], + "description": "RHEL 5.4 (32-bit) w/ None", + "friendlyName": "RHEL 5.4 (32-bit) w/ None", + "id": 1533, + "isActive": true, + "isPublic": true, + "location": 
"gogrid/GSI-4c88cb92-dd7b-4bb1-95b6-7cc93eb1d2aa.img", + "name": "rhel5.4_32_base", + "object": "serverimage", + "os": { + "description": "RHEL 5.4 (32-bit)", + "id": 18, + "name": "RHEL 5.4 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789076417 + }, + { + "billingtokens": [ + { + "id": 49, + "name": "RHEL 5.4 64bit", + "price": 0 + } + ], + "description": "RHEL 5.4 (64-bit) w/ None", + "friendlyName": "RHEL 5.4 (64-bit) w/ None", + "id": 1534, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2bd8ddb3-cc53-4a76-8188-0dce7537a422.img", + "name": "rhel5.4_64_base", + "object": "serverimage", + "os": { + "description": "RHEL 5.4 (64-bit)", + "id": 19, + "name": "RHEL 5.4 (64-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789076417 + } + ], + "method": "/grid/image/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 59, + "start": 0, + "total": 59 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/image_save.json b/trunk/test/compute/fixtures/gogrid/image_save.json new file mode 100644 index 0000000000..e20c6d8022 --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/image_save.json @@ -0,0 +1,62 @@ +{ + "list": [ + { + "architecture": { + "description": "32 bit OS", + "id": 1, + "name": "32-bit", + "object": "option" + }, + "billingtokens": [ + { + "id": 46, + 
"name": "CentOS 5.3 32bit", + "object": "billingtoken", + "price": 0 + } + ], + "createdTime": 1289119839685, + "description": "", + "friendlyName": "testimage", + "id": 5050, + "isActive": true, + "isPublic": false, + "location": "123/GSI-3ee65927-f80d-43df-92df-6c7e352f009c.img", + "name": "GSI-3ee65927-f80d-43df-92df-6c7e352f009c", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": 123, + "name": "name", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is being saved", + "id": 1, + "name": "Saving", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1289119839685 + } + ], + "method": "/grid/image/save", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/ip_list.json b/trunk/test/compute/fixtures/gogrid/ip_list.json new file mode 100644 index 0000000000..cf44eb9782 --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/ip_list.json @@ -0,0 +1,69 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 5348099, + "ip": "192.168.75.66", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "192.168.75.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 5348100, + "ip": "192.168.75.67", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.75.64/255.255.255.240" + }, + { + "datacenter": { + 
"description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 5348101, + "ip": "192.168.75.68", + "object": "ip", + "public": false, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "192.168.75.64/255.255.255.240" + } + ], + "method": "/grid/ip/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 3, + "start": 0, + "total": 3 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/ip_list_empty.json b/trunk/test/compute/fixtures/gogrid/ip_list_empty.json new file mode 100644 index 0000000000..146bd9f45a --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/ip_list_empty.json @@ -0,0 +1,12 @@ +{ + "list": [ + ], + "method": "/grid/ip/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 0, + "start": 0, + "total": 0 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json b/trunk/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json new file mode 100644 index 0000000000..81a61c142e --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/lookup_list_ip_datacenter.json @@ -0,0 +1,24 @@ +{ + "list": [ + { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + } + ], + "method": "/common/lookup/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 2, + "start": 0, + "total": 2 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/password_list.json b/trunk/test/compute/fixtures/gogrid/password_list.json new file mode 100644 index 0000000000..a06a3ef7dd --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/password_list.json @@ -0,0 +1,102 @@ +{ + "list": [ + { + "password": "bebebe", + "object": "password", + "username": "root", + "server": { + "id": 90967, + "image": { + "billingtokens": [ + { + 
"id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + } + ], + "method": "/grid/server/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/server_add.json b/trunk/test/compute/fixtures/gogrid/server_add.json new file mode 100644 index 0000000000..35a4ca1af7 --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/server_add.json @@ -0,0 
+1,96 @@ +{ + "list": [ + { + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/add", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/server_delete.json b/trunk/test/compute/fixtures/gogrid/server_delete.json new file mode 100644 index 0000000000..068ff83013 --- 
/dev/null +++ b/trunk/test/compute/fixtures/gogrid/server_delete.json @@ -0,0 +1,97 @@ +{ + "list": [ + { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/delete", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/server_edit.json 
b/trunk/test/compute/fixtures/gogrid/server_edit.json new file mode 100644 index 0000000000..0133a1bb7f --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/server_edit.json @@ -0,0 +1,97 @@ +{ + "list": [ + { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/edit", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + 
"total": 1 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/server_list.json b/trunk/test/compute/fixtures/gogrid/server_list.json new file mode 100644 index 0000000000..23e43ff78b --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/server_list.json @@ -0,0 +1,98 @@ +{ + "list": [ + { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "description": "test server", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + 
"method": "/grid/server/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/server_power.json b/trunk/test/compute/fixtures/gogrid/server_power.json new file mode 100644 index 0000000000..71ba6780b8 --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/server_power.json @@ -0,0 +1,97 @@ +{ + "list": [ + { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or 
Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/power", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/compute/fixtures/gogrid/server_power_fail.json b/trunk/test/compute/fixtures/gogrid/server_power_fail.json new file mode 100644 index 0000000000..b938016483 --- /dev/null +++ b/trunk/test/compute/fixtures/gogrid/server_power_fail.json @@ -0,0 +1,97 @@ +{ + "list": [ + { + "id": 90967, + "image": { + "billingtokens": [ + { + "id": 46, + "name": "CentOS 5.3 32bit", + "price": 0 + } + ], + "description": "CentOS 5.3 (32-bit) w/ None", + "friendlyName": "CentOS 5.3 (32-bit) w/ None", + "id": 1531, + "isActive": true, + "isPublic": true, + "location": "gogrid/GSI-2c4c6672-69e1-4928-ac9d-a564521d55fe.img", + "name": "centos5.3_32_base", + "object": "serverimage", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "owner": { + "id": -1, + "name": "GoGrid", + "object": "customer" + }, + "price": 0, + "state": { + "description": "Image is available for adds", + "id": 2, + "name": "Available", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + }, + "updatedTime": 1257789046453 + }, + "ip": { + "id": 1659927, + "ip": "192.168.0.202", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "192.168.0.192/255.255.255.240" + }, + "isSandbox": false, + "name": "test1", + "object": "server", + "os": { + "description": "CentOS 5.3 (32-bit)", + "id": 16, + "name": "CentOS 5.3 (32-bit)", + "object": "option" + }, + "ram": { + "description": "Server with 512MB RAM", + "id": 1, + "name": "512MB", + "object": "option" + }, + "state": { + "description": "Server is in 
active state.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "Web or Application Server", + "id": 1, + "name": "Web Server", + "object": "option" + } + } + ], + "method": "/grid/server/power", + "status": "failure", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/compute/fixtures/ibm_sbc/create.xml b/trunk/test/compute/fixtures/ibm_sbc/create.xml new file mode 100644 index 0000000000..71d8811434 --- /dev/null +++ b/trunk/test/compute/fixtures/ibm_sbc/create.xml @@ -0,0 +1 @@ +28558128558RationalInsight4woodser@us.ibm.com11LARGEMyPublicKey02010-04-19T10:03:34.327-04:002010-04-26T10:03:43.610-04:00SUSE Linux Enterprise10 SP2OS diff --git a/trunk/test/compute/fixtures/ibm_sbc/delete.xml b/trunk/test/compute/fixtures/ibm_sbc/delete.xml new file mode 100644 index 0000000000..7f2027262e --- /dev/null +++ b/trunk/test/compute/fixtures/ibm_sbc/delete.xml @@ -0,0 +1 @@ + diff --git a/trunk/test/compute/fixtures/ibm_sbc/images.xml b/trunk/test/compute/fixtures/ibm_sbc/images.xml new file mode 100644 index 0000000000..91732f7072 --- /dev/null +++ b/trunk/test/compute/fixtures/ibm_sbc/images.xml @@ -0,0 +1,2 @@ + +2fd2d0478b132490897526b9b4433a334Rational Build Forge Agent11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2Rational Build Forge provides an adaptive process execution framework that automates, orchestrates, manages, and tracks all the processes between each handoff within the assembly line of software development, creating an automated software factory.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A233F5A0-05A5-F21D-3E92-3793B722DFBD}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A233F5A0-05A5-F21D-3E92-3793B722DFBD}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:00384e900960c3d4b648fa6d4670aed2cd1SUSE 10 SP211SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2SuSE v10.2 Base OS 
Imagehttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{07F112A1-84A7-72BF-B8FD-B36011E0E433}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{07F112A1-84A7-72BF-B8FD-B36011E0E433}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:0015a72d3e7bb1cb4942ab0da2968e2e77bbWebSphere Application Server and Rational Agent Controller11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2WebSphere Application Server and Rational Agent Controller enables a performance based foundation to build, reuse, run, integrate and manage Service Oriented Architecture (SOA) applications and services.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{86E8E71D-29A3-86DE-8A26-792C5E839D92}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{86E8E71D-29A3-86DE-8A26-792C5E839D92}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:00117da905ba0fdf4d8b8f94e7f4ef43c1beRational Insight11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2Rational Insight helps organizations reduce time to market, improve quality, and take greater control of software and systems development and delivery. It provides objective dashboards and best practice metrics to identify risks, status, and trends.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{4F774DCF-1469-EAAB-FBC3-64AE241CF8E8}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{4F774DCF-1469-EAAB-FBC3-64AE241CF8E8}/1.0/GettingStarted.htmlLARGE2009-04-25T00:00:00.000-04:0018edf7ad43f75943b1b0c0f915dba8d86cDB2 Express-C11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2DB2 Express-C is an entry-level edition of the DB2 database server for the developer community. 
It has standard relational functionality and includes pureXML, and other features of DB2 for Linux, Unix, and Windows (LUW).https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{E69488DE-FB79-63CD-E51E-79505A1309BD}/2.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{E69488DE-FB79-63CD-E51E-79505A1309BD}/2.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:0021c03be6800bf043c0b44c584545e04099Informix Dynamic Server Developer Edition11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2Informix Dynamic Server (IDS) Developer Edition is a development version of the IDS Enterprise Edition. IDS is designed to meet the database server needs of small-size to large-size enterprise businesses.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9B0C8F66-9639-CA0A-0A94-7928D7DAD6CB}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9B0C8F66-9639-CA0A-0A94-7928D7DAD6CB}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:00229b2b6482ba374a6ab4bb3585414a910aWebSphere sMash with AppBuilder11SYSTEMPUBLICi386SUSE Linux Enterprise/10 SP2WebSphere sMash® provides a web platform that includes support for dynamic scripting in PHP and Groovy.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{88E74AC6-9CCB-2710-7E9B-936DA2CE496C}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{88E74AC6-9CCB-2710-7E9B-936DA2CE496C}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-04-25T00:00:00.000-04:001000150416662e71fae44bdba4d7bb502a09c5e7DB2 Enterprise V9.7 (32-bit, 90-day trial)11leonsp@ca.ibm.comPUBLICi386SuSE v10.2DB2 Enterprise V9.7 (32-bit, 90-day 
trial)https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{38F2AB86-9F03-E463-024D-A9ABC3AE3831}/2.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{38F2AB86-9F03-E463-024D-A9ABC3AE3831}/2.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-11-09T17:01:28.000-05:00100020639da8863714964624b8b13631642c785bRHEL 5.4 Base OS11youngdj@us.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Red Hat Enterprise Linux 5.4 Base OShttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{34904879-E794-A2D8-2D7C-2E8D6AD6AE77}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{34904879-E794-A2D8-2D7C-2E8D6AD6AE77}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2009-11-18T13:51:12.000-05:0010002573e5f09a64667e4faeaf3ac661600ec6caRational Build Forge11leighw@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Rational Build Forge provides an adaptive process execution framework that automates, orchestrates, manages, and tracks all the processes between each handoff within the assembly line of software development, creating an automated software factory.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{22E039C6-108E-B626-ECC9-E2C9B62479FF}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{22E039C6-108E-B626-ECC9-E2C9B62479FF}/1.0/GettingStarted.htmlMEDIUMLARGE2009-12-08T16:34:37.000-05:00100030563e276d758ed842caafe77770d60dedeaRational Asset Manager 7.2.0.111gmendel@us.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Asset Manager helps to create, modify, govern, find and reuse development assets, including SOA and systems development assets. 
It facilitates the reuse of all types of software development related assets, potentially saving development time.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{296C6DDF-B87B-327B-3E5A-F2C50C353A69}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{296C6DDF-B87B-327B-3E5A-F2C50C353A69}/1.0/GettingStarted.htmlMEDIUMLARGE2009-12-14T14:30:57.000-05:0010003854e3067f999edf4914932295cfb5f79d59WebSphere Portal/WCM 6.1.511mlamb@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2IBM® WebSphere® Portal Server enables you to quickly consolidate applications and content into role-based applications, complete with search, personalization, and security capabilities.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{279F3E12-A7EF-0768-135B-F08B66DF8F71}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{279F3E12-A7EF-0768-135B-F08B66DF8F71}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-01-12T18:06:29.000-05:00100038640112efd8f1e144998f2a70a165d00bd3Rational Quality Manager11brownms@gmail.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Quality Manager provides a collaborative application lifecycle management (ALM) environment for test planning, construction, and execution.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9DA927BA-2CEF-1686-71B0-2BAC468B7445}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{9DA927BA-2CEF-1686-71B0-2BAC468B7445}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-01-15T09:40:12.000-05:00100038653fbf6936e5cb42b5959ad9837add054fIBM Mashup Center with IBM Lotus Widget Factory11mgilmore@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2IBM Mashup Center is an end-to-end enterprise mashup platform, supporting rapid assembly of dynamic web applications with the management, security, and governance 
capabilities.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{0F867D03-588B-BA51-4E18-4CE9D11AECFC}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{0F867D03-588B-BA51-4E18-4CE9D11AECFC}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-01-15T10:44:24.000-05:0010003780425e2dfef95647498561f98c4de356abRational Team Concert11sonia_dimitrov@ca.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Team Concert is a collaborative software delivery environment that empowers project teams to simplify, automate and govern software delivery.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{679CA6F5-1E8E-267B-0C84-F7B0B41DF1DC}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{679CA6F5-1E8E-267B-0C84-F7B0B41DF1DC}/1.0/GettingStarted.htmlMEDIUMLARGE2010-01-19T14:13:58.000-05:0010003785c4867b72f2fc43fe982e76c76c32efaaLotus Forms Turbo 3.5.111rlintern@ca.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Lotus Forms Turbo requires no training and is designed to help customers address basic form software requirements such as surveys, applications, feedback, orders, request for submission, and more - without involvement from the IT department.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{846AD7D3-9A0F-E02C-89D2-BE250CAE2318}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{846AD7D3-9A0F-E02C-89D2-BE250CAE2318}/1.0/GettingStarted.htmlLARGE2010-01-22T13:27:08.000-05:0010005598Rational Requirements Composer11mutdosch@us.ibm.comPUBLICi386Redhat Enterprise Linux (32-bit)/5.4Rational Requirements Composer helps teams define and use requirements effectively across the project 
lifecycle.https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{28C7B870-2C0A-003F-F886-B89F5B413B77}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{28C7B870-2C0A-003F-F886-B89F5B413B77}/1.0/GettingStarted.htmlMEDIUMLARGE2010-02-08T11:43:18.000-05:0010007509Rational Software Architecture11danberg@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Rational Software Architect for WebSphere with the Cloud Client plug-ins created on 2/22/10 8:06 PMhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{2C6FB6D2-CB87-C4A0-CDE0-5AAF03E214B2}/1.1/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{2C6FB6D2-CB87-C4A0-CDE0-5AAF03E214B2}/1.1/GettingStarted.htmlLARGE2010-02-22T20:03:18.000-05:0010008319WebSphere Feature Pack for OSGi Apps and JPA 2.011radavenp@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2IBM WebSphere Application Server V7.0 Fix Pack 7, Feature Pack for OSGi Applications and Java Persistence API 2.0 Open Beta, and Feature Pack for Service Component Architecture (SCA) V1.0.1 Fix Pack V1.0.1.1https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A397B7CD-A1C7-1956-7AEF-6AB495E37958}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{A397B7CD-A1C7-1956-7AEF-6AB495E37958}/1.0/GettingStarted.htmlSMALLMEDIUMLARGE2010-03-14T21:06:38.000-04:0010008273Rational Software Architect for WebSphere11danberg@us.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2Rational Software Architect for WebSphere with the Cloud Client plug-ins created on 3/15/10 12:21 PMhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{839D92BB-DEA5-9820-8E2E-AE5D0A6DEAE3}/1.1/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{839D92BB-DEA5-9820-8E2E-AE5D0A6DEAE3}/1.1/GettingStarted.htmlLARGE2010-03-15T12:17:26.000-04:0010008404Rational Application 
Developer11khiamt@ca.ibm.comPUBLICi386SUSE Linux Enterprise/10 SP2An Eclipse-based IDE with visual development features that helps Java developers rapidly design, develop, assemble, test, profile and deploy high quality Java/J2EE, Portal, Web/Web 2.0, Web services and SOA applications. (03/16/2010)https://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{6A957586-A17A-4927-7C71-0FDE280DB66B}/1.0/parameters.xmlhttps://www-180.ibm.com/cloud/enterprise/beta/ram.ws/RAMSecure/artifact/{6A957586-A17A-4927-7C71-0FDE280DB66B}/1.0/GettingStarted.htmlMEDIUMLARGE2010-03-16T00:10:30.000-04:00 \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ibm_sbc/instances.xml b/trunk/test/compute/fixtures/ibm_sbc/instances.xml new file mode 100644 index 0000000000..aecd2d5fed --- /dev/null +++ b/trunk/test/compute/fixtures/ibm_sbc/instances.xml @@ -0,0 +1 @@ +26557126557Insight Instancewoodser@us.ibm.com11LARGEPublic keyvm519.developer.ihost.com129.33.196.12852010-04-06T15:40:24.745-04:002010-04-19T04:00:00.000-04:00SUSE Linux Enterprise10 SP2OS28193128193RAD instancewoodser@us.ibm.com10008404MEDIUMasdff22010-04-15T15:20:10.317-04:002010-04-22T15:20:19.564-04:00SUSE Linux Enterprise10 SP2OS28194128194RSAwoodser@us.ibm.com10007509LARGEasdff22010-04-15T15:23:04.753-04:002010-04-22T15:23:13.658-04:00SUSE Linux Enterprise10 SP2OS \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ibm_sbc/instances_deleted.xml b/trunk/test/compute/fixtures/ibm_sbc/instances_deleted.xml new file mode 100644 index 0000000000..05e8644254 --- /dev/null +++ b/trunk/test/compute/fixtures/ibm_sbc/instances_deleted.xml @@ -0,0 +1 @@ +26557126557Insight Instancewoodser@us.ibm.com11LARGEPublic keyvm519.developer.ihost.com129.33.196.12852010-04-06T15:40:24.745-04:002010-04-19T04:00:00.000-04:00SUSE Linux Enterprise10 SP2OS28194128194RSAwoodser@us.ibm.com10007509LARGEasdff22010-04-15T15:23:04.753-04:002010-04-22T15:23:13.658-04:00SUSE Linux Enterprise10 SP2OS \ No newline 
at end of file diff --git a/trunk/test/compute/fixtures/ibm_sbc/locations.xml b/trunk/test/compute/fixtures/ibm_sbc/locations.xml new file mode 100644 index 0000000000..8f43ded01f --- /dev/null +++ b/trunk/test/compute/fixtures/ibm_sbc/locations.xml @@ -0,0 +1 @@ +1US North East: Poughkeepsie, NYPOK50100200ext3SMALLMEDIUMLARGE \ No newline at end of file diff --git a/trunk/test/compute/fixtures/ibm_sbc/reboot_active.xml b/trunk/test/compute/fixtures/ibm_sbc/reboot_active.xml new file mode 100644 index 0000000000..2a43c1cbaf --- /dev/null +++ b/trunk/test/compute/fixtures/ibm_sbc/reboot_active.xml @@ -0,0 +1 @@ + diff --git a/trunk/test/compute/fixtures/ibm_sbc/sizes.xml b/trunk/test/compute/fixtures/ibm_sbc/sizes.xml new file mode 100644 index 0000000000..2a43c1cbaf --- /dev/null +++ b/trunk/test/compute/fixtures/ibm_sbc/sizes.xml @@ -0,0 +1 @@ + diff --git a/trunk/test/compute/fixtures/meta/helloworld.txt b/trunk/test/compute/fixtures/meta/helloworld.txt new file mode 100644 index 0000000000..b45ef6fec8 --- /dev/null +++ b/trunk/test/compute/fixtures/meta/helloworld.txt @@ -0,0 +1 @@ +Hello, World! 
\ No newline at end of file diff --git a/trunk/test/compute/fixtures/opennebula/compute.xml b/trunk/test/compute/fixtures/opennebula/compute.xml new file mode 100644 index 0000000000..3f5a3bb8e5 --- /dev/null +++ b/trunk/test/compute/fixtures/opennebula/compute.xml @@ -0,0 +1,15 @@ + + + 5 + MyCompute + ACTIVE + + + + + + + + + small + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/opennebula/computes.xml b/trunk/test/compute/fixtures/opennebula/computes.xml new file mode 100644 index 0000000000..a9ba32a662 --- /dev/null +++ b/trunk/test/compute/fixtures/opennebula/computes.xml @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/opennebula/disk.xml b/trunk/test/compute/fixtures/opennebula/disk.xml new file mode 100644 index 0000000000..2fb1bc1ae6 --- /dev/null +++ b/trunk/test/compute/fixtures/opennebula/disk.xml @@ -0,0 +1,7 @@ + + + 1 + UbuntuServer9.04-Contextualized + 5120 + file:///Users/oneuser/ubuntu-server-9.04/ubuntu-server-9.04.img + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/opennebula/storage.xml b/trunk/test/compute/fixtures/opennebula/storage.xml new file mode 100644 index 0000000000..678088142d --- /dev/null +++ b/trunk/test/compute/fixtures/opennebula/storage.xml @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/trunk/test/compute/fixtures/openstack/_v1_1__auth.json b/trunk/test/compute/fixtures/openstack/_v1_1__auth.json new file mode 100644 index 0000000000..972d889caf --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/_v1_1__auth.json @@ -0,0 +1 @@ 
+{"auth":{"token":{"id":"603d2bd9-f45c-4583-b91c-2c8eac0b5654","expires":"2011-09-18T02:44:17.000-05:00"},"serviceCatalog":{"cloudFilesCDN":[{"region":"ORD","publicURL":"https:\/\/cdn2.clouddrive.com\/v1\/MossoCloudFS","v1Default":true}],"cloudFiles":[{"region":"ORD","publicURL":"https:\/\/storage101.ord1.clouddrive.com\/v1\/MossoCloudFS","v1Default":true,"internalURL":"https:\/\/snet-storage101.ord1.clouddrive.com\/v1\/MossoCloudFS"}],"cloudServers":[{"publicURL":"https:\/\/servers.api.rackspacecloud.com\/v1.0\/slug","v1Default":true}]}}} diff --git a/trunk/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json b/trunk/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json new file mode 100644 index 0000000000..db0c3d1ada --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/_v1_1__auth_mssing_token.json @@ -0,0 +1 @@ +{"auth":{"token":{"expires":"2011-09-18T02:44:17.000-05:00"},"serviceCatalog":{"cloudFilesCDN":[{"region":"ORD","publicURL":"https:\/\/cdn2.clouddrive.com\/v1\/MossoCloudFS","v1Default":true}],"cloudFiles":[{"region":"ORD","publicURL":"https:\/\/storage101.ord1.clouddrive.com\/v1\/MossoCloudFS","v1Default":true,"internalURL":"https:\/\/snet-storage101.ord1.clouddrive.com\/v1\/MossoCloudFS"}],"cloudServers":[{"publicURL":"https:\/\/servers.api.rackspacecloud.com\/v1.0\/slug","v1Default":true}]}}} diff --git a/trunk/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json b/trunk/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json new file mode 100644 index 0000000000..bdf8a74b83 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/_v1_1__auth_unauthorized.json @@ -0,0 +1 @@ +{"unauthorized":{"message":"Username or api key is invalid","code":401}} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/openstack/_v2_0__auth.json b/trunk/test/compute/fixtures/openstack/_v2_0__auth.json new file mode 100644 index 0000000000..c3436954b0 --- /dev/null +++ 
b/trunk/test/compute/fixtures/openstack/_v2_0__auth.json @@ -0,0 +1 @@ +{"access": {"token": {"expires": "2011-11-08T15:57:43.653263", "id": "aaaaaaaaaaaa-bbb-cccccccccccccc", "tenant": {"id": "45", "name": "testproj-project"}}, "serviceCatalog": [{"endpoints": [{"adminURL": "http://my.fake.hostname:8774/v1.1/slug", "region": "ORD", "internalURL": ".", "publicURL": "http://my.fake.hostname:8774/v1.1/slug"}], "type": "compute", "name": "nova"}], "user": {"id": "45", "roles": [{"tenantId": "45", "id": "2", "name": "Member"}], "name": "testproj"}}} diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml b/trunk/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml new file mode 100644 index 0000000000..9a1f313be2 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_flavors_detail.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_images_detail.xml b/trunk/test/compute/fixtures/openstack/v1_slug_images_detail.xml new file mode 100644 index 0000000000..ae6cbe941d --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_images_detail.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_images_post.xml b/trunk/test/compute/fixtures/openstack/v1_slug_images_post.xml new file mode 100644 index 0000000000..302e94b357 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_images_post.xml @@ -0,0 +1,3 @@ + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_limits.xml b/trunk/test/compute/fixtures/openstack/v1_slug_limits.xml new file mode 100644 index 0000000000..89bd811cb4 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_limits.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_servers.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers.xml new file mode 100644 index 0000000000..eeb150cc3e --- /dev/null +++ 
b/trunk/test/compute/fixtures/openstack/v1_slug_servers.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail.xml new file mode 100644 index 0000000000..ae2d47741b --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml new file mode 100644 index 0000000000..a2d75bab7c --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_missing.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml new file mode 100644 index 0000000000..a35609b565 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_pending.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml new file mode 100644 index 0000000000..cc31715ccf --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_same_uuid.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml new file mode 100644 index 0000000000..6d8a120289 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_deployment_success.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git 
a/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml new file mode 100644 index 0000000000..49fae832e3 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_empty.xml @@ -0,0 +1,2 @@ + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml new file mode 100644 index 0000000000..714199689b --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_servers_detail_metadata.xml @@ -0,0 +1,16 @@ + + + + + somevalue + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_servers_ips.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers_ips.xml new file mode 100644 index 0000000000..cf7b9274de --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_servers_ips.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml b/trunk/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml new file mode 100644 index 0000000000..c9ad5d65cc --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_servers_metadata.xml @@ -0,0 +1,15 @@ + + + + b + d + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml b/trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml new file mode 100644 index 0000000000..01df93817f --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_group.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml b/trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml new file mode 100644 index 0000000000..3654b3402e --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_groups.xml @@ -0,0 +1,5 @@ + + + + + diff --git 
a/trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml b/trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml new file mode 100644 index 0000000000..fa368bd969 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack/v1_slug_shared_ip_groups_detail.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/openstack_v1.1/README b/trunk/test/compute/fixtures/openstack_v1.1/README new file mode 100644 index 0000000000..47bdca9c5e --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/README @@ -0,0 +1,7 @@ +The json responses contained in this directory are copied directly from the +OpenStack 1.1 documentation at +http://docs.openstack.org/trunk/openstack-compute/developer/openstack-compute-api-1.1/ +as of this writing. + +The only exception is _os_quota_sets_aTenantId.json, which was captured (and +perturbed) via packet capture. diff --git a/trunk/test/compute/fixtures/openstack_v1.1/_flavors_7.json b/trunk/test/compute/fixtures/openstack_v1.1/_flavors_7.json new file mode 100644 index 0000000000..900d1b1d25 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_flavors_7.json @@ -0,0 +1 @@ +{"flavor": {"rxtx_quota": 2500, "name": "15.5GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/7", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/7", "rel": "bookmark"}], "ram": 16384, "vcpus": 8, "rxtx_cap": 200, "swap": 0, "disk": 620, "id": 7}} diff --git a/trunk/test/compute/fixtures/openstack_v1.1/_flavors_detail.json b/trunk/test/compute/fixtures/openstack_v1.1/_flavors_detail.json new file mode 100644 index 0000000000..46739e0df8 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_flavors_detail.json @@ -0,0 +1 @@ +{"flavors": [{"rxtx_quota": 2500, "name": "15.5GB slice", "links": [{"href": 
"http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/7", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/7", "rel": "bookmark"}], "ram": 16384, "vcpus": 8, "rxtx_cap": 200, "swap": 0, "disk": 620, "id": 7}, {"rxtx_quota": 600, "name": "1GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/3", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/3", "rel": "bookmark"}], "ram": 1024, "vcpus": 1, "rxtx_cap": 30, "swap": 0, "disk": 40, "id": 3}, {"rxtx_quota": 150, "name": "256 slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/1", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/1", "rel": "bookmark"}], "ram": 256, "vcpus": 1, "rxtx_cap": 10, "swap": 0, "disk": 10, "id": 1}, {"rxtx_quota": 1200, "name": "2GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/4", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/4", "rel": "bookmark"}], "ram": 2048, "vcpus": 2, "rxtx_cap": 60, "swap": 0, "disk": 80, "id": 4}, {"rxtx_quota": 2500, "name": "30GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/8", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/8", "rel": "bookmark"}], "ram": 30720, "vcpus": 8, "rxtx_cap": 400, "swap": 0, "disk": 1200, "id": 8}, {"rxtx_quota": 2500, "name": "4GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/5", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/5", "rel": "bookmark"}], "ram": 4096, 
"vcpus": 2, "rxtx_cap": 100, "swap": 0, "disk": 160, "id": 5}, {"rxtx_quota": 300, "name": "512 slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/2", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}], "ram": 512, "vcpus": 1, "rxtx_cap": 20, "swap": 0, "disk": 20, "id": 2}, {"rxtx_quota": 2500, "name": "8GB slice", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/flavors/6", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/6", "rel": "bookmark"}], "ram": 8192, "vcpus": 4, "rxtx_cap": 150, "swap": 0, "disk": 320, "id": 6}]} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/openstack_v1.1/_images_13.json b/trunk/test/compute/fixtures/openstack_v1.1/_images_13.json new file mode 100644 index 0000000000..2cd7729952 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_images_13.json @@ -0,0 +1 @@ +{"image": {"status": "ACTIVE", "updated": "2011-08-06T18:14:02Z", "name": "Windows 2008 SP2 x86 (B24)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/13", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/13", "rel": "bookmark"}, {"href": "http://10.13.136.245:9292/rs-reach-project/images/13", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-06T18:13:11Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "13", "metadata": {"os_type": "windows"}}} diff --git a/trunk/test/compute/fixtures/openstack_v1.1/_images_detail.json b/trunk/test/compute/fixtures/openstack_v1.1/_images_detail.json new file mode 100644 index 0000000000..4ec661866b --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_images_detail.json @@ -0,0 +1 @@ +{"images": [{"status": "ACTIVE", 
"updated": "2011-08-06T18:14:02Z", "name": "Windows 2008 SP2 x86 (B24)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/13", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/13", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/13", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-06T18:13:11Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "13", "metadata": {"os_type": "windows"}}, {"status": "ACTIVE", "updated": "2011-08-06T18:13:11Z", "name": "Windows 2003 R2 x86 (B24)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/12", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/12", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/12", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-06T18:12:33Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "12", "metadata": {"os_type": "windows"}}, {"status": "ACTIVE", "updated": "2011-08-06T16:27:56Z", "name": "Windows 2008 SP2 x64 (B24)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/11", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/11", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/11", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-06T16:26:15Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "11", "metadata": {"os_type": "windows"}}, {"status": "ACTIVE", "updated": "2011-08-06T16:26:14Z", "name": "Windows 2008 R2 x64 (B24)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/10", "rel": "self"}, {"href": 
"http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/10", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/10", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-06T16:24:51Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "10", "metadata": {"os_type": "windows"}}, {"status": "ACTIVE", "updated": "2011-08-06T16:24:51Z", "name": "Windows 2003 R2 x64 (B24)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/9", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/9", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/9", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-06T16:23:52Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "9", "metadata": {"os_type": "windows"}}, {"status": "ACTIVE", "updated": "2011-08-05T22:58:29Z", "name": "Ubuntu Natty (11.04)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/8", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/8", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/8", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-05T22:58:20Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "8", "metadata": {}}, {"status": "ACTIVE", "updated": "2011-08-05T22:58:19Z", "name": "Ubuntu Lucid (10.04)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/7", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/7", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-05T22:58:14Z", "minDisk": 0, 
"progress": 100, "minRam": 0, "id": "7", "metadata": {}}, {"status": "ACTIVE", "updated": "2011-08-05T22:58:14Z", "name": "Fedora 15", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/6", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/6", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/6", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-05T22:58:01Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "6", "metadata": {}}, {"status": "ACTIVE", "updated": "2011-08-05T22:58:00Z", "name": "Fedora 14", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/5", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/5", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/5", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-05T22:57:47Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "5", "metadata": {}}, {"status": "ACTIVE", "updated": "2011-08-05T22:57:47Z", "name": "Debian Squeeze (6.0)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/4", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/4", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/4", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-05T22:57:41Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "4", "metadata": {}}, {"status": "ACTIVE", "updated": "2011-08-05T22:57:40Z", "name": "Debian Lenny (5.0)", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/3", "rel": "self"}, {"href": 
"http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/3", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/3", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-05T22:57:30Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "3", "metadata": {}}, {"status": "ACTIVE", "updated": "2011-08-05T22:57:30Z", "name": "CentOS 6.0", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/2", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/2", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/2", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-05T22:57:20Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "2", "metadata": {}}, {"status": "ACTIVE", "updated": "2011-08-05T22:56:20Z", "name": "CentOS 5.6", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/images/1", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/1", "rel": "bookmark"}, {"href": "http://10.13.136.170:9292/rs-reach-project/images/1", "type": "application/vnd.openstack.image", "rel": "alternate"}], "created": "2011-08-05T22:56:03Z", "minDisk": 0, "progress": 100, "minRam": 0, "id": "1", "metadata": {}}]} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json b/trunk/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json new file mode 100644 index 0000000000..568350059d --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_os_quota_sets_aTenantId.json @@ -0,0 +1 @@ +{"quota_set": {"metadata_items": 10, "injected_file_content_bytes": 1000, "injected_files": 10, "volumes": 0, "instances": 25, "gigabytes": 500, "cores": 50, "ram": 102400, "id": "aTenantId", "floating_ips": 10}} diff 
--git a/trunk/test/compute/fixtures/openstack_v1.1/_servers.json b/trunk/test/compute/fixtures/openstack_v1.1/_servers.json new file mode 100644 index 0000000000..c50f22bae4 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_servers.json @@ -0,0 +1,78 @@ +{ + "server": { + "id": "52415800-8b69-11e0-9b19-734f565bc83b", + "tenantId": "1234", + "userId": "5678", + "name": "new-server-test", + "created": "2010-11-11T12:00:00Z", + "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0", + "accessIPv4" : "67.23.10.138", + "accessIPv6" : "::babe:67.23.10.138", + "progress": 0, + "status": "BUILD", + "adminPass": "GFf1j9aP", + "image" : { + "id": "52415800-8b69-11e0-9b19-734f6f006e54", + "name": "CentOS 5.2", + "links": [ + { + "rel": "self", + "href": "http://servers.api.openstack.org/v1.1/1234/images/52415800-8b69-11e0-9b19-734f6f006e54" + }, + { + "rel": "bookmark", + "href": "http://servers.api.openstack.org/1234/images/52415800-8b69-11e0-9b19-734f6f006e54" + } + ] + }, + "flavor" : { + "id": "52415800-8b69-11e0-9b19-734f1195ff37", + "name": "256 MB Server", + "links": [ + { + "rel": "self", + "href": "http://servers.api.openstack.org/v1.1/1234/flavors/52415800-8b69-11e0-9b19-734f1195ff37" + }, + { + "rel": "bookmark", + "href": "http://servers.api.openstack.org/1234/flavors/52415800-8b69-11e0-9b19-734f1195ff37" + } + ] + }, + "metadata": { + "My Server Name": "Apache1" + }, + "addresses": { + "public" : [ + { + "version": 4, + "addr": "67.23.10.138" + }, + { + "version": 6, + "addr": "::babe:67.23.10.138" + } + ], + "private" : [ + { + "version": 4, + "addr": "10.176.42.19" + }, + { + "version": 6, + "addr": "::babe:10.176.42.19" + } + ] + }, + "links": [ + { + "rel": "self", + "href": "http://servers.api.openstack.org/v1.1/1234/servers/52415800-8b69-11e0-9b19-734fcece0043" + }, + { + "rel": "bookmark", + "href": "http://servers.api.openstack.org/1234/servers/52415800-8b69-11e0-9b19-734fcece0043" + } + ] + } +} diff --git 
a/trunk/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json b/trunk/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json new file mode 100644 index 0000000000..1d88ccc8ce --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_servers_12063_metadata_two_keys.json @@ -0,0 +1,6 @@ +{ + "metadata" : { + "Server Label" : "Web Head 1", + "Image Version" : "2.1" + } +} diff --git a/trunk/test/compute/fixtures/openstack_v1.1/_servers_12064.json b/trunk/test/compute/fixtures/openstack_v1.1/_servers_12064.json new file mode 100644 index 0000000000..3b36d512a7 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_servers_12064.json @@ -0,0 +1 @@ +{"server": {"status": "ACTIVE", "updated": "2011-10-11T00:44:20Z", "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", "user_id": "rs-reach", "name": "lc-test", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.30"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe77:32e3"}], "private": [{"version": 4, "addr": "10.182.64.29"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:45:02Z", "uuid": "ec53630b-e4fb-442a-a748-c376f5c4345b", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 100, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12064, "metadata": {}}} diff --git a/trunk/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json 
b/trunk/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json new file mode 100644 index 0000000000..02c57eae91 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_servers_12064_updated_name_bob.json @@ -0,0 +1 @@ +{"server": {"status": "ACTIVE", "updated": "2011-10-11T01:22:04Z", "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", "user_id": "rs-reach", "name": "Bob", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.30"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe77:32e3"}], "private": [{"version": 4, "addr": "10.182.64.29"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:45:02Z", "uuid": "ec53630b-e4fb-442a-a748-c376f5c4345b", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 100, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12064, "metadata": {}}} diff --git a/trunk/test/compute/fixtures/openstack_v1.1/_servers_detail.json b/trunk/test/compute/fixtures/openstack_v1.1/_servers_detail.json new file mode 100644 index 0000000000..990aad9905 --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_servers_detail.json @@ -0,0 +1 @@ +{"servers": [{"status": "BUILD", "updated": "2011-10-11T00:50:04Z", "hostId": "912566d83a13fbb357ea3f13c629363d9f7e1ba3f925b49f3d2ab725", "user_id": "rs-reach", "name": "lc-test-2", "links": [{"href": 
"http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12065", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12065", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.35"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe47:788a"}], "private": [{"version": 4, "addr": "10.182.64.34"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe60:187d"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:51:39Z", "uuid": "02786501-714e-40af-8342-9c17eccb166d", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 25, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12065, "metadata": {}}, {"status": "ACTIVE", "updated": "2011-10-11T00:44:20Z", "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", "user_id": "rs-reach", "name": "lc-test", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.30"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe77:32e3"}], "private": [{"version": 4, "addr": "10.182.64.29"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:45:02Z", "uuid": "ec53630b-e4fb-442a-a748-c376f5c4345b", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 100, "flavor": 
{"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12064, "metadata": {}}]} \ No newline at end of file diff --git a/trunk/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json b/trunk/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json new file mode 100644 index 0000000000..2dfcfbb9dc --- /dev/null +++ b/trunk/test/compute/fixtures/openstack_v1.1/_servers_detail_EMPTY.json @@ -0,0 +1,3 @@ +{ + "servers": [] +} diff --git a/trunk/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml b/trunk/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml new file mode 100644 index 0000000000..191c7ae406 --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml @@ -0,0 +1,6 @@ + + Deploy Server + SUCCESS + Server "Deploy" issued + REASON_0 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml new file mode 100644 index 0000000000..4870ee1c1f --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml @@ -0,0 +1,12 @@ + + + + NA1 + US - East + Ashburn + Virginia + US + https://opsource-na1.cloud-vpn.net/ + true + + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml new file mode 100644 index 0000000000..ca27554d8e --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml @@ -0,0 +1,11 @@ + + + + 53b4c05b-341e-4ac3-b688-bdd74e53ca9b + test-net1 + test-net1 description + NA1 + 
10.162.1.0 + false + + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml new file mode 100644 index 0000000000..90686b9038 --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml @@ -0,0 +1,7 @@ + + + Delete Server + SUCCESS + Server "Delete" issued + REASON_0 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml new file mode 100644 index 0000000000..df55852b14 --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml @@ -0,0 +1,7 @@ + + + Delete Server + ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml new file mode 100644 index 0000000000..03c0f3d94d --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml @@ -0,0 +1,7 @@ + + + Power Off Server + SUCCESS + Server "Power Off" issued + REASON_0 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml new file mode 100644 index 0000000000..7b9a9d7e0f --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml @@ -0,0 +1,7 @@ + + + Power Off Server + 
ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml new file mode 100644 index 0000000000..0638febd1e --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml @@ -0,0 +1,6 @@ + + Restart Server + SUCCESS + Server "Restart" issued + REASON_0 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml new file mode 100644 index 0000000000..ab9e31f66b --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml @@ -0,0 +1,7 @@ + + + Restart Server + ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml new file mode 100644 index 0000000000..b0937c88b4 --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml @@ -0,0 +1,6 @@ + + Graceful Shutdown Server + SUCCESS + Server "Graceful Shutdown" issued + REASON_0 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml new file mode 100644 index 0000000000..eb3cbb52a4 --- /dev/null +++ 
b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml @@ -0,0 +1,7 @@ + + + Graceful Shutdown Server + ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml new file mode 100644 index 0000000000..274e05f77d --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml @@ -0,0 +1,7 @@ + + + Start Server + SUCCESS + Server "Start" issued + REASON_0 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml new file mode 100644 index 0000000000..6d1714f261 --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml @@ -0,0 +1,7 @@ + + + Start Server + ERROR + Operation in progress on Server with Id 11 + REASON_392 + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml new file mode 100644 index 0000000000..ae48208bc6 --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml @@ -0,0 +1,45 @@ + + + + abadbc7e-9e10-46ca-9d4a-194bcc6b6c16 + testnode01 + this is testnode01 description + + 2 + 2048 + 10 + 20 + + UNIX + REDHAT5/64 + + + 44ed8b72-ebea-11df-bdc1-001517c46384 + 53b4c05b-341e-4ac3-b688-bdd78e43ca9e + 10.162.1.1 + 10-162-1-1 + true + 2011-03-02T17:16:09.882Z + + + dbadbc8e-9e10-56ca-5d4a-155bcc5b5c15 + testnode02 + this is testnode02 
description + + 4 + 4096 + 10 + 20 + + UNIX + REDHAT5/64 + + + 44ed8b72-ebea-11df-bdc1-001517c46384 + 53b4c05b-341e-4ac3-b688-bdd78e43ca9e + 10.162.1.2 + 10-162-1-2 + true + 2011-03-02T17:16:10.882Z + + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml new file mode 100644 index 0000000000..f24073f5b3 --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml @@ -0,0 +1,26 @@ + + + + e75ead52-692f-4314-8725-c8a4f4d13a87 + test2 + test2 node + + 1 + 2048 + 10 + 0 + + UNIX + REDHAT5/64 + + + 52ed8b72-ebea-11df-bdc1-001517c46384 + 52f4c05b-341e-4ac3-b688-bdd78e43ca9e + 10.162.151.11 + + DEPLOY_SERVER + 2011-03-20T22:32:23.000Z + copia + + + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_base_image.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_base_image.xml new file mode 100644 index 0000000000..3be14f069f --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_base_image.xml @@ -0,0 +1,339 @@ + + + + 52ed8b72-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8b72-ebea-11df-bdc1-001517c46384 + RedHat 5.5 64-bit 1 CPU + RedHat 5.5 Enterprise (Tikanga), 64-bit + + UNIX + REDHAT5/64 + + NA1 + 1 + 2048 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed8dca-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8dca-ebea-11df-bdc1-001517c46384 + RedHat 5.5 64-bit 2 CPU + RedHat 5.5 Enterprise (Tikanga), 64-bit + + UNIX + REDHAT5/64 + + NA1 + 2 + 4096 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed8ed8-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8ed8-ebea-11df-bdc1-001517c46384 + RedHat 5.5 64-bit 4 CPU + RedHat 5.5 Enterprise (Tikanga), 64-bit + + UNIX + REDHAT5/64 + + NA1 + 4 + 6144 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 6fc040ae-3605-11e0-bfb5-001517c46384 + 
/oec/base/image/6fc040ae-3605-11e0-bfb5-001517c46384 + RedHat 5.5 32-bit 1 CPU + RedHat 5.5 Enterprise (Tikanga), 32-bit + + UNIX + REDHAT5/32 + + NA1 + 1 + 2048 + 10 + 0 + 2011-02-11T17:36:19.000Z + + + 52ed92d4-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed92d4-ebea-11df-bdc1-001517c46384 + Ubuntu 8.04.4 2 CPU + Ubuntu 8.04.4 LTS, 64-bit + + UNIX + UBUNTU8/64 + + NA1 + 2 + 4096 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed876c-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed876c-ebea-11df-bdc1-001517c46384 + Win2008 Ent 64-bit R2 2 CPU + Windows 2008 Enterprise R2 64-bit + + WINDOWS + WIN2008R2E/64 + + NA1 + 2 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed8a5a-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8a5a-ebea-11df-bdc1-001517c46384 + Win2008 Ent 64-bit R2 4 CPU + Windows 2008 Enterprise R2 64-bit + + WINDOWS + WIN2008R2E/64 + + NA1 + 4 + 8192 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed865e-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed865e-ebea-11df-bdc1-001517c46384 + Win2008 Std 64-bit R2 2 CPU + Windows 2008 Standard R2 64-bit + + WINDOWS + WIN2008R2S/64 + + NA1 + 2 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7b96-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7b96-ebea-11df-bdc1-001517c46384 + Win2008 Std 32-bit 1 CPU + Windows 2008 Standard SP2 32-bit + + WINDOWS + WIN2008S/32 + + NA1 + 1 + 2048 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7cb8-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7cb8-ebea-11df-bdc1-001517c46384 + Win2008 Std 32-bit 2 CPU + Windows 2008 Standard SP2 32-bit + + WINDOWS + WIN2008S/32 + + NA1 + 2 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7da8-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7da8-ebea-11df-bdc1-001517c46384 + Win2008 Std 32-bit 4 CPU + Windows 2008 Standard SP2 32-bit + + WINDOWS + WIN2008S/32 + + NA1 + 4 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7ea2-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7ea2-ebea-11df-bdc1-001517c46384 + Win2008 Ent 
32-bit 2 CPU + Windows 2008 Enterprise SP2 32-bit + + WINDOWS + WIN2008E/32 + + NA1 + 2 + 4096 + 50 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed8fd2-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed8fd2-ebea-11df-bdc1-001517c46384 + Red Hat 4.8 32-bit 1 CPU + Red Hat ES 4.8 (Nahant), 32-bit + + UNIX + REDHAT4/32 + + NA1 + 1 + 2048 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed90cc-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed90cc-ebea-11df-bdc1-001517c46384 + CentOS 5.5 32-bit 1 CPU + CentOS release 5.5, 32-bit + + UNIX + CENTOS5/32 + + NA1 + 1 + 2048 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed91da-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed91da-ebea-11df-bdc1-001517c46384 + CentOS 5.5 64-bit 1 CPU + CentOS release 5.5, 64-bit + + UNIX + CENTOS5/64 + + NA1 + 1 + 2048 + 10 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed766e-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed766e-ebea-11df-bdc1-001517c46384 + Win2003 Ent 32-bit 1 CPU + Windows 2003 Enterprise SP2 32-bit + + WINDOWS + WIN2003E/32 + + NA1 + 1 + 2048 + 16 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7876-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7876-ebea-11df-bdc1-001517c46384 + Win2003 Ent 32-bit 2 CPU + Windows 2003 Enterprise SP2 32-bit + + WINDOWS + WIN2003E/32 + + NA1 + 2 + 4096 + 16 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7984-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7984-ebea-11df-bdc1-001517c46384 + Win2003 Ent 32-bit 4 CPU + Windows 2003 Enterprise SP2 32-bit + + WINDOWS + WIN2003E/32 + + NA1 + 4 + 4096 + 16 + 0 + 1970-01-01T00:00:02.010Z + + + 52ed7a88-ebea-11df-bdc1-001517c46384 + /oec/base/image/52ed7a88-ebea-11df-bdc1-001517c46384 + Win2003 Std 64-bit 2 CPU + Windows 2003 Standard x64 SP2, 64-bit + + WINDOWS + WIN2003S/64 + + NA1 + 2 + 4096 + 16 + 0 + 1970-01-01T00:00:02.010Z + + + 0c231ef0-2a42-11e0-bfb5-001517c46384 + /oec/base/image/0c231ef0-2a42-11e0-bfb5-001517c46384 + RedHat 64-bit 2 CPU with MySQL + RedHat 5.5 Enterprise with MySQL 5.5 installed + + UNIX + 
REDHAT5/64 + + NA1 + 2 + 8192 + 10 + 0 + 2011-01-27T18:19:58.000Z + + + 2fb5261a-2a42-11e0-bfb5-001517c46384 + /oec/base/image/2fb5261a-2a42-11e0-bfb5-001517c46384 + RedHat 64-bit 2 CPU with PostgreSQL + RedHat 5.5 Enterprise with PostgreSQL 9.0 installed + + UNIX + REDHAT5/64 + + NA1 + 2 + 8192 + 10 + 0 + 2011-01-27T18:20:57.000Z + + diff --git a/trunk/test/compute/fixtures/opsource/oec_0_9_myaccount.xml b/trunk/test/compute/fixtures/opsource/oec_0_9_myaccount.xml new file mode 100644 index 0000000000..4f3b132879 --- /dev/null +++ b/trunk/test/compute/fixtures/opsource/oec_0_9_myaccount.xml @@ -0,0 +1,26 @@ + + + testuser + Test User + Test + User + test@example.com + 8a8f6abc-2745-4d8a-9cbc-8dabe5a7d0e4 + + + create image + + + reports + + + server + + + primary administrator + + + network + + + diff --git a/trunk/test/compute/fixtures/rimuhosting/r_distributions.json b/trunk/test/compute/fixtures/rimuhosting/r_distributions.json new file mode 100644 index 0000000000..5e29122094 --- /dev/null +++ b/trunk/test/compute/fixtures/rimuhosting/r_distributions.json @@ -0,0 +1,22 @@ +{ "get_distros_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "Here are the distros we are offering on new orders." 
+ , "response_display_duration_type" : "REGULAR" + , "distro_infos" : + [{ "distro_code" : "lenny" + , "distro_description" : "Debian 5.0 (aka Lenny, RimuHosting recommended distro)"} + , { "distro_code" : "centos5" + , "distro_description" : "Centos5"} + , { "distro_code" : "ubuntu904" + , "distro_description" : "Ubuntu 9.04 (Jaunty Jackalope, from 2009-04)"} + , { "distro_code" : "ubuntu804" + , "distro_description" : "Ubuntu 8.04 (Hardy Heron, 5 yr long term support (LTS))"} + , { "distro_code" : "ubuntu810" + , "distro_description" : "Ubuntu 8.10 (Intrepid Ibex, from 2008-10)"} + , { "distro_code" : "fedora10" + , "distro_description" : "Fedora 10"}] + } +} diff --git a/trunk/test/compute/fixtures/rimuhosting/r_orders.json b/trunk/test/compute/fixtures/rimuhosting/r_orders.json new file mode 100644 index 0000000000..0706efce29 --- /dev/null +++ b/trunk/test/compute/fixtures/rimuhosting/r_orders.json @@ -0,0 +1,27 @@ +{ "get_orders_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "Found 15 orders" + , "response_display_duration_type" : "REGULAR", + "about_orders" : + [{ "order_oid" : 88833465 + , "domain_name" : "api.ivan.net.nz" + , "slug" : "order-88833465-api-ivan-net-nz" + , "billing_oid" : 96122465 + , "is_on_customers_own_physical_server" : false + , "vps_parameters" : { "memory_mb" : 160 + , "disk_space_mb" : 4096 + , "disk_space_2_mb" : 0} + , "host_server_oid" : "764" + , "server_type" : "VPS" + , "data_transfer_allowance" : { "data_transfer_gb" : 30 + , "data_transfer" : "30"} + , "billing_info" : { "monthly_recurring_fee": 19.99 } + , "allocated_ips" : { "primary_ip" : "1.2.3.4" + , "secondary_ips" : ["1.2.3.5","1.2.3.6"]} + , "running_state" : "RUNNING" + }] + } +} diff --git a/trunk/test/compute/fixtures/rimuhosting/r_orders_new_vps.json b/trunk/test/compute/fixtures/rimuhosting/r_orders_new_vps.json new file mode 100644 index 0000000000..3f0ce39b8e --- 
/dev/null +++ b/trunk/test/compute/fixtures/rimuhosting/r_orders_new_vps.json @@ -0,0 +1,62 @@ +{ "post_new_vps_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : null + , "response_display_duration_type" : "REGULAR" + , "setup_messages" : + ["Using user-specified billing data: Wire Transfer" , "Selected user as the owner of the billing details: Ivan Meredith" + , "No VPS paramters provided, using default values."] + , "about_order" : + { "order_oid" : 52255865 + , "domain_name" : "api.ivan.net.nz" + , "slug" : "order-52255865-api-ivan-net-nz" + , "billing_oid" : 96122465 + , "is_on_customers_own_physical_server" : false + , "vps_parameters" : + { "memory_mb" : 160 + , "disk_space_mb" : 4096 + , "disk_space_2_mb" : 0} + , "host_server_oid" : "764" + , "server_type" : "VPS" + , "data_transfer_allowance" : + { "data_transfer_gb" : 30 , "data_transfer" : "30"} + , "billing_info" : { "monthly_recurring_fee" : 19.99 } + , "allocated_ips" : + { "primary_ip" : "74.50.57.80", "secondary_ips" : []} + , "running_state" : "RUNNING"} + , "new_order_request" : + { "billing_oid" : 96122465 + , "user_oid" : 0 + , "host_server_oid" : null + , "vps_order_oid_to_clone" : 0 + , "ip_request" : + { "num_ips" : 1, "extra_ip_reason" : ""} + , "vps_parameters" : + { "memory_mb" : 160 + , "disk_space_mb" : 4096 + , "disk_space_2_mb" : 0} + , "pricing_plan_code" : "MIRO1B" + , "instantiation_options" : + { "control_panel" : "webmin" + , "domain_name" : "api.ivan.net.nz" + , "password" : "aruxauce27" + , "distro" : "lenny"}} + , "running_vps_info" : + { "pings_ok" : true + , "current_kernel" : "default" + , "current_kernel_canonical" : "2.6.30.5-xenU.i386" + , "last_backup_message" : "" + , "is_console_login_enabled" : false + , "console_public_authorized_keys" : null + , "is_backup_running" : false + , "is_backups_enabled" : true + , "next_backup_time" : + { "ms_since_epoch": 1256446800000, 
"iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000} + , "vps_uptime_s" : 31 + , "vps_cpu_time_s" : 6 + , "running_state" : "RUNNING" + , "is_suspended" : false} + } +} diff --git a/trunk/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json b/trunk/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json new file mode 100644 index 0000000000..ca2f635cff --- /dev/null +++ b/trunk/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps.json @@ -0,0 +1,13 @@ +{ "delete_server_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "Server removed" + , "response_display_duration_type" : "REGULAR" + , "cancel_messages" : + ["api.ivan.net.nz is being shut down." + , "A $7.98 credit has been added to your account." + , "If you need to un-cancel the server please contact our support team."] + } +} diff --git a/trunk/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json b/trunk/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json new file mode 100644 index 0000000000..9deaf6ca03 --- /dev/null +++ b/trunk/test/compute/fixtures/rimuhosting/r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json @@ -0,0 +1,40 @@ +{ "put_running_state_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "api.ivan.net.nz restarted. After the reboot api.ivan.net.nz is pinging OK." 
+ , "response_display_duration_type" : "REGULAR" + , "is_restarted" : true + , "is_pinging" : true + , "running_vps_info" : + { "pings_ok" : true + , "current_kernel" : "default" + , "current_kernel_canonical" : "2.6.30.5-xenU.i386" + , "last_backup_message" : "" + , "is_console_login_enabled" : false + , "console_public_authorized_keys" : null + , "is_backup_running" : false + , "is_backups_enabled" : true + , "next_backup_time" : + { "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000} + , "vps_uptime_s" : 19 + , "vps_cpu_time_s" : 5 + , "running_state" : "RUNNING" + , "is_suspended" : false} + , "host_server_info" : { "is_host64_bit_capable" : true + , "default_kernel_i386" : "2.6.30.5-xenU.i386" + , "default_kernel_x86_64" : "2.6.30.5-xenU.x86_64" + , "cpu_model_name" : "Intel(R) Xeon(R) CPU E5506 @ 2.13GHz" + , "host_num_cores" : 1 + , "host_xen_version" : "3.4.1" + , "hostload" : [1.45 + , 0.56 + , 0.28] + , "host_uptime_s" : 3378276 + , "host_mem_mb_free" : 51825 + , "host_mem_mb_total" : 73719 + , "running_vpss" : 34} + , "running_state_messages" : null + } +} diff --git a/trunk/test/compute/fixtures/rimuhosting/r_pricing_plans.json b/trunk/test/compute/fixtures/rimuhosting/r_pricing_plans.json new file mode 100644 index 0000000000..c0e2962eb9 --- /dev/null +++ b/trunk/test/compute/fixtures/rimuhosting/r_pricing_plans.json @@ -0,0 +1,26 @@ +{"get_pricing_plans_response" : + { "status_message" : null + , "status_code" : 200 + , "error_info" : null + , "response_type" : "OK" + , "human_readable_message" : "Here some pricing plans we are offering on new orders.  Note we offer most disk and memory sizes.  So if you setup a new server feel free to vary these (e.g. different memory, disk, etc) and we will just adjust the pricing to suit.  Pricing is in USD.  If you are an NZ-based customer then we would need to add GST." 
+ , "response_display_duration_type" : "REGULAR" + , "pricing_plan_infos" : + [{ "pricing_plan_code" : "MiroVPSLowContention" + , "pricing_plan_description" : "MiroVPS Semi-Dedicated Server (Dallas)" + , "monthly_recurring_fee" : 32.54 + , "monthly_recurring_amt" : { + "amt" : 35.0 + , "currency" : "CUR_AUD" + , "amt_usd" : 32.54} + , "minimum_memory_mb" : 950 + , "minimum_disk_gb" : 20 + , "minimum_data_transfer_allowance_gb" : 75 + , "see_also_url" : "http://rimuhosting.com/order/serverdetails.jsp?plan=MiroVPSLowContention" + , "server_type" : "VPS" + , "offered_at_data_center" : + { "data_center_location_code" : "DCDALLAS" + , "data_center_location_name" : "Dallas" } + }] + } +} diff --git a/trunk/test/compute/fixtures/slicehost/flavors.xml b/trunk/test/compute/fixtures/slicehost/flavors.xml new file mode 100644 index 0000000000..552e5242dc --- /dev/null +++ b/trunk/test/compute/fixtures/slicehost/flavors.xml @@ -0,0 +1,45 @@ + + + + 1 + 256 slice + 2000 + 256 + + + 2 + 512 slice + 3800 + 512 + + + 3 + 1GB slice + 7000 + 1024 + + + 4 + 2GB slice + 13000 + 2048 + + + 5 + 4GB slice + 25000 + 4096 + + + 6 + 8GB slice + 45000 + 8192 + + + 7 + 15.5GB slice + 80000 + 15872 + + diff --git a/trunk/test/compute/fixtures/slicehost/images.xml b/trunk/test/compute/fixtures/slicehost/images.xml new file mode 100644 index 0000000000..f87b4e7301 --- /dev/null +++ b/trunk/test/compute/fixtures/slicehost/images.xml @@ -0,0 +1,47 @@ + + + + CentOS 5.2 + 2 + + + Gentoo 2008.0 + 3 + + + Debian 5.0 (lenny) + 4 + + + Fedora 10 (Cambridge) + 5 + + + CentOS 5.3 + 7 + + + Ubuntu 9.04 (jaunty) + 8 + + + Arch 2009.02 + 9 + + + Ubuntu 8.04.2 LTS (hardy) + 10 + + + Ubuntu 8.10 (intrepid) + 11 + + + Red Hat EL 5.3 + 12 + + + Fedora 11 (Leonidas) + 13 + + diff --git a/trunk/test/compute/fixtures/slicehost/slices_1_reboot.xml b/trunk/test/compute/fixtures/slicehost/slices_1_reboot.xml new file mode 100644 index 0000000000..8dac90b907 --- /dev/null +++ 
b/trunk/test/compute/fixtures/slicehost/slices_1_reboot.xml @@ -0,0 +1,15 @@ + + libcloud-test + 10 + +
174.143.212.229
+
10.176.164.199
+
+ 100 + 70507 + 0.0 + 0.0 + 1 + reboot + 174.143.212.229 +
diff --git a/trunk/test/compute/fixtures/slicehost/slices_1_reboot_forbidden.xml b/trunk/test/compute/fixtures/slicehost/slices_1_reboot_forbidden.xml new file mode 100644 index 0000000000..cb8c99b237 --- /dev/null +++ b/trunk/test/compute/fixtures/slicehost/slices_1_reboot_forbidden.xml @@ -0,0 +1,3 @@ + + Permission denied + diff --git a/trunk/test/compute/fixtures/slicehost/slices_errors.xml b/trunk/test/compute/fixtures/slicehost/slices_errors.xml new file mode 100644 index 0000000000..6555aa11d5 --- /dev/null +++ b/trunk/test/compute/fixtures/slicehost/slices_errors.xml @@ -0,0 +1,4 @@ + + + Slice parameters are not properly nested + diff --git a/trunk/test/compute/fixtures/slicehost/slices_get.xml b/trunk/test/compute/fixtures/slicehost/slices_get.xml new file mode 100644 index 0000000000..5fb51bf0e3 --- /dev/null +++ b/trunk/test/compute/fixtures/slicehost/slices_get.xml @@ -0,0 +1,17 @@ + + + libcloud-foo + 10 + +
174.143.212.229
+
10.176.164.199
+
+ 0 + 1 + 0.0 + 0.0 + 1 + build + 174.143.212.229 +
+
diff --git a/trunk/test/compute/fixtures/slicehost/slices_post.xml b/trunk/test/compute/fixtures/slicehost/slices_post.xml new file mode 100644 index 0000000000..2257f2660d --- /dev/null +++ b/trunk/test/compute/fixtures/slicehost/slices_post.xml @@ -0,0 +1,16 @@ + + slicetest + 11 + +
10.176.168.15
+
67.23.20.114
+
+ fooadfa1231 + 0 + 71907 + 0.0 + 0.0 + 1 + build + 10.176.168.15 +
diff --git a/trunk/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml b/trunk/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml new file mode 100644 index 0000000000..0452f07a64 --- /dev/null +++ b/trunk/test/compute/fixtures/softlayer/v3_SoftLayer_Account_getVirtualGuests.xml @@ -0,0 +1,1066 @@ + + + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 1832 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + eaa9aaa2-8e2e-d6e0-ce11-6f01e765779c + + + + hostname + + test1 + + + + domain + + libcloud.org + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 2048 + + + + startCpus + + 2 + + + + createDate + + 2009-09-04T14:49:45-05:00 + + + + modifyDate + + 2010-04-22T13:08:47-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + powerState + + + + name + + Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 191115 + + + + hardwareId + + + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 166980 + + + + softwareId + + 191115 + + + + username + + root + + + + password + + TEST + + + + createDate + + 2009-09-04T14:49:51-05:00 + + + + modifyDate + + 2009-09-04T14:49:51-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 67.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 13402 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + 9e9e9e99-4ed9-4645-19f3-55ee4e404d56 + + + + hostname + + test2 + + + + domain + + libcloud.org + + + + maxCpu + + 1 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 1024 + + + + startCpus + + 1 + + + + createDate + + 2010-03-23T17:06:13-05:00 + + + + modifyDate + + 2010-04-22T13:08:43-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 0 + + + + powerState + + + + name + + 
Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 257314 + + + + hardwareId + + + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 235268 + + + + softwareId + + 257314 + + + + username + + root + + + + password + + TEST + + + + createDate + + 2010-03-23T17:06:17-05:00 + + + + modifyDate + + 2010-03-23T17:06:17-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 174.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 19293 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + 9f99e19b-2c61-9cd5-2081-67b57fd7977b + + + + hostname + + test3 + + + + domain + + libcloud.org + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 1024 + + + + startCpus + + 2 + + + + createDate + + 2010-04-22T12:38:53-05:00 + + + + modifyDate + + 2010-04-22T13:08:01-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + powerState + + + + name + + Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 277185 + + + + hardwareId + + + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 250826 + + + + softwareId + + 277185 + + + + username + + root + + + + password + + TEST + + + + createDate + + 2010-04-22T12:38:57-05:00 + + + + modifyDate + + 2010-04-22T12:38:57-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 174.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 19288 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + 999f77d9-679b-c47d-136d-04cd302384ec + + + + hostname + + test4 + + + + domain + + libcloud.org + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 1024 + + + + startCpus + + 2 + + + + createDate + + 2010-04-22T12:15:24-05:00 + + + 
+ modifyDate + + 2010-04-22T13:08:31-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + powerState + + + + name + + Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 277171 + + + + hardwareId + + + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 250815 + + + + softwareId + + 277171 + + + + username + + root + + + + password + + TEST + + + + createDate + + 2010-04-22T12:15:26-05:00 + + + + modifyDate + + 2010-04-22T12:15:26-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 174.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + privateNetworkOnlyFlag + + 0 + + + + id + + 19284 + + + + accountId + + 11111 + + + + statusId + + 1001 + + + + uuid + + f3c73738-7731-1372-f3c3-e6808082f824 + + + + hostname + + test5 + + + + domain + + libcloud.org + + + + maxCpu + + 2 + + + + maxCpuUnits + + CORE + + + + maxMemory + + 1024 + + + + startCpus + + 2 + + + + createDate + + 2010-04-22T12:11:23-05:00 + + + + modifyDate + + 2010-04-22T13:08:31-05:00 + + + + metricPollDate + + 2010-04-22T13:08:00-05:00 + + + + dedicatedAccountHostOnlyFlag + + 1 + + + + powerState + + + + name + + Running + + + + keyName + + RUNNING + + + + + + + softwareComponents + + + + + + + id + + 277167 + + + + hardwareId + + + + + + manufacturerLicenseInstance + + + + + + passwords + + + + + + + id + + 250811 + + + + softwareId + + 277167 + + + + username + + root + + + + password + + TEST + + + + createDate + + 2010-04-22T12:11:27-05:00 + + + + modifyDate + + 2010-04-22T12:11:27-05:00 + + + + port + + + + + + + + + + + + + + + + + + primaryIpAddress + + 174.254.254.254 + + + + primaryBackendIpAddress + + 10.254.254.254 + + + + + + + + + diff --git a/trunk/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml b/trunk/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml 
new file mode 100644 index 0000000000..9b3799dee6 --- /dev/null +++ b/trunk/test/compute/fixtures/softlayer/v3_SoftLayer_Location_Datacenter_getDatacenters.xml @@ -0,0 +1,99 @@ + + + + + + + + + + id + + 2 + + + + name + + dal00 + + + + longName + + Corporate HQ + + + + + + + + id + + 3 + + + + name + + dal01 + + + + longName + + Dallas + + + + + + + + id + + 18171 + + + + name + + sea01 + + + + longName + + Seattle + + + + + + + + id + + 37473 + + + + name + + wdc01 + + + + longName + + Washington, DC + + + + + + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml new file mode 100644 index 0000000000..005ca534e9 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_catalogItem_5.xml @@ -0,0 +1,6 @@ + + + + + 0 + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_login.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_login.xml new file mode 100644 index 0000000000..7888f04673 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_login.xml @@ -0,0 +1,3 @@ + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_org_240.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_org_240.xml new file mode 100644 index 0000000000..e097046489 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_org_240.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_task_10496.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_task_10496.xml new file mode 100644 index 0000000000..25baec7e9b --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_task_10496.xml @@ -0,0 +1,4 @@ + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_task_11001.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_task_11001.xml new file mode 100644 index 0000000000..a81d59ec47 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_task_11001.xml @@ -0,0 +1,4 @@ + + + + diff 
--git a/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml new file mode 100644 index 0000000000..35692ad4ba --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031.xml @@ -0,0 +1,132 @@ + + + + + + + 10.112.78.69 + + + + The kind of installed guest operating system + Red Hat Enterprise Linux 5 (32-bit) + + + Virtual Hardware + + + + + + + + + + + + + Virtual Hardware Family + 0 + + + + + + testerpart2 + vmx-07 + + +
+ + hertz * 10^6 + + + + + Number of Virtual CPUs + 2 virtual CPU(s) + 1 + + + + + + + + 3 + 2 + count + + + +
+ + byte * 2^20 + + + + + Memory Size + 512MB of memory + 2 + + + + + + + + 4 + 512 + byte * 2^20 + + + +
0
+ + + + + + + SCSI Controller + SCSI Controller 0 + 3 + + + + + + + lsilogic + 6 + + + +
+ +
+ 0 + + + + + + + Hard Disk 1 + 10485760 + 9 + + + + 3 + + + + 17 + 10485760 + + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml new file mode 100644 index 0000000000..4203cfd41a --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_action_deploy.xml @@ -0,0 +1,4 @@ + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml new file mode 100644 index 0000000000..35692ad4ba --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_get.xml @@ -0,0 +1,132 @@ + + + + + + + 10.112.78.69 + + + + The kind of installed guest operating system + Red Hat Enterprise Linux 5 (32-bit) + + + Virtual Hardware + + + + + + + + + + + + + Virtual Hardware Family + 0 + + + + + + testerpart2 + vmx-07 + + +
+ + hertz * 10^6 + + + + + Number of Virtual CPUs + 2 virtual CPU(s) + 1 + + + + + + + + 3 + 2 + count + + + +
+ + byte * 2^20 + + + + + Memory Size + 512MB of memory + 2 + + + + + + + + 4 + 512 + byte * 2^20 + + + +
0
+ + + + + + + SCSI Controller + SCSI Controller 0 + 3 + + + + + + + lsilogic + 6 + + + +
+ +
+ 0 + + + + + + + Hard Disk 1 + 10485760 + 9 + + + + 3 + + + + 17 + 10485760 + + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml new file mode 100644 index 0000000000..4f0b52bac6 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_powerOn.xml @@ -0,0 +1,4 @@ + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml new file mode 100644 index 0000000000..22dd84e9f5 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_poweroff.xml @@ -0,0 +1,4 @@ + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml new file mode 100644 index 0000000000..35fdc8ecf6 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_vapp_14031_power_action_reset.xml @@ -0,0 +1,4 @@ + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml new file mode 100644 index 0000000000..9da42da547 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml new file mode 100644 index 0000000000..b6a216b076 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224_action_instantiateVAppTemplate.xml @@ -0,0 +1,3 @@ + + + diff --git a/trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml b/trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml new file 
mode 100644 index 0000000000..286a3befe4 --- /dev/null +++ b/trunk/test/compute/fixtures/terremark/api_v0_8_vdc_224_catalog.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/trunk/test/compute/fixtures/voxel/create_node.xml b/trunk/test/compute/fixtures/voxel/create_node.xml new file mode 100644 index 0000000000..e33bdd6c44 --- /dev/null +++ b/trunk/test/compute/fixtures/voxel/create_node.xml @@ -0,0 +1,7 @@ + + + 1234 + 1235386846 + QUEUED + + diff --git a/trunk/test/compute/fixtures/voxel/failure.xml b/trunk/test/compute/fixtures/voxel/failure.xml new file mode 100644 index 0000000000..1424e812fd --- /dev/null +++ b/trunk/test/compute/fixtures/voxel/failure.xml @@ -0,0 +1 @@ + diff --git a/trunk/test/compute/fixtures/voxel/images.xml b/trunk/test/compute/fixtures/voxel/images.xml new file mode 100644 index 0000000000..7d83ea6294 --- /dev/null +++ b/trunk/test/compute/fixtures/voxel/images.xml @@ -0,0 +1,19 @@ + + + + + 32 + Linux + 2.6.18 + CentOS + 5.1 + root + + + ext3 + 880 + + Base install of CentOS 5.1 i386. + + + diff --git a/trunk/test/compute/fixtures/voxel/locations.xml b/trunk/test/compute/fixtures/voxel/locations.xml new file mode 100644 index 0000000000..a8b81b1d23 --- /dev/null +++ b/trunk/test/compute/fixtures/voxel/locations.xml @@ -0,0 +1,10 @@ + + + + Amsterdam + + + New York + + + diff --git a/trunk/test/compute/fixtures/voxel/nodes.xml b/trunk/test/compute/fixtures/voxel/nodes.xml new file mode 100644 index 0000000000..f9a3117bab --- /dev/null +++ b/trunk/test/compute/fixtures/voxel/nodes.xml @@ -0,0 +1,38 @@ + + + + Z100.12 + Virtual Server + Voxrox Intel Platform + + LGA7 - XO / 12th Floor + Private cage + primary + Row Z + Rack 100 + 12 + + + + 172.x.x.x + + + + + user + + + root + + +
zz.zz.us.voxel.net
+ 55555 + user +
+
+ Voxel TruManaged Server Configuration 1 +
+ ... +
+
diff --git a/trunk/test/compute/fixtures/voxel/success.xml b/trunk/test/compute/fixtures/voxel/success.xml new file mode 100644 index 0000000000..c2d0c8d48e --- /dev/null +++ b/trunk/test/compute/fixtures/voxel/success.xml @@ -0,0 +1 @@ + diff --git a/trunk/test/compute/fixtures/voxel/unauthorized.xml b/trunk/test/compute/fixtures/voxel/unauthorized.xml new file mode 100644 index 0000000000..89d7df0b04 --- /dev/null +++ b/trunk/test/compute/fixtures/voxel/unauthorized.xml @@ -0,0 +1,11 @@ + + + + voxel.devices.list + + 2010-02-10T23:39:25.808107+0000 + authshouldfail + ae069bb835e998622caaddaeff8c98e0 + + YOUR_SECRETtimestamp2010-02-10T23:39:25.808107+0000methodvoxel.devices.listkeyauthshouldfail + diff --git a/trunk/test/compute/test_backward_compatibility.py b/trunk/test/compute/test_backward_compatibility.py new file mode 100644 index 0000000000..b51c422e7f --- /dev/null +++ b/trunk/test/compute/test_backward_compatibility.py @@ -0,0 +1,54 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest + +class BackwardCompatibilityTests(unittest.TestCase): + def test_all_the_old_paths_works(self): + # Common + from libcloud.types import InvalidCredsError + from libcloud.base import Node, NodeImage, NodeSize, NodeLocation + from libcloud.types import NodeState + from libcloud.types import LibcloudError + + from libcloud.base import Response + from libcloud.base import ConnectionKey, ConnectionUserAndKey + from libcloud.base import NodeAuthPassword + + # Driver specific + from libcloud.drivers.brightbox import BrightboxNodeDriver + from libcloud.drivers.cloudsigma import CloudSigmaZrhNodeDriver + from libcloud.drivers.rimuhosting import RimuHostingNodeDriver + from libcloud.drivers.elastichosts import ElasticHostsBaseNodeDriver + from libcloud.drivers.gogrid import GoGridNodeDriver + from libcloud.common.gogrid import GoGridIpAddress + from libcloud.drivers.linode import LinodeNodeDriver + from libcloud.drivers.vpsnet import VPSNetNodeDriver + from libcloud.drivers.opennebula import OpenNebulaNodeDriver + from libcloud.drivers.ibm_sbc import IBMNodeDriver as IBM + from libcloud.drivers.rackspace import RackspaceNodeDriver as Rackspace + from libcloud.drivers.ec2 import EC2NodeDriver, EC2APSENodeDriver + from libcloud.drivers.ec2 import EC2APNENodeDriver, IdempotentParamError + from libcloud.drivers.voxel import VoxelNodeDriver as Voxel + from libcloud.drivers.vcloud import TerremarkDriver + from libcloud.drivers.vcloud import VCloudNodeDriver + from libcloud.drivers.slicehost import SlicehostNodeDriver as Slicehost + from libcloud.drivers.softlayer import SoftLayerNodeDriver as SoftLayer + from libcloud.drivers.ecp import ECPNodeDriver + + from libcloud.drivers.cloudsigma import str2dicts, str2list, dict2str + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_base.py b/trunk/test/compute/test_base.py new file mode 100644 index 0000000000..5fb222d667 --- /dev/null +++ 
b/trunk/test/compute/test_base.py @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest + +from libcloud.common.base import Response +from libcloud.common.base import ConnectionKey, ConnectionUserAndKey +from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver + +from test import MockResponse # pylint: disable-msg=E0611 + +class FakeDriver(object): + type = 0 + +class BaseTests(unittest.TestCase): + + def test_base_node(self): + Node(id=0, name=0, state=0, public_ip=0, private_ip=0, + driver=FakeDriver()) + + def test_base_node_size(self): + NodeSize(id=0, name=0, ram=0, disk=0, bandwidth=0, price=0, + driver=FakeDriver()) + + def test_base_node_image(self): + NodeImage(id=0, name=0, driver=FakeDriver()) + + def test_base_response(self): + Response(MockResponse(status=200, body='foo'), ConnectionKey('foo')) + + def test_base_node_driver(self): + NodeDriver('foo') + + def test_base_connection_key(self): + ConnectionKey('foo') + + def test_base_connection_userkey(self): + ConnectionUserAndKey('foo', 'bar') + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_bluebox.py b/trunk/test/compute/test_bluebox.py new file mode 100644 index 
0000000000..5f8a5dfd92 --- /dev/null +++ b/trunk/test/compute/test_bluebox.py @@ -0,0 +1,112 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +import httplib + +from libcloud.compute.drivers.bluebox import BlueboxNodeDriver as Bluebox +from libcloud.compute.base import Node, NodeAuthPassword +from libcloud.compute.types import NodeState + + +from test import MockHttp +from test.file_fixtures import ComputeFileFixtures +from test.secrets import BLUEBOX_PARAMS + +class BlueboxTest(unittest.TestCase): + + def setUp(self): + Bluebox.connectionCls.conn_classes = (None, BlueboxMockHttp) + self.driver = Bluebox(*BLUEBOX_PARAMS) + + def test_create_node(self): + node = self.driver.create_node( + name='foo', + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0], + auth=NodeAuthPassword("test123") + ) + self.assertTrue(isinstance(node, Node)) + self.assertEqual(node.state, NodeState.PENDING) + self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com') + + def test_list_nodes(self): + node = self.driver.list_nodes()[0] + self.assertEqual(node.name, 'foo.apitest.blueboxgrid.com') + self.assertEqual(node.state, NodeState.RUNNING) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + 
self.assertEqual(len(sizes), 4) + + ids = [s.id for s in sizes] + + for size in sizes: + self.assertTrue(size.price > 0) + + self.assertTrue('94fd37a7-2606-47f7-84d5-9000deda52ae' in ids) + self.assertTrue('b412f354-5056-4bf0-a42f-6ddd998aa092' in ids) + self.assertTrue('0cd183d3-0287-4b1a-8288-b3ea8302ed58' in ids) + self.assertTrue('b9b87a5b-2885-4a2e-b434-44a163ca6251' in ids) + + def test_list_images(self): + images = self.driver.list_images() + image = images[0] + self.assertEqual(len(images), 10) + self.assertEqual(image.name, 'CentOS 5 (Latest Release)') + self.assertEqual(image.id, 'c66b8145-f768-45ef-9878-395bf8b1b7ff') + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.reboot_node(node) + self.assertTrue(ret) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + +class BlueboxMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('bluebox') + + def _api_blocks_json(self, method, url, body, headers): + if method == "POST": + body = self.fixtures.load('api_blocks_json_post.json') + else: + body = self.fixtures.load('api_blocks_json.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_block_products_json(self, method, url, body, headers): + body = self.fixtures.load('api_block_products_json.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_block_templates_json(self, method, url, body, headers): + body = self.fixtures.load('api_block_templates_json.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load('api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json_delete.json') + else: + body = self.fixtures.load('api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_json.json') + return (httplib.OK, body, headers, 
httplib.responses[httplib.OK]) + + def _api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json(self, method, url, body, headers): + body = self.fixtures.load('api_blocks_99df878c_6e5c_4945_a635_d94da9fd3146_reboot_json.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_brightbox.py b/trunk/test/compute/test_brightbox.py new file mode 100644 index 0000000000..12b7fe36df --- /dev/null +++ b/trunk/test/compute/test_brightbox.py @@ -0,0 +1,132 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest +import httplib + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.drivers.brightbox import BrightboxNodeDriver +from libcloud.compute.types import NodeState + +from test import MockHttp +from test.compute import TestCaseMixin +from test.file_fixtures import ComputeFileFixtures +from test.secrets import BRIGHTBOX_PARAMS + + +class BrightboxTest(unittest.TestCase, TestCaseMixin): + def setUp(self): + BrightboxNodeDriver.connectionCls.conn_classes = (None, BrightboxMockHttp) + BrightboxMockHttp.type = None + self.driver = BrightboxNodeDriver(*BRIGHTBOX_PARAMS) + + def test_authentication(self): + BrightboxMockHttp.type = 'INVALID_CLIENT' + self.assertRaises(InvalidCredsError, self.driver.list_nodes) + + BrightboxMockHttp.type = 'UNAUTHORIZED_CLIENT' + self.assertRaises(InvalidCredsError, self.driver.list_nodes) + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 1) + self.assertTrue('109.107.42.129' in nodes[0].public_ip) + self.assertTrue('10.110.24.54' in nodes[0].private_ip) + self.assertEqual(nodes[0].state, NodeState.RUNNING) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 1) + self.assertEqual(sizes[0].id, 'typ-4nssg') + self.assertEqual(sizes[0].name, 'Brightbox Nano Instance') + self.assertEqual(sizes[0].ram, 512) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 1) + self.assertEqual(images[0].id, 'img-9vxqi') + self.assertEqual(images[0].name, 'Brightbox Lucid 32') + self.assertEqual(images[0].extra['arch'], '32-bit') + + def test_reboot_node_response(self): + node = self.driver.list_nodes()[0] + self.assertRaises(NotImplementedError, self.driver.reboot_node, [node]) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.destroy_node(node)) + + 
def test_create_node(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node(name='Test Node', image=image, size=size) + self.assertEqual('srv-3a97e', node.id) + self.assertEqual('Test Node', node.name) + + +class BrightboxMockHttp(MockHttp): + fixtures = ComputeFileFixtures('brightbox') + + def _token(self, method, url, body, headers): + if method == 'POST': + return self.response(httplib.OK, self.fixtures.load('token.json')) + + def _token_INVALID_CLIENT(self, method, url, body, headers): + if method == 'POST': + return self.response(httplib.BAD_REQUEST, '{"error":"invalid_client"}') + + def _token_UNAUTHORIZED_CLIENT(self, method, url, body, headers): + if method == 'POST': + return self.response(httplib.UNAUTHORIZED, '{"error":"unauthorized_client"}') + + def _1_0_images(self, method, url, body, headers): + if method == 'GET': + return self.response(httplib.OK, self.fixtures.load('list_images.json')) + + def _1_0_servers(self, method, url, body, headers): + if method == 'GET': + return self.response(httplib.OK, self.fixtures.load('list_servers.json')) + elif method == 'POST': + body = json.loads(body) + + node = json.loads(self.fixtures.load('create_server.json')) + + node['name'] = body['name'] + + return self.response(httplib.ACCEPTED, json.dumps(node)) + + def _1_0_servers_srv_3a97e(self, method, url, body, headers): + if method == 'DELETE': + return self.response(httplib.ACCEPTED, '') + + def _1_0_server_types(self, method, url, body, headers): + if method == 'GET': + return self.response(httplib.OK, self.fixtures.load('list_server_types.json')) + + def _1_0_zones(self, method, url, body, headers): + if method == 'GET': + return self.response(httplib.OK, self.fixtures.load('list_zones.json')) + + def response(self, status, body): + return (status, body, {'content-type': 'application/json'}, httplib.responses[status]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git 
a/trunk/test/compute/test_cloudsigma.py b/trunk/test/compute/test_cloudsigma.py new file mode 100644 index 0000000000..f825d10d56 --- /dev/null +++ b/trunk/test/compute/test_cloudsigma.py @@ -0,0 +1,204 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest +import httplib + +from libcloud.compute.base import Node +from libcloud.compute.drivers.cloudsigma import CloudSigmaZrhNodeDriver +from libcloud.utils import str2dicts, str2list, dict2str + +from test import MockHttp # pylint: disable-msg=E0611 +from test.compute import TestCaseMixin # pylint: disable-msg=E0611 +from test.file_fixtures import ComputeFileFixtures # pylint: disable-msg=E0611 + + +class CloudSigmaTestCase(unittest.TestCase, TestCaseMixin): + def setUp(self): + CloudSigmaZrhNodeDriver.connectionCls.conn_classes = (None, + CloudSigmaHttp) + self.driver = CloudSigmaZrhNodeDriver('foo', 'bar') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertTrue(isinstance(nodes, list)) + self.assertEqual(len(nodes), 1) + + node = nodes[0] + self.assertEqual(node.public_ip[0], "1.2.3.4") + self.assertEqual(node.extra['smp'], 1) + self.assertEqual(node.extra['cpu'], 1100) + self.assertEqual(node.extra['mem'], 640) + + def 
test_list_sizes(self): + images = self.driver.list_sizes() + self.assertEqual(len(images), 9) + + def test_list_images(self): + sizes = self.driver.list_images() + self.assertEqual(len(sizes), 10) + + def test_list_locations_response(self): + pass + + def test_start_node(self): + nodes = self.driver.list_nodes() + node = nodes[0] + self.assertTrue(self.driver.ex_start_node(node)) + + def test_shutdown_node(self): + nodes = self.driver.list_nodes() + node = nodes[0] + self.assertTrue(self.driver.ex_stop_node(node)) + self.assertTrue(self.driver.ex_shutdown_node(node)) + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.reboot_node(node)) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.destroy_node(node)) + self.driver.list_nodes() + + def test_create_node(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node( + name="cloudsigma node", image=image, size = size) + self.assertTrue(isinstance(node, Node)) + + def test_ex_static_ip_list(self): + ips = self.driver.ex_static_ip_list() + self.assertEqual(len(ips), 3) + + def test_ex_static_ip_create(self): + result = self.driver.ex_static_ip_create() + self.assertEqual(len(result), 2) + self.assertEqual(len(result[0].keys()), 6) + self.assertEqual(len(result[1].keys()), 6) + + def test_ex_static_ip_destroy(self): + result = self.driver.ex_static_ip_destroy('1.2.3.4') + self.assertTrue(result) + + def test_ex_drives_list(self): + result = self.driver.ex_drives_list() + self.assertEqual(len(result), 2) + + def test_ex_drive_destroy(self): + result = self.driver.ex_drive_destroy( + # @@TR: this should be soft-coded: + 'd18119ce_7afa_474a_9242_e0384b160220') + self.assertTrue(result) + + def test_ex_set_node_configuration(self): + node = self.driver.list_nodes()[0] + result = self.driver.ex_set_node_configuration(node, **{'smp': 2}) + self.assertTrue(result) + + def 
test_str2dicts(self): + string = 'mem 1024\ncpu 2200\n\nmem2048\cpu 1100' + result = str2dicts(string) + self.assertEqual(len(result), 2) + + def test_str2list(self): + string = 'ip 1.2.3.4\nip 1.2.3.5\nip 1.2.3.6' + result = str2list(string) + self.assertEqual(len(result), 3) + self.assertEqual(result[0], '1.2.3.4') + self.assertEqual(result[1], '1.2.3.5') + self.assertEqual(result[2], '1.2.3.6') + + def test_dict2str(self): + d = {'smp': 5, 'cpu': 2200, 'mem': 1024} + result = dict2str(d) + self.assertTrue(len(result) > 0) + self.assertTrue(result.find('smp 5') >= 0) + self.assertTrue(result.find('cpu 2200') >= 0) + self.assertTrue(result.find('mem 1024') >= 0) + +class CloudSigmaHttp(MockHttp): + fixtures = ComputeFileFixtures('cloudsigma') + + def _drives_standard_info(self, method, url, body, headers): + body = self.fixtures.load('drives_standard_info.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_start( + self, method, url, body, headers): + + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_stop( + self, method, url, body, headers): + + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.OK]) + + def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_destroy( + self, method, url, body, headers): + + return (httplib.NO_CONTENT, + body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _drives_d18119ce_7afa_474a_9242_e0384b160220_clone( + self, method, url, body, headers): + + body = self.fixtures.load('drives_clone.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _drives_a814def5_1789_49a0_bf88_7abe7bb1682a_info( + self, method, url, body, headers): + + body = self.fixtures.load('drives_single_info.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _drives_info(self, method, url, body, headers): + body = self.fixtures.load('drives_info.txt') + return 
(httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_create(self, method, url, body, headers): + body = self.fixtures.load('servers_create.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_info(self, method, url, body, headers): + body = self.fixtures.load('servers_info.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _resources_ip_list(self, method, url, body, headers): + body = self.fixtures.load('resources_ip_list.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _resources_ip_create(self, method, url, body, headers): + body = self.fixtures.load('resources_ip_create.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _resources_ip_1_2_3_4_destroy(self, method, url, body, headers): + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.OK]) + + def _drives_d18119ce_7afa_474a_9242_e0384b160220_destroy( + self, method, url, body, headers): + + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.OK]) + + def _servers_62fe7cde_4fb9_4c63_bd8c_e757930066a0_set( + self, method, url, body, headers): + + body = self.fixtures.load('servers_set.txt') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_cloudstack.py b/trunk/test/compute/test_cloudstack.py new file mode 100644 index 0000000000..77f454ae27 --- /dev/null +++ b/trunk/test/compute/test_cloudstack.py @@ -0,0 +1,94 @@ +import httplib +import sys +import unittest +import urlparse + +try: + import simplejson as json +except ImportError: + import json + +try: + parse_qsl = urlparse.parse_qsl +except AttributeError: + import cgi + parse_qsl = cgi.parse_qsl + +from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver +from libcloud.compute.types import DeploymentError + +from test import MockHttpTestCase +from test.compute import TestCaseMixin +from 
test.file_fixtures import ComputeFileFixtures + +class CloudStackNodeDriverTest(unittest.TestCase, TestCaseMixin): + def setUp(self): + CloudStackNodeDriver.connectionCls.conn_classes = \ + (None, CloudStackMockHttp) + self.driver = CloudStackNodeDriver('apikey', 'secret') + self.driver.path = '/test/path' + self.driver.type = -1 + CloudStackMockHttp.fixture_tag = 'default' + + def test_create_node_immediate_failure(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + CloudStackMockHttp.fixture_tag = 'deployfail' + try: + node = self.driver.create_node(name='node-name', + image=image, + size=size) + except: + return + self.assertTrue(False) + + def test_create_node_delayed_failure(self): + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + CloudStackMockHttp.fixture_tag = 'deployfail2' + try: + node = self.driver.create_node(name='node-name', + image=image, + size=size) + except: + return + self.assertTrue(False) + +class CloudStackMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('cloudstack') + fixture_tag = 'default' + + def _load_fixture(self, fixture): + body = self.fixtures.load(fixture) + return body, json.loads(body) + + def _test_path(self, method, url, body, headers): + url = urlparse.urlparse(url) + query = dict(parse_qsl(url.query)) + + self.assertTrue('apiKey' in query) + self.assertTrue('command' in query) + self.assertTrue('response' in query) + self.assertTrue('signature' in query) + + self.assertTrue(query['response'] == 'json') + + del query['apiKey'] + del query['response'] + del query['signature'] + command = query.pop('command') + + if hasattr(self, '_cmd_' + command): + return getattr(self, '_cmd_' + command)(**query) + else: + fixture = command + '_' + self.fixture_tag + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + + def _cmd_queryAsyncJobResult(self, jobid): + fixture = 'queryAsyncJobResult' + '_' + 
str(jobid) + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_deployment.py b/trunk/test/compute/test_deployment.py new file mode 100644 index 0000000000..85595b08ed --- /dev/null +++ b/trunk/test/compute/test_deployment.py @@ -0,0 +1,362 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or more§ +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import time +import unittest +import httplib + +from libcloud.compute.deployment import MultiStepDeployment, Deployment +from libcloud.compute.deployment import SSHKeyDeployment, ScriptDeployment +from libcloud.compute.base import Node +from libcloud.compute.types import NodeState, DeploymentError, LibcloudError +from libcloud.compute.ssh import BaseSSHClient +from libcloud.compute.drivers.rackspace import RackspaceNodeDriver as Rackspace + +from test import MockHttp, XML_HEADERS +from test.file_fixtures import ComputeFileFixtures, OpenStackFixtures +from mock import Mock, patch + +from test.secrets import RACKSPACE_PARAMS + +class MockDeployment(Deployment): + def run(self, node, client): + return node + +class MockClient(BaseSSHClient): + def __init__(self, *args, **kwargs): + self.stdout = '' + self.stderr = '' + self.exit_status = 0 + + def put(self, path, contents, chmod=755): + return contents + + def run(self, name): + return self.stdout, self.stderr, self.exit_status + + def delete(self, name): + return True + +class DeploymentTests(unittest.TestCase): + + def setUp(self): + Rackspace.connectionCls.conn_classes = (None, RackspaceMockHttp) + RackspaceMockHttp.type = None + self.driver = Rackspace(*RACKSPACE_PARAMS) + self.driver.features = {'create_node': ['generates_password']} + self.node = Node(id=12345, name='test', state=NodeState.RUNNING, + public_ip=['1.2.3.4'], private_ip='1.2.3.5', + driver=Rackspace) + + def test_multi_step_deployment(self): + msd = MultiStepDeployment() + self.assertEqual(len(msd.steps), 0) + + msd.add(MockDeployment()) + self.assertEqual(len(msd.steps), 1) + + self.assertEqual(self.node, msd.run(node=self.node, client=None)) + + def test_ssh_key_deployment(self): + sshd = SSHKeyDeployment(key='1234') + + self.assertEqual(self.node, sshd.run(node=self.node, + client=MockClient(hostname='localhost'))) + + def test_script_deployment(self): + sd1 = ScriptDeployment(script='foobar', delete=True) + sd2 = 
ScriptDeployment(script='foobar', delete=False) + sd3 = ScriptDeployment(script='foobar', delete=False, name='foobarname') + + self.assertTrue(sd1.name.find('deployment') != '1') + self.assertEqual(sd3.name, 'foobarname') + + self.assertEqual(self.node, sd1.run(node=self.node, + client=MockClient(hostname='localhost'))) + self.assertEqual(self.node, sd2.run(node=self.node, + client=MockClient(hostname='localhost'))) + + def test_script_deployment_and_sshkey_deployment_argument_types(self): + class FileObject(object): + def __init__(self, name): + self.name = name + + def read(self): + return 'bar' + + ScriptDeployment(script='foobar') + ScriptDeployment(script=unicode('foobar')) + ScriptDeployment(script=FileObject('test')) + + SSHKeyDeployment(key='foobar') + SSHKeyDeployment(key=unicode('foobar')) + SSHKeyDeployment(key=FileObject('test')) + + try: + ScriptDeployment(script=[]) + except TypeError: + pass + else: + self.fail('TypeError was not thrown') + + try: + SSHKeyDeployment(key={}) + except TypeError: + pass + else: + self.fail('TypeError was not thrown') + + def test_wait_until_running_running_instantly(self): + node2 = self.driver._wait_until_running(node=self.node, wait_period=1, + timeout=10) + self.assertEqual(self.node.uuid, node2.uuid) + + def test_wait_until_running_running_after_1_second(self): + RackspaceMockHttp.type = '1_SECOND_DELAY' + node2 = self.driver._wait_until_running(node=self.node, wait_period=1, + timeout=10) + self.assertEqual(self.node.uuid, node2.uuid) + + def test_wait_until_running_timeout(self): + RackspaceMockHttp.type = 'TIMEOUT' + + try: + self.driver._wait_until_running(node=self.node, wait_period=0.5, + timeout=1) + except LibcloudError, e: + self.assertTrue(e.value.find('Timed out') != -1) + else: + self.fail('Exception was not thrown') + + + def test_wait_until_running_running_node_missing_from_list_nodes(self): + RackspaceMockHttp.type = 'MISSING' + + try: + self.driver._wait_until_running(node=self.node, wait_period=0.5, 
+ timeout=1) + except LibcloudError, e: + self.assertTrue(e.value.find('is missing from list_nodes') != -1) + else: + self.fail('Exception was not thrown') + + def test_wait_until_running_running_multiple_nodes_have_same_uuid(self): + RackspaceMockHttp.type = 'SAME_UUID' + + try: + self.driver._wait_until_running(node=self.node, wait_period=0.5, + timeout=1) + except LibcloudError, e: + self.assertTrue(e.value.find('multiple nodes have same UUID') != -1) + else: + self.fail('Exception was not thrown') + + + def test_ssh_client_connect_success(self): + mock_ssh_client = Mock() + mock_ssh_client.return_value = None + + ssh_client = self.driver._ssh_client_connect(ssh_client=mock_ssh_client, + timeout=10) + self.assertEqual(mock_ssh_client, ssh_client) + + def test_ssh_client_connect_timeout(self): + mock_ssh_client = Mock() + mock_ssh_client.connect = Mock() + mock_ssh_client.connect.side_effect = IOError('bam') + + try: + self.driver._ssh_client_connect(ssh_client=mock_ssh_client, + timeout=1) + except LibcloudError, e: + self.assertTrue(e.value.find('Giving up') != -1) + else: + self.fail('Exception was not thrown') + + def test_run_deployment_script_success(self): + task = Mock() + ssh_client = Mock() + + ssh_client2 = self.driver._run_deployment_script(task=task, + node=self.node, + ssh_client=ssh_client, + max_tries=2) + self.assertTrue(isinstance(ssh_client2, Mock)) + + def test_run_deployment_script_exception(self): + task = Mock() + task.run = Mock() + task.run.side_effect = Exception('bar') + ssh_client = Mock() + + try: + self.driver._run_deployment_script(task=task, + node=self.node, + ssh_client=ssh_client, + max_tries=2) + except LibcloudError, e: + self.assertTrue(e.value.find('Failed after 2 tries') != -1) + else: + self.fail('Exception was not thrown') + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_deploy_node_success(self, mock_ssh_module, _): + self.driver.create_node = Mock() + 
self.driver.create_node.return_value = self.node + mock_ssh_module.have_paramiko = True + + deploy = Mock() + + node = self.driver.deploy_node(deploy=deploy) + self.assertEqual(self.node.id, node.id) + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_deploy_node_exception_run_deployment_script(self, mock_ssh_module, + _): + self.driver.create_node = Mock() + self.driver.create_node.return_value = self.node + mock_ssh_module.have_paramiko = True + + deploy = Mock() + deploy.run = Mock() + deploy.run.side_effect = Exception('foo') + + try: + self.driver.deploy_node(deploy=deploy) + except DeploymentError, e: + self.assertTrue(e.node.id, self.node.id) + else: + self.fail('Exception was not thrown') + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_deploy_node_exception_ssh_client_connect(self, mock_ssh_module, + ssh_client): + self.driver.create_node = Mock() + self.driver.create_node.return_value = self.node + + mock_ssh_module.have_paramiko = True + + deploy = Mock() + ssh_client.side_effect = IOError('bar') + + try: + self.driver.deploy_node(deploy=deploy) + except DeploymentError, e: + self.assertTrue(e.node.id, self.node.id) + else: + self.fail('Exception was not thrown') + + @patch('libcloud.compute.ssh') + def test_deploy_node_depoy_node_not_implemented(self, mock_ssh_module): + self.driver.features = {'create_node': []} + mock_ssh_module.have_paramiko = True + + try: + self.driver.deploy_node(deploy=Mock()) + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + self.driver.features = {} + + try: + self.driver.deploy_node(deploy=Mock()) + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_deploy_node_password_auth(self, mock_ssh_module, _): + self.driver.features = {'create_node': ['password']} + mock_ssh_module.have_paramiko = 
True + + self.driver.create_node = Mock() + self.driver.create_node.return_value = self.node + + node = self.driver.deploy_node(deploy=Mock()) + self.assertEqual(self.node.id, node.id) + + @patch('libcloud.compute.base.SSHClient') + @patch('libcloud.compute.ssh') + def test_exception_is_thrown_is_paramiko_is_not_available(self, + mock_ssh_module, + _): + self.driver.features = {'create_node': ['password']} + self.driver.create_node = Mock() + self.driver.create_node.return_value = self.node + + mock_ssh_module.have_paramiko = False + + try: + self.driver.deploy_node(deploy=Mock()) + except RuntimeError, e: + self.assertTrue(str(e).find('paramiko is not installed') != -1) + else: + self.fail('Exception was not thrown') + + mock_ssh_module.have_paramiko = True + node = self.driver.deploy_node(deploy=Mock()) + self.assertEqual(self.node.id, node.id) + +class RackspaceMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('openstack') + auth_fixtures = OpenStackFixtures() + + def _v1_1__auth(self, method, url, body, headers): + body = self.auth_fixtures.load('_v1_1__auth.json') + return (httplib.OK, body, {'content-type': 'application/json; charset=UTF-8'}, httplib.responses[httplib.OK]) + + # fake auth token response + def _v1_0(self, method, url, body, headers): + headers = {'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', + 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-storage-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-storage-url': 'https://storage4.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} + return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) + + def _v1_0_slug_servers_detail(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail_deployment_success.xml') + return (httplib.OK, body, XML_HEADERS, 
httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_1_SECOND_DELAY(self, method, url, body, headers): + time.sleep(1) + body = self.fixtures.load('v1_slug_servers_detail_deployment_success.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_TIMEOUT(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail_deployment_pending.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_MISSING(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail_deployment_missing.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_SAME_UUID(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail_deployment_same_uuid.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_dreamhost.py b/trunk/test/compute/test_dreamhost.py new file mode 100644 index 0000000000..7da272393b --- /dev/null +++ b/trunk/test/compute/test_dreamhost.py @@ -0,0 +1,279 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +import httplib + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.common.types import InvalidCredsError +from libcloud.compute.drivers.dreamhost import DreamhostNodeDriver +from libcloud.compute.types import NodeState + +from test import MockHttp +from test.compute import TestCaseMixin +from test.secrets import DREAMHOST_PARAMS + +class DreamhostTest(unittest.TestCase, TestCaseMixin): + + def setUp(self): + DreamhostNodeDriver.connectionCls.conn_classes = ( + None, + DreamhostMockHttp + ) + DreamhostMockHttp.type = None + DreamhostMockHttp.use_param = 'cmd' + self.driver = DreamhostNodeDriver(*DREAMHOST_PARAMS) + + def test_invalid_creds(self): + """ + Tests the error-handling for passing a bad API Key to the DreamHost API + """ + DreamhostMockHttp.type = 'BAD_AUTH' + try: + self.driver.list_nodes() + self.assertTrue(False) # Above command should have thrown an InvalidCredsException + except InvalidCredsError: + self.assertTrue(True) + + + def test_list_nodes(self): + """ + Test list_nodes for DreamHost PS driver. 
Should return a list of two nodes: + - account_id: 000000 + ip: 75.119.203.51 + memory_mb: 500 + ps: ps22174 + start_date: 2010-02-25 + type: web + - account_id: 000000 + ip: 75.119.203.52 + memory_mb: 1500 + ps: ps22175 + start_date: 2010-02-25 + type: mysql + """ + + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + web_node = nodes[0] + mysql_node = nodes[1] + + # Web node tests + self.assertEqual(web_node.id, 'ps22174') + self.assertEqual(web_node.state, NodeState.UNKNOWN) + self.assertTrue('75.119.203.51' in web_node.public_ip) + self.assertTrue( + web_node.extra.has_key('current_size') and + web_node.extra['current_size'] == 500 + ) + self.assertTrue( + web_node.extra.has_key('account_id') and + web_node.extra['account_id'] == 000000 + ) + self.assertTrue( + web_node.extra.has_key('type') and + web_node.extra['type'] == 'web' + ) + # MySql node tests + self.assertEqual(mysql_node.id, 'ps22175') + self.assertEqual(mysql_node.state, NodeState.UNKNOWN) + self.assertTrue('75.119.203.52' in mysql_node.public_ip) + self.assertTrue( + mysql_node.extra.has_key('current_size') and + mysql_node.extra['current_size'] == 1500 + ) + self.assertTrue( + mysql_node.extra.has_key('account_id') and + mysql_node.extra['account_id'] == 000000 + ) + self.assertTrue( + mysql_node.extra.has_key('type') and + mysql_node.extra['type'] == 'mysql' + ) + + def test_create_node(self): + """ + Test create_node for DreamHost PS driver. + This is not remarkably compatible with libcloud. The DH API allows + users to specify what image they want to create and whether to move + all their data to the (web) PS. It does NOT accept a name, size, or + location. The only information it returns is the PS's context id + Once the PS is ready it will appear in the list generated by list_ps. 
+ """ + new_node = self.driver.create_node( + image=self.driver.list_images()[0], + size=self.driver.list_sizes()[0], + movedata='no', + ) + self.assertEqual(new_node.id, 'ps12345') + self.assertEqual(new_node.state, NodeState.PENDING) + self.assertTrue( + new_node.extra.has_key('type') and + new_node.extra['type'] == 'web' + ) + + def test_destroy_node(self): + """ + Test destroy_node for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.destroy_node(node)) + + def test_destroy_node_failure(self): + """ + Test destroy_node failure for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + + DreamhostMockHttp.type = 'API_FAILURE' + self.assertFalse(self.driver.destroy_node(node)) + + def test_reboot_node(self): + """ + Test reboot_node for DreamHost PS driver. + """ + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.reboot_node(node)) + + def test_reboot_node_failure(self): + """ + Test reboot_node failure for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + + DreamhostMockHttp.type = 'API_FAILURE' + self.assertFalse(self.driver.reboot_node(node)) + + def test_resize_node(self): + """ + Test resize_node for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver._resize_node(node, 400)) + + def test_resize_node_failure(self): + """ + Test reboot_node faliure for DreamHost PS driver + """ + node = self.driver.list_nodes()[0] + + DreamhostMockHttp.type = 'API_FAILURE' + self.assertFalse(self.driver._resize_node(node, 400)) + + def test_list_images(self): + """ + Test list_images for DreamHost PS driver. 
+ """ + images = self.driver.list_images() + self.assertEqual(len(images), 2) + self.assertEqual(images[0].id, 'web') + self.assertEqual(images[0].name, 'web') + self.assertEqual(images[1].id, 'mysql') + self.assertEqual(images[1].name, 'mysql') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 5) + + self.assertEqual(sizes[0].id, 'default') + self.assertEqual(sizes[0].bandwidth, None) + self.assertEqual(sizes[0].disk, None) + self.assertEqual(sizes[0].ram, 2300) + self.assertEqual(sizes[0].price, 115) + + def test_list_locations(self): + try: + self.driver.list_locations() + except NotImplementedError: + pass + + def test_list_locations_response(self): + self.assertRaises(NotImplementedError, self.driver.list_locations) + +class DreamhostMockHttp(MockHttp): + + def _BAD_AUTH_dreamhost_ps_list_ps(self, method, url, body, headers): + body = json.dumps({'data' : 'invalid_api_key', 'result' : 'error'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_add_ps(self, method, url, body, headers): + body = json.dumps({'data' : {'added_web' : 'ps12345'}, 'result' : 'success'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_list_ps(self, method, url, body, headers): + data = [{ + 'account_id' : 000000, + 'ip': '75.119.203.51', + 'memory_mb' : 500, + 'ps' : 'ps22174', + 'start_date' : '2010-02-25', + 'type' : 'web' + }, + { + 'account_id' : 000000, + 'ip' : '75.119.203.52', + 'memory_mb' : 1500, + 'ps' : 'ps22175', + 'start_date' : '2010-02-25', + 'type' : 'mysql' + }] + result = 'success' + body = json.dumps({'data' : data, 'result' : result}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_list_images(self, method, url, body, headers): + data = [{ + 'description' : 'Private web server', + 'image' : 'web' + }, + { + 'description' : 'Private MySQL server', + 'image' : 'mysql' + }] + result = 'success' + body = 
json.dumps({'data' : data, 'result' : result}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_reboot(self, method, url, body, headers): + body = json.dumps({'data' : 'reboot_scheduled', 'result' : 'success'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _API_FAILURE_dreamhost_ps_reboot(self, method, url, body, headers): + body = json.dumps({'data' : 'no_such_ps', 'result' : 'error'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_set_size(self, method, url, body, headers): + body = json.dumps({'data' : {'memory-mb' : '500'}, 'result' : 'success'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _API_FAILURE_dreamhost_ps_set_size(self, method, url, body, headers): + body = json.dumps({'data' : 'internal_error_setting_size', 'result' : 'error'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dreamhost_ps_remove_ps(self, method, url, body, headers): + body = json.dumps({'data' : 'removed_web', 'result' : 'success'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _API_FAILURE_dreamhost_ps_remove_ps(self, method, url, body, headers): + body = json.dumps({'data' : 'no_such_ps', 'result' : 'error'}) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) + diff --git a/trunk/test/compute/test_ec2.py b/trunk/test/compute/test_ec2.py new file mode 100644 index 0000000000..aaeda99b19 --- /dev/null +++ b/trunk/test/compute/test_ec2.py @@ -0,0 +1,445 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +import httplib + +from libcloud.compute.drivers.ec2 import EC2NodeDriver, EC2APSENodeDriver +from libcloud.compute.drivers.ec2 import NimbusNodeDriver, EucNodeDriver +from libcloud.compute.drivers.ec2 import EC2APNENodeDriver +from libcloud.compute.drivers.ec2 import IdempotentParamError +from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation + +from test import MockHttp, LibcloudTestCase +from test.compute import TestCaseMixin +from test.file_fixtures import ComputeFileFixtures + +from test.secrets import EC2_PARAMS + + +class EC2Tests(LibcloudTestCase, TestCaseMixin): + image_name = 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml' + + def setUp(self): + EC2MockHttp.test = self + EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) + EC2MockHttp.use_param = 'Action' + EC2MockHttp.type = None + self.driver = EC2NodeDriver(*EC2_PARAMS) + + def test_create_node(self): + image = NodeImage(id='ami-be3adfd7', + name=self.image_name, + driver=self.driver) + size = NodeSize('m1.small', 'Small Instance', None, None, None, None, + driver=self.driver) + node = self.driver.create_node(name='foo', image=image, size=size) + self.assertEqual(node.id, 'i-2ba64342') + self.assertEqual(node.name, 'foo') + self.assertEqual(node.extra['tags']['Name'], 'foo') + self.assertEqual(len(node.extra['tags']), 1) + + def test_create_node_idempotent(self): + EC2MockHttp.type = 'idempotent' + image = NodeImage(id='ami-be3adfd7', + name=self.image_name, + driver=self.driver) + size = NodeSize('m1.small', 'Small Instance', 
None, None, None, None, + driver=self.driver) + token = 'testclienttoken' + node = self.driver.create_node(name='foo', image=image, size=size, + ex_clienttoken=token) + self.assertEqual(node.id, 'i-2ba64342') + self.assertEqual(node.extra['clienttoken'], token) + + # from: http://docs.amazonwebservices.com/AWSEC2/latest/DeveloperGuide/index.html?Run_Instance_Idempotency.html + + # If you repeat the request with the same client token, but change + # another request parameter, Amazon EC2 returns an + # IdempotentParameterMismatch error. + # In our case, changing the parameter doesn't actually matter since we + # are forcing the error response fixture. + EC2MockHttp.type = 'idempotent_mismatch' + + idem_error = None + # different count + try: + self.driver.create_node(name='foo', image=image, size=size, + ex_mincount='2', ex_maxcount='2', + ex_clienttoken=token) + except IdempotentParamError, e: + idem_error = e + self.assertTrue(idem_error is not None) + + def test_create_node_no_availability_zone(self): + image = NodeImage(id='ami-be3adfd7', + name=self.image_name, + driver=self.driver) + size = NodeSize('m1.small', 'Small Instance', None, None, None, None, + driver=self.driver) + node = self.driver.create_node(name='foo', image=image, size=size) + location = NodeLocation(0, 'Amazon US N. 
Virginia', 'US', self.driver) + self.assertEqual(node.id, 'i-2ba64342') + node = self.driver.create_node(name='foo', image=image, size=size, + location=location) + self.assertEqual(node.id, 'i-2ba64342') + self.assertEqual(node.name, 'foo') + + def test_list_nodes(self): + node = self.driver.list_nodes()[0] + public_ips = sorted(node.public_ip) + self.assertEqual(node.id, 'i-4382922a') + self.assertEqual(node.name, node.id) + self.assertEqual(len(node.public_ip), 2) + self.assertEqual(node.extra['launchdatetime'], '2009-08-07T05:47:04.000Z') + self.assertTrue('instancetype' in node.extra) + + self.assertEqual(public_ips[0], '1.2.3.4') + self.assertEqual(public_ips[1], '1.2.3.5') + + def test_list_nodes_with_name_tag(self): + EC2MockHttp.type = 'WITH_TAGS' + node = self.driver.list_nodes()[0] + self.assertEqual(node.id, 'i-8474834a') + self.assertEqual(node.name, 'foobar1') + + def test_list_location(self): + locations = self.driver.list_locations() + self.assertTrue(len(locations) > 0) + self.assertTrue(locations[0].availability_zone != None) + + def test_reboot_node(self): + node = Node('i-4382922a', None, None, None, None, self.driver) + ret = self.driver.reboot_node(node) + self.assertTrue(ret) + + def test_destroy_node(self): + node = Node('i-4382922a', None, None, None, None, self.driver) + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + + def test_list_sizes(self): + region_old = self.driver.region_name + + names = [('ec2_us_east', 'us-east-1'), + ('ec2_us_west', 'us-west-1'), + ('ec2_eu_west', 'eu-west-1'), + ('ec2_ap_southeast', 'ap-southeast-1'), + ('ec2_ap_northeast', 'ap-northeast-1') + ] + for api_name, region_name in names: + self.driver.api_name = api_name + self.driver.region_name = region_name + sizes = self.driver.list_sizes() + + ids = [s.id for s in sizes] + self.assertTrue('t1.micro' in ids) + self.assertTrue('m1.small' in ids) + self.assertTrue('m1.large' in ids) + self.assertTrue('m1.xlarge' in ids) + self.assertTrue('c1.medium' 
in ids) + self.assertTrue('c1.xlarge' in ids) + self.assertTrue('m2.xlarge' in ids) + self.assertTrue('m2.2xlarge' in ids) + self.assertTrue('m2.4xlarge' in ids) + + if region_name == 'us-east-1': + self.assertEqual(len(sizes), 11) + self.assertTrue('cg1.4xlarge' in ids) + self.assertTrue('cc1.4xlarge' in ids) + else: + self.assertEqual(len(sizes), 9) + + self.driver.region_name = region_old + + def test_list_images(self): + images = self.driver.list_images() + image = images[0] + self.assertEqual(len(images), 1) + self.assertEqual(image.name, 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml') + self.assertEqual(image.id, 'ami-be3adfd7') + + def test_ex_list_availability_zones(self): + availability_zones = self.driver.ex_list_availability_zones() + availability_zone = availability_zones[0] + self.assertTrue(len(availability_zones) > 0) + self.assertEqual(availability_zone.name, 'eu-west-1a') + self.assertEqual(availability_zone.zone_state, 'available') + self.assertEqual(availability_zone.region_name, 'eu-west-1') + + def test_ex_describe_tags(self): + node = Node('i-4382922a', None, None, None, None, self.driver) + tags = self.driver.ex_describe_tags(node) + + self.assertEqual(len(tags), 3) + self.assertTrue('tag' in tags) + self.assertTrue('owner' in tags) + self.assertTrue('stack' in tags) + + def test_ex_create_tags(self): + node = Node('i-4382922a', None, None, None, None, self.driver) + self.driver.ex_create_tags(node, {'sample': 'tag'}) + + def test_ex_delete_tags(self): + node = Node('i-4382922a', None, None, None, None, self.driver) + self.driver.ex_delete_tags(node, {'sample': 'tag'}) + + def test_ex_describe_addresses_for_node(self): + node1 = Node('i-4382922a', None, None, None, None, self.driver) + ip_addresses1 = self.driver.ex_describe_addresses_for_node(node1) + node2 = Node('i-4382922b', None, None, None, None, self.driver) + ip_addresses2 = sorted(self.driver.ex_describe_addresses_for_node(node2)) + node3 = Node('i-4382922g', None, None, 
None, None, self.driver) + ip_addresses3 = sorted(self.driver.ex_describe_addresses_for_node(node3)) + + self.assertEqual(len(ip_addresses1), 1) + self.assertEqual(ip_addresses1[0], '1.2.3.4') + + self.assertEqual(len(ip_addresses2), 2) + self.assertEqual(ip_addresses2[0], '1.2.3.5') + self.assertEqual(ip_addresses2[1], '1.2.3.6') + + self.assertEqual(len(ip_addresses3), 0) + + def test_ex_describe_addresses(self): + node1 = Node('i-4382922a', None, None, None, None, self.driver) + node2 = Node('i-4382922g', None, None, None, None, self.driver) + nodes_elastic_ips1 = self.driver.ex_describe_addresses([node1]) + nodes_elastic_ips2 = self.driver.ex_describe_addresses([node2]) + + self.assertEqual(len(nodes_elastic_ips1), 1) + self.assertTrue(node1.id in nodes_elastic_ips1) + self.assertEqual(nodes_elastic_ips1[node1.id], ['1.2.3.4']) + + self.assertEqual(len(nodes_elastic_ips2), 1) + self.assertTrue(node2.id in nodes_elastic_ips2) + self.assertEqual(nodes_elastic_ips2[node2.id], []) + + def test_ex_change_node_size_same_size(self): + size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver) + node = Node('i-4382922a', None, None, None, None, self.driver, + extra={'instancetype': 'm1.small'}) + + try: + self.driver.ex_change_node_size(node=node, new_size=size) + except ValueError: + pass + else: + self.fail('Same size was passed, but an exception was not thrown') + + def test_ex_change_node_size(self): + size = NodeSize('m1.large', 'Small Instance', None, None, None, None, driver=self.driver) + node = Node('i-4382922a', None, None, None, None, self.driver, + extra={'instancetype': 'm1.small'}) + + result = self.driver.ex_change_node_size(node=node, new_size=size) + self.assertTrue(result) + + +class EC2MockHttp(MockHttp): + + fixtures = ComputeFileFixtures('ec2') + + def _DescribeInstances(self, method, url, body, headers): + body = self.fixtures.load('describe_instances.xml') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + + def _WITH_TAGS_DescribeInstances(self, method, url, body, headers): + body = self.fixtures.load('describe_instances_with_tags.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeAvailabilityZones(self, method, url, body, headers): + body = self.fixtures.load('describe_availability_zones.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _RebootInstances(self, method, url, body, headers): + body = self.fixtures.load('reboot_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeImages(self, method, url, body, headers): + body = self.fixtures.load('describe_images.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _RunInstances(self, method, url, body, headers): + body = self.fixtures.load('run_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _idempotent_RunInstances(self, method, url, body, headers): + body = self.fixtures.load('run_instances_idem.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _idempotent_mismatch_RunInstances(self, method, url, body, headers): + body = self.fixtures.load('run_instances_idem_mismatch.xml') + return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST]) + + def _TerminateInstances(self, method, url, body, headers): + body = self.fixtures.load('terminate_instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DescribeTags(self, method, url, body, headers): + body = self.fixtures.load('describe_tags.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CreateTags(self, method, url, body, headers): + body = self.fixtures.load('create_tags.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DeleteTags(self, method, url, body, headers): + body = self.fixtures.load('delete_tags.xml') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + + def _DescribeAddresses(self, method, url, body, headers): + body = self.fixtures.load('describe_addresses_multi.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _WITH_TAGS_DescribeAddresses(self, method, url, body, headers): + body = self.fixtures.load('describe_addresses_multi.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ModifyInstanceAttribute(self, method, url, body, headers): + body = self.fixtures.load('modify_instance_attribute.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _idempotent_CreateTags(self, method, url, body, headers): + body = self.fixtures.load('create_tags.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +class EucMockHttp(EC2MockHttp): + fixtures = ComputeFileFixtures('ec2') + + def _services_Eucalyptus_DescribeInstances(self, method, url, body, + headers): + return self._DescribeInstances(method, url, body, headers) + + def _services_Eucalyptus_DescribeImages(self, method, url, body, + headers): + return self._DescribeImages(method, url, body, headers) + + def _services_Eucalyptus_DescribeAddresses(self, method, url, body, + headers): + return self._DescribeAddresses(method, url, body, headers) + + def _services_Eucalyptus_RebootInstances(self, method, url, body, + headers): + return self._RebootInstances(method, url, body, headers) + + def _services_Eucalyptus_TerminateInstances(self, method, url, body, + headers): + return self._TerminateInstances(method, url, body, headers) + + def _services_Eucalyptus_RunInstances(self, method, url, body, + headers): + return self._RunInstances(method, url, body, headers) + + def _services_Eucalyptus_CreateTags(self, method, url, body, + headers): + return self._CreateTags(method, url, body, headers) + + +class EC2APSETests(EC2Tests): + def setUp(self): + EC2APSENodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) + EC2MockHttp.use_param = 'Action' + 
EC2MockHttp.type = None + self.driver = EC2APSENodeDriver(*EC2_PARAMS) + + +class EC2APNETests(EC2Tests): + def setUp(self): + EC2APNENodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) + EC2MockHttp.use_param = 'Action' + EC2MockHttp.type = None + self.driver = EC2APNENodeDriver(*EC2_PARAMS) + + +class NimbusTests(EC2Tests): + def setUp(self): + NimbusNodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) + EC2MockHttp.use_param = 'Action' + EC2MockHttp.type = None + self.driver = NimbusNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1], + host='some.nimbuscloud.com') + + def test_ex_describe_addresses_for_node(self): + # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. + node = Node('i-4382922a', None, None, None, None, self.driver) + ip_addresses = self.driver.ex_describe_addresses_for_node(node) + self.assertEqual(len(ip_addresses), 0) + + def test_ex_describe_addresses(self): + # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. + node = Node('i-4382922a', None, None, None, None, self.driver) + nodes_elastic_ips = self.driver.ex_describe_addresses([node]) + + self.assertEqual(len(nodes_elastic_ips), 1) + self.assertEqual(len(nodes_elastic_ips[node.id]), 0) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + + ids = [s.id for s in sizes] + self.assertTrue('m1.small' in ids) + self.assertTrue('m1.large' in ids) + self.assertTrue('m1.xlarge' in ids) + + def test_list_nodes(self): + # overridden from EC2Tests -- Nimbus doesn't support elastic IPs. 
+ node = self.driver.list_nodes()[0] + self.assertExecutedMethodCount(0) + public_ips = node.public_ip + self.assertEqual(node.id, 'i-4382922a') + self.assertEqual(len(node.public_ip), 1) + self.assertEqual(public_ips[0], '1.2.3.5') + self.assertEqual(node.extra['tags'], {}) + + node = self.driver.list_nodes()[1] + self.assertExecutedMethodCount(0) + public_ips = node.public_ip + self.assertEqual(node.id, 'i-8474834a') + self.assertEqual(len(node.public_ip), 1) + self.assertEqual(public_ips[0], '1.2.3.5') + self.assertEqual(node.extra['tags'], {'user_key0': 'user_val0', 'user_key1': 'user_val1'}) + + def test_ex_create_tags(self): + # Nimbus doesn't support creating tags so this one should be a + # passthrough + node = self.driver.list_nodes()[0] + self.driver.ex_create_tags(node=node, tags={'foo': 'bar'}) + self.assertExecutedMethodCount(0) + + +class EucTests(LibcloudTestCase, TestCaseMixin): + def setUp(self): + EucNodeDriver.connectionCls.conn_classes = (None, EucMockHttp) + EC2MockHttp.use_param = 'Action' + EC2MockHttp.type = None + self.driver = EucNodeDriver(key=EC2_PARAMS[0], secret=EC2_PARAMS[1], + host='some.eucalyptus.com') + + def test_list_locations_response(self): + try: + self.driver.list_locations() + except Exception: + pass + else: + self.fail('Exception was not thrown') + + def test_list_location(self): + pass + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_ecp.py b/trunk/test/compute/test_ecp.py new file mode 100644 index 0000000000..3cf9c28efb --- /dev/null +++ b/trunk/test/compute/test_ecp.py @@ -0,0 +1,128 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +import httplib + +from libcloud.compute.drivers.ecp import ECPNodeDriver +from libcloud.compute.types import NodeState + +from test import MockHttp +from test.compute import TestCaseMixin +from test.file_fixtures import ComputeFileFixtures + +from test.secrets import ECP_PARAMS + +class ECPTests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + ECPNodeDriver.connectionCls.conn_classes = (None, + ECPMockHttp) + self.driver = ECPNodeDriver(*ECP_PARAMS) + + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + node = nodes[0] + self.assertEqual(node.id, '1') + self.assertEqual(node.name, 'dummy-1') + self.assertEqual(node.public_ip[0], "42.78.124.75") + self.assertEqual(node.state, NodeState.RUNNING) + + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 3) + size = sizes[0] + self.assertEqual(size.id, '1') + self.assertEqual(size.ram, 512) + self.assertEqual(size.disk, 0) + self.assertEqual(size.bandwidth, 0) + self.assertEqual(size.price, 0) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 2) + self.assertEqual(images[0].name, "centos54: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2") + self.assertEqual(images[0].id, "1") + self.assertEqual(images[1].name, "centos54 two: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2") + self.assertEqual(images[1].id, "2") + + def test_reboot_node(self): + # Raises exception on failure 
+ node = self.driver.list_nodes()[0] + self.driver.reboot_node(node) + + def test_destroy_node(self): + # Raises exception on failure + node = self.driver.list_nodes()[0] + self.driver.destroy_node(node) + + def test_create_node(self): + # Raises exception on failure + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + node = self.driver.create_node(name="api.ivan.net.nz", image=image, size=size) + self.assertEqual(node.name, "api.ivan.net.nz") + self.assertEqual(node.id, "1234") + +class ECPMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('ecp') + + def _modules_hosting(self, method, url, body, headers): + headers = {} + headers['set-cookie'] = 'vcloud-token=testtoken' + body = 'Anything' + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _rest_hosting_vm_1(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('vm_1_get.json') + if method == 'POST': + if body.find('delete', 0): + body = self.fixtures.load('vm_1_action_delete.json') + if body.find('stop', 0): + body = self.fixtures.load('vm_1_action_stop.json') + if body.find('start', 0): + body = self.fixtures.load('vm_1_action_start.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_hosting_vm(self, method, url, body, headers): + if method == 'PUT': + body = self.fixtures.load('vm_put.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_hosting_vm_list(self, method, url, body, headers): + body = self.fixtures.load('vm_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_hosting_htemplate_list(self, method, url, body, headers): + body = self.fixtures.load('htemplate_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_hosting_network_list(self, method, url, body, headers): + body = self.fixtures.load('network_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def 
_rest_hosting_ptemplate_list(self, method, url, body, headers): + body = self.fixtures.load('ptemplate_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_elasticstack.py b/trunk/test/compute/test_elasticstack.py new file mode 100644 index 0000000000..9ffb1d204e --- /dev/null +++ b/trunk/test/compute/test_elasticstack.py @@ -0,0 +1,243 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest +import httplib + +from libcloud.compute.base import Node +from libcloud.compute.types import Provider +from libcloud.compute.drivers.elasticstack import (ElasticStackException, + ElasticStackBaseConnection, + ElasticStackBaseNodeDriver as ElasticStack) +from libcloud.compute.drivers.elastichosts import \ + (ElasticHostsBaseNodeDriver as ElasticHosts) +from libcloud.compute.drivers.skalicloud import \ + (SkaliCloudNodeDriver as SkaliCloud) +from libcloud.compute.drivers.serverlove import \ + (ServerLoveNodeDriver as ServerLove) +from libcloud.common.types import InvalidCredsError, MalformedResponseError + +from test import MockHttp +from test.file_fixtures import ComputeFileFixtures + +class ElasticStackTestCase(object): + + def setUp(self): + # Re-use ElasticHosts fixtures for the base ElasticStack platform tests + """ElasticStack.type = Provider.ELASTICHOSTS + ElasticStack.api_name = 'elastichosts' + + ElasticStackBaseConnection.host = 'test.com' + ElasticStack.connectionCls.conn_classes = (None, + ElasticStackMockHttp) + ElasticStack._standard_drives = ElasticHosts._standard_drives + + self.driver = ElasticStack('foo', 'bar') + """ + self.mockHttp = ElasticStackMockHttp + self.mockHttp.type = None + + self.node = Node(id=72258, name=None, state=None, public_ip=None, + private_ip=None, driver=self.driver) + + def test_invalid_creds(self): + self.mockHttp.type = 'UNAUTHORIZED' + try: + self.driver.list_nodes() + except InvalidCredsError, e: + self.assertEqual(True, isinstance(e, InvalidCredsError)) + else: + self.fail('test should have thrown') + + def test_malformed_response(self): + self.mockHttp.type = 'MALFORMED' + try: + self.driver.list_nodes() + except MalformedResponseError: + pass + else: + self.fail('test should have thrown') + + def test_parse_error(self): + self.mockHttp.type = 'PARSE_ERROR' + try: + self.driver.list_nodes() + except Exception, e: + self.assertTrue(str(e).find('X-Elastic-Error') != -1) + else: + 
self.fail('test should have thrown') + + def test_ex_set_node_configuration(self): + success = self.driver.ex_set_node_configuration(node=self.node, + name='name', + cpu='2') + self.assertTrue(success) + + def test_ex_set_node_configuration_invalid_keys(self): + try: + self.driver.ex_set_node_configuration(node=self.node, foo='bar') + except ElasticStackException: + pass + else: + self.fail('Invalid option specified, but an exception was not thrown') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertTrue(isinstance(nodes, list)) + self.assertEqual(len(nodes), 1) + + node = nodes[0] + self.assertEqual(node.public_ip[0], "1.2.3.4") + self.assertEqual(node.public_ip[1], "1.2.3.5") + self.assertEqual(node.extra['smp'], 1) + + def test_list_sizes(self): + images = self.driver.list_sizes() + self.assertEqual(len(images), 6) + image = [i for i in images if i.id == 'small'][0] + self.assertEqual(image.id, 'small') + self.assertEqual(image.name, 'Small instance') + self.assertEqual(image.cpu, 2000) + self.assertEqual(image.ram, 1700) + self.assertEqual(image.disk, 160) + self.assertTrue(isinstance(image.price, float)) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), len(self.driver._standard_drives)) + + for uuid, values in self.driver._standard_drives.iteritems(): + self.assertEqual(len([image for image in images if image.id == uuid]), 1) + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.reboot_node(node)) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + self.assertTrue(self.driver.destroy_node(node)) + + def test_create_node(self): + sizes = self.driver.list_sizes() + size = [s for s in sizes if \ + s.id == 'large'][0] + image = self.image + + self.assertTrue(self.driver.create_node(name="api.ivan.net.nz", + image=image, size=size)) + + +class ElasticHostsTestCase(ElasticStackTestCase, unittest.TestCase): + + def 
setUp(self): + ElasticHosts.connectionCls.conn_classes = (None, + ElasticStackMockHttp) + + self.driver = ElasticHosts('foo', 'bar') + images = self.driver.list_images() + self.image = [i for i in images if \ + i.id == '38df0986-4d85-4b76-b502-3878ffc80161'][0] + + ElasticStackTestCase.setUp(self) + unittest.TestCase.setUp(self) + + +class SkaliCloudTestCase(ElasticStackTestCase, unittest.TestCase): + + def setUp(self): + SkaliCloud.connectionCls.conn_classes = (None, + ElasticStackMockHttp) + + self.driver = SkaliCloud('foo', 'bar') + + images = self.driver.list_images() + self.image = [i for i in images if \ + i.id == '90aa51f2-15c0-4cff-81ee-e93aa20b9468'][0] + + ElasticStackTestCase.setUp(self) + unittest.TestCase.setUp(self) + + +class ServerLoveTestCase(ElasticStackTestCase, unittest.TestCase): + + def setUp(self): + ServerLove.connectionCls.conn_classes = (None, + ElasticStackMockHttp) + + self.driver = ServerLove('foo', 'bar') + + images = self.driver.list_images() + self.image = [i for i in images if \ + i.id == '679f5f44-0be7-4745-a658-cccd4334c1aa'][0] + + ElasticStackTestCase.setUp(self) + unittest.TestCase.setUp(self) + + +class ElasticStackMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('elastichosts') + + def _servers_info_UNAUTHORIZED(self, method, url, body, headers): + return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _servers_info_MALFORMED(self, method, url, body, headers): + body = "{malformed: '" + return (httplib.OK, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _servers_info_PARSE_ERROR(self, method, url, body, headers): + return (505, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_reset(self, method, url, body, headers): + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_destroy(self, method, url, body, headers): + return (httplib.NO_CONTENT, body, 
{}, httplib.responses[httplib.NO_CONTENT]) + + def _drives_create(self, method, url, body, headers): + body = self.fixtures.load('drives_create.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_38df0986_4d85_4b76_b502_3878ffc80161_gunzip(self, method, url, body, headers): + # ElasticHosts image + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_90aa51f2_15c0_4cff_81ee_e93aa20b9468_gunzip(self, method, url, body, headers): + # Skalikloud image + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_679f5f44_0be7_4745_a658_cccd4334c1aa_gunzip(self, method, url, body, headers): + # ServerLove image + return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) + + def _drives_0012e24a_6eae_4279_9912_3432f698cec8_info(self, method, url, body, headers): + body = self.fixtures.load('drives_info.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_create(self, method, url, body, headers): + body = self.fixtures.load('servers_create.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_info(self, method, url, body, headers): + body = self.fixtures.load('servers_info.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _servers_72258_set(self, method, url, body, headers): + body = '{}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_gandi.py b/trunk/test/compute/test_gandi.py new file mode 100644 index 0000000000..84d273aee8 --- /dev/null +++ b/trunk/test/compute/test_gandi.py @@ -0,0 +1,233 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import sys +import random +import string +import httplib +import xmlrpclib + +from libcloud.compute.drivers.gandi import GandiNodeDriver as Gandi +from libcloud.common.gandi import GandiException +from libcloud.compute.types import NodeState + +from xml.etree import ElementTree as ET +from test import MockHttp +from test.file_fixtures import ComputeFileFixtures +from test.secrets import GANDI_PARAMS + + +class MockGandiTransport(xmlrpclib.Transport): + + def request(self, host, handler, request_body, verbose=0): + self.verbose = 0 + method = ET.XML(request_body).find('methodName').text + mock = GandiMockHttp(host, 80) + mock.request('POST', "%s/%s" % (handler, method)) + resp = mock.getresponse() + + if sys.version[0] == '2' and sys.version[2] == '7': + response = self.parse_response(resp) + else: + response = self.parse_response(resp.body) + return response + + +class GandiTests(unittest.TestCase): + + node_name = 'test2' + + def setUp(self): + Gandi.connectionCls.proxyCls.transportCls = \ + [MockGandiTransport, MockGandiTransport] + self.driver = Gandi(*GANDI_PARAMS) + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertTrue(len(nodes) > 0) + + def test_list_locations(self): + loc = filter(lambda x: 'france' in x.country.lower(), + 
self.driver.list_locations())[0] + self.assertEqual(loc.country, 'France') + + def test_list_images(self): + loc = filter(lambda x: 'france' in x.country.lower(), + self.driver.list_locations())[0] + images = self.driver.list_images(loc) + self.assertTrue(len(images) > 2) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertTrue(len(sizes) >= 1) + + def test_destroy_node_running(self): + nodes = self.driver.list_nodes() + test_node = filter(lambda x: x.state == NodeState.RUNNING, nodes)[0] + self.assertTrue(self.driver.destroy_node(test_node)) + + def test_destroy_node_halted(self): + nodes = self.driver.list_nodes() + test_node = filter(lambda x: x.state == NodeState.TERMINATED, nodes)[0] + self.assertTrue(self.driver.destroy_node(test_node)) + + def test_reboot_node(self): + nodes = self.driver.list_nodes() + test_node = filter(lambda x: x.state == NodeState.RUNNING, nodes)[0] + self.assertTrue(self.driver.reboot_node(test_node)) + + def test_create_node(self): + login = 'libcloud' + passwd = ''.join(random.choice(string.letters + string.digits) + for i in xrange(10)) + # Get france datacenter + loc = filter(lambda x: 'france' in x.country.lower(), + self.driver.list_locations())[0] + # Get a debian image + images = self.driver.list_images(loc) + images = [x for x in images if x.name.lower().startswith('debian')] + img = filter(lambda x: '5' in x.name, images)[0] + # Get a configuration size + size = self.driver.list_sizes()[0] + node = self.driver.create_node(name=self.node_name, login=login, + password=passwd, image=img, location=loc, size=size) + self.assertEqual(node.name, self.node_name) + + def test_ex_list_disks(self): + disks = self.driver.ex_list_disks() + self.assertTrue(len(disks) > 0) + + def test_ex_list_interfaces(self): + ifaces = self.driver.ex_list_interfaces() + self.assertTrue(len(ifaces) > 0) + + def test_ex_attach_interface(self): + ifaces = self.driver.ex_list_interfaces() + nodes = self.driver.list_nodes() + res = 
self.driver.ex_node_attach_interface(nodes[0], ifaces[0]) + self.assertTrue(res) + + def test_ex_detach_interface(self): + ifaces = self.driver.ex_list_interfaces() + nodes = self.driver.list_nodes() + res = self.driver.ex_node_detach_interface(nodes[0], ifaces[0]) + self.assertTrue(res) + + def test_ex_attach_disk(self): + disks = self.driver.ex_list_disks() + nodes = self.driver.list_nodes() + res = self.driver.ex_node_attach_disk(nodes[0], disks[0]) + self.assertTrue(res) + + def test_ex_detach_disk(self): + disks = self.driver.ex_list_disks() + nodes = self.driver.list_nodes() + res = self.driver.ex_node_detach_disk(nodes[0], disks[0]) + self.assertTrue(res) + + def test_ex_snapshot_disk(self): + disks = self.driver.ex_list_disks() + self.assertTrue(self.driver.ex_snapshot_disk(disks[2])) + self.assertRaises(GandiException, + self.driver.ex_snapshot_disk, disks[0]) + + def test_ex_update_disk(self): + disks = self.driver.ex_list_disks() + self.assertTrue(self.driver.ex_update_disk(disks[0], new_size=4096)) + + +class GandiMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('gandi') + + def _xmlrpc_2_0__datacenter_list(self, method, url, body, headers): + body = self.fixtures.load('datacenter_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__image_list(self, method, url, body, headers): + body = self.fixtures.load('image_list_dc0.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__vm_list(self, method, url, body, headers): + body = self.fixtures.load('vm_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__ip_list(self, method, url, body, headers): + body = self.fixtures.load('ip_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__account_info(self, method, url, body, headers): + body = self.fixtures.load('account_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def 
_xmlrpc_2_0__vm_info(self, method, url, body, headers): + body = self.fixtures.load('vm_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__vm_delete(self, method, url, body, headers): + body = self.fixtures.load('vm_delete.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__operation_info(self, method, url, body, headers): + body = self.fixtures.load('operation_info.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__vm_create_from(self, method, url, body, headers): + body = self.fixtures.load('vm_create_from.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__vm_reboot(self, method, url, body, headers): + body = self.fixtures.load('vm_reboot.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__vm_stop(self, method, url, body, headers): + body = self.fixtures.load('vm_stop.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__iface_list(self, method, url, body, headers): + body = self.fixtures.load('iface_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__disk_list(self, method, url, body, headers): + body = self.fixtures.load('disk_list.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__vm_iface_attach(self, method, url, body, headers): + body = self.fixtures.load('iface_attach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__vm_iface_detach(self, method, url, body, headers): + body = self.fixtures.load('iface_detach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__vm_disk_attach(self, method, url, body, headers): + body = self.fixtures.load('disk_attach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__vm_disk_detach(self, method, url, 
body, headers): + body = self.fixtures.load('disk_detach.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__disk_create_from(self, method, url, body, headers): + body = self.fixtures.load('disk_create_from.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_2_0__disk_update(self, method, url, body, headers): + body = self.fixtures.load('disk_update.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_gogrid.py b/trunk/test/compute/test_gogrid.py new file mode 100644 index 0000000000..c671c81966 --- /dev/null +++ b/trunk/test/compute/test_gogrid.py @@ -0,0 +1,280 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import httplib +import sys +import unittest +import urlparse + +from libcloud.compute.base import NodeState, NodeLocation +from libcloud.common.types import LibcloudError, InvalidCredsError +from libcloud.common.gogrid import GoGridIpAddress +from libcloud.compute.drivers.gogrid import GoGridNodeDriver +from libcloud.compute.base import Node, NodeImage, NodeSize + +from test import MockHttp # pylint: disable-msg=E0611 +from test.compute import TestCaseMixin # pylint: disable-msg=E0611 +from test.file_fixtures import ComputeFileFixtures # pylint: disable-msg=E0611 + +class GoGridTests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + GoGridNodeDriver.connectionCls.conn_classes = (None, GoGridMockHttp) + GoGridMockHttp.type = None + self.driver = GoGridNodeDriver("foo", "bar") + + def _get_test_512Mb_node_size(self): + return NodeSize(id='512Mb', + name=None, + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self.driver) + + def test_create_node(self): + image = NodeImage(1531, None, self.driver) + node = self.driver.create_node( + name='test1', + image=image, + size=self._get_test_512Mb_node_size()) + self.assertEqual(node.name, 'test1') + self.assertTrue(node.id is not None) + self.assertEqual(node.extra['password'], 'bebebe') + + def test_list_nodes(self): + node = self.driver.list_nodes()[0] + + self.assertEqual(node.id, '90967') + self.assertEqual(node.extra['password'], 'bebebe') + self.assertEqual(node.extra['description'], 'test server') + + def test_reboot_node(self): + node = Node(90967, None, None, None, None, self.driver) + ret = self.driver.reboot_node(node) + self.assertTrue(ret) + + def test_reboot_node_not_successful(self): + GoGridMockHttp.type = 'FAIL' + node = Node(90967, None, None, None, None, self.driver) + + try: + self.driver.reboot_node(node) + except Exception: + pass + else: + self.fail('Exception was not thrown') + + def test_destroy_node(self): + node = Node(90967, None, None, None, None, self.driver) + ret = 
self.driver.destroy_node(node) + self.assertTrue(ret) + + def test_list_images(self): + images = self.driver.list_images() + image = images[0] + self.assertEqual(len(images), 4) + self.assertEqual(image.name, 'CentOS 5.3 (32-bit) w/ None') + self.assertEqual(image.id, '1531') + + location = NodeLocation(id='gogrid/GSI-939ef909-84b8-4a2f-ad56-02ccd7da05ff.img', + name='test location', country='Slovenia', + driver=self.driver) + images = self.driver.list_images(location=location) + image = images[0] + self.assertEqual(len(images), 4) + self.assertEqual(image.name, 'CentOS 5.3 (32-bit) w/ None') + self.assertEqual(image.id, '1531') + + def test_malformed_reply(self): + GoGridMockHttp.type = 'FAIL' + try: + self.driver.list_images() + except LibcloudError, e: + self.assertTrue(isinstance(e, LibcloudError)) + else: + self.fail("test should have thrown") + + def test_invalid_creds(self): + GoGridMockHttp.type = 'FAIL' + try: + self.driver.list_nodes() + except InvalidCredsError, e: + self.assertTrue(e.driver is not None) + self.assertEqual(e.driver.name, self.driver.name) + else: + self.fail("test should have thrown") + + def test_node_creation_without_free_public_ips(self): + GoGridMockHttp.type = 'NOPUBIPS' + try: + image = NodeImage(1531, None, self.driver) + self.driver.create_node( + name='test1', + image=image, + size=self._get_test_512Mb_node_size()) + except LibcloudError, e: + self.assertTrue(isinstance(e, LibcloudError)) + self.assertTrue(e.driver is not None) + self.assertEqual(e.driver.name, self.driver.name) + else: + self.fail("test should have thrown") + + def test_list_locations(self): + locations = self.driver.list_locations() + location_names = [location.name for location in locations] + + self.assertEqual(len(locations), 2) + for i in 0, 1: + self.assertTrue(isinstance(locations[i], NodeLocation)) + self.assertTrue("US-West-1" in location_names) + self.assertTrue("US-East-1" in location_names) + + def test_ex_save_image(self): + node = 
self.driver.list_nodes()[0] + image = self.driver.ex_save_image(node, "testimage") + self.assertEqual(image.name, "testimage") + + def test_ex_edit_image(self): + image = self.driver.list_images()[0] + ret = self.driver.ex_edit_image(image=image, public=False, + ex_description="test", name="testname") + + self.assertTrue(isinstance(ret, NodeImage)) + + def test_ex_edit_node(self): + node = Node(id=90967, name=None, state=None, + public_ip=None, private_ip=None, driver=self.driver) + ret = self.driver.ex_edit_node(node=node, + size=self._get_test_512Mb_node_size()) + + self.assertTrue(isinstance(ret, Node)) + + def test_ex_list_ips(self): + ips = self.driver.ex_list_ips() + + expected_ips = {"192.168.75.66": GoGridIpAddress(id="5348099", + ip="192.168.75.66", public=True, state="Unassigned", + subnet="192.168.75.64/255.255.255.240"), + "192.168.75.67": GoGridIpAddress(id="5348100", + ip="192.168.75.67", public=True, state="Assigned", + subnet="192.168.75.64/255.255.255.240"), + "192.168.75.68": GoGridIpAddress(id="5348101", + ip="192.168.75.68", public=False, state="Unassigned", + subnet="192.168.75.64/255.255.255.240")} + + self.assertEqual(len(expected_ips), 3) + + for ip in ips: + self.assertTrue(ip.ip in expected_ips) + self.assertEqual(ip.public, expected_ips[ip.ip].public) + self.assertEqual(ip.state, expected_ips[ip.ip].state) + self.assertEqual(ip.subnet, expected_ips[ip.ip].subnet) + + del expected_ips[ip.ip] + + self.assertEqual(len(expected_ips), 0) + + def test_get_state_invalid(self): + state = self.driver._get_state('invalid') + self.assertEqual(state, NodeState.UNKNOWN) + +class GoGridMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('gogrid') + + def _api_grid_image_list(self, method, url, body, headers): + body = self.fixtures.load('image_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_image_list_FAIL(self, method, url, body, headers): + body = "

some non valid json here

" + return (httplib.SERVICE_UNAVAILABLE, body, {}, + httplib.responses[httplib.SERVICE_UNAVAILABLE]) + + def _api_grid_server_list(self, method, url, body, headers): + body = self.fixtures.load('server_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + _api_grid_server_list_NOPUBIPS = _api_grid_server_list + + def _api_grid_server_list_FAIL(self, method, url, body, headers): + return (httplib.FORBIDDEN, + "123", {}, httplib.responses[httplib.FORBIDDEN]) + + def _api_grid_ip_list(self, method, url, body, headers): + body = self.fixtures.load('ip_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_ip_list_NOPUBIPS(self, method, url, body, headers): + body = self.fixtures.load('ip_list_empty.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_server_power(self, method, url, body, headers): + body = self.fixtures.load('server_power.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_server_power_FAIL(self, method, url, body, headers): + body = self.fixtures.load('server_power_fail.json') + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_server_add(self, method, url, body, headers): + body = self.fixtures.load('server_add.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + _api_grid_server_add_NOPUBIPS = _api_grid_server_add + + def _api_grid_server_delete(self, method, url, body, headers): + body = self.fixtures.load('server_delete.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_server_edit(self, method, url, body, headers): + body = self.fixtures.load('server_edit.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_support_password_list(self, method, url, body, headers): + body = self.fixtures.load('password_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + 
_api_support_password_list_NOPUBIPS = _api_support_password_list + + def _api_grid_image_save(self, method, url, body, headers): + body = self.fixtures.load('image_save.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_image_edit(self, method, url, body, headers): + # edit method is quite similar to save method from the response + # perspective + body = self.fixtures.load('image_save.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_common_lookup_list(self, method, url, body, headers): + _valid_lookups = ("ip.datacenter",) + + try: + from urlparse import parse_qs + except ImportError: + from cgi import parse_qs + + lookup = parse_qs(urlparse.urlparse(url).query)["lookup"][0] + if lookup in _valid_lookups: + fixture_path = "lookup_list_%s.json" % \ + (lookup.replace(".", "_")) + else: + raise NotImplementedError + body = self.fixtures.load(fixture_path) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_ibm_sbc.py b/trunk/test/compute/test_ibm_sbc.py new file mode 100644 index 0000000000..a63fd81403 --- /dev/null +++ b/trunk/test/compute/test_ibm_sbc.py @@ -0,0 +1,206 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +import unittest +import httplib +import sys + +from libcloud.compute.types import InvalidCredsError +from libcloud.compute.drivers.ibm_sbc import IBMNodeDriver as IBM +from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation + +from test import MockHttp +from test.compute import TestCaseMixin +from test.file_fixtures import ComputeFileFixtures +from test.secrets import IBM_PARAMS + +class IBMTests(unittest.TestCase, TestCaseMixin): + """ + Tests the IBM Developer Cloud driver. + """ + + def setUp(self): + IBM.connectionCls.conn_classes = (None, IBMMockHttp) + IBMMockHttp.type = None + self.driver = IBM(*IBM_PARAMS) + + def test_auth(self): + IBMMockHttp.type = 'UNAUTHORIZED' + + try: + self.driver.list_nodes() + except InvalidCredsError, e: + self.assertTrue(isinstance(e, InvalidCredsError)) + self.assertEquals(e.value, '401: Unauthorized') + else: + self.fail('test should have thrown') + + def test_list_nodes(self): + ret = self.driver.list_nodes() + self.assertEquals(len(ret), 3) + self.assertEquals(ret[0].id, '26557') + self.assertEquals(ret[0].name, 'Insight Instance') + self.assertEquals(ret[0].public_ip, '129.33.196.128') + self.assertEquals(ret[0].private_ip, None) # Private IPs not supported + self.assertEquals(ret[1].public_ip, None) # Node is non-active (no IP) + self.assertEquals(ret[1].private_ip, None) + self.assertEquals(ret[1].id, '28193') + + def test_list_sizes(self): + ret = self.driver.list_sizes() + self.assertEquals(len(ret), 9) # 9 instance configurations supported + self.assertEquals(ret[0].id, 'BRZ32.1/2048/60*175') + self.assertEquals(ret[1].id, 'BRZ64.2/4096/60*500*350') + self.assertEquals(ret[2].id, 'COP32.1/2048/60') + self.assertEquals(ret[0].name, 'Bronze 32 bit') + self.assertEquals(ret[0].disk, None) + + def test_list_images(self): + ret = self.driver.list_images() + self.assertEqual(len(ret), 21) + self.assertEqual(ret[10].name, "Rational Asset 
Manager 7.2.0.1") + self.assertEqual(ret[9].id, '10002573') + + def test_list_locations(self): + ret = self.driver.list_locations() + self.assertEquals(len(ret), 1) + self.assertEquals(ret[0].id, '1') + self.assertEquals(ret[0].name, 'US North East: Poughkeepsie, NY') + self.assertEquals(ret[0].country, 'US') + + def test_create_node(self): + # Test creation of node + IBMMockHttp.type = 'CREATE' + image = NodeImage(id=11, name='Rational Insight', driver=self.driver) + size = NodeSize('LARGE', 'LARGE', None, None, None, None, self.driver) + location = NodeLocation('1', 'POK', 'US', driver=self.driver) + ret = self.driver.create_node(name='RationalInsight4', + image=image, + size=size, + location=location, + publicKey='MyPublicKey', + configurationData={ + 'insight_admin_password': 'myPassword1', + 'db2_admin_password': 'myPassword2', + 'report_user_password': 'myPassword3'}) + self.assertTrue(isinstance(ret, Node)) + self.assertEquals(ret.name, 'RationalInsight4') + + # Test creation attempt with invalid location + IBMMockHttp.type = 'CREATE_INVALID' + location = NodeLocation('3', 'DOESNOTEXIST', 'US', driver=self.driver) + try: + ret = self.driver.create_node(name='RationalInsight5', + image=image, + size=size, + location=location, + publicKey='MyPublicKey', + configurationData={ + 'insight_admin_password': 'myPassword1', + 'db2_admin_password': 'myPassword2', + 'report_user_password': 'myPassword3'}) + except Exception, e: + self.assertEquals(e.args[0], 'Error 412: No DataCenter with id: 3') + else: + self.fail('test should have thrown') + + def test_destroy_node(self): + # Delete existant node + nodes = self.driver.list_nodes() # retrieves 3 nodes + self.assertEquals(len(nodes), 3) + IBMMockHttp.type = 'DELETE' + toDelete = nodes[1] + ret = self.driver.destroy_node(toDelete) + self.assertTrue(ret) + + # Delete non-existant node + IBMMockHttp.type = 'DELETED' + nodes = self.driver.list_nodes() # retrieves 2 nodes + self.assertEquals(len(nodes), 2) + try: + 
self.driver.destroy_node(toDelete) # delete non-existent node + except Exception, e: + self.assertEquals(e.args[0], 'Error 404: Invalid Instance ID 28193') + else: + self.fail('test should have thrown') + + def test_reboot_node(self): + nodes = self.driver.list_nodes() + IBMMockHttp.type = 'REBOOT' + + # Reboot active node + self.assertEquals(len(nodes), 3) + ret = self.driver.reboot_node(nodes[0]) + self.assertTrue(ret) + + # Reboot inactive node + try: + ret = self.driver.reboot_node(nodes[1]) + except Exception, e: + self.assertEquals(e.args[0], 'Error 412: Instance must be in the Active state') + else: + self.fail('test should have thrown') + +class IBMMockHttp(MockHttp): + fixtures = ComputeFileFixtures('ibm_sbc') + + def _computecloud_enterprise_api_rest_20100331_instances(self, method, url, body, headers): + body = self.fixtures.load('instances.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_DELETED(self, method, url, body, headers): + body = self.fixtures.load('instances_deleted.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_UNAUTHORIZED(self, method, url, body, headers): + return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.UNAUTHORIZED]) + + def _computecloud_enterprise_api_rest_20100331_offerings_image(self, method, url, body, headers): + body = self.fixtures.load('images.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_locations(self, method, url, body, headers): + body = self.fixtures.load('locations.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_26557_REBOOT(self, method, url, body, headers): + body = self.fixtures.load('reboot_active.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def 
_computecloud_enterprise_api_rest_20100331_instances_28193_REBOOT(self, method, url, body, headers): + return (412, 'Error 412: Instance must be in the Active state', {}, 'Precondition Failed') + + def _computecloud_enterprise_api_rest_20100331_instances_28193_DELETE(self, method, url, body, headers): + body = self.fixtures.load('delete.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_28193_DELETED(self, method, url, body, headers): + return (404, 'Error 404: Invalid Instance ID 28193', {}, 'Precondition Failed') + + def _computecloud_enterprise_api_rest_20100331_instances_CREATE(self, method, url, body, headers): + body = self.fixtures.load('create.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _computecloud_enterprise_api_rest_20100331_instances_CREATE_INVALID(self, method, url, body, headers): + return (412, 'Error 412: No DataCenter with id: 3', {}, 'Precondition Failed') + + # This is only to accomodate the response tests built into test\__init__.py + def _computecloud_enterprise_api_rest_20100331_instances_26557(self, method, url, body, headers): + if method == 'DELETE': + body = self.fixtures.load('delete.xml') + else: + body = self.fixtures.load('reboot_active.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_linode.py b/trunk/test/compute/test_linode.py new file mode 100644 index 0000000000..dfa6c7f74f --- /dev/null +++ b/trunk/test/compute/test_linode.py @@ -0,0 +1,148 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Maintainer: Jed Smith +# Based upon code written by Alex Polvi +# + +import sys +import unittest +import httplib + +from libcloud.compute.drivers.linode import LinodeNodeDriver +from libcloud.compute.base import Node, NodeAuthPassword + +from test import MockHttp +from test.compute import TestCaseMixin + +class LinodeTest(unittest.TestCase, TestCaseMixin): + # The Linode test suite + + def setUp(self): + LinodeNodeDriver.connectionCls.conn_classes = (None, LinodeMockHttp) + LinodeMockHttp.use_param = 'api_action' + self.driver = LinodeNodeDriver('foo') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 1) + node = nodes[0] + self.assertEqual(node.id, "8098") + self.assertEqual(node.name, 'api-node3') + self.assertTrue('75.127.96.245' in node.public_ip) + self.assertEqual(node.private_ip, []) + + def test_reboot_node(self): + # An exception would indicate failure + node = self.driver.list_nodes()[0] + self.driver.reboot_node(node) + + def test_destroy_node(self): + # An exception would indicate failure + node = self.driver.list_nodes()[0] + self.driver.destroy_node(node) + + def test_create_node(self): + # Will exception on failure + self.driver.create_node(name="Test", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[6], + auth=NodeAuthPassword("test123")) + + def 
test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 10) + for size in sizes: + self.assertEqual(size.ram, int(size.name.split(" ")[1])) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 22) + + def test_create_node_response(self): + # should return a node object + node = self.driver.create_node(name="node-name", + location=self.driver.list_locations()[0], + size=self.driver.list_sizes()[0], + image=self.driver.list_images()[0], + auth=NodeAuthPassword("foobar")) + self.assertTrue(isinstance(node[0], Node)) + + +class LinodeMockHttp(MockHttp): + def _avail_datacenters(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"avail.datacenters","DATA":[{"DATACENTERID":2,"LOCATION":"Dallas, TX, USA"},{"DATACENTERID":3,"LOCATION":"Fremont, CA, USA"},{"DATACENTERID":4,"LOCATION":"Atlanta, GA, USA"},{"DATACENTERID":6,"LOCATION":"Newark, NJ, USA"}]}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _avail_linodeplans(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"avail.linodeplans","DATA":[{"AVAIL":{"2":27,"3":0,"4":0,"6":0},"DISK":16,"PRICE":19.95,"PLANID":1,"LABEL":"Linode 360","RAM":360,"XFER":200},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":24,"PRICE":29.95,"PLANID":2,"LABEL":"Linode 540","RAM":540,"XFER":300},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":32,"PRICE":39.95,"PLANID":3,"LABEL":"Linode 720","RAM":720,"XFER":400},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":48,"PRICE":59.95,"PLANID":4,"LABEL":"Linode 1080","RAM":1080,"XFER":600},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":64,"PRICE":79.95,"PLANID":5,"LABEL":"Linode 1440","RAM":1440,"XFER":800},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":128,"PRICE":159.95,"PLANID":6,"LABEL":"Linode 2880","RAM":2880,"XFER":1600},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":256,"PRICE":319.95,"PLANID":7,"LABEL":"Linode 
5760","RAM":5760,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":384,"PRICE":479.95,"PLANID":8,"LABEL":"Linode 8640","RAM":8640,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":512,"PRICE":639.95,"PLANID":9,"LABEL":"Linode 11520","RAM":11520,"XFER":2000},{"AVAIL":{"2":0,"3":0,"4":0,"6":0},"DISK":640,"PRICE":799.95,"PLANID":10,"LABEL":"Linode 14400","RAM":14400,"XFER":2000}]}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _avail_distributions(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"avail.distributions","DATA":[{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Arch Linux 2007.08","MINIMAGESIZE":436,"DISTRIBUTIONID":38,"CREATE_DT":"2007-10-24 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Centos 5.0","MINIMAGESIZE":594,"DISTRIBUTIONID":32,"CREATE_DT":"2007-04-27 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Centos 5.2","MINIMAGESIZE":950,"DISTRIBUTIONID":46,"CREATE_DT":"2008-11-30 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":1,"LABEL":"Centos 5.2 64bit","MINIMAGESIZE":980,"DISTRIBUTIONID":47,"CREATE_DT":"2008-11-30 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":0,"LABEL":"Debian 4.0","MINIMAGESIZE":200,"DISTRIBUTIONID":28,"CREATE_DT":"2007-04-18 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":1,"LABEL":"Debian 4.0 64bit","MINIMAGESIZE":220,"DISTRIBUTIONID":48,"CREATE_DT":"2008-12-02 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Debian 5.0","MINIMAGESIZE":200,"DISTRIBUTIONID":50,"CREATE_DT":"2009-02-19 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":1,"LABEL":"Debian 5.0 64bit","MINIMAGESIZE":300,"DISTRIBUTIONID":51,"CREATE_DT":"2009-02-19 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":0,"LABEL":"Fedora 8","MINIMAGESIZE":740,"DISTRIBUTIONID":40,"CREATE_DT":"2007-11-09 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Fedora 9","MINIMAGESIZE":1175,"DISTRIBUTIONID":43,"CREATE_DT":"2008-06-09 
15:15:21.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":0,"LABEL":"Gentoo 2007.0","MINIMAGESIZE":1800,"DISTRIBUTIONID":35,"CREATE_DT":"2007-08-29 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Gentoo 2008.0","MINIMAGESIZE":1500,"DISTRIBUTIONID":52,"CREATE_DT":"2009-03-20 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":1,"LABEL":"Gentoo 2008.0 64bit","MINIMAGESIZE":2500,"DISTRIBUTIONID":53,"CREATE_DT":"2009-04-04 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"OpenSUSE 11.0","MINIMAGESIZE":850,"DISTRIBUTIONID":44,"CREATE_DT":"2008-08-21 08:32:16.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Slackware 12.0","MINIMAGESIZE":315,"DISTRIBUTIONID":34,"CREATE_DT":"2007-07-16 00:00:00.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":0,"LABEL":"Slackware 12.2","MINIMAGESIZE":500,"DISTRIBUTIONID":54,"CREATE_DT":"2009-04-04 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Ubuntu 8.04 LTS","MINIMAGESIZE":400,"DISTRIBUTIONID":41,"CREATE_DT":"2008-04-23 15:11:29.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":1,"LABEL":"Ubuntu 8.04 LTS 64bit","MINIMAGESIZE":350,"DISTRIBUTIONID":42,"CREATE_DT":"2008-06-03 12:51:11.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Ubuntu 8.10","MINIMAGESIZE":220,"DISTRIBUTIONID":45,"CREATE_DT":"2008-10-30 23:23:03.0"},{"REQUIRESPVOPSKERNEL":1,"IS64BIT":1,"LABEL":"Ubuntu 8.10 64bit","MINIMAGESIZE":230,"DISTRIBUTIONID":49,"CREATE_DT":"2008-12-02 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":0,"LABEL":"Ubuntu 9.04","MINIMAGESIZE":350,"DISTRIBUTIONID":55,"CREATE_DT":"2009-04-23 00:00:00.0"},{"REQUIRESPVOPSKERNEL":0,"IS64BIT":1,"LABEL":"Ubuntu 9.04 64bit","MINIMAGESIZE":350,"DISTRIBUTIONID":56,"CREATE_DT":"2009-04-23 00:00:00.0"}]}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_create(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.create","DATA":{"LinodeID":8098}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_disk_createfromdistribution(self, 
method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.disk.createFromDistribution","DATA":{"JobID":1298,"DiskID":55647}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_delete(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.delete","DATA":{"LinodeID":8098}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_update(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.update","DATA":{"LinodeID":8098}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_reboot(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.reboot","DATA":{"JobID":1305}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _avail_kernels(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"avail.kernels","DATA":[{"LABEL":"Latest 2.6 Stable (2.6.18.8-linode19)","ISXEN":1,"KERNELID":60},{"LABEL":"2.6.18.8-linode19","ISXEN":1,"KERNELID":103},{"LABEL":"2.6.30.5-linode20","ISXEN":1,"KERNELID":105},{"LABEL":"Latest 2.6 Stable (2.6.18.8-x86_64-linode7)","ISXEN":1,"KERNELID":107},{"LABEL":"2.6.18.8-x86_64-linode7","ISXEN":1,"KERNELID":104},{"LABEL":"2.6.30.5-x86_64-linode8","ISXEN":1,"KERNELID":106},{"LABEL":"pv-grub-x86_32","ISXEN":1,"KERNELID":92},{"LABEL":"pv-grub-x86_64","ISXEN":1,"KERNELID":95},{"LABEL":"Recovery - Finnix 
(kernel)","ISXEN":1,"KERNELID":61},{"LABEL":"2.6.18.8-domU-linode7","ISXEN":1,"KERNELID":81},{"LABEL":"2.6.18.8-linode10","ISXEN":1,"KERNELID":89},{"LABEL":"2.6.18.8-linode16","ISXEN":1,"KERNELID":98},{"LABEL":"2.6.24.4-linode8","ISXEN":1,"KERNELID":84},{"LABEL":"2.6.25-linode9","ISXEN":1,"KERNELID":88},{"LABEL":"2.6.25.10-linode12","ISXEN":1,"KERNELID":90},{"LABEL":"2.6.26-linode13","ISXEN":1,"KERNELID":91},{"LABEL":"2.6.27.4-linode14","ISXEN":1,"KERNELID":93},{"LABEL":"2.6.28-linode15","ISXEN":1,"KERNELID":96},{"LABEL":"2.6.28.3-linode17","ISXEN":1,"KERNELID":99},{"LABEL":"2.6.29-linode18","ISXEN":1,"KERNELID":101},{"LABEL":"2.6.16.38-x86_64-linode2","ISXEN":1,"KERNELID":85},{"LABEL":"2.6.18.8-x86_64-linode1","ISXEN":1,"KERNELID":86},{"LABEL":"2.6.27.4-x86_64-linode3","ISXEN":1,"KERNELID":94},{"LABEL":"2.6.28-x86_64-linode4","ISXEN":1,"KERNELID":97},{"LABEL":"2.6.28.3-x86_64-linode5","ISXEN":1,"KERNELID":100},{"LABEL":"2.6.29-x86_64-linode6","ISXEN":1,"KERNELID":102}]}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_disk_create(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.disk.create","DATA":{"JobID":1299,"DiskID":55648}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_boot(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.boot","DATA":{"JobID":1300}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_config_create(self, method, url, body, headers): + body = '{"ERRORARRAY":[],"ACTION":"linode.config.create","DATA":{"ConfigID":31239}}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_list(self, method, url, body, headers): + body = '{"ACTION": "linode.list", "DATA": [{"ALERT_DISKIO_ENABLED": 1, "BACKUPWEEKLYDAY": 0, "LABEL": "api-node3", "DATACENTERID": 5, "ALERT_BWOUT_ENABLED": 1, "ALERT_CPU_THRESHOLD": 10, "TOTALHD": 100, "ALERT_BWQUOTA_THRESHOLD": 81, "ALERT_BWQUOTA_ENABLED": 1, 
"TOTALXFER": 200, "STATUS": 2, "ALERT_BWIN_ENABLED": 1, "ALERT_BWIN_THRESHOLD": 5, "ALERT_DISKIO_THRESHOLD": 200, "WATCHDOG": 1, "LINODEID": 8098, "BACKUPWINDOW": 1, "TOTALRAM": 540, "LPM_DISPLAYGROUP": "", "ALERT_BWOUT_THRESHOLD": 5, "BACKUPSENABLED": 1, "ALERT_CPU_ENABLED": 1}], "ERRORARRAY": []}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _linode_ip_list(self, method, url, body, headers): + body = '{"ACTION": "linode.ip.list", "DATA": [{"RDNS_NAME": "li22-54.members.linode.com", "ISPUBLIC": 1, "IPADDRESS": "75.127.96.54", "IPADDRESSID": 5384, "LINODEID": 8098}, {"RDNS_NAME": "li22-245.members.linode.com", "ISPUBLIC": 1, "IPADDRESS": "75.127.96.245", "IPADDRESSID": 5575, "LINODEID": 8098}], "ERRORARRAY": []}' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _batch(self, method, url, body, headers): + body = '[{"ACTION": "linode.ip.list", "DATA": [{"RDNS_NAME": "li22-54.members.linode.com", "ISPUBLIC": 1, "IPADDRESS": "75.127.96.54", "IPADDRESSID": 5384, "LINODEID": 8098}, {"RDNS_NAME": "li22-245.members.linode.com", "ISPUBLIC": 1, "IPADDRESS": "75.127.96.245", "IPADDRESSID": 5575, "LINODEID": 8098}], "ERRORARRAY": []}]' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_opennebula.py b/trunk/test/compute/test_opennebula.py new file mode 100644 index 0000000000..0623bb555d --- /dev/null +++ b/trunk/test/compute/test_opennebula.py @@ -0,0 +1,148 @@ +# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad +# Complutense de Madrid (dsa-research.org) +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +import httplib + +from libcloud.compute.drivers.opennebula import OpenNebulaNodeDriver +from libcloud.compute.base import Node, NodeImage, NodeSize + +from test import MockHttp +from test.compute import TestCaseMixin +from test.file_fixtures import ComputeFileFixtures + +from test.secrets import OPENNEBULA_PARAMS + + +class OpenNebula_1_4_Tests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + OpenNebulaNodeDriver.connectionCls.conn_classes = (None, + OpenNebulaMockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('1.4',)) + + def test_create_node(self): + image = NodeImage(id=1, name='UbuntuServer9.04-Contextualized', + driver=self.driver) + size = NodeSize(1, 'small', None, None, None, None, driver=self.driver) + node = self.driver.create_node(name='MyCompute', image=image, + size=size) + self.assertEqual(node.id, '5') + self.assertEqual(node.name, 'MyCompute') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + node = nodes[0] + self.assertEqual(node.id, '5') + self.assertEqual(node.name, 'MyCompute') + + def test_reboot_node(self): + node = Node(5, None, None, None, None, self.driver) + ret = self.driver.reboot_node(node) + self.assertTrue(ret) + + def test_destroy_node(self): + node = Node(5, None, None, None, None, self.driver) + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + + 
def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 3) + self.assertTrue('small' in [s.name for s in sizes]) + self.assertTrue('medium' in [s.name for s in sizes]) + self.assertTrue('large' in [s.name for s in sizes]) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 2) + image = images[0] + self.assertEqual(image.id, '1') + self.assertEqual(image.name, 'UbuntuServer9.04-Contextualized') + + +class OpenNebula_3_0_Tests(unittest.TestCase): + + def setUp(self): + OpenNebulaNodeDriver.connectionCls.conn_classes = (None, + OpenNebulaMockHttp) + self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.0',)) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + names = [s.name for s in sizes] + self.assertEqual(len(sizes), 4) + self.assertTrue('small' in names) + self.assertTrue('medium' in names) + self.assertTrue('large' in names) + self.assertTrue('custom' in names) + self.assertEqual([s for s in sizes if s.id == '3'][0].cpu, 8) + + +class OpenNebulaMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('opennebula') + + def _compute(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('computes.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'POST': + body = self.fixtures.load('compute.xml') + return (httplib.CREATED, body, {}, + httplib.responses[httplib.CREATED]) + + def _storage(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('storage.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _compute_5(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('compute.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + if method == 'PUT': + body = "" + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + if method == 'DELETE': + body = "" + return 
(httplib.NO_CONTENT, body, {}, + httplib.responses[httplib.NO_CONTENT]) + + def _compute_15(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('compute.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _storage_1(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('disk.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _storage_8(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('disk.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_openstack.py b/trunk/test/compute/test_openstack.py new file mode 100644 index 0000000000..a1c7897faf --- /dev/null +++ b/trunk/test/compute/test_openstack.py @@ -0,0 +1,785 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest +import httplib + +from libcloud.common.types import InvalidCredsError, MalformedResponseError +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver +from libcloud.compute.drivers.openstack import ( + OpenStack_1_0_NodeDriver, OpenStack_1_0_Response, + OpenStack_1_1_NodeDriver +) +from libcloud.compute.base import Node, NodeImage, NodeSize +from libcloud.pricing import set_pricing, clear_pricing_data + +from test import MockResponse, MockHttpTestCase, XML_HEADERS +from test.file_fixtures import ComputeFileFixtures, OpenStackFixtures +from test.compute import TestCaseMixin + +from test.secrets import OPENSTACK_PARAMS + + +class OpenStack_1_0_ResponseTestCase(unittest.TestCase): + XML = """""" + + def test_simple_xml_content_type_handling(self): + http_response = MockResponse(200, OpenStack_1_0_ResponseTestCase.XML, headers={'content-type': 'application/xml'}) + body = OpenStack_1_0_Response(http_response, None).parse_body() + + self.assertTrue(hasattr(body, 'tag'), "Body should be parsed as XML") + + def test_extended_xml_content_type_handling(self): + http_response = MockResponse(200, + OpenStack_1_0_ResponseTestCase.XML, + headers={'content-type': 'application/xml; charset=UTF-8'}) + body = OpenStack_1_0_Response(http_response, None).parse_body() + + self.assertTrue(hasattr(body, 'tag'), "Body should be parsed as XML") + + def test_non_xml_content_type_handling(self): + RESPONSE_BODY = "Accepted" + + http_response = MockResponse(202, RESPONSE_BODY, headers={'content-type': 'text/html'}) + body = OpenStack_1_0_Response(http_response, None).parse_body() + + self.assertEqual(body, RESPONSE_BODY, "Non-XML body should be returned as is") + + +class OpenStack_1_0_Tests(unittest.TestCase, TestCaseMixin): + should_list_locations = False + + driver_klass = OpenStack_1_0_NodeDriver + driver_args = OPENSTACK_PARAMS + driver_kwargs = {} + + @classmethod + def create_driver(self): + if self is not 
OpenStack_1_0_FactoryMethodTests: + self.driver_type = self.driver_klass + return self.driver_type(*self.driver_args, **self.driver_kwargs) + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = (OpenStackMockHttp, OpenStackMockHttp) + self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com/v1.1/" + OpenStackMockHttp.type = None + self.driver = self.create_driver() + clear_pricing_data() + + def test_auth(self): + OpenStackMockHttp.type = 'UNAUTHORIZED' + try: + self.driver = self.create_driver() + except InvalidCredsError, e: + self.assertEqual(True, isinstance(e, InvalidCredsError)) + else: + self.fail('test should have thrown') + + def test_auth_missing_key(self): + OpenStackMockHttp.type = 'UNAUTHORIZED_MISSING_KEY' + try: + self.driver = self.create_driver() + except MalformedResponseError, e: + self.assertEqual(True, isinstance(e, MalformedResponseError)) + else: + self.fail('test should have thrown') + + def test_auth_server_error(self): + OpenStackMockHttp.type = 'INTERNAL_SERVER_ERROR' + try: + self.driver = self.create_driver() + except MalformedResponseError, e: + self.assertEqual(True, isinstance(e, MalformedResponseError)) + else: + self.fail('test should have thrown') + + def test_list_locations(self): + locations = self.driver.list_locations() + self.assertEqual(len(locations), 1) + + def test_list_nodes(self): + OpenStackMockHttp.type = 'EMPTY' + ret = self.driver.list_nodes() + self.assertEqual(len(ret), 0) + OpenStackMockHttp.type = None + ret = self.driver.list_nodes() + self.assertEqual(len(ret), 1) + node = ret[0] + self.assertEqual('67.23.21.33', node.public_ip[0]) + self.assertEqual('10.176.168.218', node.private_ip[0]) + self.assertEqual(node.extra.get('flavorId'), '1') + self.assertEqual(node.extra.get('imageId'), '11') + self.assertEqual(type(node.extra.get('metadata')), type(dict())) + OpenStackMockHttp.type = 'METADATA' + ret = self.driver.list_nodes() + self.assertEqual(len(ret), 1) + node = ret[0] + 
self.assertEqual(type(node.extra.get('metadata')), type(dict())) + self.assertEqual(node.extra.get('metadata').get('somekey'), + 'somevalue') + OpenStackMockHttp.type = None + + def test_list_images(self): + ret = self.driver.list_images() + expected = {10: {'serverId': None, + 'status': 'ACTIVE', + 'created': '2009-07-20T09:14:37-05:00', + 'updated': '2009-07-20T09:14:37-05:00', + 'progress': None}, + 11: {'serverId': '91221', + 'status': 'ACTIVE', + 'created': '2009-11-29T20:22:09-06:00', + 'updated': '2009-11-29T20:24:08-06:00', + 'progress': '100'}} + for ret_idx, extra in expected.items(): + for key, value in extra.items(): + self.assertEqual(ret[ret_idx].extra[key], value) + + def test_create_node(self): + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size) + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') + + def test_create_node_ex_shared_ip_group(self): + OpenStackMockHttp.type = 'EX_SHARED_IP_GROUP' + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size, + ex_shared_ip_group_id='12345') + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') + + def test_create_node_with_metadata(self): + OpenStackMockHttp.type = 'METADATA' + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + metadata = {'a': 'b', 'c': 'd'} + files = {'/file1': 'content1', '/file2': 'content2'} + node = self.driver.create_node(name='racktest', image=image, size=size, + metadata=metadata, files=files) + 
self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra.get('password'), 'racktestvJq7d3') + self.assertEqual(node.extra.get('metadata'), metadata) + + def test_reboot_node(self): + node = Node(id=72258, name=None, state=None, public_ip=None, + private_ip=None, driver=self.driver) + ret = node.reboot() + self.assertTrue(ret is True) + + def test_destroy_node(self): + node = Node(id=72258, name=None, state=None, public_ip=None, + private_ip=None, driver=self.driver) + ret = node.destroy() + self.assertTrue(ret is True) + + def test_ex_limits(self): + limits = self.driver.ex_limits() + self.assertTrue("rate" in limits) + self.assertTrue("absolute" in limits) + + def test_ex_save_image(self): + node = Node(id=444222, name=None, state=None, public_ip=None, + private_ip=None, driver=self.driver) + image = self.driver.ex_save_image(node, "imgtest") + self.assertEqual(image.name, "imgtest") + self.assertEqual(image.id, "12345") + + def test_ex_delete_image(self): + image = NodeImage(id=333111, name='Ubuntu 8.10 (intrepid)', + driver=self.driver) + ret = self.driver.ex_delete_image(image) + self.assertTrue(ret) + + def test_ex_list_ip_addresses(self): + ret = self.driver.ex_list_ip_addresses(node_id=72258) + self.assertEquals(2, len(ret.public_addresses)) + self.assertTrue('67.23.10.131' in ret.public_addresses) + self.assertTrue('67.23.10.132' in ret.public_addresses) + self.assertEquals(1, len(ret.private_addresses)) + self.assertTrue('10.176.42.16' in ret.private_addresses) + + def test_ex_list_ip_groups(self): + ret = self.driver.ex_list_ip_groups() + self.assertEquals(2, len(ret)) + self.assertEquals('1234', ret[0].id) + self.assertEquals('Shared IP Group 1', ret[0].name) + self.assertEquals('5678', ret[1].id) + self.assertEquals('Shared IP Group 2', ret[1].name) + self.assertTrue(ret[0].servers is None) + + def test_ex_list_ip_groups_detail(self): + ret = self.driver.ex_list_ip_groups(details=True) + + self.assertEquals(2, len(ret)) + + 
self.assertEquals('1234', ret[0].id) + self.assertEquals('Shared IP Group 1', ret[0].name) + self.assertEquals(2, len(ret[0].servers)) + self.assertEquals('422', ret[0].servers[0]) + self.assertEquals('3445', ret[0].servers[1]) + + self.assertEquals('5678', ret[1].id) + self.assertEquals('Shared IP Group 2', ret[1].name) + self.assertEquals(3, len(ret[1].servers)) + self.assertEquals('23203', ret[1].servers[0]) + self.assertEquals('2456', ret[1].servers[1]) + self.assertEquals('9891', ret[1].servers[2]) + + def test_ex_create_ip_group(self): + ret = self.driver.ex_create_ip_group('Shared IP Group 1', '5467') + self.assertEquals('1234', ret.id) + self.assertEquals('Shared IP Group 1', ret.name) + self.assertEquals(1, len(ret.servers)) + self.assertEquals('422', ret.servers[0]) + + def test_ex_delete_ip_group(self): + ret = self.driver.ex_delete_ip_group('5467') + self.assertEquals(True, ret) + + def test_ex_share_ip(self): + ret = self.driver.ex_share_ip('1234', '3445', '67.23.21.133') + self.assertEquals(True, ret) + + def test_ex_unshare_ip(self): + ret = self.driver.ex_unshare_ip('3445', '67.23.21.133') + self.assertEquals(True, ret) + + def test_ex_resize(self): + node = Node(id=444222, name=None, state=None, public_ip=None, + private_ip=None, driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + self.assertTrue(self.driver.ex_resize(node=node, size=size)) + + def test_ex_confirm_resize(self): + node = Node(id=444222, name=None, state=None, public_ip=None, + private_ip=None, driver=self.driver) + self.assertTrue(self.driver.ex_confirm_resize(node=node)) + + def test_ex_revert_resize(self): + node = Node(id=444222, name=None, state=None, public_ip=None, + private_ip=None, driver=self.driver) + self.assertTrue(self.driver.ex_revert_resize(node=node)) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 7, 'Wrong sizes count') + + for size in sizes: + 
self.assertTrue(isinstance(size.price, float), + 'Wrong size price type') + + if self.driver.api_name == 'openstack': + self.assertEqual(size.price, 0, + 'Size price should be zero by default') + + def test_list_sizes_with_specified_pricing(self): + if self.driver.api_name != 'openstack': + return + + pricing = dict((str(i), i) for i in range(1, 8)) + + set_pricing(driver_type='compute', driver_name='openstack', + pricing=pricing) + + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 7, 'Wrong sizes count') + + for size in sizes: + self.assertTrue(isinstance(size.price, float), + 'Wrong size price type') + self.assertEqual(float(size.price), float(pricing[size.id])) + + +class OpenStack_1_0_FactoryMethodTests(OpenStack_1_0_Tests): + should_list_locations = False + + driver_klass = OpenStack_1_0_NodeDriver + driver_type = get_driver(Provider.OPENSTACK) + driver_args = OPENSTACK_PARAMS + ('1.0',) + + def test_factory_method_invalid_version(self): + try: + self.driver_type(*(OPENSTACK_PARAMS + ('15.5',))) + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + +class OpenStackMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('openstack') + auth_fixtures = OpenStackFixtures() + json_content_headers = {'content-type': 'application/json; charset=UTF-8'} + + # fake auth token response + def _v1_0(self, method, url, body, headers): + headers = {'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', + 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-storage-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-storage-url': 'https://storage4.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} + return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) + + def _v1_0_UNAUTHORIZED(self, method, url, body, headers): + return 
(httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) + + def _v1_0_INTERNAL_SERVER_ERROR(self, method, url, body, headers): + return (httplib.INTERNAL_SERVER_ERROR, "

500: Internal Server Error

", {}, httplib.responses[httplib.INTERNAL_SERVER_ERROR]) + + def _v1_0_UNAUTHORIZED_MISSING_KEY(self, method, url, body, headers): + headers = {'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug', + 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} + return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) + + def _v1_0_slug_servers_detail_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail_empty.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers_detail_METADATA(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_detail_metadata.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_images_333111(self, method, url, body, headers): + if method != "DELETE": + raise NotImplementedError() + # this is currently used for deletion of an image + # as such it should not accept GET/POST + return(httplib.NO_CONTENT,"","",httplib.responses[httplib.NO_CONTENT]) + + def _v1_0_slug_images(self, method, url, body, headers): + if method != "POST": + raise NotImplementedError() + # this is currently used for creation of new image with + # POST request, don't handle GET to avoid possible confusion + body = self.fixtures.load('v1_slug_images_post.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_images_detail(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_images_detail.xml') + return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK]) + + def _v1_0_slug_servers(self, 
method, url, body, headers): + body = self.fixtures.load('v1_slug_servers.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_servers_EX_SHARED_IP_GROUP(self, method, url, body, headers): + # test_create_node_ex_shared_ip_group + # Verify that the body contains sharedIpGroupId XML element + self.assertTrue(body.find('sharedIpGroupId="12345"') != -1) + body = self.fixtures.load('v1_slug_servers.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_servers_METADATA(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_servers_metadata.xml') + return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_servers_72258_action(self, method, url, body, headers): + if method != "POST" or body[:8] != "500: Internal Server Error", {'content-type': 'text/html'}, httplib.responses[httplib.INTERNAL_SERVER_ERROR]) + +class OpenStack_1_1_Tests(unittest.TestCase, TestCaseMixin): + should_list_locations = False + + driver_klass = OpenStack_1_1_NodeDriver + driver_type = OpenStack_1_1_NodeDriver + driver_args = OPENSTACK_PARAMS + driver_kwargs = {'ex_force_auth_version': '1.0'} + + @classmethod + def create_driver(self): + if self is not OpenStack_1_1_FactoryMethodTests: + self.driver_type = self.driver_klass + return self.driver_type(*self.driver_args, **self.driver_kwargs) + + def setUp(self): + self.driver_klass.connectionCls.conn_classes = (OpenStack_1_1_MockHttp, OpenStack_1_1_MockHttp) + self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com/v1.0/" + OpenStack_1_1_MockHttp.type = None + self.driver = self.create_driver() + clear_pricing_data() + self.node = self.driver.list_nodes()[1] + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + node = nodes[0] + + self.assertEqual('12065', node.id) + self.assertEqual('50.57.94.35', node.public_ip[0]) + 
self.assertEqual('2001:4801:7808:52:16:3eff:fe47:788a', node.public_ip[1]) + self.assertEqual('10.182.64.34', node.private_ip[0]) + self.assertEqual('fec0:4801:7808:52:16:3eff:fe60:187d', node.private_ip[1]) + + self.assertEqual(node.extra.get('flavorId'), '2') + self.assertEqual(node.extra.get('imageId'), '7') + self.assertEqual(node.extra.get('metadata'), {}) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 8, 'Wrong sizes count') + + for size in sizes: + self.assertTrue(isinstance(size.price, float), + 'Wrong size price type') + self.assertEqual(size.price, 0, + 'Size price should be zero by default') + + def test_list_sizes_with_specified_pricing(self): + + pricing = dict((str(i), i*5.0) for i in range(1, 9)) + + set_pricing(driver_type='compute', driver_name='openstack', pricing=pricing) + + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 8, 'Wrong sizes count') + + for size in sizes: + self.assertTrue(isinstance(size.price, float), + 'Wrong size price type') + self.assertEqual(size.price, pricing[size.id], + 'Size price should match') + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images), 13, 'Wrong images count') + + image = images[0] + self.assertEqual(image.id, '13') + self.assertEqual(image.name, 'Windows 2008 SP2 x86 (B24)') + self.assertEqual(image.extra['updated'], '2011-08-06T18:14:02Z') + self.assertEqual(image.extra['created'], '2011-08-06T18:13:11Z') + self.assertEqual(image.extra['status'], 'ACTIVE') + self.assertEqual(image.extra['metadata']['os_type'], 'windows') + + def test_create_node(self): + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size) + self.assertEqual(node.id, '52415800-8b69-11e0-9b19-734f565bc83b') + self.assertEqual(node.name, 'new-server-test') + 
self.assertEqual(node.extra['password'], 'GFf1j9aP') + self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1') + + def test_destroy_node(self): + self.assertTrue(self.node.destroy()) + + def test_reboot_node(self): + self.assertTrue(self.node.reboot()) + + def test_ex_set_password(self): + try: + self.driver.ex_set_password(self.node, 'New1&53jPass') + except Exception, e: + self.fail('An error was raised: ' + repr(e)) + + def test_ex_rebuild(self): + image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + try: + self.driver.ex_rebuild(self.node, image=image) + except Exception, e: + self.fail('An error was raised: ' + repr(e)) + + def test_ex_resize(self): + size = NodeSize(1, '256 slice', None, None, None, None, + driver=self.driver) + try: + self.driver.ex_resize(self.node, size) + except Exception, e: + self.fail('An error was raised: ' + repr(e)) + + def test_ex_confirm_resize(self): + try: + self.driver.ex_confirm_resize(self.node) + except Exception, e: + self.fail('An error was raised: ' + repr(e)) + + def test_ex_revert_resize(self): + try: + self.driver.ex_revert_resize(self.node) + except Exception, e: + self.fail('An error was raised: ' + repr(e)) + + def test_ex_save_image(self): + try: + self.driver.ex_save_image(self.node, 'new_image') + except NotImplementedError: + pass + else: + self.fail('An expected error was not raised') + + def test_ex_set_server_name(self): + old_node = Node( + id='12064', name=None, state=None, + public_ip=None, private_ip=None, driver=self.driver, + ) + new_node = self.driver.ex_set_server_name(old_node, 'Bob') + self.assertEqual('Bob', new_node.name) + + def test_ex_set_metadata(self): + old_node = Node( + id='12063', name=None, state=None, + public_ip=None, private_ip=None, driver=self.driver, + ) + metadata = {'Image Version': '2.1', 'Server Label': 'Web Head 1'} + returned_metadata = self.driver.ex_set_metadata(old_node, metadata) + self.assertEqual(metadata, returned_metadata) + + 
def test_ex_get_metadata(self): + node = Node( + id='12063', name=None, state=None, + public_ip=None, private_ip=None, driver=self.driver, + ) + + metadata = {'Image Version': '2.1', 'Server Label': 'Web Head 1'} + returned_metadata = self.driver.ex_get_metadata(node) + self.assertEqual(metadata, returned_metadata) + + def test_ex_update_node(self): + old_node = Node( + id='12064', + name=None, state=None, public_ip=None, private_ip=None, driver=self.driver, + ) + + new_node = self.driver.ex_update_node(old_node, name='Bob') + + self.assertTrue(new_node) + self.assertEqual('Bob', new_node.name) + self.assertEqual('50.57.94.30', new_node.public_ip[0]) + + def test_ex_get_node_details(self): + node_id = '12064' + node = self.driver.ex_get_node_details(node_id) + self.assertEqual(node.id, '12064') + self.assertEqual(node.name, 'lc-test') + + def test_ex_get_size(self): + size_id = '7' + size = self.driver.ex_get_size(size_id) + self.assertEqual(size.id, size_id) + self.assertEqual(size.name, '15.5GB slice') + + def test_ex_get_image(self): + image_id = '13' + image = self.driver.ex_get_image(image_id) + self.assertEqual(image.id, image_id) + self.assertEqual(image.name, 'Windows 2008 SP2 x86 (B24)') + + def test_ex_delete_image(self): + image = NodeImage(id='26365521-8c62-11f9-2c33-283d153ecc3a', name='My Backup', driver=self.driver) + try: + self.driver.ex_delete_image(image) + except NotImplementedError: + pass + else: + self.fail('An expected error was not raised') + +class OpenStack_1_1_FactoryMethodTests(OpenStack_1_1_Tests): + should_list_locations = False + + driver_klass = OpenStack_1_1_NodeDriver + driver_type = get_driver(Provider.OPENSTACK) + driver_args = OPENSTACK_PARAMS + ('1.1',) + + +class OpenStack_1_1_MockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('openstack_v1.1') + auth_fixtures = OpenStackFixtures() + json_content_headers = {'content-type': 'application/json; charset=UTF-8'} + + def _v2_0_tokens(self, method, url, body, headers): + 
body = self.auth_fixtures.load('_v2_0__auth.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_0_(self, method, url, body, headers): + headers = { + 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-server-management-url': 'https://api.example.com/v1.1/slug', + } + return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) + + def _v1_1_slug_servers_detail(self, method, url, body, headers): + body = self.fixtures.load('_servers_detail.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_flavors_detail(self, method, url, body, headers): + body = self.fixtures.load('_flavors_detail.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_images_detail(self, method, url, body, headers): + body = self.fixtures.load('_images_detail.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers(self, method, url, body, headers): + body = self.fixtures.load('_servers.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12065_action(self, method, url, body, headers): + if method != "POST": + self.fail('HTTP method other than POST to action URL') + + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + + def _v1_1_slug_servers_12064_action(self, method, url, body, headers): + if method != "POST": + self.fail('HTTP method other than POST to action URL') + + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + + def _v1_1_slug_servers_12065(self, method, url, body, headers): + if method == "DELETE": + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + else: + raise NotImplementedError() + + def _v1_1_slug_servers_12064(self, method, url, body, headers): + if method == "GET": + body = 
self.fixtures.load('_servers_12064.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + elif method == "PUT": + body = self.fixtures.load('_servers_12064_updated_name_bob.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + elif method == "DELETE": + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + else: + raise NotImplementedError() + + def _v1_1_slug_servers_12062(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_servers_12064.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_servers_12063_metadata(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_servers_12063_metadata_two_keys.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + elif method == "PUT": + body = self.fixtures.load('_servers_12063_metadata_two_keys.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + + def _v1_1_slug_flavors_7(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_flavors_7.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + def _v1_1_slug_images_13(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_images_13.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + else: + raise NotImplementedError() + + def _v1_1_slug_images_DELETEUUID(self, method, url, body, headers): + if method == "DELETE": + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + else: + raise NotImplementedError() + +class OpenStack_1_1_Auth_2_0_Tests(OpenStack_1_1_Tests): + driver_kwargs = {'ex_force_auth_version': '2.0'} + + def setUp(self): + 
self.driver_klass.connectionCls.conn_classes = (OpenStack_1_1_MockHttp, OpenStack_1_1_MockHttp) + self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com/v2.0/" + OpenStack_1_1_MockHttp.type = None + self.driver = self.create_driver() + clear_pricing_data() + self.node = self.driver.list_nodes()[1] + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_opsource.py b/trunk/test/compute/test_opsource.py new file mode 100644 index 0000000000..8459699222 --- /dev/null +++ b/trunk/test/compute/test_opsource.py @@ -0,0 +1,222 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import sys
import unittest
import httplib

from libcloud.common.types import InvalidCredsError
from libcloud.compute.drivers.opsource import OpsourceNodeDriver as Opsource
from libcloud.compute.drivers.opsource import OpsourceAPIException
from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation

from test import MockHttp
from test.compute import TestCaseMixin
from test.file_fixtures import ComputeFileFixtures

from test.secrets import OPSOURCE_PARAMS


class OpsourceTests(unittest.TestCase, TestCaseMixin):
    """Tests for the Opsource driver, served from canned XML fixtures."""

    def setUp(self):
        # Route all HTTP traffic through the mock class below.
        Opsource.connectionCls.conn_classes = (None, OpsourceMockHttp)
        OpsourceMockHttp.type = None
        self.driver = Opsource(*OPSOURCE_PARAMS)

    def _dummy_node(self):
        # Server id 11 maps onto the *_server_11* fixtures.
        return Node(id='11', name=None, state=None,
                    public_ip=None, private_ip=None, driver=self.driver)

    def test_invalid_creds(self):
        # assertRaises replaces the try/except/assertTrue(False) pattern.
        OpsourceMockHttp.type = 'UNAUTHORIZED'
        self.assertRaises(InvalidCredsError, self.driver.list_nodes)

    def test_list_sizes_response(self):
        OpsourceMockHttp.type = None
        ret = self.driver.list_sizes()
        self.assertEqual(len(ret), 1)
        size = ret[0]
        self.assertEqual(size.name, 'default')

    def test_reboot_node_response(self):
        node = self._dummy_node()
        ret = node.reboot()
        self.assertTrue(ret is True)

    def test_reboot_node_response_INPROGRESS(self):
        OpsourceMockHttp.type = 'INPROGRESS'
        node = self._dummy_node()
        self.assertRaises(OpsourceAPIException, node.reboot)

    def test_destroy_node_response(self):
        node = self._dummy_node()
        ret = node.destroy()
        self.assertTrue(ret is True)

    def test_destroy_node_response_INPROGRESS(self):
        OpsourceMockHttp.type = 'INPROGRESS'
        node = self._dummy_node()
        self.assertRaises(OpsourceAPIException, node.destroy)

    def test_create_node_response(self):
        rootPw = NodeAuthPassword('pass123')
        image = self.driver.list_images()[0]
        network = self.driver.ex_list_networks()[0]
        node = self.driver.create_node(name='test2', image=image,
                                       auth=rootPw,
                                       ex_description='test2 node',
                                       ex_network=network,
                                       ex_isStarted=False)
        self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')

    def test_ex_shutdown_graceful(self):
        node = self._dummy_node()
        ret = self.driver.ex_shutdown_graceful(node)
        self.assertTrue(ret is True)

    def test_ex_shutdown_graceful_INPROGRESS(self):
        OpsourceMockHttp.type = 'INPROGRESS'
        node = self._dummy_node()
        self.assertRaises(OpsourceAPIException,
                          self.driver.ex_shutdown_graceful, node)

    def test_ex_start_node(self):
        node = self._dummy_node()
        ret = self.driver.ex_start_node(node)
        self.assertTrue(ret is True)

    def test_ex_start_node_INPROGRESS(self):
        OpsourceMockHttp.type = 'INPROGRESS'
        node = self._dummy_node()
        self.assertRaises(OpsourceAPIException,
                          self.driver.ex_start_node, node)

    def test_ex_power_off(self):
        node = self._dummy_node()
        ret = self.driver.ex_power_off(node)
        self.assertTrue(ret is True)

    def test_ex_power_off_INPROGRESS(self):
        OpsourceMockHttp.type = 'INPROGRESS'
        node = self._dummy_node()
        self.assertRaises(OpsourceAPIException,
                          self.driver.ex_power_off, node)

    def test_ex_list_networks(self):
        nets = self.driver.ex_list_networks()
        self.assertEqual(nets[0].name, 'test-net1')
        self.assertTrue(isinstance(nets[0].location, NodeLocation))


class OpsourceMockHttp(MockHttp):
    """Serves canned Opsource API XML responses from the fixture dir."""

    fixtures = ComputeFileFixtures('opsource')

    def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
        return (httplib.UNAUTHORIZED, "", {},
                httplib.responses[httplib.UNAUTHORIZED])

    def _oec_0_9_myaccount(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_base_image(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_base_image.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers):
        # The query string selects which lifecycle-action fixture to serve;
        # unknown actions fall through with body = None, as before.
        body = None
        action = url.split('?')[-1]
        if action in ('restart', 'shutdown', 'delete', 'start', 'poweroff'):
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_%s.xml'
                % (action))
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers):
        # Same actions, but answered with HTTP 400 so the driver raises
        # OpsourceAPIException (see the *_INPROGRESS tests above).
        body = None
        action = url.split('?')[-1]
        if action in ('restart', 'shutdown', 'delete', 'start', 'poweroff'):
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_'
                '%s_INPROGRESS.xml' % (action))
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers): + body = self.fixtures.load('_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers): + body = self.fixtures.load('oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_rackspace.py b/trunk/test/compute/test_rackspace.py new file mode 100644 index 0000000000..b5c9671c58 --- /dev/null +++ b/trunk/test/compute/test_rackspace.py @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest + +from libcloud.compute.drivers.rackspace import RackspaceNodeDriver +from test.compute.test_openstack import OpenStack_1_0_Tests + +from test.secrets import RACKSPACE_PARAMS + + +class RackspaceTests(OpenStack_1_0_Tests): + should_list_locations = True + should_have_pricing = True + + driver_klass = RackspaceNodeDriver + driver_type = RackspaceNodeDriver + driver_args = RACKSPACE_PARAMS + + def test_list_sizes_pricing(self): + sizes = self.driver.list_sizes() + + for size in sizes: + self.assertTrue(size.price > 0) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_rimuhosting.py b/trunk/test/compute/test_rimuhosting.py new file mode 100644 index 0000000000..6f87dc0927 --- /dev/null +++ b/trunk/test/compute/test_rimuhosting.py @@ -0,0 +1,107 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# Copyright 2009 RedRata Ltd + +import sys +import unittest +import httplib + +from libcloud.compute.drivers.rimuhosting import RimuHostingNodeDriver + +from test import MockHttp +from test.compute import TestCaseMixin +from test.file_fixtures import ComputeFileFixtures + +class RimuHostingTest(unittest.TestCase, TestCaseMixin): + def setUp(self): + RimuHostingNodeDriver.connectionCls.conn_classes = (None, + RimuHostingMockHttp) + self.driver = RimuHostingNodeDriver('foo') + + def test_list_nodes(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes),1) + node = nodes[0] + self.assertEqual(node.public_ip[0], "1.2.3.4") + self.assertEqual(node.public_ip[1], "1.2.3.5") + self.assertEqual(node.extra['order_oid'], 88833465) + self.assertEqual(node.id, "order-88833465-api-ivan-net-nz") + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes),1) + size = sizes[0] + self.assertEqual(size.ram,950) + self.assertEqual(size.disk,20) + self.assertEqual(size.bandwidth,75) + self.assertEqual(size.price,32.54) + + def test_list_images(self): + images = self.driver.list_images() + self.assertEqual(len(images),6) + image = images[0] + self.assertEqual(image.name,"Debian 5.0 (aka Lenny, RimuHosting"\ + " recommended distro)") + self.assertEqual(image.id, "lenny") + + def test_reboot_node(self): + # Raises exception on failure + node = self.driver.list_nodes()[0] + self.driver.reboot_node(node) + + def test_destroy_node(self): + # Raises exception on failure + node = self.driver.list_nodes()[0] + self.driver.destroy_node(node) + + def test_create_node(self): + # Raises exception on failure + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + self.driver.create_node(name="api.ivan.net.nz", image=image, size=size) + +class RimuHostingMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('rimuhosting') + + def _r_orders(self,method,url,body,headers): + body = self.fixtures.load('r_orders.json') + return 
(httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_pricing_plans(self,method,url,body,headers): + body = self.fixtures.load('r_pricing_plans.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_distributions(self, method, url, body, headers): + body = self.fixtures.load('r_distributions.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_orders_new_vps(self, method, url, body, headers): + body = self.fixtures.load('r_orders_new_vps.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_orders_order_88833465_api_ivan_net_nz_vps(self, method, url, body, headers): + body = self.fixtures.load('r_orders_order_88833465_api_ivan_net_nz_vps.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _r_orders_order_88833465_api_ivan_net_nz_vps_running_state(self, method, + url, body, + headers): + body = self.fixtures.load('r_orders_order_88833465_api_ivan_net_nz_vps_running_state.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_slicehost.py b/trunk/test/compute/test_slicehost.py new file mode 100644 index 0000000000..64def13425 --- /dev/null +++ b/trunk/test/compute/test_slicehost.py @@ -0,0 +1,155 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +import httplib + +from xml.etree import ElementTree as ET + +from libcloud.compute.drivers.slicehost import SlicehostNodeDriver as Slicehost +from libcloud.compute.types import NodeState, InvalidCredsError +from libcloud.compute.base import Node, NodeImage, NodeSize + +from test import MockHttp +from test.compute import TestCaseMixin +from test.file_fixtures import ComputeFileFixtures +from test.secrets import SLICEHOST_PARAMS + +class SlicehostTest(unittest.TestCase, TestCaseMixin): + + def setUp(self): + + Slicehost.connectionCls.conn_classes = (None, SlicehostMockHttp) + SlicehostMockHttp.type = None + self.driver = Slicehost(*SLICEHOST_PARAMS) + + def test_list_nodes(self): + ret = self.driver.list_nodes() + self.assertEqual(len(ret), 1) + node = ret[0] + self.assertTrue('174.143.212.229' in node.public_ip) + self.assertTrue('10.176.164.199' in node.private_ip) + self.assertEqual(node.state, NodeState.PENDING) + + SlicehostMockHttp.type = 'UNAUTHORIZED' + try: + ret = self.driver.list_nodes() + except InvalidCredsError, e: + self.assertEqual(e.value, 'HTTP Basic: Access denied.') + else: + self.fail('test should have thrown') + + def test_list_sizes(self): + ret = self.driver.list_sizes() + self.assertEqual(len(ret), 7) + size = ret[0] + self.assertEqual(size.name, '256 slice') + + def test_list_images(self): + ret = self.driver.list_images() + self.assertEqual(len(ret), 11) + image = ret[0] + self.assertEqual(image.name, 'CentOS 5.2') + self.assertEqual(image.id, '2') + + def test_reboot_node(self): + node = Node(id=1, 
name=None, state=None, public_ip=None, private_ip=None, + driver=self.driver) + + ret = node.reboot() + self.assertTrue(ret is True) + + ret = self.driver.reboot_node(node) + self.assertTrue(ret is True) + + SlicehostMockHttp.type = 'FORBIDDEN' + try: + ret = self.driver.reboot_node(node) + except Exception, e: + self.assertEqual(e.args[0], 'Permission denied') + else: + self.fail('test should have thrown') + + def test_destroy_node(self): + node = Node(id=1, name=None, state=None, public_ip=None, private_ip=None, + driver=self.driver) + + ret = node.destroy() + self.assertTrue(ret is True) + + ret = self.driver.destroy_node(node) + self.assertTrue(ret is True) + + def test_create_node(self): + image = NodeImage(id=11, name='ubuntu 8.10', driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) + node = self.driver.create_node(name='slicetest', image=image, size=size) + self.assertEqual(node.name, 'slicetest') + self.assertEqual(node.extra.get('password'), 'fooadfa1231') + +class SlicehostMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('slicehost') + + def _slices_xml(self, method, url, body, headers): + if method == 'POST': + tree = ET.XML(body) + name = tree.findtext('name') + image_id = int(tree.findtext('image-id')) + flavor_id = int(tree.findtext('flavor-id')) + + # TODO: would be awesome to get the slicehost api developers to fill in the + # the correct validation logic + if not (name and image_id and flavor_id) \ + or tree.tag != 'slice' \ + or not headers.has_key('Content-Type') \ + or headers['Content-Type'] != 'application/xml': + + err_body = self.fixtures.load('slices_error.xml') + return (httplib.UNPROCESSABLE_ENTITY, err_body, {}, '') + + body = self.fixtures.load('slices_post.xml') + return (httplib.CREATED, body, {}, '') + else: + body = self.fixtures.load('slices_get.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _slices_xml_UNAUTHORIZED(self, method, url, body, headers): 
+ err_body = 'HTTP Basic: Access denied.' + return (httplib.UNAUTHORIZED, err_body, {}, + httplib.responses[httplib.UNAUTHORIZED]) + + def _flavors_xml(self, method, url, body, headers): + body = self.fixtures.load('flavors.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _images_xml(self, method, url, body, headers): + body = self.fixtures.load('images.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _slices_1_reboot_xml(self, method, url, body, headers): + body = self.fixtures.load('slices_1_reboot.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _slices_1_reboot_xml_FORBIDDEN(self, method, url, body, headers): + body = self.fixtures.load('slices_1_reboot_forbidden.xml') + return (httplib.FORBIDDEN, body, {}, httplib.responses[httplib.FORBIDDEN]) + + def _slices_1_destroy_xml(self, method, url, body, headers): + body = '' + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_softlayer.py b/trunk/test/compute/test_softlayer.py new file mode 100644 index 0000000000..74b0204956 --- /dev/null +++ b/trunk/test/compute/test_softlayer.py @@ -0,0 +1,91 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import httplib +import unittest +import sys + +from xml.etree import ElementTree as ET +import xmlrpclib + +from libcloud.compute.drivers.softlayer import SoftLayerNodeDriver as SoftLayer +from libcloud.compute.types import NodeState + +from test import MockHttp # pylint: disable-msg=E0611 +from test.file_fixtures import ComputeFileFixtures # pylint: disable-msg=E0611 +from test.secrets import SOFTLAYER_PARAMS + +class MockSoftLayerTransport(xmlrpclib.Transport): + + def request(self, host, handler, request_body, verbose=0): + self.verbose = 0 + method = ET.XML(request_body).find('methodName').text + mock = SoftLayerMockHttp(host, 80) + mock.request('POST', "%s/%s" % (handler, method)) + resp = mock.getresponse() + + if sys.version[0] == '2' and sys.version[2] == '7': + response = self.parse_response(resp) + else: + response = self.parse_response(resp.body) + return response + +class SoftLayerTests(unittest.TestCase): + + def setUp(self): + SoftLayer.connectionCls.proxyCls.transportCls = [ + MockSoftLayerTransport, MockSoftLayerTransport] + self.driver = SoftLayer(*SOFTLAYER_PARAMS) + + def test_list_nodes(self): + node = self.driver.list_nodes()[0] + self.assertEqual(node.name, 'test1') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.extra['password'], 'TEST') + + def test_list_locations(self): + locations = self.driver.list_locations() + seattle = (l for l in locations if l.name == 'sea01').next() + self.assertEqual(seattle.country, 'US') + self.assertEqual(seattle.id, '18171') + + def test_list_images(self): + images = self.driver.list_images() + image = images[0] + self.assertEqual(image.id, '1684') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + self.assertEqual(len(sizes), 2) + self.assertEqual(sizes[0].id, 'sl1') + +class SoftLayerMockHttp(MockHttp): + fixtures = ComputeFileFixtures('softlayer') + + def 
_xmlrpc_v3__SoftLayer_Account_getVirtualGuests( + self, method, url, body, headers): + + body = self.fixtures.load('v3_SoftLayer_Account_getVirtualGuests.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3__SoftLayer_Location_Datacenter_getDatacenters( + self, method, url, body, headers): + + body = self.fixtures.load( + 'v3_SoftLayer_Location_Datacenter_getDatacenters.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_ssh_client.py b/trunk/test/compute/test_ssh_client.py new file mode 100644 index 0000000000..921ba4c4ec --- /dev/null +++ b/trunk/test/compute/test_ssh_client.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or more§ +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +import libcloud.compute.ssh + +from mock import Mock + + +class ParamikoSSHClientTests(unittest.TestCase): + + def test_either_key_or_password_must_be_provided(self): + libcloud.compute.ssh.paramiko = Mock() + client = libcloud.compute.ssh.ParamikoSSHClient(hostname='foo.bar.com') + + try: + client.connect() + except Exception, e: + self.assertTrue(str(e).find('must specify either password or') + != -1) + else: + self.fail('Exception was not thrown') + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_vcloud.py b/trunk/test/compute/test_vcloud.py new file mode 100644 index 0000000000..362cfd823d --- /dev/null +++ b/trunk/test/compute/test_vcloud.py @@ -0,0 +1,142 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest +import httplib + +from libcloud.compute.drivers.vcloud import TerremarkDriver +from libcloud.compute.drivers.vcloud import VCloudNodeDriver +from libcloud.compute.base import Node +from libcloud.compute.types import NodeState + +from test import MockHttp +from test.compute import TestCaseMixin +from test.file_fixtures import ComputeFileFixtures + +from test.secrets import VCLOUD_PARAMS + +class TerremarkTests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + VCloudNodeDriver.connectionCls.host = "test" + VCloudNodeDriver.connectionCls.conn_classes = (None, TerremarkMockHttp) + TerremarkMockHttp.type = None + self.driver = TerremarkDriver(*VCLOUD_PARAMS) + + def test_list_images(self): + ret = self.driver.list_images() + self.assertEqual(ret[0].id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vAppTemplate/5') + + def test_list_sizes(self): + ret = self.driver.list_sizes() + self.assertEqual(ret[0].ram, 512) + + def test_create_node(self): + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + node = self.driver.create_node( + name='testerpart2', + image=image, + size=size, + vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224', + network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725', + cpus=2, + ) + self.assertTrue(isinstance(node, Node)) + self.assertEqual(node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031') + self.assertEqual(node.name, 'testerpart2') + + def test_list_nodes(self): + ret = self.driver.list_nodes() + node = ret[0] + self.assertEqual(node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031') + self.assertEqual(node.name, 'testerpart2') + self.assertEqual(node.state, NodeState.RUNNING) + self.assertEqual(node.public_ip, []) + self.assertEqual(node.private_ip, ['10.112.78.69']) + + def test_reboot_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.reboot_node(node) + 
self.assertTrue(ret) + + def test_destroy_node(self): + node = self.driver.list_nodes()[0] + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + + +class TerremarkMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('terremark') + + def _api_v0_8_login(self, method, url, body, headers): + headers['set-cookie'] = 'vcloud-token=testtoken' + body = self.fixtures.load('api_v0_8_login.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_org_240(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_org_240.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_vdc_224(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vdc_224.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_vdc_224_catalog(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vdc_224_catalog.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_catalogItem_5(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_catalogItem_5.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_vdc_224_action_instantiateVAppTemplate(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vdc_224_action_instantiateVAppTemplate.xml') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _api_v0_8_vapp_14031_action_deploy(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vapp_14031_action_deploy.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_task_10496(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_task_10496.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_vapp_14031_power_action_powerOn(self, method, url, body, headers): + body = 
self.fixtures.load('api_v0_8_vapp_14031_power_action_powerOn.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_vapp_14031(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('api_v0_8_vapp_14031_get.xml') + elif method == 'DELETE': + body = '' + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_vapp_14031_power_action_reset(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vapp_14031_power_action_reset.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_vapp_14031_power_action_poweroff(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_vapp_14031_power_action_poweroff.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _api_v0_8_task_11001(self, method, url, body, headers): + body = self.fixtures.load('api_v0_8_task_11001.xml') + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_voxel.py b/trunk/test/compute/test_voxel.py new file mode 100644 index 0000000000..79935f06f3 --- /dev/null +++ b/trunk/test/compute/test_voxel.py @@ -0,0 +1,161 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest +import httplib + +from libcloud.compute.base import Node, NodeSize, NodeImage, NodeLocation +from libcloud.compute.drivers.voxel import VoxelNodeDriver as Voxel +from libcloud.compute.types import InvalidCredsError + +from test import MockHttp +from test.file_fixtures import ComputeFileFixtures + +from test.secrets import VOXEL_PARAMS + +class VoxelTest(unittest.TestCase): + + def setUp(self): + + Voxel.connectionCls.conn_classes = (None, VoxelMockHttp) + VoxelMockHttp.type = None + self.driver = Voxel(*VOXEL_PARAMS) + + def test_auth_failed(self): + VoxelMockHttp.type = 'UNAUTHORIZED' + try: + self.driver.list_nodes() + except Exception, e: + self.assertTrue(isinstance(e, InvalidCredsError)) + else: + self.fail('test should have thrown') + + def test_response_failure(self): + VoxelMockHttp.type = 'FAILURE' + + try: + self.driver.list_nodes() + except Exception: + pass + else: + self.fail('Invalid response, but exception was not thrown') + + def test_list_nodes(self): + VoxelMockHttp.type = 'LIST_NODES' + nodes = self.driver.list_nodes() + + self.assertEqual(len(nodes), 1) + self.assertEqual(nodes[0].name, 'www.voxel.net') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + + self.assertEqual(len(sizes), 13) + + def test_list_images(self): + VoxelMockHttp.type = 'LIST_IMAGES' + images = self.driver.list_images() + + self.assertEqual(len(images), 1) + + def test_list_locations(self): + VoxelMockHttp.type = 'LIST_LOCATIONS' + locations = self.driver.list_locations() + + self.assertEqual(len(locations), 2) + 
self.assertEqual(locations[0].name, 'Amsterdam') + + def test_create_node_invalid_disk_size(self): + image = NodeImage(id=1, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) + location = NodeLocation(id=1, name='Europe', country='England', + driver=self.driver) + + try: + self.driver.create_node(name='foo', image=image, size=size, + location=location) + except ValueError: + pass + else: + self.fail('Invalid disk size provided but an exception was not' + ' thrown') + + def test_create_node(self): + VoxelMockHttp.type = 'CREATE_NODE' + image = NodeImage(id=1, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize(1, '256 slice', 1024, 500, None, None, driver=self.driver) + location = NodeLocation(id=1, name='Europe', country='England', + driver=self.driver) + + node = self.driver.create_node(name='foo', image=image, size=size, + location=location) + self.assertEqual(node.id, '1234') + + node = self.driver.create_node(name='foo', image=image, size=size, + location=location, voxel_access=True) + self.assertEqual(node.id, '1234') + + def test_reboot_node(self): + VoxelMockHttp.type = 'REBOOT_NODE' + node = Node(id=72258, name=None, state=None, public_ip=None, private_ip=None, + driver=self.driver) + + self.assertTrue(node.reboot()) + + def test_destroy_node(self): + VoxelMockHttp.type = 'DESTROY_NODE' + node = Node(id=72258, name=None, state=None, public_ip=None, private_ip=None, + driver=self.driver) + + self.assertTrue(node.destroy()) + +class VoxelMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('voxel') + + def _UNAUTHORIZED(self, method, url, body, headers): + body = self.fixtures.load('unauthorized.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _FAILURE(self, method, url, body, headers): + body = self.fixtures.load('failure.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _LIST_NODES(self, method, url, body, 
headers): + body = self.fixtures.load('nodes.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _LIST_IMAGES(self, method, url, body, headers): + body = self.fixtures.load('images.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _LIST_LOCATIONS(self, method, url, body, headers): + body = self.fixtures.load('locations.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _CREATE_NODE(self, method, url, body, headers): + body = self.fixtures.load('create_node.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _REBOOT_NODE(self, method, url, body, headers): + body = self.fixtures.load('success.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _DESTROY_NODE(self, method, url, body, headers): + body = self.fixtures.load('success.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/compute/test_vpsnet.py b/trunk/test/compute/test_vpsnet.py new file mode 100644 index 0000000000..48effc3824 --- /dev/null +++ b/trunk/test/compute/test_vpsnet.py @@ -0,0 +1,209 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest +import exceptions +import httplib + +from libcloud.compute.drivers.vpsnet import VPSNetNodeDriver +from libcloud.compute.base import Node +from libcloud.compute.types import NodeState + +from test import MockHttp +from test.compute import TestCaseMixin + +from test.secrets import VPSNET_PARAMS + +class VPSNetTests(unittest.TestCase, TestCaseMixin): + + def setUp(self): + VPSNetNodeDriver.connectionCls.conn_classes = (None, VPSNetMockHttp) + self.driver = VPSNetNodeDriver(*VPSNET_PARAMS) + + def test_create_node(self): + VPSNetMockHttp.type = 'create' + image = self.driver.list_images()[0] + size = self.driver.list_sizes()[0] + node = self.driver.create_node('foo', image, size) + self.assertEqual(node.name, 'foo') + + def test_list_nodes(self): + VPSNetMockHttp.type = 'virtual_machines' + node = self.driver.list_nodes()[0] + self.assertEqual(node.id, '1384') + self.assertEqual(node.state, NodeState.RUNNING) + + def test_reboot_node(self): + VPSNetMockHttp.type = 'virtual_machines' + node = self.driver.list_nodes()[0] + + VPSNetMockHttp.type = 'reboot' + ret = self.driver.reboot_node(node) + self.assertEqual(ret, True) + + def test_destroy_node(self): + VPSNetMockHttp.type = 'delete' + node = Node('2222', None, None, None, None, self.driver) + ret = self.driver.destroy_node(node) + self.assertTrue(ret) + VPSNetMockHttp.type = 'delete_fail' + node = Node('2223', None, None, None, None, self.driver) + self.assertRaises(exceptions.Exception, self.driver.destroy_node, node) + + def test_list_images(self): + VPSNetMockHttp.type = 'templates' + ret = self.driver.list_images() + self.assertEqual(ret[0].id, '9') + self.assertEqual(ret[-1].id, '160') + + def test_list_sizes(self): + VPSNetMockHttp.type = 'sizes' + ret = self.driver.list_sizes() + self.assertEqual(len(ret), 1) + self.assertEqual(ret[0].id, '1') + self.assertEqual(ret[0].name, '1 Node') + + def test_destroy_node_response(self): + # should return a node object + node = Node('2222', 
None, None, None, None, self.driver) + VPSNetMockHttp.type = 'delete' + ret = self.driver.destroy_node(node) + self.assertTrue(isinstance(ret, bool)) + + def test_reboot_node_response(self): + # should return a node object + VPSNetMockHttp.type = 'virtual_machines' + node = self.driver.list_nodes()[0] + VPSNetMockHttp.type = 'reboot' + ret = self.driver.reboot_node(node) + self.assertTrue(isinstance(ret, bool)) + + + +class VPSNetMockHttp(MockHttp): + + + def _nodes_api10json_sizes(self, method, url, body, headers): + body = """[{"slice":{"virtual_machine_id":8592,"id":12256,"consumer_id":0}}, + {"slice":{"virtual_machine_id":null,"id":12258,"consumer_id":0}}, + {"slice":{"virtual_machine_id":null,"id":12434,"consumer_id":0}}]""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _nodes_api10json_create(self, method, url, body, headers): + body = """[{"slice":{"virtual_machine_id":8592,"id":12256,"consumer_id":0}}, + {"slice":{"virtual_machine_id":null,"id":12258,"consumer_id":0}}, + {"slice":{"virtual_machine_id":null,"id":12434,"consumer_id":0}}]""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _virtual_machines_2222_api10json_delete_fail(self, method, url, body, headers): + return (httplib.FORBIDDEN, '', {}, httplib.responses[httplib.FORBIDDEN]) + + def _virtual_machines_2222_api10json_delete(self, method, url, body, headers): + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _virtual_machines_1384_reboot_api10json_reboot(self, method, url, body, headers): + body = """{ + "virtual_machine": + { + "running": true, + "updated_at": "2009-05-15T06:55:02-04:00", + "power_action_pending": false, + "system_template_id": 41, + "id": 1384, + "cloud_id": 3, + "domain_name": "demodomain.com", + "hostname": "web01", + "consumer_id": 0, + "backups_enabled": false, + "password": "a8hjsjnbs91", + "label": "foo", + "slices_count": null, + "created_at": "2009-04-16T08:17:39-04:00" + } + }""" + return (httplib.OK, 
body, {}, httplib.responses[httplib.OK]) + + def _virtual_machines_api10json_create(self, method, url, body, headers): + body = """{ + "virtual_machine": + { + "running": true, + "updated_at": "2009-05-15T06:55:02-04:00", + "power_action_pending": false, + "system_template_id": 41, + "id": 1384, + "cloud_id": 3, + "domain_name": "demodomain.com", + "hostname": "web01", + "consumer_id": 0, + "backups_enabled": false, + "password": "a8hjsjnbs91", + "label": "foo", + "slices_count": null, + "created_at": "2009-04-16T08:17:39-04:00" + } + }""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _virtual_machines_api10json_virtual_machines(self, method, url, body, headers): + body = """ [{ + "virtual_machine": + { + "running": true, + "updated_at": "2009-05-15T06:55:02-04:00", + "power_action_pending": false, + "system_template_id": 41, + "id": 1384, + "cloud_id": 3, + "domain_name": "demodomain.com", + "hostname": "web01", + "consumer_id": 0, + "backups_enabled": false, + "password": "a8hjsjnbs91", + "label": "Web Server 01", + "slices_count": null, + "created_at": "2009-04-16T08:17:39-04:00" + } + }, + { + "virtual_machine": + { + "running": true, + "updated_at": "2009-05-15T06:55:02-04:00", + "power_action_pending": false, + "system_template_id": 41, + "id": 1385, + "cloud_id": 3, + "domain_name": "demodomain.com", + "hostname": "mysql01", + "consumer_id": 0, + "backups_enabled": false, + "password": "dsi8h38hd2s", + "label": "MySQL Server 01", + "slices_count": null, + "created_at": "2009-04-16T08:17:39-04:00" + } + }]""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _available_clouds_api10json_templates(self, method, url, body, headers): + body = """[{"cloud":{"system_templates":[{"id":9,"label":"Ubuntu 8.04 x64"},{"id":10,"label":"CentOS 5.2 x64"},{"id":11,"label":"Gentoo 2008.0 x64"},{"id":18,"label":"Ubuntu 8.04 x64 LAMP"},{"id":19,"label":"Ubuntu 8.04 x64 MySQL"},{"id":20,"label":"Ubuntu 8.04 x64 
Postfix"},{"id":21,"label":"Ubuntu 8.04 x64 Apache"},{"id":22,"label":"CentOS 5.2 x64 MySQL"},{"id":23,"label":"CentOS 5.2 x64 LAMP"},{"id":24,"label":"CentOS 5.2 x64 HAProxy"},{"id":25,"label":"CentOS 5.2 x64 Postfix"},{"id":26,"label":"CentOS 5.2 x64 Varnish"},{"id":27,"label":"CentOS 5.2 x64 Shoutcast"},{"id":28,"label":"CentOS 5.2 x64 Apache"},{"id":40,"label":"cPanel"},{"id":42,"label":"Debian 5.0 (Lenny) x64"},{"id":58,"label":"Django on Ubuntu 8.04 (x86)"},{"id":59,"label":"Drupal 5 on Ubuntu 8.04 (x86)"},{"id":60,"label":"Drupal 6 on Ubuntu 8.04 (x86)"},{"id":61,"label":"Google App Engine on Ubuntu 8.04 (x86)"},{"id":62,"label":"LAMP on Ubuntu 8.04 (x86)"},{"id":63,"label":"LAPP on Ubuntu 8.04 (x86)"},{"id":64,"label":"MediaWiki on Ubuntu 8.04 (x86)"},{"id":65,"label":"MySQL on Ubuntu 8.04 (x86)"},{"id":66,"label":"phpBB on Ubuntu 8.04 (x86)"},{"id":67,"label":"PostgreSQL on Ubuntu 8.04 (x86)"},{"id":68,"label":"Rails on Ubuntu 8.04 (x86)"},{"id":69,"label":"Tomcat on Ubuntu 8.04 (x86)"},{"id":70,"label":"Wordpress on Ubuntu 8.04 (x86)"},{"id":71,"label":"Joomla on Ubuntu 8.04 (x86)"},{"id":72,"label":"Ubuntu 8.04 Default Install (turnkey)"},{"id":128,"label":"CentOS Optimised"},{"id":129,"label":"Optimised CentOS + Apache + MySQL + PHP"},{"id":130,"label":"Optimised CentOS + Apache + MySQL + Ruby"},{"id":131,"label":"Optimised CentOS + Apache + MySQL + Ruby + PHP"},{"id":132,"label":"Debian Optimised"},{"id":133,"label":"Optimised Debian + Apache + MySQL + PHP"},{"id":134,"label":"Optimised Debian + NGINX + MySQL + PHP"},{"id":135,"label":"Optimised Debian + Lighttpd + MySQL + PHP"},{"id":136,"label":"Optimised Debian + Apache + MySQL + Ruby + PHP"},{"id":137,"label":"Optimised Debian + Apache + MySQL + Ruby"},{"id":138,"label":"Optimised Debian + NGINX + MySQL + Ruby + PHP"},{"id":139,"label":"Optimised Debian + NGINX + MySQL + Ruby"},{"id":140,"label":"Optimised Debian + Apache + MySQL + PHP + Magento"},{"id":141,"label":"Optimised Debian + NGINX + MySQL 
+ PHP + Magento"},{"id":142,"label":"Optimised Debian + Lighttpd + MySQL + PHP + Wordpress"}],"id":2,"label":"USA VPS Cloud"}},{"cloud":{"system_templates":[{"id":15,"label":"Ubuntu 8.04 x64"},{"id":16,"label":"CentOS 5.2 x64"},{"id":17,"label":"Gentoo 2008.0 x64"},{"id":29,"label":"Ubuntu 8.04 x64 LAMP"},{"id":30,"label":"Ubuntu 8.04 x64 MySQL"},{"id":31,"label":"Ubuntu 8.04 x64 Postfix"},{"id":32,"label":"Ubuntu 8.04 x64 Apache"},{"id":33,"label":"CentOS 5.2 x64 MySQL"},{"id":34,"label":"CentOS 5.2 x64 LAMP"},{"id":35,"label":"CentOS 5.2 x64 HAProxy"},{"id":36,"label":"CentOS 5.2 x64 Postfix"},{"id":37,"label":"CentOS 5.2 x64 Varnish"},{"id":38,"label":"CentOS 5.2 x64 Shoutcast"},{"id":39,"label":"CentOS 5.2 x64 Apache"},{"id":41,"label":"cPanel"},{"id":43,"label":"Debian 5.0 (Lenny) x64"},{"id":44,"label":"Django on Ubuntu 8.04 (x86)"},{"id":45,"label":"Drupal 5 on Ubuntu 8.04 (x86)"},{"id":46,"label":"Drupal 6 on Ubuntu 8.04 (x86)"},{"id":47,"label":"Google App Engine on Ubuntu 8.04 (x86)"},{"id":48,"label":"LAMP on Ubuntu 8.04 (x86)"},{"id":49,"label":"LAPP on Ubuntu 8.04 (x86)"},{"id":50,"label":"MediaWiki on Ubuntu 8.04 (x86)"},{"id":51,"label":"MySQL on Ubuntu 8.04 (x86)"},{"id":52,"label":"phpBB on Ubuntu 8.04 (x86)"},{"id":53,"label":"PostgreSQL on Ubuntu 8.04 (x86)"},{"id":54,"label":"Rails on Ubuntu 8.04 (x86)"},{"id":55,"label":"Tomcat on Ubuntu 8.04 (x86)"},{"id":56,"label":"Wordpress on Ubuntu 8.04 (x86)"},{"id":57,"label":"Joomla on Ubuntu 8.04 (x86)"},{"id":73,"label":"Ubuntu 8.04 Default Install (turnkey)"},{"id":148,"label":"CentOS Optimised"},{"id":149,"label":"Optimised CentOS + Apache + MySQL + PHP"},{"id":150,"label":"Optimised CentOS + Apache + MySQL + Ruby"},{"id":151,"label":"Optimised CentOS + Apache + MySQL + Ruby + PHP"},{"id":152,"label":"Debian Optimised"},{"id":153,"label":"Optimised Debian + Apache + MySQL + PHP"},{"id":154,"label":"Optimised Debian + NGINX + MySQL + PHP"},{"id":155,"label":"Optimised Debian + Lighttpd + MySQL + 
PHP"},{"id":156,"label":"Optimised Debian + Apache + MySQL + Ruby + PHP"},{"id":157,"label":"Optimised Debian + Apache + MySQL + Ruby"},{"id":158,"label":"Optimised Debian + NGINX + MySQL + Ruby + PHP"},{"id":159,"label":"Optimised Debian + NGINX + MySQL + Ruby"},{"id":160,"label":"Optimised Debian + Lighttpd + MySQL + PHP + Wordpress"}],"id":3,"label":"UK VPS Cloud"}}]""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _available_clouds_api10json_create(self, method, url, body, headers): + body = """[{"cloud":{"system_templates":[{"id":9,"label":"Ubuntu 8.04 x64"}],"id":2,"label":"USA VPS Cloud"}}]""" + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/dns/__init__.py b/trunk/test/dns/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/trunk/test/dns/fixtures/linode/create_domain.json b/trunk/test/dns/fixtures/linode/create_domain.json new file mode 100644 index 0000000000..566cfa2f09 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/create_domain.json @@ -0,0 +1,7 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.create", + "DATA":{ + "DomainID":5123 + } +} diff --git a/trunk/test/dns/fixtures/linode/create_domain_validation_error.json b/trunk/test/dns/fixtures/linode/create_domain_validation_error.json new file mode 100644 index 0000000000..5a9be30ed4 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/create_domain_validation_error.json @@ -0,0 +1 @@ +{"ERRORARRAY":[{"ERRORCODE":2,"ERRORMESSAGE":"The domain 'test.foo3455.net' already exists in our database. 
Please open a ticket if you think this is in error."},{"ERRORCODE":2,"ERRORMESSAGE":"Invalid domain type."}],"DATA":{},"ACTION":"domain.create"} diff --git a/trunk/test/dns/fixtures/linode/create_resource.json b/trunk/test/dns/fixtures/linode/create_resource.json new file mode 100644 index 0000000000..78f3e691e1 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/create_resource.json @@ -0,0 +1,7 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.resource.create", + "DATA":{ + "ResourceID":28537 + } +} diff --git a/trunk/test/dns/fixtures/linode/delete_domain.json b/trunk/test/dns/fixtures/linode/delete_domain.json new file mode 100644 index 0000000000..4c580d5d76 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/delete_domain.json @@ -0,0 +1,7 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.delete", + "DATA":{ + "DomainID":5123 + } +} diff --git a/trunk/test/dns/fixtures/linode/delete_domain_does_not_exist.json b/trunk/test/dns/fixtures/linode/delete_domain_does_not_exist.json new file mode 100644 index 0000000000..5c1d4023d1 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/delete_domain_does_not_exist.json @@ -0,0 +1 @@ +{"ERRORARRAY":[{"ERRORCODE":5,"ERRORMESSAGE":"Object not found"}],"DATA":{},"ACTION":"domain.delete"} diff --git a/trunk/test/dns/fixtures/linode/delete_resource.json b/trunk/test/dns/fixtures/linode/delete_resource.json new file mode 100644 index 0000000000..0f73816e74 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/delete_resource.json @@ -0,0 +1,7 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.delete", + "DATA":{ + "ResourceID":5123 + } +} diff --git a/trunk/test/dns/fixtures/linode/delete_resource_does_not_exist.json b/trunk/test/dns/fixtures/linode/delete_resource_does_not_exist.json new file mode 100644 index 0000000000..3b90911763 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/delete_resource_does_not_exist.json @@ -0,0 +1 @@ +{"ERRORARRAY":[{"ERRORCODE":5,"ERRORMESSAGE":"Object not found"}],"DATA":{},"ACTION":"domain.resource.delete"} diff 
--git a/trunk/test/dns/fixtures/linode/domain_list.json b/trunk/test/dns/fixtures/linode/domain_list.json new file mode 100644 index 0000000000..437d32b8e5 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/domain_list.json @@ -0,0 +1,32 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.list", + "DATA":[ + { + "DOMAINID":5093, + "DESCRIPTION":"", + "TYPE":"master", + "STATUS":1, + "SOA_EMAIL":"dns@example.com", + "DOMAIN":"linode.com", + "RETRY_SEC":0, + "MASTER_IPS":"", + "EXPIRE_SEC":0, + "REFRESH_SEC":0, + "TTL_SEC":0 + }, + { + "DOMAINID":5125, + "DESCRIPTION":"", + "TYPE":"slave", + "STATUS":1, + "SOA_EMAIL":"", + "DOMAIN":"nodefs.com", + "RETRY_SEC":0, + "MASTER_IPS":"1.3.5.7;2.4.6.8;", + "EXPIRE_SEC":0, + "REFRESH_SEC":0, + "TTL_SEC":0 + } + ] +} diff --git a/trunk/test/dns/fixtures/linode/get_record.json b/trunk/test/dns/fixtures/linode/get_record.json new file mode 100644 index 0000000000..ce050e3e42 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/get_record.json @@ -0,0 +1,18 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.resource.list", + "DATA":[ + { + "PROTOCOL":"", + "TTL_SEC":0, + "PRIORITY":0, + "TYPE":"A", + "TARGET":"75.127.96.245", + "WEIGHT":0, + "RESOURCEID":28536, + "PORT":0, + "DOMAINID":5093, + "NAME":"www" + } + ] +} diff --git a/trunk/test/dns/fixtures/linode/get_record_does_not_exist.json b/trunk/test/dns/fixtures/linode/get_record_does_not_exist.json new file mode 100644 index 0000000000..33438c32c2 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/get_record_does_not_exist.json @@ -0,0 +1,5 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.resource.list", + "DATA":[] +} diff --git a/trunk/test/dns/fixtures/linode/get_zone.json b/trunk/test/dns/fixtures/linode/get_zone.json new file mode 100644 index 0000000000..61215ff3ba --- /dev/null +++ b/trunk/test/dns/fixtures/linode/get_zone.json @@ -0,0 +1,19 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.list", + "DATA":[ + { + "DOMAINID":5093, + "DESCRIPTION":"", + "TYPE":"master", + "STATUS":1, + 
"SOA_EMAIL":"dns@example.com", + "DOMAIN":"linode.com", + "RETRY_SEC":0, + "MASTER_IPS":"", + "EXPIRE_SEC":0, + "REFRESH_SEC":0, + "TTL_SEC":0 + } + ] +} diff --git a/trunk/test/dns/fixtures/linode/get_zone_does_not_exist.json b/trunk/test/dns/fixtures/linode/get_zone_does_not_exist.json new file mode 100644 index 0000000000..d6424b5053 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/get_zone_does_not_exist.json @@ -0,0 +1,5 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.list", + "DATA":[] +} diff --git a/trunk/test/dns/fixtures/linode/resource_list.json b/trunk/test/dns/fixtures/linode/resource_list.json new file mode 100644 index 0000000000..a6bf13bf5e --- /dev/null +++ b/trunk/test/dns/fixtures/linode/resource_list.json @@ -0,0 +1,30 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.resource.list", + "DATA":[ + { + "PROTOCOL":"", + "TTL_SEC":0, + "PRIORITY":0, + "TYPE":"A", + "TARGET":"75.127.96.245", + "WEIGHT":0, + "RESOURCEID":28536, + "PORT":0, + "DOMAINID":5093, + "NAME":"www" + }, + { + "PROTOCOL":"", + "TTL_SEC":0, + "PRIORITY":0, + "TYPE":"A", + "TARGET":"75.127.96.245", + "WEIGHT":0, + "RESOURCEID":28537, + "PORT":0, + "DOMAINID":5093, + "NAME":"mail" + } + ] +} diff --git a/trunk/test/dns/fixtures/linode/resource_list_does_not_exist.json b/trunk/test/dns/fixtures/linode/resource_list_does_not_exist.json new file mode 100644 index 0000000000..fe11275546 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/resource_list_does_not_exist.json @@ -0,0 +1 @@ +{"ERRORARRAY":[{"ERRORCODE":5,"ERRORMESSAGE":"Object not found"}],"DATA":{},"ACTION":"domain.resource.list"} diff --git a/trunk/test/dns/fixtures/linode/update_domain.json b/trunk/test/dns/fixtures/linode/update_domain.json new file mode 100644 index 0000000000..a66b227d2b --- /dev/null +++ b/trunk/test/dns/fixtures/linode/update_domain.json @@ -0,0 +1,7 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.update", + "DATA":{ + "DomainID":5093 + } +} diff --git a/trunk/test/dns/fixtures/linode/update_resource.json 
b/trunk/test/dns/fixtures/linode/update_resource.json new file mode 100644 index 0000000000..bcf0a85af0 --- /dev/null +++ b/trunk/test/dns/fixtures/linode/update_resource.json @@ -0,0 +1,7 @@ +{ + "ERRORARRAY":[], + "ACTION":"domain.resource.update", + "DATA":{ + "ResourceID":28537 + } +} diff --git a/trunk/test/dns/fixtures/rackspace/auth_1_1.json b/trunk/test/dns/fixtures/rackspace/auth_1_1.json new file mode 100644 index 0000000000..1b5f6c1a83 --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/auth_1_1.json @@ -0,0 +1,31 @@ +{ + "auth":{ + "token":{ + "id":"fooo-bar-fooo-bar-fooo-bar", + "expires":"2011-10-29T17:39:28.000-05:00" + }, + "serviceCatalog":{ + "cloudFilesCDN":[ + { + "region":"ORD", + "publicURL":"https:\/\/cdn2.clouddrive.com\/v1\/MossoCloudFS_f66473fb-2e1e-4a44-barr-foooooo", + "v1Default":true + } + ], + "cloudFiles":[ + { + "region":"ORD", + "publicURL":"https:\/\/storage101.ord1.clouddrive.com\/v1\/MossoCloudFS_fbarr-foooo-barr", + "v1Default":true, + "internalURL":"https:\/\/snet-storage101.ord1.clouddrive.com\/v1\/MossoCloudFS_fbarr-foooo-barr" + } + ], + "cloudServers":[ + { + "publicURL":"https:\/\/servers.api.rackspacecloud.com\/v1.0\/11111", + "v1Default":true + } + ] + } + } +} diff --git a/trunk/test/dns/fixtures/rackspace/create_record_success.json b/trunk/test/dns/fixtures/rackspace/create_record_success.json new file mode 100644 index 0000000000..7d09b7e48c --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/create_record_success.json @@ -0,0 +1,21 @@ +{ + "request":"{\"records\": [{\"data\": \"127.1.1.1\", \"type\": \"A\", \"name\": \"www.foo4.bar.com\"}]}", + "response":{ + "records":[ + { + "name":"www.foo4.bar.com", + "id":"A-7423317", + "type":"A", + "data":"127.1.1.1", + "updated":"2011-10-29T20:50:41.000+0000", + "ttl":3600, + "created":"2011-10-29T20:50:41.000+0000" + } + ] + }, + "status":"COMPLETED", + "verb":"POST", + "jobId":"586605c8-5739-43fb-8939-f3a2c4c0e99c", + 
"callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/546514/status/586605c8-5739-43fb-8939-f3a2c4c0e99c", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/546514/domains/2946173/records" +} diff --git a/trunk/test/dns/fixtures/rackspace/create_zone_success.json b/trunk/test/dns/fixtures/rackspace/create_zone_success.json new file mode 100644 index 0000000000..b53a7af72f --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/create_zone_success.json @@ -0,0 +1,29 @@ +{ + "request":"{\"domains\": [{\"recordsList\": {\"records\": []}, \"emailAddress\": \"test@test.com\", \"name\": \"bar.foo1.com\"}]}", + "response":{ + "domains":[ + { + "name":"bar.foo1.com", + "id":2946173, + "accountId":11111, + "updated":"2011-10-29T20:28:59.000+0000", + "ttl":3600, + "emailAddress":"test@test.com", + "nameservers":[ + { + "name":"ns.rackspace.com" + }, + { + "name":"ns2.rackspace.com" + } + ], + "created":"2011-10-29T20:28:59.000+0000" + } + ] + }, + "status":"COMPLETED", + "verb":"POST", + "jobId":"288795f9-e74d-48be-880b-a9e36e0de61e", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/288795f9-e74d-48be-880b-a9e36e0de61e", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains" +} diff --git a/trunk/test/dns/fixtures/rackspace/create_zone_validation_error.json b/trunk/test/dns/fixtures/rackspace/create_zone_validation_error.json new file mode 100644 index 0000000000..f604873ba0 --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/create_zone_validation_error.json @@ -0,0 +1 @@ +{"validationErrors":{"messages":["Domain TTL is required and must be greater than or equal to 300"]},"code":400} diff --git a/trunk/test/dns/fixtures/rackspace/delete_record_success.json b/trunk/test/dns/fixtures/rackspace/delete_record_success.json new file mode 100644 index 0000000000..04b15833ba --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/delete_record_success.json @@ -0,0 +1,8 @@ +{ + "status":"COMPLETED", + "verb":"DELETE", + 
"jobId":"0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains/2946181/records/2346" +} + diff --git a/trunk/test/dns/fixtures/rackspace/delete_zone_success.json b/trunk/test/dns/fixtures/rackspace/delete_zone_success.json new file mode 100644 index 0000000000..4ea7528e0a --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/delete_zone_success.json @@ -0,0 +1,7 @@ +{ + "status":"COMPLETED", + "verb":"DELETE", + "jobId":"0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/0b40cd14-2e5d-490f-bb6e-fdc65d1118a9", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains/2946181" +} diff --git a/trunk/test/dns/fixtures/rackspace/does_not_exist.json b/trunk/test/dns/fixtures/rackspace/does_not_exist.json new file mode 100644 index 0000000000..e9ec9e21c2 --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/does_not_exist.json @@ -0,0 +1 @@ +{"message":"Object not Found.","code":404,"details":""} diff --git a/trunk/test/dns/fixtures/rackspace/get_record_success.json b/trunk/test/dns/fixtures/rackspace/get_record_success.json new file mode 100644 index 0000000000..d2d2df7d66 --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/get_record_success.json @@ -0,0 +1,10 @@ +{ + "name":"test3.foo4.bar.com", + "id":"A-7423034", + "type":"A", + "comment":"lulz", + "data":"127.7.7.7", + "updated":"2011-10-29T18:42:28.000+0000", + "ttl":777, + "created":"2011-10-29T15:29:29.000+0000" +} diff --git a/trunk/test/dns/fixtures/rackspace/get_zone_success.json b/trunk/test/dns/fixtures/rackspace/get_zone_success.json new file mode 100644 index 0000000000..5f0a6ce8cd --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/get_zone_success.json @@ -0,0 +1,51 @@ +{ + "name":"foo4.bar.com", + "id":2946063, + "comment":"wazaaa", + "accountId":11111, + 
"updated":"2011-10-29T18:42:28.000+0000", + "ttl":855, + "recordsList":{ + "records":[ + { + "name":"test3.foo4.bar.com", + "id":"A-7423034", + "type":"A", + "comment":"lulz", + "data":"127.7.7.7", + "updated":"2011-10-29T18:42:28.000+0000", + "ttl":777, + "created":"2011-10-29T15:29:29.000+0000" + }, + { + "name":"foo4.bar.com", + "id":"NS-6717885", + "type":"NS", + "data":"dns1.stabletransit.com", + "updated":"2011-10-29T14:47:09.000+0000", + "ttl":3600, + "created":"2011-10-29T14:47:09.000+0000" + }, + { + "name":"foo4.bar.com", + "id":"NS-6717886", + "type":"NS", + "data":"dns2.stabletransit.com", + "updated":"2011-10-29T14:47:09.000+0000", + "ttl":3600, + "created":"2011-10-29T14:47:09.000+0000" + } + ], + "totalEntries":3 + }, + "emailAddress":"test@test.com", + "nameservers":[ + { + "name":"ns.rackspace.com" + }, + { + "name":"ns2.rackspace.com" + } + ], + "created":"2011-10-29T14:47:09.000+0000" +} diff --git a/trunk/test/dns/fixtures/rackspace/list_records_no_results.json b/trunk/test/dns/fixtures/rackspace/list_records_no_results.json new file mode 100644 index 0000000000..dc64965aab --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/list_records_no_results.json @@ -0,0 +1,22 @@ +{ + "name":"foo4.bar.com", + "id":2946063, + "comment":"wazaaa", + "accountId":11111, + "updated":"2011-10-29T18:42:28.000+0000", + "ttl":855, + "recordsList":{ + "records":[], + "totalEntries":0 + }, + "emailAddress":"kami@kami.si", + "nameservers":[ + { + "name":"ns.rackspace.com" + }, + { + "name":"ns2.rackspace.com" + } + ], + "created":"2011-10-29T14:47:09.000+0000" +} diff --git a/trunk/test/dns/fixtures/rackspace/list_records_success.json b/trunk/test/dns/fixtures/rackspace/list_records_success.json new file mode 100644 index 0000000000..5f0a6ce8cd --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/list_records_success.json @@ -0,0 +1,51 @@ +{ + "name":"foo4.bar.com", + "id":2946063, + "comment":"wazaaa", + "accountId":11111, + 
"updated":"2011-10-29T18:42:28.000+0000", + "ttl":855, + "recordsList":{ + "records":[ + { + "name":"test3.foo4.bar.com", + "id":"A-7423034", + "type":"A", + "comment":"lulz", + "data":"127.7.7.7", + "updated":"2011-10-29T18:42:28.000+0000", + "ttl":777, + "created":"2011-10-29T15:29:29.000+0000" + }, + { + "name":"foo4.bar.com", + "id":"NS-6717885", + "type":"NS", + "data":"dns1.stabletransit.com", + "updated":"2011-10-29T14:47:09.000+0000", + "ttl":3600, + "created":"2011-10-29T14:47:09.000+0000" + }, + { + "name":"foo4.bar.com", + "id":"NS-6717886", + "type":"NS", + "data":"dns2.stabletransit.com", + "updated":"2011-10-29T14:47:09.000+0000", + "ttl":3600, + "created":"2011-10-29T14:47:09.000+0000" + } + ], + "totalEntries":3 + }, + "emailAddress":"test@test.com", + "nameservers":[ + { + "name":"ns.rackspace.com" + }, + { + "name":"ns2.rackspace.com" + } + ], + "created":"2011-10-29T14:47:09.000+0000" +} diff --git a/trunk/test/dns/fixtures/rackspace/list_zones_no_results.json b/trunk/test/dns/fixtures/rackspace/list_zones_no_results.json new file mode 100644 index 0000000000..86e3bc37e8 --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/list_zones_no_results.json @@ -0,0 +1,5 @@ +{ + "domains":[], + "totalEntries":0 +} + diff --git a/trunk/test/dns/fixtures/rackspace/list_zones_success.json b/trunk/test/dns/fixtures/rackspace/list_zones_success.json new file mode 100644 index 0000000000..b1f54c0ca6 --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/list_zones_success.json @@ -0,0 +1,53 @@ +{ + "domains":[ + { + "name":"foo4.bar.com", + "id":2946063, + "comment":"wazaaa", + "accountId":11111, + "updated":"2011-10-29T18:42:28.000+0000", + "created":"2011-10-29T14:47:09.000+0000" + }, + { + "name":"foo5.bar.com", + "id":2946065, + "comment":"fuu", + "accountId":11111, + "updated":"2011-10-29T14:48:39.000+0000", + "created":"2011-10-29T14:48:39.000+0000" + }, + { + "name":"foo6.bar.com", + "id":2946066, + "comment":"fuu", + "accountId":11111, + 
"updated":"2011-10-29T14:48:59.000+0000", + "created":"2011-10-29T14:48:58.000+0000" + }, + { + "name":"foo7.bar.com", + "id":2946068, + "comment":"fuu", + "accountId":11111, + "updated":"2011-10-29T14:49:14.000+0000", + "created":"2011-10-29T14:49:13.000+0000" + }, + { + "name":"foo8.bar.com", + "id":2946069, + "comment":"fuu", + "accountId":11111, + "updated":"2011-10-29T14:49:44.000+0000", + "created":"2011-10-29T14:49:43.000+0000" + }, + { + "name":"foo9.bar.com", + "id":2946071, + "comment":"fuu", + "accountId":11111, + "updated":"2011-10-29T14:54:45.000+0000", + "created":"2011-10-29T14:54:45.000+0000" + } + ], + "totalEntries":6 +} diff --git a/trunk/test/dns/fixtures/rackspace/unauthorized.json b/trunk/test/dns/fixtures/rackspace/unauthorized.json new file mode 100644 index 0000000000..2a6fac24c0 --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/unauthorized.json @@ -0,0 +1 @@ +{"unauthorized":{"message":"Username or api key is invalid","code":401}} diff --git a/trunk/test/dns/fixtures/rackspace/update_record_success.json b/trunk/test/dns/fixtures/rackspace/update_record_success.json new file mode 100644 index 0000000000..fc6b975c2c --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/update_record_success.json @@ -0,0 +1,8 @@ +{ + "request":"{\"comment\": \"lulz\", \"data\": \"127.3.3.3\", \"name\": \"www.bar.foo1.com\", \"ttl\": 777}", + "status":"COMPLETED", + "verb":"PUT", + "jobId":"251c0d0c-95bc-4e09-b99f-4b8748b66246", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/546514/status/251c0d0c-95bc-4e09-b99f-4b8748b66246", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/546514/domains/2946173/records/A-7423317" +} diff --git a/trunk/test/dns/fixtures/rackspace/update_zone_success.json b/trunk/test/dns/fixtures/rackspace/update_zone_success.json new file mode 100644 index 0000000000..1c9d8e08d8 --- /dev/null +++ b/trunk/test/dns/fixtures/rackspace/update_zone_success.json @@ -0,0 +1,8 @@ +{ + "request":"{}", + "status":"COMPLETED", + 
"verb":"PUT", + "jobId":"116a8f17-38ac-4862-827c-506cd04800d5", + "callbackUrl":"https://dns.api.rackspacecloud.com/v1.0/11111/status/116a8f17-38ac-4862-827c-506cd04800d5", + "requestUrl":"http://dns.api.rackspacecloud.com/v1.0/11111/domains/2946173" +} diff --git a/trunk/test/dns/fixtures/zerigo/create_record.xml b/trunk/test/dns/fixtures/zerigo/create_record.xml new file mode 100644 index 0000000000..2a44e00e22 --- /dev/null +++ b/trunk/test/dns/fixtures/zerigo/create_record.xml @@ -0,0 +1,13 @@ + + 2008-12-07T02:51:13Z + 127.0.0.1 + www.example.com + A + www + 23456780 + + + + 2008-12-07T02:51:13Z + 12345678 + diff --git a/trunk/test/dns/fixtures/zerigo/create_zone.xml b/trunk/test/dns/fixtures/zerigo/create_zone.xml new file mode 100644 index 0000000000..a48156c667 --- /dev/null +++ b/trunk/test/dns/fixtures/zerigo/create_zone.xml @@ -0,0 +1,18 @@ + + 2008-12-07T02:40:02Z + ns1.example.com,ns2.example.com + true + 600 + foo.bar.com + dnsadmin@example.com + 12345679 + + + pri_sec + + + + 2008-12-07T02:40:02Z + 0 + + diff --git a/trunk/test/dns/fixtures/zerigo/create_zone_validation_error.xml b/trunk/test/dns/fixtures/zerigo/create_zone_validation_error.xml new file mode 100644 index 0000000000..664b9719ed --- /dev/null +++ b/trunk/test/dns/fixtures/zerigo/create_zone_validation_error.xml @@ -0,0 +1,4 @@ + + Ns type is not included in the list + Default ttl must be greater than or equal to 60 + diff --git a/trunk/test/dns/fixtures/zerigo/get_record.xml b/trunk/test/dns/fixtures/zerigo/get_record.xml new file mode 100644 index 0000000000..31619a8935 --- /dev/null +++ b/trunk/test/dns/fixtures/zerigo/get_record.xml @@ -0,0 +1,13 @@ + + 2008-12-07T02:51:13Z + 172.16.16.1 + example.com + A + www + 23456789 + + + + 2008-12-07T02:51:13Z + 12345678 + diff --git a/trunk/test/dns/fixtures/zerigo/get_zone.xml b/trunk/test/dns/fixtures/zerigo/get_zone.xml new file mode 100644 index 0000000000..f94522c457 --- /dev/null +++ b/trunk/test/dns/fixtures/zerigo/get_zone.xml @@ 
-0,0 +1,32 @@ + + 2008-12-07T02:40:02Z + ns1.example.com,ns2.example.com + true + 600 + example.com + dnsadmin@example.com + 12345678 + + + pri_sec + + + one two + 2008-12-07T02:40:02Z + 1 + + + 2008-12-07T02:51:13Z + 172.16.16.1 + example.com + A + + 23456789 + + + + 2008-12-07T02:51:13Z + 12345678 + + + diff --git a/trunk/test/dns/fixtures/zerigo/list_records.xml b/trunk/test/dns/fixtures/zerigo/list_records.xml new file mode 100644 index 0000000000..d2e95309b7 --- /dev/null +++ b/trunk/test/dns/fixtures/zerigo/list_records.xml @@ -0,0 +1,15 @@ + + + 2008-12-07T02:51:13Z + 172.16.16.1 + www.example.com + A + www + 23456789 + + + + 2008-12-07T02:51:13Z + 12345678 + + diff --git a/trunk/test/dns/fixtures/zerigo/list_records_no_results.xml b/trunk/test/dns/fixtures/zerigo/list_records_no_results.xml new file mode 100644 index 0000000000..7020c67a6e --- /dev/null +++ b/trunk/test/dns/fixtures/zerigo/list_records_no_results.xml @@ -0,0 +1 @@ + diff --git a/trunk/test/dns/fixtures/zerigo/list_zones.xml b/trunk/test/dns/fixtures/zerigo/list_zones.xml new file mode 100644 index 0000000000..3870926e05 --- /dev/null +++ b/trunk/test/dns/fixtures/zerigo/list_zones.xml @@ -0,0 +1,17 @@ + + + 2008-12-07T02:40:02Z + + false + 600 + example.com + + 12345678 + test foo bar + + pri_sec + + + 2008-12-07T02:40:02Z + + diff --git a/trunk/test/dns/fixtures/zerigo/list_zones_no_results.xml b/trunk/test/dns/fixtures/zerigo/list_zones_no_results.xml new file mode 100644 index 0000000000..0572fec45c --- /dev/null +++ b/trunk/test/dns/fixtures/zerigo/list_zones_no_results.xml @@ -0,0 +1 @@ + diff --git a/trunk/test/dns/test_linode.py b/trunk/test/dns/test_linode.py new file mode 100644 index 0000000000..8279c695ad --- /dev/null +++ b/trunk/test/dns/test_linode.py @@ -0,0 +1,324 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and + +import sys +import httplib +import unittest + +from libcloud.common.linode import LinodeException +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.linode import LinodeDNSDriver + +from test import MockHttp +from test.file_fixtures import DNSFileFixtures +from test.secrets import DNS_PARAMS_LINODE + + +class LinodeTests(unittest.TestCase): + def setUp(self): + LinodeDNSDriver.connectionCls.conn_classes = ( + None, LinodeMockHttp) + LinodeMockHttp.use_param = 'api_action' + LinodeMockHttp.type = None + self.driver = LinodeDNSDriver(*DNS_PARAMS_LINODE) + + def assertHasKeys(self, dictionary, keys): + for key in keys: + self.assertTrue(key in dictionary, 'key "%s" not in dictionary' % + (key)) + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 5) + self.assertTrue(RecordType.A in record_types) + + def test_list_zones_success(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 2) + + zone = zones[0] + self.assertEqual(zone.id, '5093') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 'linode.com') + self.assertEqual(zone.ttl, None) + self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status']) + + def 
test_list_records_success(self): + zone = self.driver.list_zones()[0] + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 2) + + record = records[0] + self.assertEqual(record.id, '28536') + self.assertEqual(record.name, 'www') + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '75.127.96.245') + self.assertHasKeys(record.extra, ['protocol', 'ttl_sec', 'port', + 'weight']) + + def test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST' + try: + self.driver.list_records(zone=zone) + except ZoneDoesNotExistError, e: + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_get_zone_success(self): + LinodeMockHttp.type = 'GET_ZONE' + + zone = self.driver.get_zone(zone_id='5093') + self.assertEqual(zone.id, '5093') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.domain, 'linode.com') + self.assertEqual(zone.ttl, None) + self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status']) + + def test_get_zone_does_not_exist(self): + LinodeMockHttp.type = 'GET_ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='4444') + except ZoneDoesNotExistError, e: + self.assertEqual(e.zone_id, '4444') + else: + self.fail('Exception was not thrown') + + def test_get_record_success(self): + LinodeMockHttp.type = 'GET_RECORD' + record = self.driver.get_record(zone_id='1234', record_id='28536') + self.assertEqual(record.id, '28536') + self.assertEqual(record.name, 'www') + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '75.127.96.245') + self.assertHasKeys(record.extra, ['protocol', 'ttl_sec', 'port', + 'weight']) + + def test_get_record_zone_does_not_exist(self): + LinodeMockHttp.type = 'GET_RECORD_ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='444', record_id='28536') + except ZoneDoesNotExistError: + pass + else: + self.fail('Exception was 
not thrown') + + def test_get_record_record_does_not_exist(self): + LinodeMockHttp.type = 'GET_RECORD_RECORD_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='4441', record_id='28536') + except RecordDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_zone_success(self): + zone = self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=None, extra=None) + self.assertEqual(zone.id, '5123') + self.assertEqual(zone.domain, 'foo.bar.com') + + def test_create_zone_validaton_error(self): + LinodeMockHttp.type = 'VALIDATION_ERROR' + + try: + self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=None, extra=None) + except LinodeException: + pass + else: + self.fail('Exception was not thrown') + + def test_update_zone_success(self): + zone = self.driver.list_zones()[0] + updated_zone = self.driver.update_zone(zone=zone, + domain='libcloud.org', + ttl=10, + extra={'SOA_Email': + 'bar@libcloud.org'}) + + self.assertEqual(zone.extra['SOA_Email'], 'dns@example.com') + + self.assertEqual(updated_zone.id, zone.id) + self.assertEqual(updated_zone.domain, 'libcloud.org') + self.assertEqual(updated_zone.type, zone.type) + self.assertEqual(updated_zone.ttl, 10) + self.assertEqual(updated_zone.extra['SOA_Email'], 'bar@libcloud.org') + self.assertEqual(updated_zone.extra['status'], zone.extra['status']) + self.assertEqual(updated_zone.extra['description'], + zone.extra['description']) + + def test_create_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.create_record(name='www', zone=zone, + type=RecordType.A, data='127.0.0.1') + + self.assertEqual(record.id, '28537') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '127.0.0.1') + + def test_update_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + updated_record = 
self.driver.update_record(record=record, name='www', + type=RecordType.AAAA, + data='::1') + + self.assertEqual(record.data, '75.127.96.245') + + self.assertEqual(updated_record.id, record.id) + self.assertEqual(updated_record.name, 'www') + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, RecordType.AAAA) + self.assertEqual(updated_record.data, '::1') + + def test_delete_zone_success(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError, e: + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_delete_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + + LinodeMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.delete_record(record=record) + except RecordDoesNotExistError, e: + self.assertEqual(e.record_id, record.id) + else: + self.fail('Exception was not thrown') + + +class LinodeMockHttp(MockHttp): + fixtures = DNSFileFixtures('linode') + + def _domain_list(self, method, url, body, headers): + body = self.fixtures.load('domain_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_resource_list(self, method, url, body, headers): + body = self.fixtures.load('resource_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url, body, + headers): + body = self.fixtures.load('resource_list_does_not_exist.json') + 
return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_ZONE_domain_list(self, method, url, body, headers): + body = self.fixtures.load('get_zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body, + headers): + body = self.fixtures.load('get_zone_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_domain_list(self, method, url, body, headers): + body = self.fixtures.load('get_zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_domain_resource_list(self, method, url, body, headers): + body = self.fixtures.load('get_record.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body, + headers): + body = self.fixtures.load('get_zone_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url, + body, headers): + body = self.fixtures.load('get_record_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_RECORD_DOES_NOT_EXIST_domain_list(self, method, url, body, + headers): + body = self.fixtures.load('get_zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _GET_RECORD_RECORD_DOES_NOT_EXIST_domain_resource_list(self, method, + url, body, + headers): + body = self.fixtures.load('get_record_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_create(self, method, url, body, headers): + body = self.fixtures.load('create_domain.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _VALIDATION_ERROR_domain_create(self, method, url, body, headers): + body = self.fixtures.load('create_domain_validation_error.json') + return (httplib.OK, 
body, {}, httplib.responses[httplib.OK]) + + def _domain_update(self, method, url, body, headers): + body = self.fixtures.load('update_domain.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_resource_create(self, method, url, body, headers): + body = self.fixtures.load('create_resource.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_resource_update(self, method, url, body, headers): + body = self.fixtures.load('update_resource.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_domain.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _ZONE_DOES_NOT_EXIST_domain_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_domain_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _domain_resource_delete(self, method, url, body, headers): + body = self.fixtures.load('delete_resource.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _RECORD_DOES_NOT_EXIST_domain_resource_delete(self, method, url, body, + headers): + body = self.fixtures.load('delete_resource_does_not_exist.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/dns/test_rackspace.py b/trunk/test/dns/test_rackspace.py new file mode 100644 index 0000000000..ba1ab71f30 --- /dev/null +++ b/trunk/test/dns/test_rackspace.py @@ -0,0 +1,413 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and + +import sys +import httplib +import unittest + +from libcloud.common.types import LibcloudError +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.rackspace import RackspaceUSDNSDriver +from libcloud.dns.drivers.rackspace import RackspaceUKDNSDriver + +from test import MockHttp +from test.file_fixtures import DNSFileFixtures +from test.secrets import DNS_PARAMS_RACKSPACE + + +class RackspaceUSTests(unittest.TestCase): + klass = RackspaceUSDNSDriver + + def setUp(self): + self.klass.connectionCls.conn_classes = ( + None, RackspaceMockHttp) + RackspaceMockHttp.type = None + self.driver = self.klass(*DNS_PARAMS_RACKSPACE) + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 7) + self.assertTrue(RecordType.A in record_types) + + def test_list_zones_success(self): + zones = self.driver.list_zones() + + self.assertEqual(len(zones), 6) + self.assertEqual(zones[0].domain, 'foo4.bar.com') + self.assertEqual(zones[0].extra['comment'], 'wazaaa') + + def test_list_zones_no_results(self): + RackspaceMockHttp.type = 'NO_RESULTS' + zones = self.driver.list_zones() + self.assertEqual(len(zones), 0) + + def test_list_records_success(self): + zone = self.driver.list_zones()[0] + records = self.driver.list_records(zone=zone) + + self.assertEqual(len(records), 3) + self.assertEqual(records[0].name, 'test3') + self.assertEqual(records[0].type, RecordType.A) + self.assertEqual(records[0].data, '127.7.7.7') + 
self.assertEqual(records[0].extra['ttl'], 777) + self.assertEqual(records[0].extra['fqdn'], 'test3.%s' % + (records[0].zone.domain)) + + def test_list_records_no_results(self): + zone = self.driver.list_zones()[0] + RackspaceMockHttp.type = 'NO_RESULTS' + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 0) + + def test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + RackspaceMockHttp.type = 'ZONE_DOES_NOT_EXIST' + try: + self.driver.list_records(zone=zone) + except ZoneDoesNotExistError, e: + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_get_zone_success(self): + RackspaceMockHttp.type = 'GET_ZONE' + zone = self.driver.get_zone(zone_id='2946063') + + self.assertEqual(zone.id, '2946063') + self.assertEqual(zone.domain, 'foo4.bar.com') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.extra['email'], 'test@test.com') + + def test_get_zone_does_not_exist(self): + RackspaceMockHttp.type = 'DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='4444') + except ZoneDoesNotExistError, e: + self.assertEqual(e.zone_id, '4444') + else: + self.fail('Exception was not thrown') + + def test_get_record_success(self): + record = self.driver.get_record(zone_id='12345678', + record_id='23456789') + self.assertEqual(record.id, 'A-7423034') + self.assertEqual(record.name, 'test3') + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.extra['comment'], 'lulz') + + def test_get_record_zone_does_not_exist(self): + RackspaceMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='444', record_id='28536') + except ZoneDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_record_record_does_not_exist(self): + RackspaceMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='12345678', + record_id='28536') + except RecordDoesNotExistError: + pass + else: + 
self.fail('Exception was not thrown') + + def test_create_zone_success(self): + RackspaceMockHttp.type = 'CREATE_ZONE' + + zone = self.driver.create_zone(domain='bar.foo1.com', type='master', + ttl=None, + extra={'email': 'test@test.com'}) + self.assertEqual(zone.id, '2946173') + self.assertEqual(zone.domain, 'bar.foo1.com') + self.assertEqual(zone.type, 'master') + self.assertEqual(zone.extra['email'], 'test@test.com') + + def test_create_zone_validaton_error(self): + RackspaceMockHttp.type = 'CREATE_ZONE_VALIDATION_ERROR' + + try: + self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=10, + extra={'email': 'test@test.com'}) + except Exception, e: + self.assertEqual(str(e), 'Validation errors: Domain TTL is ' + + 'required and must be greater than ' + + 'or equal to 300') + else: + self.fail('Exception was not thrown') + + def test_update_zone_success(self): + zone = self.driver.list_zones()[0] + updated_zone = self.driver.update_zone(zone=zone, + extra={'comment': + 'bar foo'}) + + self.assertEqual(zone.extra['comment'], 'wazaaa') + + self.assertEqual(updated_zone.id, zone.id) + self.assertEqual(updated_zone.domain, 'foo4.bar.com') + self.assertEqual(updated_zone.type, zone.type) + self.assertEqual(updated_zone.ttl, zone.ttl) + self.assertEqual(updated_zone.extra['comment'], 'bar foo') + + def test_update_zone_domain_cannot_be_changed(self): + zone = self.driver.list_zones()[0] + + try: + self.driver.update_zone(zone=zone, domain='libcloud.org') + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_record_success(self): + zone = self.driver.list_zones()[0] + + RackspaceMockHttp.type = 'CREATE_RECORD' + record = self.driver.create_record(name='www', zone=zone, + type=RecordType.A, data='127.1.1.1') + + self.assertEqual(record.id, 'A-7423317') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, 
'127.1.1.1') + self.assertEqual(record.extra['fqdn'], 'www.%s' % (zone.domain)) + + def test_update_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + updated_record = self.driver.update_record(record=record, + data='127.3.3.3') + + self.assertEqual(record.name, 'test3') + self.assertEqual(record.data, '127.7.7.7') + + self.assertEqual(updated_record.id, record.id) + self.assertEqual(updated_record.name, record.name) + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, record.type) + self.assertEqual(updated_record.data, '127.3.3.3') + + def test_delete_zone_success(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + RackspaceMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError, e: + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_delete_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + + RackspaceMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.delete_record(record=record) + except RecordDoesNotExistError, e: + self.assertEqual(e.record_id, record.id) + else: + self.fail('Exception was not thrown') + + +class RackspaceUK1Tests(RackspaceUSTests): + klass = RackspaceUKDNSDriver + + +class RackspaceMockHttp(MockHttp): + fixtures = DNSFileFixtures('rackspace') + base_headers = {'content-type': 'application/json'} + + # fake auth token response + def _v1_1__auth(self, method, url, body, headers): + body = 
self.fixtures.load('auth_1_1.json') + headers = {'content-length': '657', 'vary': 'Accept,Accept-Encoding', + 'server': 'Apache/2.2.13 (Red Hat)', + 'connection': 'Keep-Alive', + 'date': 'Sat, 29 Oct 2011 19:29:45 GMT', + 'content-type': 'application/json'} + return (httplib.OK, body, headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains(self, method, url, body, headers): + body = self.fixtures.load('list_zones_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_NO_RESULTS(self, method, url, body, headers): + body = self.fixtures.load('list_zones_no_results.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('list_records_success.json') + elif method == 'PUT': + # Async - update_zone + body = self.fixtures.load('update_zone_success.json') + elif method == 'DELETE': + # Aync - delete_zone + body = self.fixtures.load('delete_zone_success.json') + + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063_NO_RESULTS(self, method, url, body, + headers): + body = self.fixtures.load('list_records_no_results.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063_ZONE_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('does_not_exist.json') + return (httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _v1_0_11111_domains_2946063_GET_ZONE(self, method, url, body, headers): + body = self.fixtures.load('get_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_4444_DOES_NOT_EXIST(self, method, url, body, + headers): + body = self.fixtures.load('does_not_exist.json') + return 
(httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _v1_0_11111_domains_12345678(self, method, url, body, headers): + body = self.fixtures.load('get_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_12345678_records_23456789(self, method, url, body, + headers): + body = self.fixtures.load('get_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_444_ZONE_DOES_NOT_EXIST(self, method, url, body, + headers): + body = self.fixtures.load('does_not_exist.json') + return (httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _v1_0_11111_domains_12345678_RECORD_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('get_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_12345678_records_28536_RECORD_DOES_NOT_EXIST(self, + method, url, body, headers): + body = self.fixtures.load('does_not_exist.json') + return (httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + def _v1_0_11111_domains_CREATE_ZONE(self, method, url, body, headers): + # Async response - create_zone + body = self.fixtures.load('create_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_288795f9_e74d_48be_880b_a9e36e0de61e_CREATE_ZONE(self, + method, url, body, headers): + # Async status - create_zone + body = self.fixtures.load('create_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_CREATE_ZONE_VALIDATION_ERROR(self, method, url, body, headers): + body = self.fixtures.load('create_zone_validation_error.json') + return (httplib.BAD_REQUEST, body, self.base_headers, + 
httplib.responses[httplib.BAD_REQUEST]) + + def _v1_0_11111_status_116a8f17_38ac_4862_827c_506cd04800d5(self, method, url, body, headers): + # Aync status - update_zone + body = self.fixtures.load('update_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_586605c8_5739_43fb_8939_f3a2c4c0e99c_CREATE_RECORD(self, method, url, body, headers): + # Aync status - create_record + body = self.fixtures.load('create_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063_records_CREATE_RECORD(self, method, url, body, headers): + # Aync response - create_record + body = self.fixtures.load('create_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_251c0d0c_95bc_4e09_b99f_4b8748b66246(self, method, url, body, headers): + # Aync response - update_record + body = self.fixtures.load('update_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_domains_2946063_records_A_7423034(self, method, url, body, + headers): + # Aync response - update_record + body = self.fixtures.load('update_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_0b40cd14_2e5d_490f_bb6e_fdc65d1118a9(self, method, + url, body, + headers): + # Async status - delete_zone + body = self.fixtures.load('delete_zone_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_0_11111_status_0b40cd14_2e5d_490f_bb6e_fdc65d1118a9_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + # Async status - delete_record + body = self.fixtures.load('delete_record_success.json') + return (httplib.OK, body, self.base_headers, + httplib.responses[httplib.OK]) + + def 
_v1_0_11111_domains_2946063_records_A_7423034_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): + # Async response - delete_record + body = self.fixtures.load('does_not_exist.json') + return (httplib.NOT_FOUND, body, self.base_headers, + httplib.responses[httplib.NOT_FOUND]) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/dns/test_zerigo.py b/trunk/test/dns/test_zerigo.py new file mode 100644 index 0000000000..56908924e7 --- /dev/null +++ b/trunk/test/dns/test_zerigo.py @@ -0,0 +1,357 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and + +import sys +import httplib +import unittest + +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.dns.types import RecordType, ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.zerigo import ZerigoDNSDriver, ZerigoError + +from test import MockHttp +from test.file_fixtures import DNSFileFixtures +from test.secrets import DNS_PARAMS_ZERIGO + + +class ZerigoTests(unittest.TestCase): + def setUp(self): + ZerigoDNSDriver.connectionCls.conn_classes = ( + None, ZerigoMockHttp) + ZerigoMockHttp.type = None + self.driver = ZerigoDNSDriver(*DNS_PARAMS_ZERIGO) + + def test_invalid_credentials(self): + ZerigoMockHttp.type = 'INVALID_CREDS' + + try: + list(self.driver.list_zones()) + except InvalidCredsError: + pass + else: + self.fail('Exception was not thrown') + + def test_list_record_types(self): + record_types = self.driver.list_record_types() + self.assertEqual(len(record_types), 11) + self.assertTrue(RecordType.A in record_types) + + def test_list_zones_success(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 1) + self.assertEqual(zones[0].domain, 'example.com') + self.assertEqual(zones[0].type, 'master') + self.assertEqual(zones[0].extra['notes'], 'test foo bar') + + def test_list_zones_no_results(self): + ZerigoMockHttp.type = 'NO_RESULTS' + zones = self.driver.list_zones() + self.assertEqual(len(zones), 0) + + def test_list_records_success(self): + zone = self.driver.list_zones()[0] + records = list(self.driver.list_records(zone=zone)) + + self.assertEqual(len(records), 1) + self.assertEqual(records[0].name, 'www') + self.assertEqual(records[0].type, RecordType.A) + self.assertEqual(records[0].data, '172.16.16.1') + self.assertEqual(records[0].extra['fqdn'], 'www.example.com') + + def test_list_records_no_results(self): + zone = self.driver.list_zones()[0] + ZerigoMockHttp.type = 
'NO_RESULTS' + records = list(self.driver.list_records(zone=zone)) + self.assertEqual(len(records), 0) + + def test_list_records_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + ZerigoMockHttp.type = 'ZONE_DOES_NOT_EXIST' + try: + list(self.driver.list_records(zone=zone)) + except ZoneDoesNotExistError, e: + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + pass + + def test_get_zone_success(self): + zone = self.driver.get_zone(zone_id=12345678) + + self.assertEqual(zone.id, '12345678') + self.assertEqual(zone.domain, 'example.com') + self.assertEqual(zone.extra['hostmaster'], 'dnsadmin@example.com') + self.assertEqual(zone.type, 'master') + + def test_get_zone_does_not_exist(self): + ZerigoMockHttp.type = 'DOES_NOT_EXIST' + + try: + self.driver.get_zone(zone_id='4444') + except ZoneDoesNotExistError, e: + self.assertEqual(e.zone_id, '4444') + else: + self.fail('Exception was not thrown') + + def test_get_record_success(self): + record = self.driver.get_record(zone_id='12345678', + record_id='23456789') + self.assertEqual(record.id, '23456789') + self.assertEqual(record.name, 'www') + self.assertEqual(record.type, RecordType.A) + + def test_get_record_zone_does_not_exist(self): + ZerigoMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='444', record_id='28536') + except ZoneDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_record_record_does_not_exist(self): + ZerigoMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.get_record(zone_id='12345678', + record_id='28536') + except RecordDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_zone_success(self): + ZerigoMockHttp.type = 'CREATE_ZONE' + + zone = self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=None, extra=None) + self.assertEqual(zone.id, '12345679') + self.assertEqual(zone.domain, 'foo.bar.com') + + def 
test_create_zone_validaton_error(self): + ZerigoMockHttp.type = 'CREATE_ZONE_VALIDATION_ERROR' + + try: + self.driver.create_zone(domain='foo.bar.com', type='master', + ttl=10, extra=None) + except ZerigoError, e: + self.assertEqual(len(e.errors), 2) + pass + else: + self.fail('Exception was not thrown') + + def test_update_zone_success(self): + zone = self.driver.list_zones()[0] + updated_zone = self.driver.update_zone(zone=zone, + ttl=10, + extra={'notes': + 'bar foo'}) + + self.assertEqual(zone.extra['hostmaster'], '') + self.assertEqual(zone.extra['notes'], 'test foo bar') + + self.assertEqual(updated_zone.id, zone.id) + self.assertEqual(updated_zone.domain, 'example.com') + self.assertEqual(updated_zone.type, zone.type) + self.assertEqual(updated_zone.ttl, 10) + self.assertEqual(updated_zone.extra['notes'], 'bar foo') + + def test_update_zone_domain_cannot_be_changed(self): + zone = self.driver.list_zones()[0] + + try: + self.driver.update_zone(zone=zone, domain='libcloud.org') + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_record_success(self): + zone = self.driver.list_zones()[0] + + ZerigoMockHttp.type = 'CREATE_RECORD' + record = self.driver.create_record(name='www', zone=zone, + type=RecordType.A, data='127.0.0.1') + + self.assertEqual(record.id, '23456780') + self.assertEqual(record.name, 'www') + self.assertEqual(record.zone, zone) + self.assertEqual(record.type, RecordType.A) + self.assertEqual(record.data, '127.0.0.1') + + def test_update_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + updated_record = self.driver.update_record(record=record, name='www', + type=RecordType.AAAA, + data='::1') + + self.assertEqual(record.data, '172.16.16.1') + + self.assertEqual(updated_record.id, record.id) + self.assertEqual(updated_record.name, 'www') + self.assertEqual(updated_record.zone, record.zone) + self.assertEqual(updated_record.type, 
RecordType.AAAA) + self.assertEqual(updated_record.data, '::1') + + def test_delete_zone_success(self): + zone = self.driver.list_zones()[0] + status = self.driver.delete_zone(zone=zone) + self.assertTrue(status) + + def test_delete_zone_does_not_exist(self): + zone = self.driver.list_zones()[0] + + ZerigoMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.delete_zone(zone=zone) + except ZoneDoesNotExistError, e: + self.assertEqual(e.zone_id, zone.id) + else: + self.fail('Exception was not thrown') + + def test_delete_record_success(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + status = self.driver.delete_record(record=record) + self.assertTrue(status) + + def test_delete_record_does_not_exist(self): + zone = self.driver.list_zones()[0] + record = self.driver.list_records(zone=zone)[0] + + ZerigoMockHttp.type = 'RECORD_DOES_NOT_EXIST' + + try: + self.driver.delete_record(record=record) + except RecordDoesNotExistError, e: + self.assertEqual(e.record_id, record.id) + else: + self.fail('Exception was not thrown') + + +class ZerigoMockHttp(MockHttp): + fixtures = DNSFileFixtures('zerigo') + + def _api_1_1_zones_xml_INVALID_CREDS(self, method, url, body, headers): + body = 'HTTP Basic: Access denied.\n' + return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_xml(self, method, url, body, headers): + body = self.fixtures.load('list_zones.xml') + return (httplib.OK, body, {'x-query-count': 1}, + httplib.responses[httplib.OK]) + + def _api_1_1_zones_xml_NO_RESULTS(self, method, url, body, headers): + body = self.fixtures.load('list_zones_no_results.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_hosts_xml(self, method, url, body, headers): + body = self.fixtures.load('list_records.xml') + return (httplib.OK, body, {'x-query-count': 1}, + httplib.responses[httplib.OK]) + + def 
_api_1_1_zones_12345678_hosts_xml_NO_RESULTS(self, method, url, body, + headers): + body = self.fixtures.load('list_records_no_results.xml') + return (httplib.OK, body, {'x-query-count': 0}, + httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_hosts_xml_ZONE_DOES_NOT_EXIST(self, method, + url, body, + headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_xml(self, method, url, body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_4444_xml_DOES_NOT_EXIST(self, method, url, body, + headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_hosts_23456789_xml(self, method, url, body, headers): + body = self.fixtures.load('get_record.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_444_xml_ZONE_DOES_NOT_EXIST(self, method, url, body, + headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_xml_RECORD_DOES_NOT_EXIST(self, method, url, + body, headers): + body = self.fixtures.load('get_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_hosts_28536_xml_RECORD_DOES_NOT_EXIST(self, method, url, body, + headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_xml_CREATE_ZONE(self, method, url, body, headers): + body = self.fixtures.load('create_zone.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_xml_CREATE_ZONE_VALIDATION_ERROR(self, method, url, + body, headers): + body = self.fixtures.load('create_zone_validation_error.xml') + return (httplib.UNPROCESSABLE_ENTITY, body, {}, + httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_hosts_xml_CREATE_RECORD(self, method, url, + body, headers): + 
body = self.fixtures.load('create_record.xml') + return (httplib.CREATED, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_zones_12345678_xml_ZONE_DOES_NOT_EXIST(self, method, url, + body, headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + def _api_1_1_hosts_23456789_xml_RECORD_DOES_NOT_EXIST(self, method, url, + body, headers): + body = '' + return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.OK]) + + """ + def (self, method, url, body, headers): + body = self.fixtures.load('.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def (self, method, url, body, headers): + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + """ + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/file_fixtures.py b/trunk/test/file_fixtures.py new file mode 100644 index 0000000000..9ecdc37c7b --- /dev/null +++ b/trunk/test/file_fixtures.py @@ -0,0 +1,67 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Helper class for loading large fixture data +from __future__ import with_statement + +import os + +FIXTURES_ROOT = { + 'compute': 'compute/fixtures', + 'storage': 'storage/fixtures', + 'loadbalancer': 'loadbalancer/fixtures', + 'dns': 'dns/fixtures', + 'openstack': 'compute/fixtures/openstack', +} + +class FileFixtures(object): + def __init__(self, fixtures_type, sub_dir=''): + script_dir = os.path.abspath(os.path.split(__file__)[0]) + self.root = os.path.join(script_dir, FIXTURES_ROOT[fixtures_type], + sub_dir) + + def load(self, file): + path = os.path.join(self.root, file) + if os.path.exists(path): + with open(path, 'r') as fh: + content = fh.read() + return content + else: + raise IOError(path) + +class ComputeFileFixtures(FileFixtures): + def __init__(self, sub_dir=''): + super(ComputeFileFixtures, self).__init__(fixtures_type='compute', + sub_dir=sub_dir) + +class StorageFileFixtures(FileFixtures): + def __init__(self, sub_dir=''): + super(StorageFileFixtures, self).__init__(fixtures_type='storage', + sub_dir=sub_dir) + +class LoadBalancerFileFixtures(FileFixtures): + def __init__(self, sub_dir=''): + super(LoadBalancerFileFixtures, self).__init__(fixtures_type='loadbalancer', + sub_dir=sub_dir) + +class DNSFileFixtures(FileFixtures): + def __init__(self, sub_dir=''): + super(DNSFileFixtures, self).__init__(fixtures_type='dns', + sub_dir=sub_dir) + +class OpenStackFixtures(FileFixtures): + def __init__(self, sub_dir=''): + super(OpenStackFixtures, self).__init__(fixtures_type='openstack', + sub_dir=sub_dir) diff --git 
a/trunk/test/loadbalancer/__init__.py b/trunk/test/loadbalancer/__init__.py new file mode 100644 index 0000000000..ae1e83eeb3 --- /dev/null +++ b/trunk/test/loadbalancer/__init__.py @@ -0,0 +1,14 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json b/trunk/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json new file mode 100644 index 0000000000..a2175dc56e --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/assignToLoadBalancerRule_default.json @@ -0,0 +1 @@ +{ "assigntoloadbalancerruleresponse" : {"jobid":17341} } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json b/trunk/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json new file mode 100644 index 0000000000..a62dd4efb6 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/associateIpAddress_default.json @@ -0,0 +1 @@ +{ "associateipaddressresponse" : {"jobid":17346,"id":34000} } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json b/trunk/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json new file mode 100644 index 
0000000000..4a442a0e4a --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/createLoadBalancerRule_default.json @@ -0,0 +1 @@ +{ "createloadbalancerruleresponse" : { "loadbalancer" : {"id":2253,"name":"fake","publicipid":34000,"publicip":"1.1.1.49","publicport":"80","privateport":"80","algorithm":"roundrobin","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","state":"Add"} } } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json b/trunk/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json new file mode 100644 index 0000000000..bec847a991 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/deleteLoadBalancerRule_default.json @@ -0,0 +1 @@ +{ "deleteloadbalancerruleresponse" : {"jobid":17342} } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json b/trunk/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json new file mode 100644 index 0000000000..eb1ec6b396 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/disassociateIpAddress_default.json @@ -0,0 +1 @@ +{ "disassociateipaddressresponse" : {"jobid":17344} } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json b/trunk/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json new file mode 100644 index 0000000000..912a268533 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRuleInstances_default.json @@ -0,0 +1 @@ +{ "listloadbalancerruleinstancesresponse" : { "loadbalancerruleinstance" : [ {"id":2614,"name":"test_1308874974","displayname":"test_1308874974","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-24T00:22:56+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 
10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":2185,"networkkbswrite":109,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3914,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.3.122","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"}, {"id":2615,"name":"test_1308875456","displayname":"test_1308875456","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","created":"2011-06-24T00:30:57+0000","state":"Running","haenable":false,"zoneid":1,"zonename":"Sydney","templateid":421,"templatename":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","templatedisplaytext":"XEN Basic Ubuntu 10.04 Server x64 PV r2.0","passwordenabled":false,"serviceofferingid":105,"serviceofferingname":"Compute Micro PRD","cpunumber":1,"cpuspeed":1200,"memory":384,"cpuused":"0.14%","networkkbsread":1118,"networkkbswrite":75,"guestosid":12,"rootdeviceid":0,"rootdevicetype":"IscsiLUN","securitygroup":[],"nic":[{"id":3915,"networkid":860,"netmask":"255.255.240.0","gateway":"1.1.1.1","ipaddress":"1.1.2.62","traffictype":"Guest","type":"Virtual","isdefault":true}],"hypervisor":"XenServer"} ] } } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json b/trunk/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json new file mode 100644 index 0000000000..f3ae05acd5 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/listLoadBalancerRules_default.json @@ -0,0 +1 @@ +{ "listloadbalancerrulesresponse" : { "loadbalancerrule" : [ 
{"id":2247,"name":"test","publicipid":34000,"publicip":"1.1.1.49","publicport":"80","privateport":"80","algorithm":"roundrobin","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","state":"Active"},{"id":2249,"name":"testmore","publicipid":34001,"publicip":"1.1.2.49","publicport":"80","privateport":"80","algorithm":"leastconn","account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","state":"Active"} ] } } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/listZones_default.json b/trunk/test/loadbalancer/fixtures/cloudstack/listZones_default.json new file mode 100644 index 0000000000..0316936e45 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/listZones_default.json @@ -0,0 +1 @@ +{ "listzonesresponse" : { "zone" : [ {"id":1,"name":"Sydney","networktype":"Advanced","securitygroupsenabled":false} ] } } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json new file mode 100644 index 0000000000..67a51fed0f --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17340.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17340,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json new file mode 100644 index 0000000000..6ea3b16a21 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17341.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17341,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json new 
file mode 100644 index 0000000000..95c4e4a835 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17342.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17342,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json new file mode 100644 index 0000000000..65d29c5422 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17344.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17344,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json new file mode 100644 index 0000000000..aac339b235 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/queryAsyncJobResult_17346.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":17346,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"ipaddress":{"id":34000,"ipaddress":"1.1.1.49","allocated":"2011-06-24T05:52:55+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocating"}}} } diff --git a/trunk/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json b/trunk/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json new file mode 100644 index 0000000000..a70b5c5642 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/cloudstack/removeFromLoadBalancerRule_default.json @@ -0,0 +1 @@ +{ "removefromloadbalancerruleresponse" : {"jobid":17340} } diff --git 
a/trunk/test/loadbalancer/fixtures/gogrid/ip_list.json b/trunk/test/loadbalancer/fixtures/gogrid/ip_list.json new file mode 100644 index 0000000000..f13a358286 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/gogrid/ip_list.json @@ -0,0 +1,316 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868101, + "ip": "10.0.0.68", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868102, + "ip": "10.0.0.69", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868106, + "ip": "10.0.0.73", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868107, + "ip": "10.0.0.74", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868108, + "ip": "10.0.0.75", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + 
"object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.0.0.76", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.0.0.77", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277337, + "ip": "10.0.0.244", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277338, + "ip": "10.0.0.245", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 
2277342, + "ip": "10.0.0.249", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277343, + "ip": "10.0.0.250", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277344, + "ip": "10.0.0.251", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277345, + "ip": "10.0.0.252", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277346, + "ip": "10.0.0.253", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.240/255.255.255.240" + }, + { + "datacenter": { + "description": "US East 1 Datacenter", + "id": 2, + "name": "US-East-1", + "object": "option" + }, + "id": 2277347, + "ip": "10.0.0.254", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + 
"subnet": "10.0.0.240/255.255.255.240" + } + ], + "method": "/grid/ip/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 16, + "start": 0, + "total": 16 + } +} diff --git a/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json b/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json new file mode 100644 index 0000000000..42bea63e1b --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_add.json @@ -0,0 +1,141 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "name": "test2", + "id": 123, + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.1.0.10", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.1.0.10/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.1.0.11", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.1.0.11/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.1.0.12", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": 
"Unassigned", + "object": "option" + }, + "subnet": "10.1.0.12/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868101, + "ip": "1.1.1.1", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "1.1.1.1/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + } + ], + "method": "/grid/loadbalancer/add", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json b/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json new file mode 100644 index 0000000000..7afc7945b8 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_edit.json @@ -0,0 +1,164 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 23530, + "name": "test2", + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868108, + "ip": "10.0.0.75", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, 
+ "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.0.0.76", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.0.0.77", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868101, + "ip": "10.0.0.68", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + } + ], + "method": "/grid/loadbalancer/edit", + "status": "success", + 
"summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json b/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json new file mode 100644 index 0000000000..55878003f2 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_get.json @@ -0,0 +1,141 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 23530, + "name": "test2", + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.0.0.76", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.0.0.77", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": 
"ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868101, + "ip": "10.0.0.68", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + } + ], + "method": "/grid/loadbalancer/get", + "status": "success", + "summary": { + "numpages": 0, + "returned": 1, + "start": 0, + "total": 1 + } +} diff --git a/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json b/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json new file mode 100644 index 0000000000..e1c366eb5c --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/gogrid/loadbalancer_list.json @@ -0,0 +1,224 @@ +{ + "list": [ + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 23517, + "name": "foo", + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is 
enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868099, + "ip": "10.0.0.66", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + }, + { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 23526, + "name": "bar", + "object": "loadbalancer", + "os": { + "description": "The F5 Load Balancer.", + "id": 1, + "name": "F5", + "object": "option" + }, + "persistence": { + "description": "", + "id": 1, + "name": "None", + "object": "option" + }, + "realiplist": [ + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868109, + "ip": "10.0.0.76", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868110, + "ip": "10.0.0.77", + "object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + }, + { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868111, + "ip": "10.0.0.78", + 
"object": "ip", + "public": true, + "state": { + "description": "IP is available to use", + "id": 1, + "name": "Unassigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + ], + "state": { + "description": "Loadbalancer is enabled and on.", + "id": 1, + "name": "On", + "object": "option" + }, + "type": { + "description": "", + "id": 1, + "name": "Round Robin", + "object": "option" + }, + "virtualip": { + "ip": { + "datacenter": { + "description": "US West 1 Datacenter", + "id": 1, + "name": "US-West-1", + "object": "option" + }, + "id": 1868100, + "ip": "10.0.0.67", + "object": "ip", + "public": true, + "state": { + "description": "IP is reserved or in use", + "id": 2, + "name": "Assigned", + "object": "option" + }, + "subnet": "10.0.0.64/255.255.255.240" + }, + "object": "ipportpair", + "port": 80 + } + } + ], + "method": "/grid/loadbalancer/list", + "status": "success", + "summary": { + "numpages": 0, + "returned": 2, + "start": 0, + "total": 2 + } +} diff --git a/trunk/test/loadbalancer/fixtures/gogrid/unexpected_error.json b/trunk/test/loadbalancer/fixtures/gogrid/unexpected_error.json new file mode 100644 index 0000000000..87ed4e56ba --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/gogrid/unexpected_error.json @@ -0,0 +1 @@ +{"summary":{"total":1,"start":0,"returned":1},"status":"failure","method":"/grid/loadbalancer/add","list":[{"message":"An unexpected server error has occured. Please email this error to apisupport@gogrid.com. 
Error Message : null","object":"error","errorcode":"UnexpectedException"}]} diff --git a/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json new file mode 100644 index 0000000000..99a8175110 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers.json @@ -0,0 +1,48 @@ +{ + "loadBalancers": [ + { + "algorithm": "RANDOM", + "created": { + "time": "2011-04-06T21:25:19+0000" + }, + "id": 8155, + "name": "test0", + "port": 80, + "protocol": "HTTP", + "status": "ACTIVE", + "updated": { + "time": "2011-04-06T21:25:31+0000" + }, + "virtualIps": [ + { + "address": "1.1.1.25", + "id": 965, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + }, + { + "algorithm": "RANDOM", + "created": { + "time": "2011-04-06T21:26:22+0000" + }, + "id": 8156, + "name": "test1", + "port": 80, + "protocol": "HTTP", + "status": "ACTIVE", + "updated": { + "time": "2011-04-06T21:26:33+0000" + }, + "virtualIps": [ + { + "address": "1.1.1.83", + "id": 1279, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + } + ] +} diff --git a/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json new file mode 100644 index 0000000000..b13712ad85 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290.json @@ -0,0 +1,46 @@ +{ + "loadBalancer": { + "algorithm": "RANDOM", + "cluster": { + "name": "ztm-n05.lbaas.ord1.rackspace.net" + }, + "connectionLogging": { + "enabled": false + }, + "created": { + "time": "2011-04-07T16:27:50Z" + }, + "id": 8290, + "name": "test2", + "nodes": [ + { + "address": "10.1.0.11", + "condition": "ENABLED", + "id": 30944, + "port": 80, + "status": "ONLINE" + }, + { + "address": "10.1.0.10", + "condition": "ENABLED", + "id": 30945, + "port": 80, + "status": "ONLINE" + } + ], + "port": 80, + "protocol": "HTTP", + "status": "ACTIVE", + 
"updated": { + "time": "2011-04-07T16:28:12Z" + }, + "virtualIps": [ + { + "address": "1.1.1.1", + "id": 1151, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + } +} diff --git a/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json new file mode 100644 index 0000000000..d10c06ec6b --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes.json @@ -0,0 +1,18 @@ +{ + "nodes": [ + { + "address": "10.1.0.11", + "condition": "ENABLED", + "id": 30944, + "port": 80, + "status": "ONLINE" + }, + { + "address": "10.1.0.10", + "condition": "ENABLED", + "id": 30945, + "port": 80, + "status": "ONLINE" + } + ] +} diff --git a/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json new file mode 100644 index 0000000000..fa397621b0 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_8290_nodes_post.json @@ -0,0 +1,12 @@ +{ + "nodes": [ + { + "address": "10.1.0.12", + "condition": "ENABLED", + "id": 30972, + "port": 80, + "status": "ONLINE", + "weight": 1 + } + ] +} diff --git a/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json new file mode 100644 index 0000000000..e1d030b26c --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_post.json @@ -0,0 +1,48 @@ +{ + "loadBalancer": { + "algorithm": "RANDOM", + "cluster": { + "name": "ztm-n05.lbaas.ord1.rackspace.net" + }, + "connectionLogging": { + "enabled": false + }, + "created": { + "time": "2011-04-07T16:27:50+0000" + }, + "id": 8290, + "name": "test2", + "nodes": [ + { + "address": "10.1.0.11", + "condition": "ENABLED", + "id": 30944, + "port": 80, + "status": "ONLINE", + "weight": 1 + }, + { + "address": 
"10.1.0.10", + "condition": "ENABLED", + "id": 30945, + "port": 80, + "status": "ONLINE", + "weight": 1 + } + ], + "port": 80, + "protocol": "HTTP", + "status": "BUILD", + "updated": { + "time": "2011-04-07T16:27:50+0000" + }, + "virtualIps": [ + { + "address": "1.1.1.1", + "id": 1151, + "ipVersion": "IPV4", + "type": "PUBLIC" + } + ] + } +} diff --git a/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json new file mode 100644 index 0000000000..d966c396b7 --- /dev/null +++ b/trunk/test/loadbalancer/fixtures/rackspace/v1_slug_loadbalancers_protocols.json @@ -0,0 +1,43 @@ +{"protocols": [ + { + "name": "HTTP", + "port": "80" + }, + { + "name": "FTP", + "port": "21" + }, + { + "name": "IMAPv4", + "port": "143" + }, + { + "name": "POP3", + "port": "110" + }, + { + "name": "SMTP", + "port": "25" + }, + { + "name": "LDAP", + "port": "389" + }, + { + "name": "HTTPS", + "port": "443" + }, + { + "name": "IMAPS", + "port": "993" + }, + { + "name": "POP3S", + "port": "995" + }, + { + "name": "LDAPS", + "port": "636" + } + ] +} diff --git a/trunk/test/loadbalancer/test_cloudstack.py b/trunk/test/loadbalancer/test_cloudstack.py new file mode 100644 index 0000000000..399cd65013 --- /dev/null +++ b/trunk/test/loadbalancer/test_cloudstack.py @@ -0,0 +1,101 @@ +import httplib +import sys +import unittest +import urlparse + +try: + import simplejson as json +except ImportError: + import json + +try: + parse_qsl = urlparse.parse_qsl +except AttributeError: + import cgi + parse_qsl = cgi.parse_qsl + +from libcloud.common.types import LibcloudError +from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm +from libcloud.loadbalancer.drivers.cloudstack import CloudStackLBDriver + +from test import MockHttpTestCase +from test.file_fixtures import LoadBalancerFileFixtures + +class CloudStackLBTests(unittest.TestCase): + def setUp(self): + 
CloudStackLBDriver.connectionCls.conn_classes = \ + (None, CloudStackMockHttp) + self.driver = CloudStackLBDriver('apikey', 'secret') + self.driver.path = '/test/path' + self.driver.type = -1 + self.driver.name = 'CloudStack' + CloudStackMockHttp.fixture_tag = 'default' + + def test_list_balancers(self): + balancers = self.driver.list_balancers() + for balancer in balancers: + self.assertTrue(isinstance(balancer, LoadBalancer)) + + def test_create_balancer(self): + members = [Member(1, '1.1.1.1', 80), Member(2, '1.1.1.2', 80)] + balancer = self.driver.create_balancer('fake', members) + self.assertTrue(isinstance(balancer, LoadBalancer)) + + def test_destroy_balancer(self): + balancer = self.driver.list_balancers()[0] + self.driver.destroy_balancer(balancer) + + def test_balancer_attach_member(self): + balancer = self.driver.list_balancers()[0] + member = Member(id=1234, ip='1.1.1.1', port=80) + balancer.attach_member(member) + + def test_balancer_detach_member(self): + balancer = self.driver.list_balancers()[0] + member = balancer.list_members()[0] + balancer.detach_member(member) + + def test_balancer_list_members(self): + balancer = self.driver.list_balancers()[0] + members = balancer.list_members() + for member in members: + self.assertTrue(isinstance(member, Member)) + +class CloudStackMockHttp(MockHttpTestCase): + fixtures = LoadBalancerFileFixtures('cloudstack') + fixture_tag = 'default' + + def _load_fixture(self, fixture): + body = self.fixtures.load(fixture) + return body, json.loads(body) + + def _test_path(self, method, url, body, headers): + url = urlparse.urlparse(url) + query = dict(parse_qsl(url.query)) + + self.assertTrue('apiKey' in query) + self.assertTrue('command' in query) + self.assertTrue('response' in query) + self.assertTrue('signature' in query) + + self.assertTrue(query['response'] == 'json') + + del query['apiKey'] + del query['response'] + del query['signature'] + command = query.pop('command') + + if hasattr(self, '_cmd_' + command): + 
return getattr(self, '_cmd_' + command)(**query) + else: + fixture = command + '_' + self.fixture_tag + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + + def _cmd_queryAsyncJobResult(self, jobid): + fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json' + body, obj = self._load_fixture(fixture) + return (httplib.OK, body, obj, httplib.responses[httplib.OK]) + +if __name__ == "__main__": + sys.exit(unittest.main()) diff --git a/trunk/test/loadbalancer/test_gogrid.py b/trunk/test/loadbalancer/test_gogrid.py new file mode 100644 index 0000000000..9ce782c2a2 --- /dev/null +++ b/trunk/test/loadbalancer/test_gogrid.py @@ -0,0 +1,183 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import httplib +import sys +import unittest +from urlparse import urlparse + +from libcloud.common.types import LibcloudError +from libcloud.compute.base import Node +from libcloud.compute.drivers.dummy import DummyNodeDriver +from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm +from libcloud.loadbalancer.drivers.gogrid import GoGridLBDriver + +from test import MockHttpTestCase +from test.file_fixtures import LoadBalancerFileFixtures + +class GoGridTests(unittest.TestCase): + + def setUp(self): + GoGridLBDriver.connectionCls.conn_classes = (None, + GoGridLBMockHttp) + GoGridLBMockHttp.type = None + self.driver = GoGridLBDriver('user', 'key') + + def test_list_protocols(self): + protocols = self.driver.list_protocols() + + self.assertEqual(len(protocols), 1) + self.assertEqual(protocols[0], 'http') + + def test_list_balancers(self): + balancers = self.driver.list_balancers() + + self.assertEquals(len(balancers), 2) + self.assertEquals(balancers[0].name, "foo") + self.assertEquals(balancers[0].id, "23517") + self.assertEquals(balancers[1].name, "bar") + self.assertEquals(balancers[1].id, "23526") + + def test_create_balancer(self): + balancer = self.driver.create_balancer(name='test2', + port=80, + protocol='http', + algorithm=Algorithm.ROUND_ROBIN, + members=(Member(None, '10.1.0.10', 80), + Member(None, '10.1.0.11', 80)) + ) + + self.assertEquals(balancer.name, 'test2') + self.assertEquals(balancer.id, '123') + + def test_create_balancer_UNEXPECTED_ERROR(self): + # Try to create new balancer and attach members with an IP address which + # does not belong to this account + GoGridLBMockHttp.type = 'UNEXPECTED_ERROR' + + try: + self.driver.create_balancer(name='test2', + port=80, + protocol='http', + algorithm=Algorithm.ROUND_ROBIN, + members=(Member(None, '10.1.0.10', 80), + Member(None, '10.1.0.11', 80)) + ) + except LibcloudError, e: + self.assertTrue(str(e).find('tried to add a member with an IP address not assigned to your account') != -1) + 
else: + self.fail('Exception was not thrown') + + def test_destroy_balancer(self): + balancer = self.driver.list_balancers()[0] + + ret1 = self.driver.destroy_balancer(balancer) + ret2 = balancer.destroy() + + self.assertTrue(ret1) + self.assertTrue(ret2) + + def test_get_balancer(self): + balancer = self.driver.get_balancer(balancer_id='23530') + + self.assertEquals(balancer.name, 'test2') + self.assertEquals(balancer.id, '23530') + + def test_balancer_list_members(self): + balancer = self.driver.get_balancer(balancer_id='23530') + members1 = self.driver.balancer_list_members(balancer=balancer) + members2 = balancer.list_members() + + expected_members = set([u'10.0.0.78:80', u'10.0.0.77:80', + u'10.0.0.76:80']) + + self.assertEquals(len(members1), 3) + self.assertEquals(len(members2), 3) + self.assertEquals(expected_members, + set(["%s:%s" % (member.ip, member.port) for member in members1])) + + def test_balancer_attach_compute_node(self): + balancer = LoadBalancer(23530, None, None, None, None, self.driver) + node = Node(id='1', name='test', state=None, public_ip=['10.0.0.75'], + private_ip=[], driver=DummyNodeDriver) + member1 = self.driver.balancer_attach_compute_node(balancer, node) + member2 = balancer.attach_compute_node(node) + + self.assertEquals(member1.ip, '10.0.0.75') + self.assertEquals(member1.port, 80) + self.assertEquals(member2.ip, '10.0.0.75') + self.assertEquals(member2.port, 80) + + def test_balancer_attach_member(self): + balancer = LoadBalancer(23530, None, None, None, None, self.driver) + member = Member(None, ip='10.0.0.75', port='80') + member1 = self.driver.balancer_attach_member(balancer, member=member) + member2 = balancer.attach_member(member=member) + + self.assertEquals(member1.ip, '10.0.0.75') + self.assertEquals(member1.port, 80) + self.assertEquals(member2.ip, '10.0.0.75') + self.assertEquals(member2.port, 80) + + def test_balancer_detach_member(self): + balancer = LoadBalancer(23530, None, None, None, None, self.driver) + member = 
self.driver.balancer_list_members(balancer)[0] + + ret1 = self.driver.balancer_detach_member(balancer, member) + ret2 = balancer.detach_member(member) + + self.assertTrue(ret1) + self.assertTrue(ret2) + +class GoGridLBMockHttp(MockHttpTestCase): + fixtures = LoadBalancerFileFixtures('gogrid') + + def _api_grid_loadbalancer_list(self, method, url, body, headers): + body = self.fixtures.load('loadbalancer_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_ip_list(self, method, url, body, headers): + body = self.fixtures.load('ip_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_loadbalancer_add(self, method, url, body, headers): + query = urlparse(url).query + self.assertTrue(query.find('loadbalancer.type=round+robin') != -1) + + body = self.fixtures.load('loadbalancer_add.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_ip_list_UNEXPECTED_ERROR(self, method, url, body, headers): + return self._api_grid_ip_list(method, url, body, headers) + + def _api_grid_loadbalancer_add_UNEXPECTED_ERROR(self, method, url, body, headers): + body = self.fixtures.load('unexpected_error.json') + return (httplib.INTERNAL_SERVER_ERROR, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_loadbalancer_delete(self, method, url, body, headers): + body = self.fixtures.load('loadbalancer_add.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_loadbalancer_get(self, method, url, body, headers): + body = self.fixtures.load('loadbalancer_get.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _api_grid_loadbalancer_edit(self, method, url, body, headers): + body = self.fixtures.load('loadbalancer_edit.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff --git a/trunk/test/loadbalancer/test_rackspace.py 
b/trunk/test/loadbalancer/test_rackspace.py new file mode 100644 index 0000000000..9e61d580a1 --- /dev/null +++ b/trunk/test/loadbalancer/test_rackspace.py @@ -0,0 +1,176 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import httplib +import sys +import unittest + +try: + import simplejson as json +except ImportError: + import json + +from libcloud.loadbalancer.base import Member, Algorithm +from libcloud.loadbalancer.drivers.rackspace import RackspaceLBDriver +from libcloud.loadbalancer.drivers.rackspace import RackspaceUKLBDriver + +from test import MockHttpTestCase +from test.file_fixtures import LoadBalancerFileFixtures, OpenStackFixtures + +class RackspaceLBTests(unittest.TestCase): + + def setUp(self): + RackspaceLBDriver.connectionCls.conn_classes = (None, + RackspaceLBMockHttp) + RackspaceLBMockHttp.type = None + self.driver = RackspaceLBDriver('user', 'key') + + def test_list_protocols(self): + protocols = self.driver.list_protocols() + + self.assertEqual(len(protocols), 10) + self.assertTrue('http' in protocols) + + def test_list_balancers(self): + balancers = self.driver.list_balancers() + + self.assertEquals(len(balancers), 2) + self.assertEquals(balancers[0].name, "test0") + self.assertEquals(balancers[0].id, "8155") + 
self.assertEquals(balancers[1].name, "test1") + self.assertEquals(balancers[1].id, "8156") + + def test_create_balancer(self): + balancer = self.driver.create_balancer(name='test2', + port=80, + algorithm=Algorithm.ROUND_ROBIN, + members=(Member(None, '10.1.0.10', 80), + Member(None, '10.1.0.11', 80)) + ) + + self.assertEquals(balancer.name, 'test2') + self.assertEquals(balancer.id, '8290') + + def test_destroy_balancer(self): + balancer = self.driver.list_balancers()[0] + + ret = self.driver.destroy_balancer(balancer) + self.assertTrue(ret) + + def test_get_balancer(self): + balancer = self.driver.get_balancer(balancer_id='8290') + + self.assertEquals(balancer.name, 'test2') + self.assertEquals(balancer.id, '8290') + + def test_balancer_list_members(self): + balancer = self.driver.get_balancer(balancer_id='8290') + members = balancer.list_members() + + self.assertEquals(len(members), 2) + self.assertEquals(set(['10.1.0.10:80', '10.1.0.11:80']), + set(["%s:%s" % (member.ip, member.port) for member in members])) + + def test_balancer_attach_member(self): + balancer = self.driver.get_balancer(balancer_id='8290') + member = balancer.attach_member(Member(None, ip='10.1.0.12', port='80')) + + self.assertEquals(member.ip, '10.1.0.12') + self.assertEquals(member.port, 80) + + def test_balancer_detach_member(self): + balancer = self.driver.get_balancer(balancer_id='8290') + member = balancer.list_members()[0] + + ret = balancer.detach_member(member) + + self.assertTrue(ret) + + +class RackspaceUKLBTests(RackspaceLBTests): + + def setUp(self): + RackspaceLBDriver.connectionCls.conn_classes = (None, + RackspaceLBMockHttp) + RackspaceLBMockHttp.type = None + self.driver = RackspaceUKLBDriver('user', 'key') + + +class RackspaceLBMockHttp(MockHttpTestCase): + fixtures = LoadBalancerFileFixtures('rackspace') + auth_fixtures = OpenStackFixtures() + + def _v1_0(self, method, url, body, headers): + headers = {'x-server-management-url': 
'https://servers.api.rackspacecloud.com/v1.0/slug', + 'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-storage-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06', + 'x-storage-url': 'https://storage4.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'} + return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT]) + + def _v1_0_slug_loadbalancers_protocols(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_loadbalancers_protocols.json') + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + def _v1_0_slug_loadbalancers(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('v1_slug_loadbalancers.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == "POST": + body_json = json.loads(body) + self.assertEqual(body_json['loadBalancer']['protocol'], 'HTTP') + self.assertEqual(body_json['loadBalancer']['algorithm'], 'ROUND_ROBIN') + + body = self.fixtures.load('v1_slug_loadbalancers_post.json') + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_slug_loadbalancers_8155(self, method, url, body, headers): + if method == "DELETE": + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_slug_loadbalancers_8290(self, method, url, body, headers): + body = self.fixtures.load('v1_slug_loadbalancers_8290.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _v1_0_slug_loadbalancers_8290_nodes(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('v1_slug_loadbalancers_8290_nodes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + elif method == "POST": + body = 
self.fixtures.load('v1_slug_loadbalancers_8290_nodes_post.json') + return (httplib.ACCEPTED, body, {}, + httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_0_slug_loadbalancers_8290_nodes_30944(self, method, url, body, headers): + if method == "DELETE": + return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED]) + + raise NotImplementedError + + def _v1_1__auth(self, method, url, body, headers): + headers = { 'content-type': 'application/json; charset=UTF-8'} + body = self.auth_fixtures.load('_v1_1__auth.json') + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + +if __name__ == "__main__": + sys.exit(unittest.main()) diff --git a/trunk/test/pricing_test.json b/trunk/test/pricing_test.json new file mode 100644 index 0000000000..92778743a4 --- /dev/null +++ b/trunk/test/pricing_test.json @@ -0,0 +1,10 @@ +{ + "compute": { + "foo": { + "1": 1.00, + "2": 2.00 + } + }, + + "updated": 1309019791 +} diff --git a/trunk/test/secrets.py-dist b/trunk/test/secrets.py-dist new file mode 100644 index 0000000000..f70f2e8d40 --- /dev/null +++ b/trunk/test/secrets.py-dist @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Make a copy of this file named 'secrets.py' and add your credentials there. +# Note you can run unit tests without setting your credentials. + +BLUEBOX_PARAMS = ('customer_id', 'api_key') +BRIGHTBOX_PARAMS = ('client_id', 'client_secret') +DREAMHOST_PARAMS = ('key',) +EC2_PARAMS = ('access_id', 'secret') +ECP_PARAMS = ('user_name', 'password') +GANDI_PARAMS = ('user',) +HOSTINGCOM_PARAMS = ('user', 'secret') +IBM_PARAMS = ('user', 'secret') +# OPENSTACK_PARAMS = ('user_name', 'api_key', secure_bool, 'host', port_int) +OPENSTACK_PARAMS = ('user_name', 'api_key', False, 'host', 8774) +OPENNEBULA_PARAMS = ('user', 'key') +OPSOURCE_PARAMS = ('user', 'password') +RACKSPACE_PARAMS = ('user', 'key') +SLICEHOST_PARAMS = ('key',) +SOFTLAYER_PARAMS = ('user', 'api_key') +VCLOUD_PARAMS = ('user', 'secret') +VOXEL_PARAMS = ('key', 'secret') +VPSNET_PARAMS = ('user', 'key') + +# Storage +STORAGE_S3_PARAMS = ('key', 'secret') +STORAGE_GOOGLE_STORAGE_PARAMS = ('key', 'secret') + +# DNS +DNS_PARAMS_LINODE = ('user', 'key') +DNS_PARAMS_ZERIGO = ('email', 'api token') +DNS_PARAMS_RACKSPACE = ('user', 'key') diff --git a/trunk/test/storage/__init__.py b/trunk/test/storage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/trunk/test/storage/fixtures/atmos/already_exists.xml b/trunk/test/storage/fixtures/atmos/already_exists.xml new file mode 100644 index 0000000000..2b9d94cb64 --- /dev/null +++ b/trunk/test/storage/fixtures/atmos/already_exists.xml @@ -0,0 +1,5 @@ + + + 1016 + The resource you are trying to create already exists. 
+ diff --git a/trunk/test/storage/fixtures/atmos/empty_directory_listing.xml b/trunk/test/storage/fixtures/atmos/empty_directory_listing.xml new file mode 100644 index 0000000000..8040444d50 --- /dev/null +++ b/trunk/test/storage/fixtures/atmos/empty_directory_listing.xml @@ -0,0 +1,4 @@ + + + + diff --git a/trunk/test/storage/fixtures/atmos/list_containers.xml b/trunk/test/storage/fixtures/atmos/list_containers.xml new file mode 100644 index 0000000000..71befd84a8 --- /dev/null +++ b/trunk/test/storage/fixtures/atmos/list_containers.xml @@ -0,0 +1,45 @@ + + + + + b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9 + directory + container1 + + + 860855a4a445b5e45eeef4024371fd5c73ee3ada + directory + container2 + + + 651eae32634bf84529c74eabd555fda48c7cead6 + regular + not-a-container1 + + + 089293be672782a255498b0b05c4877acf23ef9e + directory + container3 + + + bd804e9f356b51844f93273ec8c94b2e274711d0 + directory + container4 + + + b40b0f3a17fad1d8c8b2085f668f8107bb400fa5 + regular + not-a-container-2 + + + 10bd74388b55a3c8c329ff5dd6d21bd55bfb7370 + directory + container5 + + + c04fa4aa3d0adcdf104baa0cef7b6279680a23c3 + directory + container6 + + + diff --git a/trunk/test/storage/fixtures/atmos/not_empty.xml b/trunk/test/storage/fixtures/atmos/not_empty.xml new file mode 100644 index 0000000000..6c46d5962d --- /dev/null +++ b/trunk/test/storage/fixtures/atmos/not_empty.xml @@ -0,0 +1,5 @@ + + + 1023 + The directory you are trying to delete is not empty. + diff --git a/trunk/test/storage/fixtures/atmos/not_found.xml b/trunk/test/storage/fixtures/atmos/not_found.xml new file mode 100644 index 0000000000..3f157a2942 --- /dev/null +++ b/trunk/test/storage/fixtures/atmos/not_found.xml @@ -0,0 +1,5 @@ + + + 1003 + The requested object was not found. 
+ diff --git a/trunk/test/storage/fixtures/cloudfiles/list_container_objects.json b/trunk/test/storage/fixtures/cloudfiles/list_container_objects.json new file mode 100644 index 0000000000..4c47200bb4 --- /dev/null +++ b/trunk/test/storage/fixtures/cloudfiles/list_container_objects.json @@ -0,0 +1,14 @@ +[ + {"name":"foo test 1","hash":"16265549b5bda64ecdaa5156de4c97cc", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + {"name":"foo test 2","hash":"16265549b5bda64ecdaa5156de4c97bb", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + {"name":"foo tes 3","hash":"16265549b5bda64ecdaa5156de4c97ee", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:46.549890"}, + {"name":"foo test 3","hash":"16265549b5bda64ecdaa5156de4c97ff", + "bytes":1160520,"content_type":"application/text", + "last_modified":"2011-01-25T22:01:50.351810"} +] diff --git a/trunk/test/storage/fixtures/cloudfiles/list_container_objects_empty.json b/trunk/test/storage/fixtures/cloudfiles/list_container_objects_empty.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/trunk/test/storage/fixtures/cloudfiles/list_container_objects_empty.json @@ -0,0 +1 @@ +{} diff --git a/trunk/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json b/trunk/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json new file mode 100644 index 0000000000..f6382d3b5f --- /dev/null +++ b/trunk/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted1.json @@ -0,0 +1,11 @@ +[ + {"name":"foo-test-1","hash":"16265549b5bda64ecdaa5156de4c97cc", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + {"name":"foo-test-2","hash":"16265549b5bda64ecdaa5156de4c97bb", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + 
{"name":"foo-test-3","hash":"16265549b5bda64ecdaa5156de4c97ee", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:46.549890"} +] diff --git a/trunk/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json b/trunk/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json new file mode 100644 index 0000000000..6ad8210010 --- /dev/null +++ b/trunk/test/storage/fixtures/cloudfiles/list_container_objects_not_exhausted2.json @@ -0,0 +1,8 @@ +[ + {"name":"foo-test-4","hash":"16265549b5bda64ecdaa5156de4c97cc", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"}, + {"name":"foo-test-5","hash":"16265549b5bda64ecdaa5156de4c97bb", + "bytes":1160520,"content_type":"application/zip", + "last_modified":"2011-01-25T22:01:50.351810"} +] diff --git a/trunk/test/storage/fixtures/cloudfiles/list_containers.json b/trunk/test/storage/fixtures/cloudfiles/list_containers.json new file mode 100644 index 0000000000..ded31c02c3 --- /dev/null +++ b/trunk/test/storage/fixtures/cloudfiles/list_containers.json @@ -0,0 +1,5 @@ +[ + {"name":"container1","count":4,"bytes":3484450}, + {"name":"container2","count":120,"bytes":340084450}, + {"name":"container3","count":0,"bytes":0} +] diff --git a/trunk/test/storage/fixtures/cloudfiles/list_containers_empty.json b/trunk/test/storage/fixtures/cloudfiles/list_containers_empty.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/trunk/test/storage/fixtures/cloudfiles/list_containers_empty.json @@ -0,0 +1 @@ +{} diff --git a/trunk/test/storage/fixtures/cloudfiles/meta_data.json b/trunk/test/storage/fixtures/cloudfiles/meta_data.json new file mode 100644 index 0000000000..5049f58492 --- /dev/null +++ b/trunk/test/storage/fixtures/cloudfiles/meta_data.json @@ -0,0 +1 @@ +{"bytes_used": 1234567, "container_count": 10, "object_count": 400} diff --git 
a/trunk/test/storage/fixtures/google_storage/list_container_objects.xml b/trunk/test/storage/fixtures/google_storage/list_container_objects.xml new file mode 100644 index 0000000000..7636f5413c --- /dev/null +++ b/trunk/test/storage/fixtures/google_storage/list_container_objects.xml @@ -0,0 +1,18 @@ + + + test_container + + + 1000 + false + + 1.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff --git a/trunk/test/storage/fixtures/google_storage/list_container_objects_empty.xml b/trunk/test/storage/fixtures/google_storage/list_container_objects_empty.xml new file mode 100644 index 0000000000..1a933cb323 --- /dev/null +++ b/trunk/test/storage/fixtures/google_storage/list_container_objects_empty.xml @@ -0,0 +1,8 @@ + + + test_container + + + 1000 + false + diff --git a/trunk/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml b/trunk/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml new file mode 100644 index 0000000000..1904a7e355 --- /dev/null +++ b/trunk/test/storage/fixtures/google_storage/list_container_objects_not_exhausted1.xml @@ -0,0 +1,38 @@ + + + test_container + + + 1000 + true + + 1.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 2.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 3.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff --git a/trunk/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml b/trunk/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml new file mode 100644 index 0000000000..3e7d28769c --- /dev/null +++ 
b/trunk/test/storage/fixtures/google_storage/list_container_objects_not_exhausted2.xml @@ -0,0 +1,28 @@ + + + test_container + + + 3 + false + + 4.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 5.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff --git a/trunk/test/storage/fixtures/google_storage/list_containers.xml b/trunk/test/storage/fixtures/google_storage/list_containers.xml new file mode 100644 index 0000000000..9f23f24621 --- /dev/null +++ b/trunk/test/storage/fixtures/google_storage/list_containers.xml @@ -0,0 +1,16 @@ + + + af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 + foobar + + + + test1 + 2011-04-09T12:34:49.000Z + + + test2 + 2011-02-09T12:34:49.000Z + + + diff --git a/trunk/test/storage/fixtures/google_storage/list_containers_empty.xml b/trunk/test/storage/fixtures/google_storage/list_containers_empty.xml new file mode 100644 index 0000000000..2584babacc --- /dev/null +++ b/trunk/test/storage/fixtures/google_storage/list_containers_empty.xml @@ -0,0 +1,9 @@ + + + + af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 + foobar + + + + diff --git a/trunk/test/storage/fixtures/s3/list_container_objects.xml b/trunk/test/storage/fixtures/s3/list_container_objects.xml new file mode 100644 index 0000000000..6cfdcd30f8 --- /dev/null +++ b/trunk/test/storage/fixtures/s3/list_container_objects.xml @@ -0,0 +1,18 @@ + + + test_container + + + 1000 + false + + 1.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff --git a/trunk/test/storage/fixtures/s3/list_container_objects_empty.xml b/trunk/test/storage/fixtures/s3/list_container_objects_empty.xml new file mode 100644 index 0000000000..0702683a51 --- /dev/null +++ 
b/trunk/test/storage/fixtures/s3/list_container_objects_empty.xml @@ -0,0 +1,8 @@ + + + test_container + + + 1000 + false + diff --git a/trunk/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml b/trunk/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml new file mode 100644 index 0000000000..5a02ed0b5b --- /dev/null +++ b/trunk/test/storage/fixtures/s3/list_container_objects_not_exhausted1.xml @@ -0,0 +1,38 @@ + + + test_container + + + 1000 + true + + 1.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 2.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 3.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff --git a/trunk/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml b/trunk/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml new file mode 100644 index 0000000000..0bf5af4afc --- /dev/null +++ b/trunk/test/storage/fixtures/s3/list_container_objects_not_exhausted2.xml @@ -0,0 +1,28 @@ + + + test_container + + + 3 + false + + 4.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + + 5.zip + 2011-04-09T19:05:18.000Z + "4397da7a7649e8085de9916c240e8166" + 1234567 + + 65a011niqo39cdf8ec533ec3d1ccaafsa932 + + STANDARD + + diff --git a/trunk/test/storage/fixtures/s3/list_containers.xml b/trunk/test/storage/fixtures/s3/list_containers.xml new file mode 100644 index 0000000000..ec66b73c54 --- /dev/null +++ b/trunk/test/storage/fixtures/s3/list_containers.xml @@ -0,0 +1,16 @@ + + + af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 + foobar + + + + test1 + 2011-04-09T12:34:49.000Z + + + test2 + 2011-02-09T12:34:49.000Z + + + diff --git 
a/trunk/test/storage/fixtures/s3/list_containers_empty.xml b/trunk/test/storage/fixtures/s3/list_containers_empty.xml new file mode 100644 index 0000000000..463cb24d03 --- /dev/null +++ b/trunk/test/storage/fixtures/s3/list_containers_empty.xml @@ -0,0 +1,9 @@ + + + + af4rf45db0927637c66fb848dfc972b8b5126e1237bde6fe02862b11481fdfd9 + foobar + + + + diff --git a/trunk/test/storage/test_atmos.py b/trunk/test/storage/test_atmos.py new file mode 100644 index 0000000000..b6682e022e --- /dev/null +++ b/trunk/test/storage/test_atmos.py @@ -0,0 +1,570 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 +import httplib +import os.path +import sys +import unittest +import urlparse + +from xml.etree import ElementTree + +import libcloud.utils + +from libcloud.common.types import LibcloudError +from libcloud.storage.base import Container, Object +from libcloud.storage.types import ContainerAlreadyExistsError, \ + ContainerDoesNotExistError, \ + ContainerIsNotEmptyError, \ + ObjectDoesNotExistError +from libcloud.storage.drivers.atmos import AtmosConnection, AtmosDriver +from libcloud.storage.drivers.dummy import DummyIterator + +from test import StorageMockHttp, MockRawResponse +from test.file_fixtures import StorageFileFixtures + +class AtmosTests(unittest.TestCase): + def setUp(self): + AtmosDriver.connectionCls.conn_classes = (None, AtmosMockHttp) + AtmosDriver.connectionCls.rawResponseCls = AtmosMockRawResponse + AtmosDriver.path = '' + AtmosMockHttp.type = None + AtmosMockHttp.upload_created = False + AtmosMockRawResponse.type = None + self.driver = AtmosDriver('dummy', base64.b64encode('dummy')) + self._remove_test_file() + + def tearDown(self): + self._remove_test_file() + + def _remove_test_file(self): + file_path = os.path.abspath(__file__) + '.temp' + + try: + os.unlink(file_path) + except OSError: + pass + + def test_list_containers(self): + AtmosMockHttp.type = 'EMPTY' + containers = self.driver.list_containers() + self.assertEqual(len(containers), 0) + + AtmosMockHttp.type = None + containers = self.driver.list_containers() + self.assertEqual(len(containers), 6) + + def test_list_container_objects(self): + container = Container(name='test_container', extra={}, + driver=self.driver) + + AtmosMockHttp.type = 'EMPTY' + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 0) + + AtmosMockHttp.type = None + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 2) + + obj = [o for o in objects if o.name == 'not-a-container1'][0] + 
self.assertEqual(obj.meta_data['object_id'], + '651eae32634bf84529c74eabd555fda48c7cead6') + self.assertEqual(obj.container.name, 'test_container') + + def test_get_container(self): + container = self.driver.get_container(container_name='test_container') + self.assertEqual(container.name, 'test_container') + self.assertEqual(container.extra['object_id'], + 'b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9') + + def test_get_container_not_found(self): + try: + self.driver.get_container(container_name='not_found') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_success(self): + container = self.driver.create_container( + container_name='test_create_container') + self.assertTrue(isinstance(container, Container)) + self.assertEqual(container.name, 'test_create_container') + self.assertEqual(container.extra['object_id'], + '31a27b593629a3fe59f887fd973fd953e80062ce') + + def test_create_container_already_exists(self): + AtmosMockHttp.type = 'ALREADY_EXISTS' + + try: + self.driver.create_container( + container_name='test_create_container') + except ContainerAlreadyExistsError: + pass + else: + self.fail( + 'Container already exists but an exception was not thrown') + + def test_delete_container_success(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + result = self.driver.delete_container(container=container) + self.assertTrue(result) + + def test_delete_container_not_found(self): + AtmosMockHttp.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, driver=self) + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail( + 'Container does not exist but an exception was not thrown') + + def test_delete_container_not_empty(self): + AtmosMockHttp.type = 'NOT_EMPTY' + container = Container(name='foo_bar_container', extra={}, driver=self) + try: + 
self.driver.delete_container(container=container) + except ContainerIsNotEmptyError: + pass + else: + self.fail('Container is not empty but an exception was not thrown') + + def test_get_object_success(self): + obj = self.driver.get_object(container_name='test_container', + object_name='test_object') + self.assertEqual(obj.container.name, 'test_container') + self.assertEqual(obj.size, 555) + self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17') + self.assertEqual(obj.extra['object_id'], + '322dce3763aadc41acc55ef47867b8d74e45c31d6643') + self.assertEqual( + obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT') + self.assertEqual(obj.meta_data['foo-bar'], 'test 1') + self.assertEqual(obj.meta_data['bar-foo'], 'test 2') + + + def test_get_object_not_found(self): + try: + self.driver.get_object(container_name='test_container', + object_name='not_found') + except ObjectDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + status = self.driver.delete_object(obj=obj) + self.assertTrue(status) + + def test_delete_object_not_found(self): + AtmosMockHttp.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + try: + self.driver.delete_object(obj=obj) + except ObjectDoesNotExistError: + pass + else: + self.fail('Object does not exist but an exception was not thrown') + + def test_download_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + 
driver=self.driver) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertTrue(result) + + def test_download_object_success_not_found(self): + AtmosMockRawResponse.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, + meta_data=None, + driver=self.driver) + destination_path = os.path.abspath(__file__) + '.temp' + try: + self.driver.download_object( + obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + except ObjectDoesNotExistError: + pass + else: + self.fail('Object does not exist but an exception was not thrown') + + def test_download_object_as_stream(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=self.driver) + + stream = self.driver.download_object_as_stream(obj=obj, chunk_size=None) + self.assertTrue(hasattr(stream, '__iter__')) + + def test_upload_object_success(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + old_func = AtmosDriver._upload_file + AtmosDriver._upload_file = upload_file + path = os.path.abspath(__file__) + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftu' + extra = {'meta_data': { 'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=path, container=container, + extra=extra, object_name=object_name) + self.assertEqual(obj.name, 'ftu') + self.assertEqual(obj.size, 1000) + self.assertTrue('some-value' in obj.meta_data) + AtmosDriver._upload_file = old_func + + def test_upload_object_no_content_type(self): + 
def no_content_type(name): + return None, None + + old_func = libcloud.utils.guess_file_mime_type + libcloud.utils.guess_file_mime_type = no_content_type + file_path = os.path.abspath(__file__) + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftu' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name) + except AttributeError: + pass + else: + self.fail( + 'File content type not provided' + ' but an exception was not thrown') + finally: + libcloud.utils.guess_file_mime_type = old_func + + def test_upload_object_error(self): + def dummy_content_type(name): + return 'application/zip', None + + def send(instance): + raise Exception('') + + old_func1 = libcloud.utils.guess_file_mime_type + libcloud.utils.guess_file_mime_type = dummy_content_type + old_func2 = AtmosMockHttp.send + AtmosMockHttp.send = send + + file_path = os.path.abspath(__file__) + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftu' + try: + self.driver.upload_object( + file_path=file_path, + container=container, + object_name=object_name) + except LibcloudError: + pass + else: + self.fail('Timeout while uploading but an exception was not thrown') + finally: + libcloud.utils.guess_file_mime_type = old_func1 + AtmosMockHttp.send = old_func2 + + def test_upload_object_nonexistent_file(self): + def dummy_content_type(name): + return 'application/zip', None + + old_func = libcloud.utils.guess_file_mime_type + libcloud.utils.guess_file_mime_type = dummy_content_type + + file_path = os.path.abspath(__file__ + '.inexistent') + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftu' + try: + self.driver.upload_object( + file_path=file_path, + container=container, + object_name=object_name) + except OSError: + pass + else: + self.fail('Inesitent but an exception was not thrown') + finally: + libcloud.utils.guess_file_mime_type = old_func + + def test_upload_object_via_stream(self): + def 
dummy_content_type(name): + return 'application/zip', None + + old_func = libcloud.utils.guess_file_mime_type + libcloud.utils.guess_file_mime_type = dummy_content_type + + container = Container(name='fbc', extra={}, driver=self) + object_name = 'ftsd' + iterator = DummyIterator(data=['2', '3', '5']) + try: + self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator) + finally: + libcloud.utils.guess_file_mime_type = old_func + + def test_signature_algorithm(self): + test_uid = 'fredsmagicuid' + test_key = base64.b64encode('ssssshhhhhmysecretkey') + test_date = 'Mon, 04 Jul 2011 07:39:19 GMT' + test_values = [ + ('GET', '/rest/namespace/foo', '', {}, + 'WfSASIA25TuqO2n0aO9k/dtg6S0='), + ('POST', '/rest/namespace/foo', '', {}, + 'oYKdsF+1DOuUT7iX5CJCDym2EQk='), + ('PUT', '/rest/namespace/foo', '', {}, + 'JleF9dpSWhaT3B2swZI3s41qqs4='), + ('DELETE', '/rest/namespace/foo', '', {}, + '2IX+Bd5XZF5YY+g4P59qXV1uLpo='), + ('GET', '/rest/namespace/foo?metata/system', '', {}, + 'zuHDEAgKM1winGnWn3WBsqnz4ks='), + ('POST', '/rest/namespace/foo?metadata/user', '', { + 'x-emc-meta': 'fakemeta=fake, othermeta=faketoo' + }, '7sLx1nxPIRAtocfv02jz9h1BjbU='), + ] + + class FakeDriver(object): + path = '' + + for method, action, api_path, headers, expected in test_values: + c = AtmosConnection(test_uid, test_key) + c.method = method + c.action = action + d = FakeDriver() + d.path = api_path + c.driver = d + headers = c.add_default_headers(headers) + headers['Date'] = headers['x-emc-date'] = test_date + self.assertEqual(c._calculate_signature({}, headers), expected) + +class AtmosMockHttp(StorageMockHttp, unittest.TestCase): + fixtures = StorageFileFixtures('atmos') + upload_created = False + + def __init__(self, *args, **kwargs): + unittest.TestCase.__init__(self) + + if kwargs.get('host', None) and kwargs.get('port', None): + StorageMockHttp.__init__(self, *args, **kwargs) + + def runTest(self): + pass + + def request(self, method, url, 
body=None, headers=None, raw=False): + headers = headers or {} + parsed = urlparse.urlparse(url) + if parsed.query.startswith('metadata/'): + parsed = list(parsed) + parsed[2] = parsed[2] + '/' + parsed[4] + parsed[4] = '' + url = urlparse.urlunparse(parsed) + return super(AtmosMockHttp, self).request(method, url, body, headers, + raw) + + def _rest_namespace_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('empty_directory_listing.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace(self, method, url, body, headers): + body = self.fixtures.load('list_containers.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('empty_directory_listing.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container(self, method, url, body, headers): + body = self.fixtures.load('list_containers.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container__metadata_system(self, method, url, body, + headers): + headers = { + 'x-emc-meta': 'objectid=b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9' + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_not_found__metadata_system(self, method, url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _rest_namespace_test_create_container(self, method, url, body, headers): + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_test_create_container__metadata_system(self, method, + url, body, + headers): + headers = { + 'x-emc-meta': 'objectid=31a27b593629a3fe59f887fd973fd953e80062ce' + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def 
_rest_namespace_test_create_container_ALREADY_EXISTS(self, method, url, + body, headers): + body = self.fixtures.load('already_exists.xml') + return (httplib.BAD_REQUEST, body, {}, + httplib.responses[httplib.BAD_REQUEST]) + + def _rest_namespace_foo_bar_container(self, method, url, body, headers): + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_foo_bar_container_NOT_FOUND(self, method, url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _rest_namespace_foo_bar_container_NOT_EMPTY(self, method, url, body, + headers): + body = self.fixtures.load('not_empty.xml') + return (httplib.BAD_REQUEST, body, {}, + httplib.responses[httplib.BAD_REQUEST]) + + def _rest_namespace_test_container_test_object_metadata_system(self, method, + url, body, + headers): + meta = { + 'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643', + 'size': '555', + 'mtime': '2011-01-25T22:01:49Z' + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in meta.items()]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container_test_object_metadata_user(self, method, + url, body, + headers): + meta = { + 'md5': '6b21c4a111ac178feacf9ec9d0c71f17', + 'foo-bar': 'test 1', + 'bar-foo': 'test 2', + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in meta.items()]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_test_container_not_found_metadata_system(self, method, + url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _rest_namespace_foo_bar_container_foo_bar_object(self, method, url, + body, headers): + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_foo_bar_container_foo_bar_object_NOT_FOUND(self, method, 
+ url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _rest_namespace_fbc_ftu_metadata_system(self, method, url, body, + headers): + if not self.upload_created: + self.__class__.upload_created = True + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + self.__class__.upload_created = False + meta = { + 'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643', + 'size': '555', + 'mtime': '2011-01-25T22:01:49Z' + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in meta.items()]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftu_metadata_user(self, method, url, body, headers): + self.assertTrue('x-emc-meta' in headers) + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftsd(self, method, url, body, headers): + self.assertTrue('Range' in headers) + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftsd_metadata_user(self, method, url, body, + headers): + self.assertTrue('x-emc-meta' in headers) + return (httplib.OK, '', {}, httplib.responses[httplib.OK]) + + def _rest_namespace_fbc_ftsd_metadata_system(self, method, url, body, + headers): + meta = { + 'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643', + 'size': '555', + 'mtime': '2011-01-25T22:01:49Z' + } + headers = { + 'x-emc-meta': ', '.join([k + '=' + v for k, v in meta.items()]) + } + return (httplib.OK, '', headers, httplib.responses[httplib.OK]) + +class AtmosMockRawResponse(MockRawResponse): + fixtures = StorageFileFixtures('atmos') + + def _rest_namespace_foo_bar_container_foo_bar_object(self, method, url, + body, headers): + body = 'test' + self._data = self._generate_random_data(1000) + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def 
_rest_namespace_foo_bar_container_foo_bar_object_NOT_FOUND(self, method, + url, body, + headers): + body = self.fixtures.load('not_found.xml') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _rest_namespace_fbc_ftu(self, method, url, body, headers): + return (httplib.CREATED, '', {}, httplib.responses[httplib.CREATED]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/storage/test_base.py b/trunk/test/storage/test_base.py new file mode 100644 index 0000000000..3b95af34e9 --- /dev/null +++ b/trunk/test/storage/test_base.py @@ -0,0 +1,141 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest +import hashlib + +from StringIO import StringIO +from mock import Mock + +from libcloud.storage.base import StorageDriver + +from test import StorageMockHttp # pylint: disable-msg=E0611 + + +class BaseStorageTests(unittest.TestCase): + def setUp(self): + self.send_called = 0 + StorageDriver.connectionCls.conn_classes = (None, StorageMockHttp) + + self.driver1 = StorageDriver('username', 'key', host='localhost') + self.driver1.supports_chunked_encoding = True + self.driver2 = StorageDriver('username', 'key', host='localhost') + self.driver2.supports_chunked_encoding = False + + def test__upload_object_iterator_must_have_next_method(self): + class Iterator(object): + def next(self): + pass + + class Iterator2(file): + def __init__(self): + pass + + class SomeClass(object): + pass + + valid_iterators = [Iterator(), Iterator2(), StringIO('bar')] + invalid_iterators = ['foobar', '', False, True, 1, object()] + + def upload_func(*args, **kwargs): + return True, 'barfoo', 100 + + kwargs = {'object_name': 'foo', 'content_type': 'foo/bar', + 'upload_func': upload_func, 'upload_func_kwargs': {}, + 'request_path': '/', 'headers': {}} + + for value in valid_iterators: + kwargs['iterator'] = value + self.driver1._upload_object(**kwargs) + + for value in invalid_iterators: + kwargs['iterator'] = value + + try: + self.driver1._upload_object(**kwargs) + except AttributeError: + pass + else: + self.fail('Exception was not thrown') + + def test_upload_zero_bytes_long_object_via_stream(self): + iterator = Mock() + iterator.next.side_effect = StopIteration() + + def mock_send(data): + self.send_called += 1 + + response = Mock() + response.connection.connection.send = mock_send + + # Normal + success, data_hash, bytes_transferred = \ + self.driver1._stream_data(response=response, + iterator=iterator, + chunked=False, calculate_hash=True) + + self.assertTrue(success) + self.assertEqual(data_hash, hashlib.md5('').hexdigest()) + 
self.assertEqual(bytes_transferred, 0) + self.assertEqual(self.send_called, 1) + + # Chunked + success, data_hash, bytes_transferred = \ + self.driver1._stream_data(response=response, + iterator=iterator, + chunked=True, calculate_hash=True) + + self.assertTrue(success) + self.assertEqual(data_hash, hashlib.md5('').hexdigest()) + self.assertEqual(bytes_transferred, 0) + self.assertEqual(self.send_called, 5) + + def test__upload_data(self): + def mock_send(data): + self.send_called += 1 + + response = Mock() + response.connection.connection.send = mock_send + + data = '123456789901234567' + success, data_hash, bytes_transferred = \ + self.driver1._upload_data(response=response, data=data, + calculate_hash=True) + + self.assertTrue(success) + self.assertEqual(data_hash, hashlib.md5(data).hexdigest()) + self.assertEqual(bytes_transferred, (len(data))) + self.assertEqual(self.send_called, 1) + + def test__get_hash_function(self): + self.driver1.hash_type = 'md5' + func = self.driver1._get_hash_function() + self.assertTrue(func) + + self.driver1.hash_type = 'sha1' + func = self.driver1._get_hash_function() + self.assertTrue(func) + + try: + self.driver1.hash_type = 'invalid-hash-function' + func = self.driver1._get_hash_function() + except RuntimeError: + pass + else: + self.fail('Invalid hash type but exception was not thrown') + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/storage/test_cloudfiles.py b/trunk/test/storage/test_cloudfiles.py new file mode 100644 index 0000000000..8afce7d378 --- /dev/null +++ b/trunk/test/storage/test_cloudfiles.py @@ -0,0 +1,700 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import os.path # pylint: disable-msg=W0404 +import sys +import copy +import unittest +import httplib + +import libcloud.utils + +from libcloud.common.types import LibcloudError, MalformedResponseError +from libcloud.storage.base import Container, Object +from libcloud.storage.types import ContainerAlreadyExistsError +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.storage.drivers.cloudfiles import CloudFilesStorageDriver +from libcloud.storage.drivers.dummy import DummyIterator + +from test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 +from test.file_fixtures import StorageFileFixtures, OpenStackFixtures # pylint: disable-msg=E0611 + +current_hash = None + +class CloudFilesTests(unittest.TestCase): + + def setUp(self): + CloudFilesStorageDriver.connectionCls.conn_classes = ( + None, CloudFilesMockHttp) + CloudFilesStorageDriver.connectionCls.rawResponseCls = \ + CloudFilesMockRawResponse + CloudFilesMockHttp.type = None + CloudFilesMockRawResponse.type = None + self.driver = CloudFilesStorageDriver('dummy', 'dummy') + self._remove_test_file() + + def tearDown(self): + self._remove_test_file() + + def test_invalid_json_throws_exception(self): + CloudFilesMockHttp.type = 'MALFORMED_JSON' + try: + self.driver.list_containers() + except MalformedResponseError: + 
pass + else: + self.fail('Exception was not thrown') + + def test_list_containers(self): + CloudFilesMockHttp.type = 'EMPTY' + containers = self.driver.list_containers() + self.assertEqual(len(containers), 0) + + CloudFilesMockHttp.type = None + containers = self.driver.list_containers() + self.assertEqual(len(containers), 3) + + container = [c for c in containers if c.name == 'container2'][0] + self.assertEqual(container.extra['object_count'], 120) + self.assertEqual(container.extra['size'], 340084450) + + def test_list_container_objects(self): + CloudFilesMockHttp.type = 'EMPTY' + container = Container( + name='test_container', extra={}, driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 0) + + CloudFilesMockHttp.type = None + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 4) + + obj = [o for o in objects if o.name == 'foo test 1'][0] + self.assertEqual(obj.hash, '16265549b5bda64ecdaa5156de4c97cc') + self.assertEqual(obj.size, 1160520) + self.assertEqual(obj.container.name, 'test_container') + + def test_list_container_objects_iterator(self): + CloudFilesMockHttp.type = 'ITERATOR' + container = Container( + name='test_container', extra={}, driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 5) + + obj = [o for o in objects if o.name == 'foo-test-1'][0] + self.assertEqual(obj.hash, '16265549b5bda64ecdaa5156de4c97cc') + self.assertEqual(obj.size, 1160520) + self.assertEqual(obj.container.name, 'test_container') + + def test_get_container(self): + container = self.driver.get_container(container_name='test_container') + self.assertEqual(container.name, 'test_container') + self.assertEqual(container.extra['object_count'], 800) + self.assertEqual(container.extra['size'], 1234568) + + def test_get_container_not_found(self): + try: + 
self.driver.get_container(container_name='not_found') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_object_success(self): + obj = self.driver.get_object(container_name='test_container', + object_name='test_object') + self.assertEqual(obj.container.name, 'test_container') + self.assertEqual(obj.size, 555) + self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17') + self.assertEqual(obj.extra['content_type'], 'application/zip') + self.assertEqual( + obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT') + self.assertEqual(obj.meta_data['foo-bar'], 'test 1') + self.assertEqual(obj.meta_data['bar-foo'], 'test 2') + + def test_get_object_not_found(self): + try: + self.driver.get_object(container_name='test_container', + object_name='not_found') + except ObjectDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_success(self): + container = self.driver.create_container( + container_name='test_create_container') + self.assertTrue(isinstance(container, Container)) + self.assertEqual(container.name, 'test_create_container') + self.assertEqual(container.extra['object_count'], 0) + + def test_create_container_already_exists(self): + CloudFilesMockHttp.type = 'ALREADY_EXISTS' + + try: + self.driver.create_container( + container_name='test_create_container') + except ContainerAlreadyExistsError: + pass + else: + self.fail( + 'Container already exists but an exception was not thrown') + + def test_create_container_invalid_name_too_long(self): + name = ''.join([ 'x' for x in range(0, 257)]) + try: + self.driver.create_container(container_name=name) + except InvalidContainerNameError: + pass + else: + self.fail( + 'Invalid name was provided (name is too long)' + ', but exception was not thrown') + + def test_create_container_invalid_name_slashes_in_name(self): + try: + self.driver.create_container(container_name='test/slashes/') + except 
InvalidContainerNameError: + pass + else: + self.fail( + 'Invalid name was provided (name contains slashes)' + ', but exception was not thrown') + + def test_delete_container_success(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + result = self.driver.delete_container(container=container) + self.assertTrue(result) + + def test_delete_container_not_found(self): + CloudFilesMockHttp.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, driver=self) + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail( + 'Container does not exist but an exception was not thrown') + + def test_delete_container_not_empty(self): + CloudFilesMockHttp.type = 'NOT_EMPTY' + container = Container(name='foo_bar_container', extra={}, driver=self) + try: + self.driver.delete_container(container=container) + except ContainerIsNotEmptyError: + pass + else: + self.fail('Container is not empty but an exception was not thrown') + + def test_download_object_success(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertTrue(result) + + def test_download_object_invalid_file_size(self): + CloudFilesMockRawResponse.type = 'INVALID_SIZE' + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + 
overwrite_existing=False, + delete_on_failure=True) + self.assertFalse(result) + + def test_download_object_success_not_found(self): + CloudFilesMockRawResponse.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, driver=self) + + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, + meta_data=None, + driver=CloudFilesStorageDriver) + destination_path = os.path.abspath(__file__) + '.temp' + try: + self.driver.download_object( + obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + except ObjectDoesNotExistError: + pass + else: + self.fail('Object does not exist but an exception was not thrown') + + def test_download_object_as_stream(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + + stream = self.driver.download_object_as_stream(obj=obj, chunk_size=None) + self.assertTrue(hasattr(stream, '__iter__')) + + def test_upload_object_success(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + old_func = CloudFilesStorageDriver._upload_file + CloudFilesStorageDriver._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + extra = {'meta_data': { 'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, container=container, + extra=extra, object_name=object_name) + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, 1000) + self.assertTrue('some-value' in obj.meta_data) + CloudFilesStorageDriver._upload_file = old_func + + def test_upload_object_invalid_hash(self): + def upload_file(self, response, file_path, chunked=False, + 
calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + CloudFilesMockRawResponse.type = 'INVALID_HASH' + + old_func = CloudFilesStorageDriver._upload_file + CloudFilesStorageDriver._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name, + verify_hash=True) + except ObjectHashMismatchError: + pass + else: + self.fail( + 'Invalid hash was returned but an exception was not thrown') + finally: + CloudFilesStorageDriver._upload_file = old_func + + def test_upload_object_no_content_type(self): + def no_content_type(name): + return None, None + + old_func = libcloud.utils.guess_file_mime_type + libcloud.utils.guess_file_mime_type = no_content_type + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name) + except AttributeError: + pass + else: + self.fail( + 'File content type not provided' + ' but an exception was not thrown') + finally: + libcloud.utils.guess_file_mime_type = old_func + + def test_upload_object_error(self): + def dummy_content_type(name): + return 'application/zip', None + + def send(instance): + raise Exception('') + + old_func1 = libcloud.utils.guess_file_mime_type + libcloud.utils.guess_file_mime_type = dummy_content_type + old_func2 = CloudFilesMockHttp.send + CloudFilesMockHttp.send = send + + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + try: + self.driver.upload_object( + file_path=file_path, + container=container, + object_name=object_name) + except LibcloudError: + pass + else: + self.fail('Timeout while uploading 
but an exception was not thrown') + finally: + libcloud.utils.guess_file_mime_type = old_func1 + CloudFilesMockHttp.send = old_func2 + + def test_upload_object_inexistent_file(self): + def dummy_content_type(name): + return 'application/zip', None + + old_func = libcloud.utils.guess_file_mime_type + libcloud.utils.guess_file_mime_type = dummy_content_type + + file_path = os.path.abspath(__file__ + '.inexistent') + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_upload' + try: + self.driver.upload_object( + file_path=file_path, + container=container, + object_name=object_name) + except OSError: + pass + else: + self.fail('Inesitent but an exception was not thrown') + finally: + libcloud.utils.guess_file_mime_type = old_func + + def test_upload_object_via_stream(self): + def dummy_content_type(name): + return 'application/zip', None + + old_func = libcloud.utils.guess_file_mime_type + libcloud.utils.guess_file_mime_type = dummy_content_type + + container = Container(name='foo_bar_container', extra={}, driver=self) + object_name = 'foo_test_stream_data' + iterator = DummyIterator(data=['2', '3', '5']) + try: + self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator) + finally: + libcloud.utils.guess_file_mime_type = old_func + + def test_delete_object_success(self): + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + status = self.driver.delete_object(obj=obj) + self.assertTrue(status) + + def test_delete_object_not_found(self): + CloudFilesMockHttp.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, driver=self) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=CloudFilesStorageDriver) + try: + 
self.driver.delete_object(obj=obj) + except ObjectDoesNotExistError: + pass + else: + self.fail('Object does not exist but an exception was not thrown') + + def test_ex_get_meta_data(self): + meta_data = self.driver.ex_get_meta_data() + self.assertTrue(isinstance(meta_data, dict)) + self.assertTrue('object_count' in meta_data) + self.assertTrue('container_count' in meta_data) + self.assertTrue('bytes_used' in meta_data) + + def _remove_test_file(self): + file_path = os.path.abspath(__file__) + '.temp' + + try: + os.unlink(file_path) + except OSError: + pass + +class CloudFilesMockHttp(StorageMockHttp): + + fixtures = StorageFileFixtures('cloudfiles') + auth_fixtures = OpenStackFixtures() + base_headers = { 'content-type': 'application/json; charset=UTF-8'} + + # fake auth token response + def _v1_0(self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + headers.update({ 'x-server-management-url': + 'https://servers.api.rackspacecloud.com/v1.0/slug', + 'x-auth-token': 'FE011C19', + 'x-cdn-management-url': + 'https://cdn.clouddrive.com/v1/MossoCloudFS', + 'x-storage-token': 'FE011C19', + 'x-storage-url': + 'https://storage4.clouddrive.com/v1/MossoCloudFS'}) + return (httplib.NO_CONTENT, + "", + headers, + httplib.responses[httplib.NO_CONTENT]) + + def _v1_MossoCloudFS_MALFORMED_JSON(self, method, url, body, headers): + # test_invalid_json_throws_exception + body = 'broken: json /*"' + return (httplib.NO_CONTENT, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_EMPTY(self, method, url, body, headers): + return (httplib.NO_CONTENT, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS(self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + if method == 'GET': + # list_containers + body = self.fixtures.load('list_containers.json') + status_code = httplib.OK + elif method == 'HEAD': + # get_meta_data + body = self.fixtures.load('meta_data.json') + 
status_code = httplib.NO_CONTENT + headers.update({ 'x-account-container-count': 10, + 'x-account-object-count': 400, + 'x-account-bytes-used': 1234567 + }) + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_not_found(self, method, url, body, headers): + # test_get_object_not_found + if method == 'HEAD': + body = '' + else: + raise ValueError('Invalid method') + + return (httplib.NOT_FOUND, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_container_objects_empty.json') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container(self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + if method == 'GET': + # list_container_objects + if url.find('marker') == -1: + body = self.fixtures.load('list_container_objects.json') + status_code = httplib.OK + else: + body = '' + status_code = httplib.NO_CONTENT + elif method == 'HEAD': + # get_container + body = self.fixtures.load('list_container_objects_empty.json') + status_code = httplib.NO_CONTENT + headers.update({ 'x-container-object-count': 800, + 'x-container-bytes-used': 1234568 + }) + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container_ITERATOR(self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + # list_container_objects + if url.find('foo-test-3') != -1: + body = self.fixtures.load('list_container_objects_not_exhausted2.json') + status_code = httplib.OK + elif url.find('foo-test-5') != -1: + body = '' + status_code = httplib.NO_CONTENT + else: + # First request + body = self.fixtures.load('list_container_objects_not_exhausted1.json') + status_code = httplib.OK + + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container_not_found( 
+ self, method, url, body, headers): + # test_get_container_not_found + if method == 'HEAD': + body = '' + else: + raise ValueError('Invalid method') + + return (httplib.NOT_FOUND, body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_container_test_object( + self, method, url, body, headers): + headers = copy.deepcopy(self.base_headers) + if method == 'HEAD': + # get_object + body = self.fixtures.load('list_container_objects_empty.json') + status_code = httplib.NO_CONTENT + headers.update({ 'content-length': 555, + 'last-modified': 'Tue, 25 Jan 2011 22:01:49 GMT', + 'etag': '6b21c4a111ac178feacf9ec9d0c71f17', + 'x-object-meta-foo-bar': 'test 1', + 'x-object-meta-bar-foo': 'test 2', + 'content-type': 'application/zip'}) + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_create_container( + self, method, url, body, headers): + # test_create_container_success + headers = copy.deepcopy(self.base_headers) + body = self.fixtures.load('list_container_objects_empty.json') + headers = copy.deepcopy(self.base_headers) + headers.update({ 'content-length': 18, + 'date': 'Mon, 28 Feb 2011 07:52:57 GMT' + }) + status_code = httplib.CREATED + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_test_create_container_ALREADY_EXISTS( + self, method, url, body, headers): + # test_create_container_already_exists + headers = copy.deepcopy(self.base_headers) + body = self.fixtures.load('list_container_objects_empty.json') + headers.update({ 'content-type': 'text/plain' }) + status_code = httplib.ACCEPTED + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container(self, method, url, body, headers): + if method == 'DELETE': + # test_delete_container_success + body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.NO_CONTENT + return (status_code, body, 
headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_NOT_FOUND( + self, method, url, body, headers): + + if method == 'DELETE': + # test_delete_container_not_found + body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.NOT_FOUND + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_NOT_EMPTY( + self, method, url, body, headers): + + if method == 'DELETE': + # test_delete_container_not_empty + body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.CONFLICT + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_bar_object( + self, method, url, body, headers): + + if method == 'DELETE': + # test_delete_object_success + body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.NO_CONTENT + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_NOT_FOUND( + self, method, url, body, headers): + + if method == 'DELETE': + # test_delete_object_success + body = self.fixtures.load('list_container_objects_empty.json') + headers = self.base_headers + status_code = httplib.NOT_FOUND + + return (status_code, body, headers, httplib.responses[httplib.OK]) + + def _v1_1__auth(self, method, url, body, headers): + body = self.auth_fixtures.load('_v1_1__auth.json') + return (httplib.OK, body, {'content-type': 'application/json; charset=UTF-8'}, httplib.responses[httplib.OK]) + + +class CloudFilesMockRawResponse(MockRawResponse): + + fixtures = StorageFileFixtures('cloudfiles') + base_headers = { 'content-type': 'application/json; charset=UTF-8'} + + def _v1_MossoCloudFS_foo_bar_container_foo_test_upload( + self, method, url, body, headers): + # test_object_upload_success + + body = '' + 
headers = {} + headers.update(self.base_headers) + headers['etag'] = 'hash343hhash89h932439jsaa89' + return (httplib.CREATED, body, headers, httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_test_upload_INVALID_HASH( + self, method, url, body, headers): + # test_object_upload_invalid_hash + body = '' + headers = {} + headers.update(self.base_headers) + headers['etag'] = 'foobar' + return (httplib.CREATED, body, headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_bar_object( + self, method, url, body, headers): + + # test_download_object_success + body = 'test' + self._data = self._generate_random_data(1000) + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_INVALID_SIZE( + self, method, url, body, headers): + # test_download_object_invalid_file_size + body = 'test' + self._data = self._generate_random_data(100) + return (httplib.OK, body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_bar_object_NOT_FOUND( + self, method, url, body, headers): + body = '' + return (httplib.NOT_FOUND, body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _v1_MossoCloudFS_foo_bar_container_foo_test_stream_data( + self, method, url, body, headers): + + # test_upload_object_via_stream_success + headers = {} + headers.update(self.base_headers) + headers['etag'] = '577ef1154f3240ad5b9b413aa7346a1e' + body = 'test' + return (httplib.CREATED, + body, + headers, + httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/storage/test_google_storage.py b/trunk/test/storage/test_google_storage.py new file mode 100644 index 0000000000..ef3837be03 --- /dev/null +++ b/trunk/test/storage/test_google_storage.py @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license 
agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import httplib +import unittest + +from libcloud.storage.drivers.google_storage import GoogleStorageDriver +from test.storage.test_s3 import S3Tests, S3MockHttp + +from test.file_fixtures import StorageFileFixtures +from test.secrets import STORAGE_GOOGLE_STORAGE_PARAMS + + +class GoogleStorageMockHttp(S3MockHttp): + fixtures = StorageFileFixtures('google_storage') + + +class GoogleStorageTests(S3Tests): + driver_type = GoogleStorageDriver + driver_args = STORAGE_GOOGLE_STORAGE_PARAMS + mock_response_klass = GoogleStorageMockHttp + + def test_billing_not_enabled(self): + # TODO + pass + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/storage/test_s3.py b/trunk/test/storage/test_s3.py new file mode 100644 index 0000000000..1e7603502f --- /dev/null +++ b/trunk/test/storage/test_s3.py @@ -0,0 +1,652 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import httplib +import unittest + +from libcloud.common.types import InvalidCredsError +from libcloud.common.types import LibcloudError +from libcloud.storage.base import Container, Object +from libcloud.storage.types import ContainerDoesNotExistError +from libcloud.storage.types import ContainerIsNotEmptyError +from libcloud.storage.types import InvalidContainerNameError +from libcloud.storage.types import ObjectDoesNotExistError +from libcloud.storage.types import ObjectHashMismatchError +from libcloud.storage.drivers.s3 import S3StorageDriver, S3USWestStorageDriver +from libcloud.storage.drivers.s3 import S3EUWestStorageDriver +from libcloud.storage.drivers.s3 import S3APSEStorageDriver +from libcloud.storage.drivers.s3 import S3APNEStorageDriver +from libcloud.storage.drivers.dummy import DummyIterator + +from test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 +from test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611 +from test.secrets import STORAGE_S3_PARAMS + + +class S3MockHttp(StorageMockHttp): + + fixtures = StorageFileFixtures('s3') + base_headers = {} + + def _UNAUTHORIZED(self, method, url, body, headers): + return (httplib.UNAUTHORIZED, + '', + self.base_headers, + httplib.responses[httplib.OK]) + + def _DIFFERENT_REGION(self, method, url, body, headers): + return (httplib.MOVED_PERMANENTLY, + '', + self.base_headers, + httplib.responses[httplib.OK]) + + def _list_containers_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_containers_empty.xml') + 
return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _list_containers(self, method, url, body, headers): + body = self.fixtures.load('list_containers.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_container_objects_empty.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container(self, method, url, body, headers): + body = self.fixtures.load('list_container_objects.xml') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test_container_ITERATOR(self, method, url, body, headers): + if url.find('3.zip') == -1: + # First part of the response (first 3 objects) + file_name = 'list_container_objects_not_exhausted1.xml' + else: + file_name = 'list_container_objects_not_exhausted2.xml' + + body = self.fixtures.load(file_name) + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + + def _test2_test_list_containers(self, method, url, body, headers): + # test_get_object + body = self.fixtures.load('list_containers.xml') + headers = {'content-type': 'application/zip', + 'etag': '"e31208wqsdoj329jd"', + 'content-length': 12345, + } + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_INVALID_NAME(self, method, url, body, headers): + # test_create_container + return (httplib.BAD_REQUEST, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_ALREADY_EXISTS(self, method, url, body, headers): + # test_create_container + return (httplib.CONFLICT, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container(self, method, url, body, headers): + # test_create_container, test_delete_container + + if method == 'PUT': + status = httplib.OK + elif method == 'DELETE': + status = httplib.NO_CONTENT + + return 
(status, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_DOESNT_EXIST(self, method, url, body, headers): + # test_delete_container + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.OK]) + + def _new_container_NOT_EMPTY(self, method, url, body, headers): + # test_delete_container + return (httplib.CONFLICT, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container(self, method, url, body, headers): + # test_delete_container + return (httplib.NO_CONTENT, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_NOT_FOUND(self, method, url, body, headers): + # test_delete_container_not_found + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body, + headers): + # test_delete_object_not_found + return (httplib.NOT_FOUND, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_bar_object(self, method, url, body, headers): + # test_delete_object + return (httplib.NO_CONTENT, + body, + headers, + httplib.responses[httplib.OK]) + + +class S3MockRawResponse(MockRawResponse): + + fixtures = StorageFileFixtures('s3') + + def _foo_bar_container_foo_bar_object(self, method, url, body, headers): + # test_download_object_success + body = '' + self._data = self._generate_random_data(1000) + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_upload_INVALID_HASH1(self, method, url, + body, headers): + body = '' + headers = {} + headers['etag'] = '"foobar"' + # test_upload_object_invalid_hash1 + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_upload_INVALID_HASH2(self, method, url, + body, headers): + # test_upload_object_invalid_hash2 + body = '' + headers = {'etag': '"hash343hhash89h932439jsaa89"'} + return (httplib.OK, + body, + headers, + 
httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_upload(self, method, url, body, headers): + # test_upload_object_success + body = '' + headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url, + body, headers): + # test_upload_object_invalid_file_size + body = '' + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + def _foo_bar_container_foo_test_stream_data(self, method, url, body, + headers): + # test_upload_object_via_stream + body = '' + headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + + +class S3Tests(unittest.TestCase): + driver_type = S3StorageDriver + driver_args = STORAGE_S3_PARAMS + mock_response_klass = S3MockHttp + mock_raw_response_klass = S3MockRawResponse + + @classmethod + def create_driver(self): + return self.driver_type(*self.driver_args) + + def setUp(self): + self.driver_type.connectionCls.conn_classes = (None, + self.mock_response_klass) + self.driver_type.connectionCls.rawResponseCls = \ + self.mock_raw_response_klass + self.mock_response_klass.type = None + self.mock_raw_response_klass.type = None + self.driver = self.create_driver() + + def tearDown(self): + self._remove_test_file() + + def _remove_test_file(self): + file_path = os.path.abspath(__file__) + '.temp' + + try: + os.unlink(file_path) + except OSError: + pass + + def test_invalid_credentials(self): + self.mock_response_klass.type = 'UNAUTHORIZED' + try: + self.driver.list_containers() + except InvalidCredsError, e: + self.assertEqual(True, isinstance(e, InvalidCredsError)) + else: + self.fail('Exception was not thrown') + + def test_bucket_is_located_in_different_region(self): + self.mock_response_klass.type = 'DIFFERENT_REGION' + try: + self.driver.list_containers() + except LibcloudError: + pass + else: + 
self.fail('Exception was not thrown') + + def test_list_containers_empty(self): + self.mock_response_klass.type = 'list_containers_EMPTY' + containers = self.driver.list_containers() + self.assertEqual(len(containers), 0) + + def test_list_containers_success(self): + self.mock_response_klass.type = 'list_containers' + containers = self.driver.list_containers() + self.assertEqual(len(containers), 2) + + self.assertTrue('creation_date' in containers[1].extra) + + def test_list_container_objects_empty(self): + self.mock_response_klass.type = 'EMPTY' + container = Container(name='test_container', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 0) + + def test_list_container_objects_success(self): + self.mock_response_klass.type = None + container = Container(name='test_container', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 1) + + obj = [o for o in objects if o.name == '1.zip'][0] + self.assertEqual(obj.hash, '4397da7a7649e8085de9916c240e8166') + self.assertEqual(obj.size, 1234567) + self.assertEqual(obj.container.name, 'test_container') + self.assertTrue('owner' in obj.meta_data) + + def test_list_container_objects_iterator_has_more(self): + self.mock_response_klass.type = 'ITERATOR' + container = Container(name='test_container', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container) + + obj = [o for o in objects if o.name == '1.zip'][0] + self.assertEqual(obj.hash, '4397da7a7649e8085de9916c240e8166') + self.assertEqual(obj.size, 1234567) + self.assertEqual(obj.container.name, 'test_container') + + self.assertTrue(obj in objects) + self.assertEqual(len(objects), 5) + + def test_get_container_doesnt_exist(self): + self.mock_response_klass.type = 'list_containers' + try: + self.driver.get_container(container_name='container1') + except 
ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_container_success(self): + self.mock_response_klass.type = 'list_containers' + container = self.driver.get_container(container_name='test1') + self.assertTrue(container.name, 'test1') + + def test_get_object_container_doesnt_exist(self): + # This method makes two requests which makes mocking the response a bit + # trickier + self.mock_response_klass.type = 'list_containers' + try: + self.driver.get_object(container_name='test-inexistent', + object_name='test') + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_get_object_success(self): + # This method makes two requests which makes mocking the response a bit + # trickier + self.mock_response_klass.type = 'list_containers' + obj = self.driver.get_object(container_name='test2', + object_name='test') + + self.assertEqual(obj.name, 'test') + self.assertEqual(obj.container.name, 'test2') + self.assertEqual(obj.size, 12345) + self.assertEqual(obj.hash, 'e31208wqsdoj329jd') + + def test_create_container_invalid_name(self): + # invalid container name + self.mock_response_klass.type = 'INVALID_NAME' + try: + self.driver.create_container(container_name='new_container') + except InvalidContainerNameError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_already_exists(self): + # container with this name already exists + self.mock_response_klass.type = 'ALREADY_EXISTS' + try: + self.driver.create_container(container_name='new-container') + except InvalidContainerNameError: + pass + else: + self.fail('Exception was not thrown') + + def test_create_container_success(self): + # success + self.mock_response_klass.type = None + name = 'new_container' + container = self.driver.create_container(container_name=name) + self.assertEqual(container.name, name) + + def test_delete_container_doesnt_exist(self): + container = 
Container(name='new_container', extra=None, + driver=self.driver) + self.mock_response_klass.type = 'DOESNT_EXIST' + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_container_not_empty(self): + container = Container(name='new_container', extra=None, + driver=self.driver) + self.mock_response_klass.type = 'NOT_EMPTY' + try: + self.driver.delete_container(container=container) + except ContainerIsNotEmptyError: + pass + else: + self.fail('Exception was not thrown') + + # success + self.mock_response_klass.type = None + self.assertTrue(self.driver.delete_container(container=container)) + + def test_delete_container_not_found(self): + self.mock_response_klass.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + try: + self.driver.delete_container(container=container) + except ContainerDoesNotExistError: + pass + else: + self.fail('Container does not exist but an exception was not' + + ' thrown') + + def test_delete_container_success(self): + self.mock_response_klass.type = None + container = Container(name='new_container', extra=None, + driver=self.driver) + self.assertTrue(self.driver.delete_container(container=container)) + + def test_download_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=S3StorageDriver) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertTrue(result) + + def test_download_object_invalid_file_size(self): + self.mock_raw_response_klass.type = 'INVALID_SIZE' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj =
Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=S3StorageDriver) + destination_path = os.path.abspath(__file__) + '.temp' + result = self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + self.assertFalse(result) + + def test_download_object_invalid_file_already_exists(self): + self.mock_raw_response_klass.type = 'INVALID_SIZE' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=S3StorageDriver) + destination_path = os.path.abspath(__file__) + try: + self.driver.download_object(obj=obj, + destination_path=destination_path, + overwrite_existing=False, + delete_on_failure=True) + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + + def test_download_object_as_stream_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + + obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, + container=container, meta_data=None, + driver=S3StorageDriver) + + stream = self.driver.download_object_as_stream(obj=obj, + chunk_size=None) + self.assertTrue(hasattr(stream, '__iter__')) + + def test_upload_object_invalid_ex_storage_class(self): + # Invalid hash is detected on the amazon side and BAD_REQUEST is + # returned + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name, + verify_hash=True, + ex_storage_class='invalid-class') + except ValueError, e: + self.assertTrue(str(e).lower().find('invalid storage class') != -1) + else: + self.fail('Exception was not thrown') + + def test_upload_object_invalid_hash1(self): + # 
Invalid hash is detected on the amazon side and BAD_REQUEST is + # returned + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, 'hash343hhash89h932439jsaa89', 1000 + + self.mock_raw_response_klass.type = 'INVALID_HASH1' + + old_func = S3StorageDriver._upload_file + S3StorageDriver._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name, + verify_hash=True) + except ObjectHashMismatchError: + pass + else: + self.fail( + 'Invalid hash was returned but an exception was not thrown') + finally: + S3StorageDriver._upload_file = old_func + + def test_upload_object_invalid_hash2(self): + # Invalid hash is detected when comparing hash provided in the response + # ETag header + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, '0cc175b9c0f1b6a831c399e269772661', 1000 + + self.mock_raw_response_klass.type = 'INVALID_HASH2' + + old_func = S3StorageDriver._upload_file + S3StorageDriver._upload_file = upload_file + file_path = os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + try: + self.driver.upload_object(file_path=file_path, container=container, + object_name=object_name, + verify_hash=True) + except ObjectHashMismatchError: + pass + else: + self.fail( + 'Invalid hash was returned but an exception was not thrown') + finally: + S3StorageDriver._upload_file = old_func + + def test_upload_object_success(self): + def upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + return True, '0cc175b9c0f1b6a831c399e269772661', 1000 + + old_func = S3StorageDriver._upload_file + S3StorageDriver._upload_file = upload_file + file_path = 
os.path.abspath(__file__) + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_upload' + extra = {'meta_data': {'some-value': 'foobar'}} + obj = self.driver.upload_object(file_path=file_path, + container=container, + object_name=object_name, + extra=extra, + verify_hash=True) + self.assertEqual(obj.name, 'foo_test_upload') + self.assertEqual(obj.size, 1000) + self.assertTrue('some-value' in obj.meta_data) + S3StorageDriver._upload_file = old_func + + def test_upload_object_via_stream(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + object_name = 'foo_test_stream_data' + iterator = DummyIterator(data=['2', '3', '5']) + extra = {'content_type': 'text/plain'} + obj = self.driver.upload_object_via_stream(container=container, + object_name=object_name, + iterator=iterator, + extra=extra) + + self.assertEqual(obj.name, object_name) + self.assertEqual(obj.size, 3) + + def test_delete_object_not_found(self): + self.mock_response_klass.type = 'NOT_FOUND' + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, + meta_data=None, container=container, driver=self.driver) + try: + self.driver.delete_object(obj=obj) + except ObjectDoesNotExistError: + pass + else: + self.fail('Exception was not thrown') + + def test_delete_object_success(self): + container = Container(name='foo_bar_container', extra={}, + driver=self.driver) + obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, + meta_data=None, container=container, driver=self.driver) + + result = self.driver.delete_object(obj=obj) + self.assertTrue(result) + + +class S3USWestTests(S3Tests): + driver_type = S3USWestStorageDriver + + +class S3EUWestTests(S3Tests): + driver_type = S3EUWestStorageDriver + + +class S3APSETests(S3Tests): + driver_type = S3APSEStorageDriver + + +class S3APNETests(S3Tests): + driver_type = 
S3APNEStorageDriver + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/test_file_fixtures.py b/trunk/test/test_file_fixtures.py new file mode 100644 index 0000000000..c0c905a54e --- /dev/null +++ b/trunk/test/test_file_fixtures.py @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest + +from test.file_fixtures import ComputeFileFixtures + +class FileFixturesTests(unittest.TestCase): + + def test_success(self): + f = ComputeFileFixtures('meta') + self.assertEqual("Hello, World!", f.load('helloworld.txt')) + + def test_failure(self): + f = ComputeFileFixtures('meta') + self.assertRaises(IOError, f.load, 'nil') + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/test_httplib_ssl.py b/trunk/test/test_httplib_ssl.py new file mode 100644 index 0000000000..6c8d48e86b --- /dev/null +++ b/trunk/test/test_httplib_ssl.py @@ -0,0 +1,140 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest +import os.path + +import libcloud.security +from libcloud.httplib_ssl import LibcloudHTTPSConnection + +class TestHttpLibSSLTests(unittest.TestCase): + + def setUp(self): + libcloud.security.VERIFY_SSL_CERT = False + self.httplib_object = LibcloudHTTPSConnection('foo.bar') + + def test_verify_hostname(self): + cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),))} + + cert2 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),)), + 'subjectAltName': ((('DNS', 'foo.alt.name')), + (('DNS', 'foo.alt.name.1')))} + + self.assertFalse(self.httplib_object._verify_hostname( + hostname='invalid', cert=cert1)) + self.assertTrue(self.httplib_object._verify_hostname( + hostname='somemachine.python.org', cert=cert1)) + + self.assertFalse(self.httplib_object._verify_hostname( + hostname='invalid', cert=cert2)) + self.assertTrue(self.httplib_object._verify_hostname( + 
hostname='foo.alt.name.1', cert=cert2)) + + def test_get_subject_alt_names(self): + cert1 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),))} + + cert2 = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),)), + 'subjectAltName': ((('DNS', 'foo.alt.name')), + (('DNS', 'foo.alt.name.1')))} + + self.assertEqual(self.httplib_object._get_subject_alt_names(cert=cert1), + []) + + alt_names = self.httplib_object._get_subject_alt_names(cert=cert2) + self.assertEqual(len(alt_names), 2) + self.assertTrue('foo.alt.name' in alt_names) + self.assertTrue('foo.alt.name.1' in alt_names) + + def test_get_common_name(self): + cert = {'notAfter': 'Feb 16 16:54:50 2013 GMT', + 'subject': ((('countryName', 'US'),), + (('stateOrProvinceName', 'Delaware'),), + (('localityName', 'Wilmington'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'SSL'),), + (('commonName', 'somemachine.python.org'),))} + + self.assertEqual(self.httplib_object._get_common_name(cert)[0], + 'somemachine.python.org') + self.assertEqual(self.httplib_object._get_common_name({}), + None) + + def test_setup_verify(self): + # @TODO: catch warnings + # non-strict mode, should just emit a warning + libcloud.security.VERIFY_SSL_CERT = True + libcloud.security.VERIFY_SSL_CERT_STRICT = False + self.httplib_object._setup_verify() + + # strict mode, should throw a runtime error + libcloud.security.VERIFY_SSL_CERT = True + libcloud.security.VERIFY_SSL_CERT_STRICT = True + try: + 
self.httplib_object._setup_verify() + except: + pass + else: + self.fail('Exception not thrown') + + libcloud.security.VERIFY_SSL_CERT = False + libcloud.security.VERIFY_SSL_CERT_STRICT = False + self.httplib_object._setup_verify() + + def test_setup_ca_cert(self): + # @TODO: catch warnings + self.httplib_object.verify = False + self.httplib_object.strict = False + self.httplib_object._setup_ca_cert() + + self.assertEqual(self.httplib_object.ca_cert, None) + + self.httplib_object.verify = True + + libcloud.security.CA_CERTS_PATH = [os.path.abspath(__file__)] + self.httplib_object._setup_ca_cert() + self.assertTrue(self.httplib_object.ca_cert is not None) + + libcloud.security.CA_CERTS_PATH = [] + self.httplib_object._setup_ca_cert() + self.assertFalse(self.httplib_object.ca_cert) + self.assertFalse(self.httplib_object.verify) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/trunk/test/test_pricing.py b/trunk/test/test_pricing.py new file mode 100644 index 0000000000..71b60e44f6 --- /dev/null +++ b/trunk/test/test_pricing.py @@ -0,0 +1,106 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os.path
import sys
import unittest

import libcloud.pricing

PRICING_FILE_PATH = os.path.join(os.path.dirname(__file__),
                                 'pricing_test.json')


class PricingTestCase(unittest.TestCase):
    """Tests for the pricing data loading / caching helpers.

    The tests poke entries for a fake "foo" driver directly into the
    module-level libcloud.pricing.PRICING_DATA cache.  The original
    version never cleaned that cache up, so several assertions (for
    example the leading one in test_set_pricing) only held because of
    the alphabetical order unittest happens to run methods in.
    """

    def setUp(self):
        # Purge the fake driver entry so every test starts from a
        # known cache state, regardless of execution order.
        libcloud.pricing.PRICING_DATA['compute'].pop('foo', None)

    def tearDown(self):
        libcloud.pricing.PRICING_DATA['compute'].pop('foo', None)

    def test_get_pricing_success(self):
        self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute'])

        pricing = libcloud.pricing.get_pricing(
            driver_type='compute', driver_name='foo',
            pricing_file_path=PRICING_FILE_PATH)
        self.assertEqual(pricing['1'], 1.0)
        self.assertEqual(pricing['2'], 2.0)

        # A successful lookup populates the module-level cache.
        self.assertEqual(
            libcloud.pricing.PRICING_DATA['compute']['foo']['1'], 1.0)
        self.assertEqual(
            libcloud.pricing.PRICING_DATA['compute']['foo']['2'], 2.0)

    def test_get_pricing_invalid_file_path(self):
        try:
            libcloud.pricing.get_pricing(driver_type='compute',
                                         driver_name='bar',
                                         pricing_file_path='inexistent.json')
        except IOError:
            pass
        else:
            self.fail('Invalid pricing file path provided, but an exception'
                      ' was not thrown')

    def test_get_pricing_invalid_driver_type(self):
        try:
            libcloud.pricing.get_pricing(driver_type='invalid_type',
                                         driver_name='bar',
                                         pricing_file_path='inexistent.json')
        except AttributeError:
            pass
        else:
            self.fail('Invalid driver_type provided, but an exception was not'
                      ' thrown')

    def test_get_pricing_not_in_cache(self):
        try:
            libcloud.pricing.get_pricing(driver_type='compute',
                                         driver_name='inexistent',
                                         pricing_file_path=PRICING_FILE_PATH)
        except KeyError:
            pass
        else:
            self.fail('Invalid driver provided, but an exception was not'
                      ' thrown')

    def test_get_size_price(self):
        # Both integer and string size ids must resolve.
        libcloud.pricing.PRICING_DATA['compute']['foo'] = {2: 2, '3': 3}
        price1 = libcloud.pricing.get_size_price(driver_type='compute',
                                                 driver_name='foo',
                                                 size_id=2)
        price2 = libcloud.pricing.get_size_price(driver_type='compute',
                                                 driver_name='foo',
                                                 size_id='3')
        self.assertEqual(price1, 2)
        self.assertEqual(price2, 3)

    def test_invalid_pricing_cache(self):
        libcloud.pricing.PRICING_DATA['compute']['foo'] = {2: 2}
        self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute'])

        libcloud.pricing.invalidate_pricing_cache()
        self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute'])

    def test_invalid_module_pricing_cache(self):
        libcloud.pricing.PRICING_DATA['compute']['foo'] = {1: 1}
        self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute'])

        libcloud.pricing.invalidate_module_pricing_cache(
            driver_type='compute', driver_name='foo')
        self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute'])

        # Invalidating a module which is not cached must be a no-op.
        libcloud.pricing.invalidate_module_pricing_cache(
            driver_type='compute', driver_name='foo1')

    def test_set_pricing(self):
        self.assertFalse('foo' in libcloud.pricing.PRICING_DATA['compute'])

        libcloud.pricing.set_pricing(driver_type='compute',
                                     driver_name='foo',
                                     pricing={'foo': 1})
        self.assertTrue('foo' in libcloud.pricing.PRICING_DATA['compute'])


if __name__ == '__main__':
    sys.exit(unittest.main())
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest
import httplib

from mock import Mock

from libcloud.common.base import XmlResponse, JsonResponse
from libcloud.common.types import MalformedResponseError


class ResponseClassesTests(unittest.TestCase):
    """Tests for the generic XmlResponse / JsonResponse parsing classes."""

    def setUp(self):
        # Minimal httplib response stand-in: 200 OK, no headers; each
        # test supplies the payload through read().
        self._mock_response = Mock()
        self._mock_response.getheaders.return_value = []
        self._mock_response.status = httplib.OK
        self._mock_connection = Mock()

    def test_XmlResponse_class(self):
        # NOTE(review): the XML fixture literals were stripped of their
        # angle-bracket markup in transit; reconstructed here from the
        # assertions below (tag 'foo', text 'bar') - confirm against the
        # original file.
        self._mock_response.read.return_value = '<foo>bar</foo>'
        response = XmlResponse(response=self._mock_response,
                               connection=self._mock_connection)

        parsed = response.parse_body()
        self.assertEqual(parsed.tag, 'foo')
        self.assertEqual(parsed.text, 'bar')

    def test_XmlResponse_class_malformed_response(self):
        # An unterminated element must be rejected as malformed (an empty
        # body would NOT raise - see the zero-length test below).
        self._mock_response.read.return_value = '<foo>bar'

        try:
            XmlResponse(response=self._mock_response,
                        connection=self._mock_connection)
        except MalformedResponseError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_XmlResponse_class_zero_length_body_strip(self):
        # A whitespace-only body is treated as empty, not as bad XML.
        self._mock_response.read.return_value = ' '

        response = XmlResponse(response=self._mock_response,
                               connection=self._mock_connection)

        parsed = response.parse_body()
        self.assertEqual(parsed, '')

    def test_JsonResponse_class_success(self):
        self._mock_response.read.return_value = '{"foo": "bar"}'
        response = JsonResponse(response=self._mock_response,
                                connection=self._mock_connection)

        parsed = response.parse_body()
        self.assertEqual(parsed, {'foo': 'bar'})

    def test_JsonResponse_class_malformed_response(self):
        # Truncated JSON document.
        self._mock_response.read.return_value = '{"foo": "bar'

        try:
            JsonResponse(response=self._mock_response,
                         connection=self._mock_connection)
        except MalformedResponseError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_JsonResponse_class_zero_length_body_strip(self):
        # Same short-circuit as for XML: whitespace-only body -> ''.
        self._mock_response.read.return_value = ' '

        response = JsonResponse(response=self._mock_response,
                                connection=self._mock_connection)

        parsed = response.parse_body()
        self.assertEqual(parsed, '')


if __name__ == '__main__':
    sys.exit(unittest.main())
import sys
import unittest

from libcloud.common.types import LazyList


class TestLazyList(unittest.TestCase):
    """Tests for LazyList, a sequence which fetches its elements in pages
    through a user supplied get_more(last_key, value_dict) callable that
    returns an (items, last_key, exhausted) triple.
    """

    def setUp(self):
        # Bug fix: the original said "super(...).setUp" (and ".tearDown"
        # below) without parentheses - an attribute access that never
        # actually invoked the parent fixture methods.
        super(TestLazyList, self).setUp()
        self._get_more_counter = 0

    def tearDown(self):
        super(TestLazyList, self).tearDown()

    def test_init(self):
        data = [1, 2, 3, 4, 5]
        ll = LazyList(get_more=self._get_more_exhausted)
        ll_list = list(ll)
        self.assertEqual(ll_list, data)

    def test_iterator(self):
        data = [1, 2, 3, 4, 5]
        ll = LazyList(get_more=self._get_more_exhausted)
        for i, d in enumerate(ll):
            self.assertEqual(d, data[i])

    def test_empty_list(self):
        ll = LazyList(get_more=self._get_more_empty)

        self.assertEqual(list(ll), [])
        self.assertEqual(len(ll), 0)
        self.assertTrue(10 not in ll)

    def test_iterator_not_exhausted(self):
        # Iteration must transparently fetch the second page.
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        ll = LazyList(get_more=self._get_more_not_exhausted)
        number_of_iterations = 0
        for i, d in enumerate(ll):
            self.assertEqual(d, data[i])
            number_of_iterations += 1
        self.assertEqual(number_of_iterations, 10)

    def test_len(self):
        # Bug fix: the original constructed the same LazyList twice on
        # two consecutive identical lines; one is enough.
        ll = LazyList(get_more=self._get_more_not_exhausted)

        self.assertEqual(len(ll), 10)

    def test_contains(self):
        ll = LazyList(get_more=self._get_more_not_exhausted)

        self.assertTrue(40 not in ll)
        self.assertTrue(1 in ll)
        self.assertTrue(5 in ll)
        self.assertTrue(10 in ll)

    def test_indexing(self):
        ll = LazyList(get_more=self._get_more_not_exhausted)

        self.assertEqual(ll[0], 1)
        self.assertEqual(ll[9], 10)
        self.assertEqual(ll[-1], 10)

        try:
            ll[11]
        except IndexError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_repr(self):
        ll1 = LazyList(get_more=self._get_more_empty)
        ll2 = LazyList(get_more=self._get_more_exhausted)
        ll3 = LazyList(get_more=self._get_more_not_exhausted)

        self.assertEqual(repr(ll1), '[]')
        self.assertEqual(repr(ll2), '[1, 2, 3, 4, 5]')
        self.assertEqual(repr(ll3),
                         '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]')

    # get_more implementations used as fixtures by the tests above.

    def _get_more_empty(self, last_key, value_dict):
        # No data at all; exhausted immediately.
        return [], None, True

    def _get_more_exhausted(self, last_key, value_dict):
        # A single page of five items.
        data = [1, 2, 3, 4, 5]
        return data, 5, True

    def _get_more_not_exhausted(self, last_key, value_dict):
        # Two pages of five items each; the second call (last_key set)
        # returns the final page and flags exhaustion.
        self._get_more_counter += 1
        if not last_key:
            data, last_key, exhausted = [1, 2, 3, 4, 5], 5, False
        else:
            data, last_key, exhausted = [6, 7, 8, 9, 10], 10, True

        return data, last_key, exhausted


if __name__ == '__main__':
    sys.exit(unittest.main())
import sys
import unittest
import warnings
import os.path
from StringIO import StringIO

# DeprecationWarning is hidden by default from Python 2.7 onwards, so
# re-enable the default warning behaviour for the tests below.
warnings.simplefilter('default')

import libcloud.utils
from libcloud.compute.types import Provider
from libcloud.compute.providers import DRIVERS

WARNINGS_BUFFER = []


def show_warning(msg, cat, fname, lno, line=None):
    # Stand-in for warnings.showwarning which records each warning in
    # WARNINGS_BUFFER instead of writing it to stderr.
    WARNINGS_BUFFER.append((msg, cat, fname, lno))

original_func = warnings.showwarning


class TestUtils(unittest.TestCase):
    """Tests for the misc helpers in libcloud.utils."""

    def setUp(self):
        global WARNINGS_BUFFER
        WARNINGS_BUFFER = []

    def tearDown(self):
        global WARNINGS_BUFFER
        WARNINGS_BUFFER = []
        # Put the real warning hook back in case a test replaced it.
        warnings.showwarning = original_func

    def test_guess_file_mime_type(self):
        own_path = os.path.abspath(__file__)
        mimetype, encoding = libcloud.utils.guess_file_mime_type(
            file_path=own_path)

        # A .py file should map to some python related mime type.
        self.assertTrue(mimetype.find('python') != -1)

    def test_get_driver(self):
        driver = libcloud.utils.get_driver(drivers=DRIVERS,
                                           provider=Provider.DUMMY)
        self.assertTrue(driver is not None)

        try:
            driver = libcloud.utils.get_driver(drivers=DRIVERS,
                                               provider='fooba')
        except AttributeError:
            pass
        else:
            self.fail('Invalid provider, but an exception was not thrown')

    def test_deprecated_warning(self):
        warnings.showwarning = show_warning

        # Flag off: nothing may be recorded...
        libcloud.utils.SHOW_DEPRECATION_WARNING = False
        self.assertEqual(len(WARNINGS_BUFFER), 0)
        libcloud.utils.deprecated_warning('test_module')
        self.assertEqual(len(WARNINGS_BUFFER), 0)

        # ...flag on: exactly one warning must show up.
        libcloud.utils.SHOW_DEPRECATION_WARNING = True
        self.assertEqual(len(WARNINGS_BUFFER), 0)
        libcloud.utils.deprecated_warning('test_module')
        self.assertEqual(len(WARNINGS_BUFFER), 1)

    def test_in_development_warning(self):
        warnings.showwarning = show_warning

        # Flag off: nothing may be recorded...
        libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False
        self.assertEqual(len(WARNINGS_BUFFER), 0)
        libcloud.utils.in_development_warning('test_module')
        self.assertEqual(len(WARNINGS_BUFFER), 0)

        # ...flag on: exactly one warning must show up.
        libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True
        self.assertEqual(len(WARNINGS_BUFFER), 0)
        libcloud.utils.in_development_warning('test_module')
        self.assertEqual(len(WARNINGS_BUFFER), 1)

    def test_read_in_chunks_iterator(self):
        def data_source():
            for _ in range(0, 1000):
                yield 'aa'

        # Without fill_size each input piece comes through as-is...
        for piece in libcloud.utils.read_in_chunks(data_source(),
                                                   chunk_size=10,
                                                   fill_size=False):
            self.assertEqual(piece, 'aa')

        # ...with fill_size pieces are coalesced up to chunk_size.
        for piece in libcloud.utils.read_in_chunks(data_source(),
                                                   chunk_size=10,
                                                   fill_size=True):
            self.assertEqual(piece, 'aaaaaaaaaa')

    def test_read_in_chunks_filelike(self):
        class FakeFile(file):
            # Pretends to be a file object: every read() returns one more
            # byte than asked for, and EOF ('') arrives after 499 reads.
            def __init__(self):
                self.remaining = 500

            def read(self, size):
                self.remaining -= 1
                if self.remaining == 0:
                    return ''
                return 'b' * (size + 1)

        for index, piece in enumerate(libcloud.utils.read_in_chunks(
                                      FakeFile(), chunk_size=10,
                                      fill_size=False)):
            self.assertEqual(piece, 'b' * 11)

        self.assertEqual(index, 498)

        for index, piece in enumerate(libcloud.utils.read_in_chunks(
                                      FakeFile(), chunk_size=10,
                                      fill_size=True)):
            if index != 548:
                self.assertEqual(piece, 'b' * 10)
            else:
                # The leftover tail is shorter than chunk_size.
                self.assertEqual(piece, 'b' * 9)

        self.assertEqual(index, 548)

    def test_exhaust_iterator(self):
        def data_source():
            for _ in range(0, 1000):
                yield 'aa'

        expected = 'aa' * 1000

        # Chunked generator, raw generator and a file-like object must
        # all be drained into a single string.
        chunked = libcloud.utils.read_in_chunks(iterator=data_source())
        self.assertEqual(libcloud.utils.exhaust_iterator(iterator=chunked),
                         expected)

        self.assertEqual(
            libcloud.utils.exhaust_iterator(iterator=data_source()),
            expected)

        expected = '12345678990'
        self.assertEqual(
            libcloud.utils.exhaust_iterator(iterator=StringIO(expected)),
            expected)

    def test_exhaust_iterator_empty_iterator(self):
        self.assertEqual(
            libcloud.utils.exhaust_iterator(iterator=StringIO('')), '')


if __name__ == '__main__':
    sys.exit(unittest.main())
sys.exit(unittest.main()) diff --git a/trunk/tox.ini b/trunk/tox.ini new file mode 100644 index 0000000000..28c403d8c7 --- /dev/null +++ b/trunk/tox.ini @@ -0,0 +1,11 @@ +[tox] +envlist = py25,py26,py27,pypy + +[testenv] +deps = mock +commands = python setup.py test + +[testenv:py25] +deps = mock + ssl + simplejson From 523abff179706dade726a539b1fb373414b43297 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 12 Nov 2011 01:34:48 +0000 Subject: [PATCH 3/7] Increase the default poll interval in the Rackspace driver to 2.5 seconds (backport from trunk) git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.6.x@1201147 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 5 +++++ libcloud/dns/drivers/rackspace.py | 1 + 2 files changed, 6 insertions(+) diff --git a/CHANGES b/CHANGES index fb3d952a8e..df4fb67ab8 100644 --- a/CHANGES +++ b/CHANGES @@ -21,6 +21,11 @@ Changes with Apache Libcloud in development: - Update Amazon S3 driver to support a new region - US West 2 (Oregon) [Tomaz Muraus] + *) DNS: + - Increase the default poll interval in the Rackspace driver to 2.5 + seconds + [Tomaz Muraus] + Changes with Apache Libcloud 0.6.1: *) General: diff --git a/libcloud/dns/drivers/rackspace.py b/libcloud/dns/drivers/rackspace.py index 7a166726bd..d2e8f0003e 100644 --- a/libcloud/dns/drivers/rackspace.py +++ b/libcloud/dns/drivers/rackspace.py @@ -82,6 +82,7 @@ class RackspaceDNSConnection(OpenStack_1_1_Connection, PollingConnection): responseCls = RackspaceDNSResponse _url_key = 'dns_url' XML_NAMESPACE = None + poll_interval = 2.5 def get_poll_request_kwargs(self, response, context): job_id = response.object['jobId'] From d734d4c0968b8ad53acb1168f4b203a458f55499 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Sat, 12 Nov 2011 01:35:53 +0000 Subject: [PATCH 4/7] Fix a bug in Rackspace Cloud DNS driver and make sure to throw an exception if an unexpected status code is returned. 
Reported by "jeblair" (backport from trunk) git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.6.x@1201148 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 10 ++++++++-- libcloud/dns/drivers/rackspace.py | 20 +++++++++++--------- test/dns/test_rackspace.py | 15 +++++++++++++++ 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/CHANGES b/CHANGES index df4fb67ab8..2216247a75 100644 --- a/CHANGES +++ b/CHANGES @@ -22,9 +22,15 @@ Changes with Apache Libcloud in development: [Tomaz Muraus] *) DNS: + - Increase the default poll interval in the Rackspace driver to 2.5 - seconds - [Tomaz Muraus] + seconds. + [Tomaz Muraus] + + - Fix a bug in Rackspace Cloud DNS driver and make sure to throw an + exception if an unexpected status code is returned. Reported by + jeblair. + [Tomaz Muraus] Changes with Apache Libcloud 0.6.1: diff --git a/libcloud/dns/drivers/rackspace.py b/libcloud/dns/drivers/rackspace.py index d2e8f0003e..ccb6723a9f 100644 --- a/libcloud/dns/drivers/rackspace.py +++ b/libcloud/dns/drivers/rackspace.py @@ -63,15 +63,17 @@ def parse_error(self): elif context['resource'] == 'record': raise RecordDoesNotExistError(value='', driver=self, record_id=context['id']) - - if 'code' and 'message' in body: - err = '%s - %s (%s)' % (body['code'], body['message'], - body['details']) - elif 'validationErrors' in body: - errors = [m for m in body['validationErrors']['messages']] - err = 'Validation errors: %s' % ', '.join(errors) - - return err + if body: + if 'code' and 'message' in body: + err = '%s - %s (%s)' % (body['code'], body['message'], + body['details']) + return err + elif 'validationErrors' in body: + errors = [m for m in body['validationErrors']['messages']] + err = 'Validation errors: %s' % ', '.join(errors) + return err + + raise LibcloudError('Unexpected status code: %s' % (status)) class RackspaceDNSConnection(OpenStack_1_1_Connection, PollingConnection): diff --git a/test/dns/test_rackspace.py b/test/dns/test_rackspace.py index 
ba1ab71f30..4a14e98137 100644 --- a/test/dns/test_rackspace.py +++ b/test/dns/test_rackspace.py @@ -48,6 +48,16 @@ def test_list_zones_success(self): self.assertEqual(zones[0].domain, 'foo4.bar.com') self.assertEqual(zones[0].extra['comment'], 'wazaaa') + def test_list_zones_http_413(self): + RackspaceMockHttp.type = '413' + + try: + self.driver.list_zones() + except LibcloudError: + pass + else: + self.fail('Exception was not thrown') + def test_list_zones_no_results(self): RackspaceMockHttp.type = 'NO_RESULTS' zones = self.driver.list_zones() @@ -269,6 +279,11 @@ def _v1_0_11111_domains(self, method, url, body, headers): return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) + def _v1_0_11111_domains_413(self, method, url, body, headers): + body = '' + return (httplib.REQUEST_ENTITY_TOO_LARGE, body, self.base_headers, + httplib.responses[httplib.REQUEST_ENTITY_TOO_LARGE]) + def _v1_0_11111_domains_NO_RESULTS(self, method, url, body, headers): body = self.fixtures.load('list_zones_no_results.json') return (httplib.OK, body, self.base_headers, From b9fbefd7d52d6778c5da7fbf64d493fdf23eedb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 15 Nov 2011 02:05:41 +0000 Subject: [PATCH 5/7] Bump version, update description in setup.py git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.6.x@1202003 13f79535-47bb-0310-9956-ffa450edef68 --- libcloud/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 44347cff4d..b6dd88b2e2 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ["__version__", "enable_debug"] -__version__ = '0.6.1' +__version__ = '0.6.2' try: import paramiko diff --git a/setup.py b/setup.py index 36a1dde56e..d2a3245a02 100644 --- a/setup.py +++ b/setup.py @@ -191,7 +191,7 @@ def run(self): setup( name='apache-libcloud', version=read_version_string(), - description='A 
unified interface into many cloud server providers', + description='A a standard Python library that abstracts away differences among multiple cloud provider APIs', author='Apache Software Foundation', author_email='dev@libcloud.apache.org', requires=([], ['ssl', 'simplejson'],)[pre_python26], From 00f2e98f9d48d99d17d25219f756dc0b0e54986d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 15 Nov 2011 02:07:01 +0000 Subject: [PATCH 6/7] Update changes. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.6.x@1202005 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 1cf396d858..e26f3184e7 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,6 @@ -*- coding: utf-8 -*- -Changes with Apache Libcloud in development: +Changes with Apache Libcloud 0.6.2: *) General @@ -18,7 +18,7 @@ Changes with Apache Libcloud in development: inconsistencies in the OpenStack driver extension method signatures. [Brad Morgan] - - Update Amazon EC2 driver and pricing data to support a new region - + - Update Amazon EC2 driver and pricing data to support a new region - US West 2 (Oregon) [Tomaz Muraus] From fbe4161f2ca99e69a90b260c31fc75013d5a7e3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Muraus?= Date: Tue, 15 Nov 2011 02:08:06 +0000 Subject: [PATCH 7/7] Fix formatting. git-svn-id: https://svn.apache.org/repos/asf/libcloud/branches/0.6.x@1202006 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index e26f3184e7..93a4f64a3e 100644 --- a/CHANGES +++ b/CHANGES @@ -31,7 +31,7 @@ Changes with Apache Libcloud 0.6.2: - Update Amazon S3 driver to support a new region - US West 2 (Oregon) [Tomaz Muraus] - *) DNS: + *) DNS: - Increase the default poll interval in the Rackspace driver to 2.5 seconds.