From 2a0eaaebd301c623879384801261c39ea73da305 Mon Sep 17 00:00:00 2001 From: Sowmya Krishnan Date: Mon, 17 Aug 2015 16:22:54 +0530 Subject: [PATCH 1/2] CLOUDSTACK-8738: Added two methods to enable and cancel maintenance mode on StoragePool --- tools/marvin/marvin/lib/base.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py index 71aa3e67a670..aca7fd105de7 100755 --- a/tools/marvin/marvin/lib/base.py +++ b/tools/marvin/marvin/lib/base.py @@ -2637,6 +2637,22 @@ def enableMaintenance(self, apiclient): cmd.id = self.id return apiclient.enableStorageMaintenance(cmd) + @classmethod + def enableMaintenance(cls, apiclient, id): + """Enables maintenance mode on the given Storage pool""" + + cmd = enableStorageMaintenance.enableStorageMaintenanceCmd() + cmd.id = id + return apiclient.enableStorageMaintenance(cmd) + + @classmethod + def cancelMaintenance(cls, apiclient, id): + """Cancels maintenance mode on the given Storage pool""" + + cmd = cancelStorageMaintenance.cancelStorageMaintenanceCmd() + cmd.id = id + return apiclient.cancelStorageMaintenance(cmd) + @classmethod def list(cls, apiclient, **kwargs): """List all storage pools matching criteria""" From 8bd1ca14ce3f7ea567f50662f2894c6bb4e2e326 Mon Sep 17 00:00:00 2001 From: Sowmya Krishnan Date: Mon, 17 Aug 2015 16:57:33 +0530 Subject: [PATCH 2/2] Fixed pep8 issues --- .../maint/testpath_disablestoragepool.py | 1023 +++++++++++------ 1 file changed, 674 insertions(+), 349 deletions(-) diff --git a/test/integration/component/maint/testpath_disablestoragepool.py b/test/integration/component/maint/testpath_disablestoragepool.py index d276a51151d7..c1bb2040b6f7 100644 --- a/test/integration/component/maint/testpath_disablestoragepool.py +++ b/test/integration/component/maint/testpath_disablestoragepool.py @@ -16,7 +16,7 @@ # under the License.
"""Utilities functions """ -#All tests inherit from cloudstackTestCase +# All tests inherit from cloudstack TestCase from marvin.cloudstackTestCase import cloudstackTestCase from marvin.cloudstackTestCase import cloudstackTestCase, unittest @@ -33,58 +33,95 @@ Host, Capacities) from marvin.lib.utils import cleanup_resources, validateList -from marvin.lib.common import get_zone, get_domain, list_clusters, get_template, list_volumes, list_virtual_machines +from marvin.lib.common import (get_zone, + get_domain, + list_clusters, + get_template, + list_volumes, + list_virtual_machines) from nose.plugins.attrib import attr from ddt import ddt, data + def verify_vm_state(self, vmid, state): - list_vm = list_virtual_machines(self.userapiclient, account=self.account.name, domainid=self.account.domainid, id=vmid) - self.assertEqual(validateList(list_vm)[0], PASS, 'Check List vm response for vmid: %s' % vmid) - self.assertGreater(len(list_vm), 0, 'Check the list vm response for vm id: %s' % vmid) + list_vm = list_virtual_machines(self.userapiclient, + account=self.account.name, + domainid=self.account.domainid, + id=vmid) + self.assertEqual( + validateList(list_vm)[0], + PASS, + 'Check List vm response for vmid: %s' % + vmid) + self.assertGreater( + len(list_vm), + 0, + 'Check the list vm response for vm id: %s' % + vmid) vm = list_vm[0] - self.assertEqual(vm.id, str(vmid), 'Vm deployed is different from the test') + self.assertEqual( + vm.id, + str(vmid), + 'Vm deployed is different from the test') self.assertEqual(vm.state, state, 'VM is not in %s state' % state) self.debug('VM is in is %s state' % state) - def verify_pool_state(self, poolid, state): - list_storage_pool_response = StoragePool.list(self.userapiclient, id=poolid) - self.assertGreater(len(list_storage_pool_response), 0, 'Check list pool response is greater than 0') - self.assertEqual(list_storage_pool_response[0].state, state, 'Storage pool is not in %s state' % state) - + list_storage_pool_response = StoragePool.list( + self.userapiclient, id=poolid) + self.assertGreater(len(list_storage_pool_response), 0, + 'Check list pool response is greater than 0') + self.assertEqual( + list_storage_pool_response[0].state, + state, + 'Storage pool is not in %s state' % + state) def verify_vm_storage_pool(self, vmid, storageid): - root_volume = Volume.list(self.userapiclient, virtualmachineid=vmid, type='ROOT')[0] + root_volume = Volume.list( + self.userapiclient, + virtualmachineid=vmid, + type='ROOT')[0] list_volume = Volume.list(self.userapiclient, id=root_volume.id) - self.assertEqual(list_volume[0].storageid, storageid, 'check list volume response for Storage id: % s ' % storageid) - + self.assertEqual( + list_volume[0].storageid, + storageid, + 'check list volume response for Storage id: % s ' % + storageid) @ddt class TestPathDisableStorage_Basic(cloudstackTestCase): """ # Tests in this path requires to be run independently - # ( not to be run in parallel with any other tests since it involves disabling/enabling storage pools and may cause unexpected failures in other tests + # ( not to be run in parallel with any other tests since it involves disabling/enabling storage pools \ + and may cause unexpected failures in other tests # The test also requires to have 2 Cluster-wide and 2 zone-wide storage pools available in the setup. 
# For running the tests on local storage, ensure there are 2 local storage pools set up on each host - - """ + """ @classmethod def setUpClass(cls): - testClient = super(TestPathDisableStorage_Basic, cls).getClsTestClient() + testClient = super( + TestPathDisableStorage_Basic, + cls).getClsTestClient() cls.apiclient = testClient.getApiClient() cls.testdata = testClient.getParsedTestDataConfig() cls.domain = get_domain(cls.apiclient) cls.zone = get_zone(cls.apiclient) cls.testdata['mode'] = cls.zone.networktype - cls.template = get_template(cls.apiclient, cls.zone.id, cls.testdata['ostype']) + cls.template = get_template( + cls.apiclient, + cls.zone.id, + cls.testdata['ostype']) cls.testdata['template']['ostypeid'] = cls.template.ostypeid if cls.template == FAILED: - cls.fail('get_template() failed to return template with description %s' % cls.testdata['ostype']) + cls.fail( + 'get_template() failed to return template with description %s' + % cls.testdata['ostype']) cls._cleanup = [] cls.disabled_list = [] cls.testdata['template_2']['zoneid'] = cls.zone.id @@ -102,38 +139,32 @@ def setUpClass(cls): raise e # Create shared storage offerings - cls.service_offering_shared = ServiceOffering.create(cls.apiclient, - cls.testdata['service_offering'] - ) + cls.service_offering_shared = ServiceOffering.create( + cls.apiclient, cls.testdata['service_offering']) cls._cleanup.append(cls.service_offering_shared) - cls.disk_offering_shared = DiskOffering.create(cls.apiclient, - cls.testdata['disk_offering'] - ) - cls.resized_disk_offering = DiskOffering.create(cls.apiclient, - cls.testdata['resized_disk_offering'] - ) + cls.disk_offering_shared = DiskOffering.create( + cls.apiclient, cls.testdata['disk_offering']) + cls.resized_disk_offering = DiskOffering.create( + cls.apiclient, cls.testdata['resized_disk_offering']) cls._cleanup.append(cls.disk_offering_shared) # Create offerings for local storage if local storage is enabled if cls.zone.localstorageenabled: cls.testdata["service_offerings"]["tiny"]["storagetype"] = 'local' - cls.service_offering_local = ServiceOffering.create(cls.apiclient, - cls.testdata["service_offerings"]["tiny"] - ) + cls.service_offering_local = ServiceOffering.create( + cls.apiclient, cls.testdata["service_offerings"]["tiny"]) cls._cleanup.append(cls.service_offering_local) cls.testdata["disk_offering"]["storagetype"] = 'local' - cls.disk_offering_local = DiskOffering.create(cls.apiclient, - cls.testdata["disk_offering"] - ) + cls.disk_offering_local = DiskOffering.create( + cls.apiclient, cls.testdata["disk_offering"]) cls._cleanup.append(cls.disk_offering_local) cls.testdata["disk_offering"]["storagetype"] = ' ' cls.testdata["service_offerings"]["tiny"]["storagetype"] = ' ' else: cls.debug("No local storage found") - cls.userapiclient = testClient.getUserApiClient(UserName=cls.account.name, - DomainName=cls.account.domain - ) + cls.userapiclient = testClient.getUserApiClient( + UserName=cls.account.name, DomainName=cls.account.domain) response = User.login(cls.userapiclient, username=cls.account.name, password=cls.testdata['account']['password'] @@ -154,9 +185,12 @@ def setUp(self): def tearDown(self): if self.disabled_list: for poolid in self.disabled_list: - if StoragePool.list(self.userapiclient, id=poolid)[0].state != 'Up': + if StoragePool.list( + self.userapiclient, + id=poolid)[0].state != 'Up': try: - StoragePool.update(self.userapiclient, id=poolid, enabled=True) + StoragePool.update( + self.userapiclient, id=poolid, enabled=True) self.debug('Enabling: % s ' % 
poolid) except Exception as e: self.fail("Couldn't enable storage % s" % poolid) @@ -166,12 +200,11 @@ def tearDown(self): except Exception as e: self.fail('Warning: Exception during cleanup: %s' % e) - @data('host', 'CLUSTER', 'ZONE') @attr(tags=['advanced', 'advancedsg', 'basic'], required_hardware='false') def test_01_disable_enable_pool(self, value): """ - + Test Steps: ========= 1. Deploy 2 VMs @@ -190,7 +223,8 @@ def test_01_disable_enable_pool(self, value): 14. findStoragePoolsforMigration should not list the disabled pool """ - # Choose appropriate service offering depending on the scope the test is being run on + # Choose appropriate service offering depending on the scope the test + # is being run on self.disabled_list = [] if value == 'CLUSTER': other_scope = 'ZONE' @@ -211,66 +245,84 @@ def test_01_disable_enable_pool(self, value): # Keep only one pool active and disable the rest try: - self.list_storage = StoragePool.list(self.userapiclient, scope=value) + self.list_storage = StoragePool.list( + self.userapiclient, scope=value) if self.list_storage: count_st_pools = len(self.list_storage) else: count_st_pools = 0 self.disabled_pool_1 = None if count_st_pools > 1: - self.debug('Found % s storage pools, keeping one and disabling rest' % count_st_pools) + self.debug( + 'Found % s storage pools, keeping one and disabling rest' % + count_st_pools) for pool in self.list_storage[1:]: self.disabled_pool_1 = self.list_storage[1] if pool.state == 'Up': self.debug('Trying to disable storage %s' % pool.id) try: - StoragePool.update(self.userapiclient, id=pool.id, enabled=False) + StoragePool.update( + self.userapiclient, id=pool.id, enabled=False) self.disabled_list.append(pool.id) - self.debug('Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Appended to list of disabled pools. List is now: % s ' % + self.disabled_list) except Exception as e: raise e elif count_st_pools == 1: - self.debug('Only one % s wide storage found - will not be able to complete all tests' % value) + self.debug( + 'Only one % s wide storage found - will not be able to complete all tests' % + value) else: self.skipTest('No % s storage pools found' % value) except Exception as e: raise e - # Disable the other scope shared storage pools while we are testing on one - applicable for only shared storage + # Disable the other scope shared storage pools while we are testing on + # one - applicable for only shared storage if value != 'host': try: - self.list_storage = StoragePool.list(self.userapiclient, scope=other_scope) + self.list_storage = StoragePool.list( + self.userapiclient, scope=other_scope) if self.list_storage: for pool in self.list_storage: if pool.state == 'Up': - self.debug('Trying to disable storage % s' % pool.id) + self.debug( + 'Trying to disable storage % s' % + pool.id) try: - StoragePool.update(self.userapiclient, id=pool.id, enabled=False) + StoragePool.update( + self.userapiclient, id=pool.id, enabled=False) self.disabled_list.append(pool.id) - self.debug('Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Appended to list of disabled pools.
List is now: % s ' % + self.disabled_list) except Exception as e: - self.fail("Couldn't disable storage % s" % pool.id) + self.fail( + "Couldn't disable storage % s" % pool.id) else: self.debug('No % s wide storage pools found' % other_scope) except Exception as e: raise e # Step 1: Deploy 2 VMs - self.virtual_machine_1 = VirtualMachine.create(self.userapiclient, - self.testdata['small'], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - zoneid=self.zone.id) + self.virtual_machine_1 = VirtualMachine.create( + self.userapiclient, + self.testdata['small'], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id) verify_vm_state(self, self.virtual_machine_1.id, 'Running') - self.virtual_machine_2 = VirtualMachine.create(self.userapiclient, - self.testdata['small'], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - zoneid=self.zone.id) + self.virtual_machine_2 = VirtualMachine.create( + self.userapiclient, + self.testdata['small'], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id) verify_vm_state(self, self.virtual_machine_2.id, 'Running') # Step 2: Keep one VM in stopped state while other keeps running @@ -281,12 +333,19 @@ def test_01_disable_enable_pool(self, value): except Exception as e: self.fail('Step 2: Failed to stop VM: %s' % e) - # Step 3: Disable the Storage Pool, verify VMs are in same state as before - self.storage_pools_list = StoragePool.list(self.userapiclient, scope=value, state='Up') + # Step 3: Disable the Storage Pool, verify VMs are in same state as + # before + self.storage_pools_list = StoragePool.list( + self.userapiclient, scope=value, state='Up') self.storage_pool_1 = self.storage_pools_list[0] try: - self.debug('Step 3: Disabling Storage Pool: %s' % self.storage_pool_1.id) - StoragePool.update(self.userapiclient, id=self.storage_pool_1.id, enabled=False) + self.debug( + 'Step 3: Disabling Storage Pool: %s' % + self.storage_pool_1.id) + StoragePool.update( + self.userapiclient, + id=self.storage_pool_1.id, + enabled=False) except Exception as e: self.debug("Step 3: Couldn't disable pool %s" % e) @@ -295,7 +354,8 @@ def test_01_disable_enable_pool(self, value): verify_vm_state(self, self.virtual_machine_2.id, 'Stopped') # Step 4: Deploying new VM on disabled pool should fail - self.debug('Step 4: Trying to deploy VM on disabled storage - should fail') + self.debug( + 'Step 4: Trying to deploy VM on disabled storage - should fail') with self.assertRaises(Exception): VirtualMachine.create(self.userapiclient, self.testdata['small'], @@ -309,7 +369,10 @@ def test_01_disable_enable_pool(self, value): try: self.virtual_machine_2.start(self.userapiclient) verify_vm_state(self, self.virtual_machine_2.id, 'Running') - verify_vm_storage_pool(self, self.virtual_machine_2.id, self.storage_pool_1.id) + verify_vm_storage_pool( + self, + self.virtual_machine_2.id, + self.storage_pool_1.id) except Exception as e: self.fail('Step 5: Failed to start VM: %s' % e) @@ -320,20 +383,26 @@ def test_01_disable_enable_pool(self, value): # Step 7: Enable Storage pool try: - self.debug('Step 7: Enabling Storage Pool: %s' % self.storage_pool_1.id) - StoragePool.update(self.userapiclient, 
id=self.storage_pool_1.id, enabled=True) + self.debug( + 'Step 7: Enabling Storage Pool: %s' % + self.storage_pool_1.id) + StoragePool.update( + self.userapiclient, + id=self.storage_pool_1.id, + enabled=True) except Exception as e: self.debug("Step 7: Couldn't enable pool %s" % e) verify_pool_state(self, self.storage_pool_1.id, 'Up') # Step 8: Deploy a VM on the pool - self.virtual_machine_3 = VirtualMachine.create(self.userapiclient, - self.testdata['small'], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - zoneid=self.zone.id) + self.virtual_machine_3 = VirtualMachine.create( + self.userapiclient, + self.testdata['small'], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id) verify_vm_state(self, self.virtual_machine_3.id, 'Running') if self.hypervisor.lower() == 'lxc': @@ -344,25 +413,39 @@ services=self.testdata['volume'], diskofferingid=self.disk_offering.id, zoneid=self.zone.id) - list_volume = Volume.list(self.userapiclient, id=self.volume.id, accountid=self.account.name, domainid=self.account.domainid) - self.assertEqual(validateList(list_volume)[0], - PASS, - 'Step 9: Check List volume response for volume %s' % self.volume.id) - self.assertEqual(list_volume[0].id, - self.volume.id, - 'Step 9: check list volume response for volume id: %s' % self.volume.id) - self.debug('Step 9: volume id %s got created successfully' % list_volume[0].id) + list_volume = Volume.list( + self.userapiclient, + id=self.volume.id, + accountid=self.account.name, + domainid=self.account.domainid) + self.assertEqual( + validateList(list_volume)[0], + PASS, + 'Step 9: Check List volume response for volume %s' % + self.volume.id) + self.assertEqual( + list_volume[0].id, + self.volume.id, + 'Step 9: check list volume response for volume id: %s' % + self.volume.id) + self.debug( + 'Step 9: volume id %s got created successfully' % + list_volume[0].id) self.virtual_machine_3.attach_volume(self.userapiclient, self.volume) list_volume = Volume.list(self.userapiclient, id=self.volume.id) - self.assertEqual(list_volume[0].virtualmachineid, - self.virtual_machine_3.id, - 'Step 9: Check if volume state (attached) is reflected') - self.debug('Step 9: volume id:%s successfully attached to vm id%s' % (self.volume.id, self.virtual_machine_3.id)) + self.assertEqual( + list_volume[0].virtualmachineid, + self.virtual_machine_3.id, + 'Step 9: Check if volume state (attached) is reflected') + self.debug( + 'Step 9: volume id:%s successfully attached to vm id%s' % + (self.volume.id, self.virtual_machine_3.id)) if self.disabled_pool_1: newpoolid = self.disabled_pool_1.id else: - self.skipTest('Step 9: Could not find a second storage pool to complete the remaining tests') + self.skipTest( + 'Step 9: Could not find a second storage pool to complete the remaining tests') # Step 10: Disable storage pool SP1 again and enable new pool try: @@ -371,52 +454,77 @@ def test_01_disable_enable_pool(self, value): self.fail('Step 10: Enabling storage pool failed: %s' % e) verify_pool_state(self, newpoolid, 'Up') try: - self.debug('Step 10: Disabling Storage Pool: %s' % self.storage_pool_1.id) - StoragePool.update(self.userapiclient, id=self.storage_pool_1.id, enabled=False) + self.debug( + 'Step 10: Disabling Storage Pool: %s' % + self.storage_pool_1.id) + StoragePool.update( +
self.userapiclient, + id=self.storage_pool_1.id, + enabled=False) self.disabled_list.append(self.storage_pool_1.id) - self.debug('Step 10: Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Step 10: Appended to list of disabled pools. List is now: % s ' % + self.disabled_list) except Exception as e: self.debug("Step 10: Couldn't disable pool %s" % e) verify_pool_state(self, self.storage_pool_1.id, 'Disabled') # Step 11: Deploy new VM, VM5 - should succeed - self.virtual_machine_4 = VirtualMachine.create(self.userapiclient, - self.testdata['small'], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - zoneid=self.zone.id) + self.virtual_machine_4 = VirtualMachine.create( + self.userapiclient, + self.testdata['small'], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id) verify_vm_state(self, self.virtual_machine_4.id, 'Running') # Step 12: Stop VM1 which is running from disabled pool self.virtual_machine_1.stop(self.userapiclient) verify_vm_state(self, self.virtual_machine_1.id, 'Stopped') - # Step 13: Migrate ROOT volume of VM1 to another enabled storage pool - should succeed + # Step 13: Migrate ROOT volume of VM1 to another enabled storage pool - + # should succeed if value != 'host': - root_volume = Volume.list(self.userapiclient, virtualmachineid=self.virtual_machine_1.id, type='ROOT') + root_volume = Volume.list( + self.userapiclient, + virtualmachineid=self.virtual_machine_1.id, + type='ROOT') try: - Volume.migrate(self.userapiclient, volumeid=root_volume[0].id, storageid=newpoolid) + Volume.migrate( + self.userapiclient, + volumeid=root_volume[0].id, + storageid=newpoolid) except Exception as e: raise e - list_volume = list_volumes(self.userapiclient, id=root_volume[0].id) - self.assertEqual(isinstance(list_volume, list), True, 'Step 13: Check list volumes response for valid list') - - # Step 14: findStoragePoolsforMigration should not list the disabled pool + list_volume = list_volumes( + self.userapiclient, id=root_volume[0].id) + self.assertEqual( + isinstance( + list_volume, + list), + True, + 'Step 13: Check list volumes response for valid list') + + # Step 14: findStoragePoolsforMigration should not list the disabled + # pool if value != 'host': - pools_for_migration = StoragePool.listForMigration(self.userapiclient, id=root_volume[0].id) - self.debug('Step 14: List of pools suitable for migration: % s ' % pools_for_migration) + pools_for_migration = StoragePool.listForMigration( + self.userapiclient, id=root_volume[0].id) + self.debug( + 'Step 14: List of pools suitable for migration: % s ' % + pools_for_migration) if pools_for_migration: if self.storage_pool_1 in pools_for_migration: - self.fail('Step 14: Storage pool % s is supposed to be disabled and not suitable for migration, \ - but found in the list of pools suitable for migration' % self.storage_pool_1.id) - + self.fail( + 'Step 14: Storage pool % s is supposed to be disabled and not suitable for migration, \ + but found in the list of pools suitable for migration' % + self.storage_pool_1.id) @data('host', 'CLUSTER', 'ZONE') @attr(tags=['advanced', 'advancedsg', 'basic'], required_hardware='false') def test_02_vm_operations_on_disabled_pool(self, value): - """ Test Steps: ========= @@ -439,7 +547,8 @@ def test_02_vm_operations_on_disabled_pool(self, value): """ - # Choose 
appropriate service offering depending on the scope the test is being run on + # Choose appropriate service offering depending on the scope the test + # is being run on self.disabled_list = [] if value == 'CLUSTER': other_scope = 'ZONE' @@ -463,58 +572,75 @@ def test_02_vm_operations_on_disabled_pool(self, value): # Keep one storage pool active and disable the rest try: - self.list_storage = StoragePool.list(self.userapiclient, scope=value) + self.list_storage = StoragePool.list( + self.userapiclient, scope=value) if self.list_storage: count_st_pools = len(self.list_storage) else: count_st_pools = 0 self.disabled_pool_1 = None if count_st_pools > 1: - self.debug('Found % s storage pools, keeping one and disabling rest' % count_st_pools) + self.debug( + 'Found % s storage pools, keeping one and disabling rest' % + count_st_pools) for pool in self.list_storage[1:]: self.disabled_pool_1 = self.list_storage[1] if pool.state == 'Up': self.debug('Trying to disable storage %s' % pool.id) try: - StoragePool.update(self.userapiclient, id=pool.id, enabled=False) + StoragePool.update( + self.userapiclient, id=pool.id, enabled=False) self.disabled_list.append(pool.id) - self.debug('Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Appended to list of disabled pools. List is now: % s ' % + self.disabled_list) except Exception as e: raise e elif count_st_pools == 1: - self.debug('Only one % s wide storage found - will not be able to complete all tests' % value) + self.debug( + 'Only one % s wide storage found - will not be able to complete all tests' % + value) else: self.skipTest('No % s wide storage pools found' % value) except Exception as e: raise e - # Disable the other scope storage pools while we are testing on one scope - applicable for only shared storage + # Disable the other scope storage pools while we are testing on one + # scope - applicable for only shared storage if value != 'host': try: - self.list_storage = StoragePool.list(self.userapiclient, scope=other_scope) + self.list_storage = StoragePool.list( + self.userapiclient, scope=other_scope) if self.list_storage: for pool in self.list_storage: if pool.state == 'Up': - self.debug('Trying to disable storage % s' % pool.id) + self.debug( + 'Trying to disable storage % s' % + pool.id) try: - StoragePool.update(self.userapiclient, id=pool.id, enabled=False) + StoragePool.update( + self.userapiclient, id=pool.id, enabled=False) self.disabled_list.append(pool.id) - self.debug('Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Appended to list of disabled pools. 
List is now: % s ' % + self.disabled_list) except Exception as e: - self.fail("Couldn't disable storage % s" % pool.id) + self.fail( + "Couldn't disable storage % s" % pool.id) else: self.debug('No % s wide storage pools found' % other_scope) except Exception as e: raise e # Step 1: Deploy a VM and attach data disk to one VM - self.virtual_machine_1 = VirtualMachine.create(self.userapiclient, - self.testdata['small'], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - zoneid=self.zone.id) + self.virtual_machine_1 = VirtualMachine.create( + self.userapiclient, + self.testdata['small'], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id) verify_vm_state(self, self.virtual_machine_1.id, 'Running') self.volume_1 = Volume.create(self.userapiclient, @@ -523,19 +649,29 @@ def test_02_vm_operations_on_disabled_pool(self, value): zoneid=self.zone.id) self.virtual_machine_1.attach_volume(self.userapiclient, self.volume_1) list_volume = Volume.list(self.userapiclient, id=self.volume_1.id) - self.assertEqual(list_volume[0].virtualmachineid, - self.virtual_machine_1.id, '' - 'Check if volume state (attached) is reflected') - self.debug('Step 1: volume id:%s successfully attached to vm id%s' % (self.volume_1.id, self.virtual_machine_1.id)) + self.assertEqual( + list_volume[0].virtualmachineid, + self.virtual_machine_1.id, + 'Check if volume state (attached) is reflected') + self.debug( + 'Step 1: volume id:%s successfully attached to vm id%s' % + (self.volume_1.id, self.virtual_machine_1.id)) # Step 2: Disable the storage pool - self.storage_pools_list = StoragePool.list(self.userapiclient, scope=value, state='Up') + self.storage_pools_list = StoragePool.list( + self.userapiclient, scope=value, state='Up') self.storage_pool_1 = self.storage_pools_list[0] try: - self.debug('Step 2: Disabling Storage Pool: %s' % self.storage_pool_1.id) - StoragePool.update(self.userapiclient, id=self.storage_pool_1.id, enabled=False) + self.debug( + 'Step 2: Disabling Storage Pool: %s' % + self.storage_pool_1.id) + StoragePool.update( + self.userapiclient, + id=self.storage_pool_1.id, + enabled=False) self.disabled_list.append(self.storage_pool_1.id) except Exception as e: self.debug("Step 2: Couldn't disable pool %s" % e) @@ -543,7 +679,10 @@ verify_vm_state(self, self.virtual_machine_1.id, 'Running') # Step 3: Create Template from root volume of the VM - root_volume_1 = Volume.list(self.userapiclient, virtualmachineid=self.virtual_machine_1.id, type='ROOT')[0] + root_volume_1 = Volume.list( + self.userapiclient, + virtualmachineid=self.virtual_machine_1.id, + type='ROOT')[0] self.virtual_machine_1.stop(self.userapiclient) try: template_2 = Template.create(self.userapiclient, @@ -553,7 +692,10 @@ domainid=self.account.domainid) self.cleanup.append(template_2) self.debug('Step 3: Created template with ID: %s' % template_2.id) - list_template = Template.list(self.userapiclient, templatefilter='self', id=template_2.id) + list_template = Template.list( + self.userapiclient, + templatefilter='self', + id=template_2.id) except Exception as e: self.fail('Step 3: Template from volume failed: %s' % e) self.volume_2 = Volume.create(self.userapiclient, services=self.testdata['volume'],
diskofferingid=self.disk_offering.id, zoneid=self.zone.id) - self.debug('Step 4: Trying to attach new volume to VM on disabled storage - should fail') + self.debug( + 'Step 4: Trying to attach new volume to VM on disabled storage - should fail') with self.assertRaises(Exception): - self.virtual_machine_1.attach_volume(self.userapiclient, self.volume_2) + self.virtual_machine_1.attach_volume( + self.userapiclient, self.volume_2) # Step 5: Resize DATA disk to a higher value for attached disk try: - self.volume_1.resize(self.userapiclient, diskofferingid=self.resized_disk_offering.id) - list_volume_1 = Volume.list(self.userapiclient, id=self.volume_1.id) - self.assertEqual(list_volume_1[0].diskofferingid, - self.resized_disk_offering.id, - 'check list volume response for volume id: %s' % self.volume_1.id) - self.debug('Step 5: volume id %s got resized successfully' % list_volume_1[0].id) + self.volume_1.resize(self.userapiclient, + diskofferingid=self.resized_disk_offering.id) + list_volume_1 = Volume.list( + self.userapiclient, id=self.volume_1.id) + self.assertEqual( + list_volume_1[0].diskofferingid, + self.resized_disk_offering.id, + 'check list volume response for volume id: %s' % + self.volume_1.id) + self.debug( + 'Step 5: volume id %s got resized successfully' % + list_volume_1[0].id) except Exception as e: self.fail('Step 5: Volume resize on disabled pool failed: % s' % e) # Step 6: Take VM Snapshot if self.hypervisor.lower() not in ('kvm', 'hyperv', 'lxc'): try: - self.debug("Step 6: Taking VM Snapshot for vm id % s" % self.virtual_machine_1.id) + self.debug( + "Step 6: Taking VM Snapshot for vm id % s" % + self.virtual_machine_1.id) vm_snapshot = VmSnapshot.create(self.userapiclient, self.virtual_machine_1.id, 'false', 'TestSnapshot', 'Display Text') - self.assertEqual(vm_snapshot.state, 'Ready', 'Check VM snapshot is ready') + self.assertEqual( + vm_snapshot.state, + 'Ready', + 'Check VM snapshot is ready') except Exception as e: - self.fail('Step 6: VM Snapshot on disabled pool failed: % s' % e) - + self.fail( + 'Step 6: VM Snapshot on disabled pool failed: % s' % + e) + if vm_snapshot: - self.debug('Step 6: Deleting Vm Snapshot') - VmSnapshot.deleteVMSnapshot(self.userapiclient, vm_snapshot.id) + self.debug('Step 6: Deleting Vm Snapshot') + VmSnapshot.deleteVMSnapshot(self.userapiclient, vm_snapshot.id) # Step 7: Destroy VM and immediately restore the VM - self.debug("Step 7: Deleting and restoring the VM, should continue to run from same storage pool") + self.debug( + "Step 7: Deleting and restoring the VM, should continue to run from same storage pool") self.virtual_machine_1.delete(self.userapiclient, expunge=False) self.virtual_machine_1.recover(self.userapiclient) verify_vm_state(self, self.virtual_machine_1.id, 'Stopped') self.virtual_machine_1.start(self.userapiclient) verify_vm_state(self, self.virtual_machine_1.id, 'Running') - verify_vm_storage_pool(self, self.virtual_machine_1.id, self.storage_pool_1.id) + verify_vm_storage_pool( + self, + self.virtual_machine_1.id, + self.storage_pool_1.id) # Step 8: Enable new pool if self.disabled_pool_1: try: newpoolid = self.disabled_pool_1.id - StoragePool.update(self.userapiclient, id=newpoolid, enabled=True) + StoragePool.update( + self.userapiclient, id=newpoolid, enabled=True) self.debug("Step 8: Enabling new pool % s " % newpoolid) if newpoolid in self.disabled_list: self.disabled_list.remove(newpoolid) except Exception as e: self.fail('Step 8: Enabling storage pool failed: %s' % e) else: - self.debug('Step 8: Could not
find a second storage pool, so enabling the first storage pool and running the tests') + self.debug( + 'Step 8: Could not find a second storage pool, so enabling the first storage pool and running the tests') try: - self.debug('Step 8: Enabling Storage Pool: %s' % self.storage_pool_1.id) - StoragePool.update(self.userapiclient, id=self.storage_pool_1.id, enabled=True) + self.debug( + 'Step 8: Enabling Storage Pool: %s' % + self.storage_pool_1.id) + StoragePool.update( + self.userapiclient, + id=self.storage_pool_1.id, + enabled=True) if self.storage_pool_1.id in self.disabled_list: self.disabled_list.remove(self.storage_pool_1.id) newpoolid = self.storage_pool_1.id @@ -629,31 +797,46 @@ def test_02_vm_operations_on_disabled_pool(self, value): if value != 'host': self.debug("Step 9: Re-installing VM 1") - vm_restore = self.virtual_machine_1.restore(self.userapiclient, templateid=self.template.id) + vm_restore = self.virtual_machine_1.restore( + self.userapiclient, templateid=self.template.id) verify_vm_storage_pool(self, self.virtual_machine_1.id, newpoolid) # Step 10 : Re-install VM with different template self.debug("Step 10: re-installing VM with different template") - vm_restore = self.virtual_machine_1.restore(self.userapiclient, templateid=template_2.id) + vm_restore = self.virtual_machine_1.restore( + self.userapiclient, templateid=template_2.id) verify_vm_storage_pool(self, self.virtual_machine_1.id, newpoolid) # Step 11: Repeat tests with enabled pool. Start with attach VM if value != 'host': self.debug("Step 11: Attach volume to VM") - self.virtual_machine_1.attach_volume(self.userapiclient, self.volume_2) - list_volume_2 = Volume.list(self.userapiclient, id=self.volume_2.id) + self.virtual_machine_1.attach_volume( + self.userapiclient, self.volume_2) + list_volume_2 = Volume.list( + self.userapiclient, id=self.volume_2.id) self.assertEqual(list_volume_2[0].virtualmachineid, self.virtual_machine_1.id, - 'Check if volume state (attached) is reflected') - self.debug('Step 11: volume id:% s successfully attached to vm id % s' % (self.volume_2.id, self.virtual_machine_1.id)) + 'Check if volume state (attached) is reflected') + self.debug( + 'Step 11: volume id:% s successfully attached to vm id % s' % + (self.volume_2.id, self.virtual_machine_1.id)) # Step 12: Re-size Volume to higher disk offering try: self.virtual_machine_1.stop(self.userapiclient) - self.volume_2.resize(self.userapiclient, diskofferingid=self.resized_disk_offering.id) - list_volume_2 = Volume.list(self.userapiclient, id=self.volume_2.id) - self.assertEqual(list_volume_2[0].diskofferingid, self.resized_disk_offering.id, 'check list volume response for volume id: %s' % self.volume_2.id) - self.debug('Step 12: volume id %s got resized successfully' % list_volume_2[0].id) + self.volume_2.resize( + self.userapiclient, + diskofferingid=self.resized_disk_offering.id) + list_volume_2 = Volume.list( + self.userapiclient, id=self.volume_2.id) + self.assertEqual( + list_volume_2[0].diskofferingid, + self.resized_disk_offering.id, + 'check list volume response for volume id: %s' % + self.volume_2.id) + self.debug( + 'Step 12: volume id %s got resized successfully' % + list_volume_2[0].id) except Exception as e: self.fail('Step 12: Failed to resize volume % s ' % e) self.virtual_machine_1.start(self.userapiclient) @@ -665,10 +848,19 @@ def test_02_vm_operations_on_disabled_pool(self, value): # Step 14: Take Snapshot of VM if self.hypervisor.lower() not in ('kvm', 'hyperv', 'lxc'): try: - vm_snapshot = 
VmSnapshot.create(self.userapiclient, self.virtual_machine_1.id, 'false', 'TestSnapshot2', 'Display Text') - self.assertEqual(vm_snapshot.state, 'Ready', 'Check the snapshot of vm is ready!') + vm_snapshot = VmSnapshot.create( + self.userapiclient, + self.virtual_machine_1.id, + 'false', + 'TestSnapshot2', + 'Display Text') + self.assertEqual( + vm_snapshot.state, + 'Ready', + 'Check the snapshot of vm is ready!') except Exception as e: - self.fail('Step 14: Snapshot failed post enabling new storage pool') + self.fail( + 'Step 14: Snapshot failed after enabling new storage pool') # Step 15: Delete and recover VM self.debug("Step 15: Deleting and recovering VM") @@ -682,25 +874,33 @@ def test_02_vm_operations_on_disabled_pool(self, value): @ddt class TestPathDisableStorage_Maint_Tags(cloudstackTestCase): """ - # Tests in this path requires to be run independently (not to be run in parallel with any other tests since it involves disabling/enabling storage pools and may cause unexpected failures in other tests + # Tests in this path require to be run independently + # Not to be run in parallel with any other tests since it involves disabling/enabling storage pools \ and may cause unexpected failures in other tests # The test also requires 2 Cluster-wide and 2 zone-wide storage pools available in the setup. # For running the tests on local storage, ensure there are 2 local storage pools set up on each host or different hosts """ - @classmethod def setUpClass(cls): - testClient = super(TestPathDisableStorage_Maint_Tags, cls).getClsTestClient() + testClient = super( + TestPathDisableStorage_Maint_Tags, + cls).getClsTestClient() cls.apiclient = testClient.getApiClient() cls.testdata = testClient.getParsedTestDataConfig() cls.domain = get_domain(cls.apiclient) cls.zone = get_zone(cls.apiclient) cls.testdata['mode'] = cls.zone.networktype - cls.template = get_template(cls.apiclient, cls.zone.id, cls.testdata['ostype']) + cls.template = get_template( + cls.apiclient, + cls.zone.id, + cls.testdata['ostype']) cls.testdata['template']['ostypeid'] = cls.template.ostypeid if cls.template == FAILED: - cls.fail('get_template() failed to return template with description %s' % cls.testdata['ostype']) + cls.fail( + 'get_template() failed to return template with description %s' % + cls.testdata['ostype']) cls._cleanup = [] cls.disabled_list = [] cls.maint_list = [] @@ -714,40 +914,36 @@ def setUpClass(cls): cls.debug('Creating account') cls._cleanup.append(cls.account) - # Create shared storage offerings - cls.service_offering_shared = ServiceOffering.create(cls.apiclient, - cls.testdata['service_offering'] - ) + cls.service_offering_shared = ServiceOffering.create( + cls.apiclient, cls.testdata['service_offering']) cls._cleanup.append(cls.service_offering_shared) - cls.disk_offering_shared = DiskOffering.create(cls.apiclient, - cls.testdata['disk_offering'] - ) - cls.resized_disk_offering = DiskOffering.create(cls.apiclient, - cls.testdata['resized_disk_offering'] - ) + cls.disk_offering_shared = DiskOffering.create( + cls.apiclient, cls.testdata['disk_offering']) + cls.resized_disk_offering = DiskOffering.create( + cls.apiclient, cls.testdata['resized_disk_offering']) cls._cleanup.append(cls.disk_offering_shared) # Create offerings for local storage if local storage is enabled if cls.zone.localstorageenabled: - cls.testdata["service_offerings"]["tiny"]["storagetype"] = 'local' + cls.testdata["service_offerings"][ + "tiny"]["storagetype"] = 'local' cls.debug("Creating local storage offering") -
cls.service_offering_local = ServiceOffering.create(cls.apiclient, - cls.testdata["service_offerings"]["tiny"] - ) + cls.service_offering_local = ServiceOffering.create( + cls.apiclient, cls.testdata["service_offerings"]["tiny"]) cls._cleanup.append(cls.service_offering_local) cls.testdata["disk_offering"]["storagetype"] = 'local' cls.debug("Creating local storage disk offering") - cls.disk_offering_local = DiskOffering.create(cls.apiclient, - cls.testdata["disk_offering"] - ) + cls.disk_offering_local = DiskOffering.create( + cls.apiclient, cls.testdata["disk_offering"]) cls._cleanup.append(cls.disk_offering_local) cls.testdata["disk_offering"]["storagetype"] = ' ' cls.testdata["service_offerings"]["tiny"]["storagetype"] = ' ' else: cls.debug("No local storage found") - cls.userapiclient = testClient.getUserApiClient(UserName=cls.account.name, DomainName=cls.account.domain) + cls.userapiclient = testClient.getUserApiClient( + UserName=cls.account.name, DomainName=cls.account.domain) response = User.login(cls.userapiclient, username=cls.account.name, password=cls.testdata['account']['password']) @@ -772,7 +968,8 @@ def tearDown(self): for poolid in self.disabled_list: if StoragePool.list(self.userapiclient, id=poolid)[0].state == 'Disabled': try: - StoragePool.update(self.userapiclient, id=poolid, enabled=True) + StoragePool.update( + self.userapiclient, id=poolid, enabled=True) self.debug('Enabling: % s ' % poolid) except Exception as e: self.fail("Couldn't enable storage % s" % poolid) @@ -781,22 +978,26 @@ def tearDown(self): for poolid in self.maint_list: if StoragePool.list(self.userapiclient, id=poolid)[0].state == 'Maintenance': try: - StoragePool.cancelMaintenance(self.userapiclient, id=poolid) - self.debug('Cancelled Maintenance mode for % s' % poolid) + StoragePool.cancelMaintenance( + self.userapiclient, id=poolid) + self.debug( + 'Cancelled Maintenance mode for % s' % + poolid) except Exception as e: - self.fail("Couldn't cancel Maintenance mode for storage % s " % poolid) + self.fail( + "Couldn't cancel Maintenance mode for storage % s " % + poolid) try: cleanup_resources(self.apiclient, self.cleanup) except Exception as e: self.fail('Warning: Exception during cleanup: %s' % e) - - @data('host','CLUSTER', 'ZONE') + @data('host', 'CLUSTER', 'ZONE') @attr(tags=['advanced', 'advancedsg', 'basic'], required_hardware='false') def test_01_maint_capacity_tags(self, value): """ - + Test Steps: ======== @@ -818,7 +1019,8 @@ def test_01_maint_capacity_tags(self, value): """ - # Choose appropriate service offering depending on the scope the test is being run on + # Choose appropriate service offering depending on the scope the test + # is being run on self.disabled_list = [] if value == 'CLUSTER': other_scope = 'ZONE' @@ -837,62 +1039,82 @@ def test_01_maint_capacity_tags(self, value): else: self.skipTest("Local storage not enabled") - # Keep 2 storage pools active and disable the rest. If only one storage pool is present, then skip the test + # Keep 2 storage pools active and disable the rest.
If only one storage + # pool is present, then skip the test try: - self.list_storage = StoragePool.list(self.userapiclient, scope=value) + self.list_storage = StoragePool.list( + self.userapiclient, scope=value) count_st_pools = len(self.list_storage) if count_st_pools <= 1: - raise unittest.SkipTest('Found 1 or less storage pools in % s wide scope- cannot proceed' % value) + raise unittest.SkipTest( + 'Found 1 or fewer storage pools in % s wide scope - cannot proceed' % + value) elif count_st_pools > 2: for pool in self.list_storage[2:]: if pool.state == 'Up': self.debug('Trying to disable storage %s' % pool.id) try: - StoragePool.update(self.userapiclient, id=pool.id, enabled=False) + StoragePool.update( + self.userapiclient, id=pool.id, enabled=False) self.disabled_list.append(pool.id) - self.debug('Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Appended to list of disabled pools. List is now: % s ' % + self.disabled_list) except Exception as e: raise e elif count_st_pools == 2: for pool in self.list_storage: if pool.state != 'Up': - raise unittest.SkipTest('Found storage pool % s not in Up State.. cannot proceed' % pool.id) + raise unittest.SkipTest( + 'Found storage pool % s not in Up state - cannot proceed' % + pool.id) except Exception as e: raise e - - # Disable the other scope shared storage pools while we are testing on one - applicable for only shared storage + # Disable the other scope shared storage pools while we are testing on + # one - applicable for only shared storage if value != 'host': try: - self.list_storage = StoragePool.list(self.userapiclient, scope=other_scope) + self.list_storage = StoragePool.list( + self.userapiclient, scope=other_scope) if self.list_storage: for pool in self.list_storage: if pool.state == 'Up': - self.debug('Trying to disable storage % s' % pool.id) + self.debug( + 'Trying to disable storage % s' % + pool.id) try: - StoragePool.update(self.userapiclient, id=pool.id, enabled=False) + StoragePool.update( + self.userapiclient, id=pool.id, enabled=False) self.disabled_list.append(pool.id) - self.debug('Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Appended to list of disabled pools.
List is now: % s ' % + self.disabled_list) except Exception as e: - self.fail("Couldn't disable storage % s" % pool.id) + self.fail( + "Couldn't disable storage % s" % pool.id) else: self.debug('No % s wide storage pools found' % other_scope) except Exception as e: raise e self.debug("Step 1: Deploy VM") - self.virtual_machine_1 = VirtualMachine.create(self.userapiclient, - self.testdata['small'], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - zoneid=self.zone.id) + self.virtual_machine_1 = VirtualMachine.create( + self.userapiclient, + self.testdata['small'], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id) verify_vm_state(self, self.virtual_machine_1.id, 'Running') # Step 2: Add storage to Maintenance mode self.debug("Step 2: Adding storage to maintenance mode ") - root_volume = Volume.list(self.userapiclient, virtualmachineid=self.virtual_machine_1.id, type='ROOT')[0] + root_volume = Volume.list( + self.userapiclient, + virtualmachineid=self.virtual_machine_1.id, + type='ROOT')[0] list_volume = Volume.list(self.userapiclient, id=root_volume.id) storage_id = list_volume[0].storageid try: @@ -900,29 +1122,44 @@ def test_01_maint_capacity_tags(self, value): self.debug('Step 2: Added % s to Maintenance mode' % storage_id) self.maint_list.append(storage_id) except Exception as e: - self.fail('Step 2: Failed to add Storage pool % s to Maintenance mode' % storage_id) + self.fail( + 'Step 2: Failed to add Storage pool % s to Maintenance mode' % + storage_id) verify_vm_state(self, self.virtual_machine_1.id, 'Stopped') - #Step 3: Cancel maintenance mode + # Step 3: Cancel maintenance mode try: StoragePool.cancelMaintenance(self.userapiclient, id=storage_id) - self.debug('Step 3: Cancelled Maintenance mode for % s' % storage_id) + self.debug( + 'Step 3: Cancelled Maintenance mode for % s' % + storage_id) self.maint_list.remove(storage_id) except Exception as e: - self.fail("Step 3: Couldn't cancel Maintenance mode for storage % s " % storage_id) + self.fail( + "Step 3: Couldn't cancel Maintenance mode for storage % s " % + storage_id) - # Step 4: Start the VM after disabling pool and verify it's running from same pool + # Step 4: Start the VM after disabling pool and verify it's running + # from same pool try: self.debug("Step 4: Starting VM after disabling pool") - self.list_storage = StoragePool.list(self.userapiclient, id=storage_id) + self.list_storage = StoragePool.list( + self.userapiclient, id=storage_id) if self.list_storage[0].state == 'Up': - StoragePool.update(self.userapiclient, id=storage_id, enabled=False) + StoragePool.update( + self.userapiclient, + id=storage_id, + enabled=False) self.debug("Step 4: Disabled pool % s" % storage_id) self.disabled_list.append(storage_id) except Exception as e: raise e - list_vm = list_virtual_machines(self.userapiclient, account=self.account.name, domainid=self.account.domainid, id=self.virtual_machine_1.id) + list_vm = list_virtual_machines( + self.userapiclient, + account=self.account.name, + domainid=self.account.domainid, + id=self.virtual_machine_1.id) vm = list_vm[0] if vm.state != 'Running': self.virtual_machine_1.start(self.userapiclient) @@ -930,18 +1167,24 @@ verify_vm_storage_pool(self, self.virtual_machine_1.id, storage_id) # Step 5: Perform some VM operations - reboot - self.debug("Step 5:
Performing reboot of VM % s" % self.virtual_machine_1.id) + self.debug( + "Step 5: Performing reboot of VM % s" % + self.virtual_machine_1.id) self.virtual_machine_1.reboot(self.userapiclient) verify_vm_storage_pool(self, self.virtual_machine_1.id, storage_id) # Step 6: Add tags to the storage pool self.debug("Step 6: Adding tags to storage pool") - StoragePool.update(self.userapiclient, id=storage_id, tags='disable_prov') + StoragePool.update( + self.userapiclient, + id=storage_id, + tags='disable_prov') # Step 7: Add tagged service offering self.testdata['service_offerings']['tiny']['tags'] = 'disable_prov' self.testdata["service_offerings"]["tiny"]["storagetype"] = 'local' - self.tagged_so = ServiceOffering.create(self.userapiclient, self.testdata['service_offerings']) + self.tagged_so = ServiceOffering.create( + self.userapiclient, self.testdata['service_offerings']) self.testdata['service_offerings']['tiny']['tags'] = ' ' self.testdata["service_offerings"]["tiny"]["storagetype"] = ' ' self.cleanup.append(self.tagged_so) @@ -949,30 +1192,39 @@ def test_01_maint_capacity_tags(self, value): # Step 8: Enable the pool try: self.debug("Step 8: Enabling pool") - self.list_storage = StoragePool.list(self.userapiclient, id=storage_id) + self.list_storage = StoragePool.list( + self.userapiclient, id=storage_id) if self.list_storage[0].state == 'Disabled': - StoragePool.update(self.userapiclient, id=storage_id, enabled=True) + StoragePool.update( + self.userapiclient, + id=storage_id, + enabled=True) self.disabled_list.remove(storage_id) except Exception as e: raise e # Step 9: Deploy VM using the tagged offering self.debug("Step 9: Deploying VM using tagged offering") - self.virtual_machine_2 = VirtualMachine.create(self.userapiclient, - self.testdata['small'], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.tagged_so.id, - zoneid=self.zone.id) + self.virtual_machine_2 = VirtualMachine.create( + self.userapiclient, + self.testdata['small'], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.tagged_so.id, + zoneid=self.zone.id) verify_vm_state(self, self.virtual_machine_2.id, 'Running') verify_vm_storage_pool(self, self.virtual_machine_2.id, storage_id) # Step 10: Disable storage Pool try: - self.list_storage = StoragePool.list(self.userapiclient, id=storage_id) + self.list_storage = StoragePool.list( + self.userapiclient, id=storage_id) if self.list_storage[0].state == 'Up': - StoragePool.update(self.userapiclient, id=storage_id, enabled=False) + StoragePool.update( + self.userapiclient, + id=storage_id, + enabled=False) if storage_id not in self.disabled_list: self.disabled_list.append(storage_id) except Exception as e: @@ -985,35 +1237,46 @@ def test_01_maint_capacity_tags(self, value): # Step 11: View current capacity of storage pool self.debug("Step 11: Getting current capacity...") - list_capacity_allocated = Capacities.list(self.userapiclient, fetchlatest='true', type=capacity_type) + list_capacity_allocated = Capacities.list( + self.userapiclient, fetchlatest='true', type=capacity_type) capacity_1 = list_capacity_allocated[0].capacityused self.debug("Capacity 1: % s" % capacity_1) - # Step 12: Delete VM and check capacity is recalculated in disabled pool + # Step 12: Delete VM and check capacity is recalculated in disabled + # pool self.debug("Step 12: Deleting Vm and re-calculating capacity") self.virtual_machine_2.delete(self.userapiclient) - 
list_capacity_allocated = Capacities.list(self.userapiclient, fetchlatest='true', type=capacity_type) + list_capacity_allocated = Capacities.list( + self.userapiclient, fetchlatest='true', type=capacity_type) capacity_2 = list_capacity_allocated[0].capacityused self.debug("Capacity 2: % s" % capacity_2) - self.assertGreater(capacity_1, - capacity_2, - 'Step 12: Capacity Used should be greater after VM delete although Storage is not enabled') + self.assertGreater( + capacity_1, + capacity_2, + 'Step 12: Used capacity should decrease after VM delete even though the pool is disabled') # Step 13: Deploy new VM with tagged offering again - should fail with self.assertRaises(Exception): - self.virtual_machine_3 = VirtualMachine.create(self.userapiclient, - self.testdata['small'], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.tagged_so.id, - zoneid=self.zone.id) + self.virtual_machine_3 = VirtualMachine.create( + self.userapiclient, + self.testdata['small'], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.tagged_so.id, + zoneid=self.zone.id) - # Step 14: Capacity should not be altered in disabled pool since deploy VM failed - self.debug("Step 14: Checking capacity is not altered after deploy VM fails") - list_capacity_allocated = Capacities.list(self.userapiclient, fetchlatest='true', type=capacity_type) + # Step 14: Capacity should not be altered in disabled pool since deploy + # VM failed + self.debug( + "Step 14: Checking capacity is not altered after deploy VM fails") + list_capacity_allocated = Capacities.list( + self.userapiclient, fetchlatest='true', type=capacity_type) capacity_3 = list_capacity_allocated[0].capacityused - self.assertEqual(capacity_2, capacity_3, "Step 14: Capacity Used shouldn't be altered since VM deployment failed") + self.assertEqual( + capacity_2, + capacity_3, + "Step 14: Capacity Used shouldn't be altered since VM deployment failed") class TestPathDisableStorage_Cross_Cluster(cloudstackTestCase): """ # Tests in this path require to be run independently (not to be run in parallel with any other tests \ since it involves disabling/enabling storage pools and may cause unexpected failures in other tests # This test requires at least 2 clusters in the setup with suitable hosts for migration.
# For running the tests on local storage, ensure there are 2 local storage pools set up on each host - - """ + """ @classmethod def setUpClass(cls): - testClient = super(TestPathDisableStorage_Cross_Cluster, cls).getClsTestClient() + testClient = super( + TestPathDisableStorage_Cross_Cluster, + cls).getClsTestClient() cls.apiclient = testClient.getApiClient() cls.testdata = testClient.getParsedTestDataConfig() cls.domain = get_domain(cls.apiclient) cls.zone = get_zone(cls.apiclient) cls.testdata['mode'] = cls.zone.networktype - cls.template = get_template(cls.apiclient, cls.zone.id, cls.testdata['ostype']) + cls.template = get_template( + cls.apiclient, + cls.zone.id, + cls.testdata['ostype']) cls.testdata['template']['ostypeid'] = cls.template.ostypeid if cls.template == FAILED: - cls.fail('get_template() failed to return template with description %s' % cls.testdata['ostype']) + cls.fail( + 'get_template() failed to return template with description %s' % + cls.testdata['ostype']) cls._cleanup = [] cls.disabled_list = [] @@ -1047,20 +1316,21 @@ def setUpClass(cls): cls.hypervisor = testClient.getHypervisorInfo() try: - cls.account = Account.create(cls.apiclient, cls.testdata['account'], admin=True) + cls.account = Account.create( + cls.apiclient, cls.testdata['account'], admin=True) cls.debug('Creating account') cls._cleanup.append(cls.account) - cls.service_offering = ServiceOffering.create(cls.apiclient, - cls.testdata['service_offering']) + cls.service_offering = ServiceOffering.create( + cls.apiclient, cls.testdata['service_offering']) cls._cleanup.append(cls.service_offering) - cls.disk_offering = DiskOffering.create(cls.apiclient, - cls.testdata['disk_offering']) - cls.resized_disk_offering = DiskOffering.create(cls.apiclient, - cls.testdata['resized_disk_offering']) + cls.disk_offering = DiskOffering.create( + cls.apiclient, cls.testdata['disk_offering']) + cls.resized_disk_offering = DiskOffering.create( + cls.apiclient, cls.testdata['resized_disk_offering']) cls._cleanup.append(cls.disk_offering) - cls.userapiclient = testClient.getUserApiClient(UserName=cls.account.name, - DomainName=cls.account.domain) + cls.userapiclient = testClient.getUserApiClient( + UserName=cls.account.name, DomainName=cls.account.domain) response = User.login(cls.userapiclient, username=cls.account.name, password=cls.testdata['account']['password']) @@ -1069,7 +1339,6 @@ def setUpClass(cls): cls.tearDownClass() raise e - @classmethod def tearDownClass(cls): try: @@ -1077,7 +1346,6 @@ def tearDownClass(cls): except Exception as e: raise Exception('Warning: Exception during cleanup: %s' % e) - def setUp(self): self.apiclient = self.testClient.getApiClient() self.cleanup = [] def tearDown(self): if self.disabled_list: for poolid in self.disabled_list: - if StoragePool.list(self.userapiclient, id=poolid)[0].state == 'Disabled': + if StoragePool.list(self.userapiclient, id=poolid)[ + 0].state == 'Disabled': try: - StoragePool.update(self.userapiclient, id=poolid, enabled=True) + StoragePool.update( + self.userapiclient, id=poolid, enabled=True) self.debug('Enabling: % s ' % poolid) except Exception as e: self.fail("Couldn't enable storage % s" % poolid) @@ -1126,18 +1396,26 @@ def test_01_cross_cluster_attach_disk(self): for cluster in clusters: try: self.debug('Processing for cluster % s ' % cluster.id) - self.list_storage = StoragePool.list(self.userapiclient,
clusterid=cluster.id, scope='CLUSTER') count_st_pools = len(self.list_storage) if count_st_pools > 1: - self.debug('Found % s storage pools in cluster % s, keeping one and disabling rest' % (count_st_pools, cluster.id)) + self.debug( + 'Found % s storage pools in cluster % s, keeping one and disabling rest' % + (count_st_pools, cluster.id)) for pool in self.list_storage[1:]: self.disabled_pool_1 = self.list_storage[1] if pool.state == 'Up': - self.debug('Trying to disable storage %s' % pool.id) + self.debug( + 'Trying to disable storage %s' % + pool.id) try: - StoragePool.update(self.userapiclient, id=pool.id, enabled=False) + StoragePool.update( + self.userapiclient, id=pool.id, enabled=False) self.disabled_list.append(pool.id) - self.debug('Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Appended to list of disabled pools. List is now: % s ' % + self.disabled_list) except Exception as e: raise e elif count_st_pools == 1: @@ -1148,15 +1426,19 @@ def test_01_cross_cluster_attach_disk(self): raise e try: - self.list_storage = StoragePool.list(self.userapiclient, scope='ZONE') + self.list_storage = StoragePool.list( + self.userapiclient, scope='ZONE') if self.list_storage: for pool in self.list_storage: if pool.state == 'Up': self.debug('Trying to disable storage % s' % pool.id) try: - StoragePool.update(self.userapiclient, id=pool.id, enabled=False) + StoragePool.update( + self.userapiclient, id=pool.id, enabled=False) self.disabled_list.append(pool.id) - self.debug('Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Appended to list of disabled pools. List is now: % s ' % + self.disabled_list) except Exception as e: self.fail("Couldn't disable storage % s" % pool.id) else: @@ -1165,32 +1447,46 @@ def test_01_cross_cluster_attach_disk(self): raise e # Step 1: Deploy VM in a cluster - self.virtual_machine_1 = VirtualMachine.create(self.userapiclient, - self.testdata['small'], - templateid=self.template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - zoneid=self.zone.id) + self.virtual_machine_1 = VirtualMachine.create( + self.userapiclient, + self.testdata['small'], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + zoneid=self.zone.id) verify_vm_state(self, self.virtual_machine_1.id, 'Running') - root_vol = Volume.list(self.userapiclient, virtualmachineid=self.virtual_machine_1.id, type='ROOT')[0] + root_vol = Volume.list( + self.userapiclient, + virtualmachineid=self.virtual_machine_1.id, + type='ROOT')[0] storage_1 = root_vol.storageid host_1 = self.virtual_machine_1.hostid - self.debug("Step 1: VM1 is running on % s host and % s storage pool" % (host_1, storage_1)) + self.debug( + "Step 1: VM1 is running on % s host and % s storage pool" % + (host_1, storage_1)) # Step 2: Live Migrate VM to another cluster - hosts_for_migration = Host.listForMigration(self.userapiclient, virtualmachineid=self.virtual_machine_1.id) - self.debug('Step 2: List of hosts suitable for migration: % s ' % hosts_for_migration) + hosts_for_migration = Host.listForMigration( + self.userapiclient, virtualmachineid=self.virtual_machine_1.id) + self.debug( + 'Step 2: List of hosts suitable for migration: % s ' % + hosts_for_migration) host_2 = None for host in hosts_for_migration: - self.debug('Step 2: Host Requires storage motion is % s ' % host.requiresStorageMotion) - 
if host.requiresStorageMotion == True: + self.debug( + 'Step 2: Host Requires storage motion is % s ' % + host.requiresStorageMotion) + if host.requiresStorageMotion: host_2 = host.id if host_2: - self.debug('Step 2: Migrating VM % s to Host % s' % (self.virtual_machine_1.id, host_2)) - self.virtual_machine_1.migrate_vm_with_volume(self.userapiclient, hostid=host_2) + self.debug( + 'Step 2: Migrating VM % s to Host % s' % + (self.virtual_machine_1.id, host_2)) + self.virtual_machine_1.migrate_vm_with_volume( + self.userapiclient, hostid=host_2) else: self.fail('Step 2: No host found suitable for migration') @@ -1202,42 +1498,62 @@ def test_01_cross_cluster_attach_disk(self): self.virtual_machine_1.attach_volume(self.userapiclient, self.volume_1) list_volume = Volume.list(self.userapiclient, id=self.volume_1.id) - - self.assertEqual(list_volume[0].virtualmachineid, - self.virtual_machine_1.id, - 'Step 3: Check if volume state (attached) is reflected') - self.debug('Step 3: volume id:% s successfully attached to vm id % s' % (self.volume_1.id, self.virtual_machine_1.id)) - - root_vol = Volume.list(self.userapiclient, virtualmachineid=self.virtual_machine_1.id, type='ROOT')[0] + self.assertEqual( + list_volume[0].virtualmachineid, + self.virtual_machine_1.id, + 'Step 3: Check if volume state (attached) is reflected') + self.debug( + 'Step 3: volume id:% s successfully attached to vm id % s' % + (self.volume_1.id, self.virtual_machine_1.id)) + + root_vol = Volume.list( + self.userapiclient, + virtualmachineid=self.virtual_machine_1.id, + type='ROOT')[0] storage_2 = root_vol.storageid - data_vol = Volume.list(self.userapiclient, virtualmachineid=self.virtual_machine_1.id, type='DATA')[0] - self.debug("Step 3: Data Volume is in storage pool: % s" % data_vol.storageid) - self.assertEqual(data_vol.storageid, - root_vol.storageid, - "Step 3: Root and Data disk should be running from 2nd storage pool where the VM was live migrated") - - # Step 4: Disable first Storage Pool and verify it is not listed in hosts suitable for migration + data_vol = Volume.list( + self.userapiclient, + virtualmachineid=self.virtual_machine_1.id, + type='DATA')[0] + self.debug( + "Step 3: Data Volume is in storage pool: % s" % + data_vol.storageid) + self.assertEqual( + data_vol.storageid, + root_vol.storageid, + "Step 3: Root and Data disk should be running from 2nd storage pool where the VM was live migrated") + + # Step 4: Disable first Storage Pool and verify it is not listed in + # hosts suitable for migration try: StoragePool.update(self.userapiclient, id=storage_1, enabled=False) self.disabled_list.append(storage_1) - self.debug('Step 4: Appended to list of disabled pools. List is now: % s ' % self.disabled_list) + self.debug( + 'Step 4: Appended to list of disabled pools. 
List is now: % s ' % + self.disabled_list) except Exception as e: self.fail("Step 4: Couldn't disable storage % s" % storage_1) - # Step 5: Disabled pool shouldn't be listed in hostsforMigration since all pools in the cluster are disabled - hosts_for_migration = Host.listForMigration(self.userapiclient, virtualmachineid=self.virtual_machine_1.id) - self.debug("Step 5: List of Hosts For Migration is % s" % hosts_for_migration) + # Step 5: Disabled pool shouldn't be listed in hostsforMigration since + # all pools in the cluster are disabled + hosts_for_migration = Host.listForMigration( + self.userapiclient, virtualmachineid=self.virtual_machine_1.id) + self.debug( + "Step 5: List of Hosts For Migration is % s" % + hosts_for_migration) if hosts_for_migration: for host in hosts_for_migration: if host_1 == host.id: - self.fail("Step 5: All pools in the cluster are disabled, hence host should not be listed for migration") + self.fail( + "Step 5: All pools in the cluster are disabled, hence host should not be listed for migration") # Step 6: Stop VM and Detach Disk self.virtual_machine_1.stop(self.userapiclient) verify_vm_state(self, self.virtual_machine_1.id, 'Stopped') verify_vm_storage_pool(self, self.virtual_machine_1.id, storage_2) self.debug("Step 6: Stopping VM and detaching disk") - self.virtual_machine_1.detach_volume(self.userapiclient, volume=self.volume_1) + self.virtual_machine_1.detach_volume( + self.userapiclient, volume=self.volume_1) # Step 7, 8: Enable Pool for Migrating VM and disable again try: @@ -1247,15 +1563,19 @@ def test_01_cross_cluster_attach_disk(self): except Exception as e: self.fail("Step 7: Couldn't enable storage % s" % storage_1) - self.virtual_machine_1.start(self.userapiclient) verify_vm_state(self, self.virtual_machine_1.id, 'Running') try: - self.debug('Step 8: Migrating VM % s to Host % s' % (self.virtual_machine_1.id, host_1)) - self.virtual_machine_1.migrate_vm_with_volume(self.userapiclient, hostid=host_1) + self.debug( + 'Step 8: Migrating VM % s to Host % s' % + (self.virtual_machine_1.id, host_1)) + self.virtual_machine_1.migrate_vm_with_volume( + self.userapiclient, hostid=host_1) except Exception as e: - self.fail("Step 8: Couldn't live migrate VM to host % s due to % s" % (host_1, e)) + self.fail( + "Step 8: Couldn't live migrate VM to host % s due to % s" % + (host_1, e)) # Step 9: disable pool again try: @@ -1265,16 +1585,19 @@ def test_01_cross_cluster_attach_disk(self): except Exception as e: self.fail("Step 9: Couldn't disable storage % s" % storage_1) - st_list = StoragePool.list(self.userapiclient, id=storage_1) - self.debug("9.5 Status of storage pool 1 % s is % s " % (st_list[0].name, st_list[0].state)) - + self.debug( + "9.5 Status of storage pool 1 % s is % s " % + (st_list[0].name, st_list[0].state)) - # Step 10: Try to attach data disk running from enabled pool with Root running in disabled pool - this should fail + # Step 10: Try to attach data disk running from enabled pool with Root + # running in disabled pool - this should fail with self.assertRaises(Exception): - self.virtual_machine_1.attach_volume(self.userapiclient, self.volume_1) - self.debug("Step 10: Trying to attach volume % s" % self.volume_1.id) - + self.virtual_machine_1.attach_volume( + self.userapiclient, self.volume_1) + self.debug( + "Step 10: Trying to attach volume % s" % + self.volume_1.id) # Step 11: Enable the pool and try to attach again - this should pass try: @@ -1289,8 +1612,10 @@ def test_01_cross_cluster_attach_disk(self): self.debug("Step 12: Trying to 
attach volume") list_volume = Volume.list(self.userapiclient, id=self.volume_1.id) - self.assertEqual(list_volume[0].virtualmachineid, - self.virtual_machine_1.id, - 'Step 12: Check if volume state (attached) is reflected') - self.debug('Step 12: volume id:%s successfully attached to vm id%s' % (self.volume_1.id, self.virtual_machine_1.id)) - + self.assertEqual( + list_volume[0].virtualmachineid, + self.virtual_machine_1.id, + 'Step 12: Check if volume state (attached) is reflected') + self.debug( + 'Step 12: volume id:%s successfully attached to vm id%s' % + (self.volume_1.id, self.virtual_machine_1.id))