diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 3d70876dd37..fd983861a94 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -2034,6 +2034,9 @@ def instance_destroy(context, instance_uuid, constraint=None):
     model_query(context, models.InstanceGroupMember).\
         filter_by(instance_id=instance_uuid).\
         soft_delete()
+    model_query(context, models.BlockDeviceMapping).\
+        filter_by(instance_uuid=instance_uuid).\
+        soft_delete()
     # NOTE(snikitin): We can't use model_query here, because there is no
     # column 'deleted' in 'tags' table.
     context.session.query(models.Tag).filter_by(
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
index 91a0bb37838..1ac46427c2a 100644
--- a/nova/tests/functional/test_server_group.py
+++ b/nova/tests/functional/test_server_group.py
@@ -18,6 +18,9 @@
 
 from oslo_config import cfg
 
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import api as db_api
 from nova import test
 from nova.tests import fixtures as nova_fixtures
 from nova.tests.functional.api import client
@@ -265,6 +268,33 @@ def test_boot_servers_with_affinity(self):
             self.assertIn(server['id'], members)
             self.assertEqual(host, server['OS-EXT-SRV-ATTR:host'])
 
+    def test_boot_servers_with_affinity_overquota(self):
+        # Tests that we check server group member quotas and clean up
+        # created resources when we fail with OverQuota.
+        self.flags(quota_server_group_members=1)
+        # make sure we start with 0 servers
+        servers = self.api.get_servers(detail=False)
+        self.assertEqual(0, len(servers))
+        created_group = self.api.post_server_groups(self.affinity)
+        ex = self.assertRaises(client.OpenStackApiException,
+                               self._boot_servers_to_group,
+                               created_group)
+        self.assertEqual(403, ex.response.status_code)
+        # _boot_servers_to_group creates 2 instances in the group in order,
+        # not multiple servers in a single request. Since our quota is 1,
+        # the first server create passes and the second fails, so we should
+        # be left with 1 server and its 1 block device mapping.
+        servers = self.api.get_servers(detail=False)
+        self.assertEqual(1, len(servers))
+        ctxt = context.get_admin_context()
+        servers = db.instance_get_all(ctxt)
+        self.assertEqual(1, len(servers))
+        ctxt_mgr = db_api.get_context_manager(ctxt)
+        with ctxt_mgr.reader.using(ctxt):
+            bdms = db_api._block_device_mapping_get_query(ctxt).all()
+        self.assertEqual(1, len(bdms))
+        self.assertEqual(servers[0]['uuid'], bdms[0]['instance_uuid'])
+
     def test_boot_servers_with_affinity_no_valid_host(self):
         created_group = self.api.post_server_groups(self.affinity)
         # Using big enough flavor to use up the resources on the host
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
index 5c43834ee8c..5d299b5773f 100644
--- a/nova/tests/unit/db/test_db_api.py
+++ b/nova/tests/unit/db/test_db_api.py
@@ -2738,6 +2738,23 @@ def test_instance_update_with_instance_uuid(self):
         system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
         self.assertEqual('baz', system_meta['original_image_ref'])
 
+    def test_delete_block_device_mapping_on_instance_destroy(self):
+        # Makes sure that the block device mapping is deleted when the
+        # related instance is deleted.
+        ctxt = context.get_admin_context()
+        instance = db.instance_create(ctxt, dict(display_name='bdm-test'))
+        bdm = {
+            'volume_id': uuidutils.generate_uuid(),
+            'device_name': '/dev/vdb',
+            'instance_uuid': instance['uuid'],
+        }
+        bdm = db.block_device_mapping_create(ctxt, bdm, legacy=False)
+        db.instance_destroy(ctxt, instance['uuid'])
+        # make sure the bdm is deleted as well
+        bdms = db.block_device_mapping_get_all_by_instance(
+            ctxt, instance['uuid'])
+        self.assertEqual([], bdms)
+
     def test_delete_instance_metadata_on_instance_destroy(self):
         ctxt = context.get_admin_context()
         # Create an instance with some metadata
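
Note for reviewers (not part of the patch): instance_destroy soft-deletes
rows rather than removing them, and the fix simply extends that to
BlockDeviceMapping. The sketch below approximates the semantics of the
soft_delete() call the patch adds, assuming columns matching Nova's
SoftDeleteMixin (id, deleted, deleted_at, updated_at); it is an
illustration of the behavior, not oslo.db's actual source:

    # Sketch only: approximates what Query.soft_delete() does to the rows
    # matched by model_query(...).filter_by(...). Assumes the model has
    # SoftDeleteMixin-style columns; this is not oslo.db's real code.
    import datetime

    from sqlalchemy import literal_column


    def soft_delete_sketch(query):
        """Soft-delete every row matched by `query` in one UPDATE.

        A live row has deleted == 0. Soft deletion sets `deleted` to the
        row's own `id` (so unique constraints that include `deleted` still
        allow a new live row with the same values) and stamps the
        deletion time.
        """
        now = datetime.datetime.utcnow()
        return query.update({'deleted': literal_column('id'),
                             'deleted_at': now,
                             'updated_at': now},
                            synchronize_session=False)

Because the rows are only marked, default readers such as
block_device_mapping_get_all_by_instance (which exclude soft-deleted rows)
stop returning them, which is what the new unit test asserts.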
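
Along the same lines, a hedged sketch of how one could additionally verify
that the mapping was soft-deleted rather than hard-deleted (the unit test
only checks the default reader); this assumes the read_deleted parameter of
nova.context.get_admin_context and that the BDM queries honor the context's
read_deleted setting:

    # Sketch only: checking both sides of a soft delete from a test.
    from nova import context
    from nova import db


    def assert_bdm_soft_deleted(instance_uuid):
        # With the default read_deleted='no', the mapping is gone ...
        ctxt = context.get_admin_context()
        assert db.block_device_mapping_get_all_by_instance(
            ctxt, instance_uuid) == []
        # ... but with read_deleted='yes' the row is still present, only
        # marked deleted, so archiving jobs can still find it.
        ctxt = context.get_admin_context(read_deleted='yes')
        bdms = db.block_device_mapping_get_all_by_instance(
            ctxt, instance_uuid)
        assert len(bdms) == 1
        assert bdms[0]['deleted'] != 0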