Skip to content

Commit

Permalink
Merge "Clean bdms and networks after deleting shelved VM"
Browse files Browse the repository at this point in the history
  • Loading branch information
Jenkins authored and openstack-gerrit committed Dec 10, 2014
2 parents cb3a8b5 + 54f4600 commit a844e1a
Show file tree
Hide file tree
Showing 2 changed files with 69 additions and 34 deletions.
58 changes: 40 additions & 18 deletions nova/compute/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -1531,15 +1531,14 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
instance=instance)
return

host = instance['host']
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)

project_id, user_id = quotas_obj.ids_from_instance(context, instance)

# At these states an instance has a snapshot associate.
if instance['vm_state'] in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED):
if instance.vm_state in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED):
snapshot_id = instance.system_metadata.get('shelved_image_id')
LOG.info(_LI("Working on deleting snapshot %s "
"from shelved instance..."),
Expand Down Expand Up @@ -1581,8 +1580,9 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
cb(context, instance, bdms, reservations=None)
quotas.commit()
return

if not host:
shelved_offloaded = (instance.vm_state
== vm_states.SHELVED_OFFLOADED)
if not instance.host and not shelved_offloaded:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
Expand All @@ -1600,13 +1600,14 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance.vm_state == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)

is_up = False
is_local_delete = True
try:
service = objects.Service.get_by_compute_host(
context.elevated(), instance.host)
if self.servicegroup_api.service_is_up(service):
is_up = True

if not shelved_offloaded:
service = objects.Service.get_by_compute_host(
context.elevated(), instance.host)
is_local_delete = not self.servicegroup_api.service_is_up(
service)
if not is_local_delete:
if (delete_type != delete_types.FORCE_DELETE
and original_task_state in (
task_states.DELETING,
Expand All @@ -1616,7 +1617,6 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
instance=instance)
quotas.rollback()
return

self._record_action_start(context, instance,
instance_actions.DELETE)

Expand All @@ -1633,8 +1633,10 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
except exception.ComputeHostNotFound:
pass

if not is_up:
# If compute node isn't up, just delete from DB
if is_local_delete:
# If instance is in shelved_offloaded state or compute node
# isn't up, delete instance from db and clean bdms info and
# network info
self._local_delete(context, instance, bdms, delete_type, cb)
quotas.commit()

Expand Down Expand Up @@ -1738,16 +1740,36 @@ def _create_reservations(self, context, instance, original_task_state,
return quotas

def _local_delete(self, context, instance, bdms, delete_type, cb):
LOG.warning(_LW("instance's host %s is down, deleting from "
"database"), instance['host'], instance=instance)
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
LOG.info(_LI("instance is in SHELVED_OFFLOADED state, cleanup"
" the instance's info from database."),
instance=instance)
else:
LOG.warning(_LW("instance's host %s is down, deleting from "
"database"), instance.host, instance=instance)
instance.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.start" % delete_type)

elevated = context.elevated()
if self.cell_type != 'api':
self.network_api.deallocate_for_instance(elevated,
instance)
# NOTE(liusheng): In nova-network multi_host scenario,deleting
# network info of the instance may need instance['host'] as
# destination host of RPC call. If instance in SHELVED_OFFLOADED
# state, instance['host'] is None, here, use shelved_host as host
# to deallocate network info and reset instance['host'] after that.
# Here we shouldn't use instance.save(), because this will mislead
# user who may think the instance's host has been changed, and
# actually, the instance.host is always None.
orig_host = instance.host
try:
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
instance['host'] = instance._system_metadata.get(
'shelved_host')
self.network_api.deallocate_for_instance(elevated,
instance)
finally:
instance['host'] = orig_host

# cleanup volumes
for bdm in bdms:
Expand Down
45 changes: 29 additions & 16 deletions nova/tests/unit/compute/test_compute_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -612,11 +612,11 @@ def _test_delete_shelved_part(self, inst):
def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
inst.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context, inst,
'%s.start' % delete_type)
self.compute_api.notifier, self.context, inst,
'%s.start' % delete_type)
self.context.elevated().AndReturn(self.context)
self.compute_api.network_api.deallocate_for_instance(
self.context, inst)
self.context, inst)
state = (delete_types.SOFT_DELETE in delete_type and
vm_states.SOFT_DELETED or
vm_states.DELETED)
Expand All @@ -631,9 +631,9 @@ def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
db.instance_destroy(self.context, inst.uuid,
constraint=None).AndReturn(fake_inst)
compute_utils.notify_about_instance_usage(
self.compute_api.notifier,
self.context, inst, '%s.end' % delete_type,
system_metadata=inst.system_metadata)
self.compute_api.notifier,
self.context, inst, '%s.end' % delete_type,
system_metadata=inst.system_metadata)

def _test_delete(self, delete_type, **attrs):
reservations = ['fake-resv']
Expand Down Expand Up @@ -716,15 +716,18 @@ def _test_delete(self, delete_type, **attrs):
self._test_delete_resized_part(inst)
if inst.vm_state == vm_states.SOFT_DELETED:
soft_delete = True
self.context.elevated().AndReturn(self.context)
db.service_get_by_compute_host(
self.context, inst.host).AndReturn(
test_service.fake_service)
self.compute_api.servicegroup_api.service_is_up(
mox.IsA(objects.Service)).AndReturn(
inst.host != 'down-host')

if inst.host == 'down-host':
if inst.vm_state != vm_states.SHELVED_OFFLOADED:
self.context.elevated().AndReturn(self.context)
db.service_get_by_compute_host(
self.context, inst.host).AndReturn(
test_service.fake_service)
self.compute_api.servicegroup_api.service_is_up(
mox.IsA(objects.Service)).AndReturn(
inst.host != 'down-host')

if (inst.host == 'down-host' or
inst.vm_state == vm_states.SHELVED_OFFLOADED):

self._test_downed_host_part(inst, updates, delete_time,
delete_type)
cast = False
Expand Down Expand Up @@ -815,7 +818,12 @@ def test_delete_soft(self):
self._test_delete(delete_types.SOFT_DELETE)

def test_delete_forced(self):
    """Force-delete succeeds for every vm_state.

    For SHELVED/SHELVED_OFFLOADED states an extra run supplies the
    shelved snapshot id in system_metadata, because _delete() reads
    'shelved_image_id' from the instance's system metadata to clean up
    the shelve snapshot.
    """
    fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
    for vm_state in self._get_vm_states():
        # Shelved instances normally carry the snapshot image id in
        # system metadata; exercise force-delete with it present.
        if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED):
            self._test_delete(delete_types.FORCE_DELETE,
                              vm_state=vm_state,
                              system_metadata=fake_sys_meta)
        # NOTE(review): shelved states are also force-deleted here
        # WITHOUT shelved_image_id metadata — presumably deliberate, to
        # cover the missing-snapshot path; confirm against _test_delete.
        self._test_delete(delete_types.FORCE_DELETE, vm_state=vm_state)

def test_delete_forced_when_task_state_deleting(self):
Expand All @@ -842,8 +850,13 @@ def test_no_delete_when_task_state_deleting(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}

for vm_state in self._get_vm_states():
if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED):
if vm_state == vm_states.SHELVED:
attrs.update({'system_metadata': fake_sys_meta})
if vm_state == vm_states.SHELVED_OFFLOADED:
# when instance in SHELVED_OFFLOADED state, we assume the
# instance cannot be in deleting task state, this is same to
# the case that instance.host is down, deleting locally.
continue

attrs.update({'vm_state': vm_state, 'task_state': 'deleting'})
reservations = ['fake-resv']
Expand Down

0 comments on commit a844e1a

Please sign in to comment.