From 2d7beddc0bb7830c2a8bf893b9221c0de568c55d Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Tue, 14 May 2013 10:44:13 +0100 Subject: [PATCH] Extract live-migration scheduler logic from the scheduler driver Before moving the control of live-migration into the conductor, extract the live-migration control logic into a separate class. The callback to select_hosts will be replaced by a new scheduler rpc method in a later changeset. Part of blueprint live-migration-to-conductor Change-Id: I6de33ada6dc377e20f8df07da92244f2c150b9fe --- nova/conductor/tasks/__init__.py | 11 + nova/conductor/tasks/live_migrate.py | 173 ++++++ nova/scheduler/chance.py | 5 + nova/scheduler/driver.py | 192 ------- nova/scheduler/filter_scheduler.py | 14 + nova/scheduler/manager.py | 13 +- nova/tests/conductor/tasks/__init__.py | 11 + .../conductor/tasks/test_live_migrate.py | 311 +++++++++++ nova/tests/integrated/test_api_samples.py | 22 +- nova/tests/scheduler/test_filter_scheduler.py | 139 ----- nova/tests/scheduler/test_scheduler.py | 506 +----------------- 11 files changed, 559 insertions(+), 838 deletions(-) create mode 100644 nova/conductor/tasks/__init__.py create mode 100644 nova/conductor/tasks/live_migrate.py create mode 100644 nova/tests/conductor/tasks/__init__.py create mode 100644 nova/tests/conductor/tasks/test_live_migrate.py diff --git a/nova/conductor/tasks/__init__.py b/nova/conductor/tasks/__init__.py new file mode 100644 index 00000000000..94e731d2014 --- /dev/null +++ b/nova/conductor/tasks/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py new file mode 100644 index 00000000000..3c7f2ac160a --- /dev/null +++ b/nova/conductor/tasks/live_migrate.py @@ -0,0 +1,173 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from nova.compute import flavors +from nova.compute import power_state +from nova.compute import rpcapi as compute_rpcapi +from nova import db +from nova import exception +from nova.image import glance +from nova.openstack.common import log as logging +from nova import servicegroup + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.import_opt('scheduler_max_attempts', 'nova.scheduler.driver') + + +class LiveMigrationTask(object): + def __init__(self, context, instance, destination, + block_migration, disk_over_commit, + select_hosts_callback): + self.context = context + self.instance = instance + self.destination = destination + self.block_migration = block_migration + self.disk_over_commit = disk_over_commit + self.select_hosts_callback = select_hosts_callback + self.source = instance['host'] + self.migrate_data = None + self.compute_rpcapi = compute_rpcapi.ComputeAPI() + self.servicegroup_api = servicegroup.API() + self.image_service = glance.get_default_image_service() + + def execute(self): + self._check_instance_is_running() + self._check_host_is_up(self.source) + + if not self.destination: + self.destination = self._find_destination() + else: + self._check_requested_destination() + + #TODO(johngarbutt) need to move complexity out of compute manager + return self.compute_rpcapi.live_migration(self.context, + host=self.source, + instance=self.instance, + dest=self.destination, + block_migration=self.block_migration, + migrate_data=self.migrate_data) + #TODO(johngarbutt) disk_over_commit? + + def rollback(self): + #TODO(johngarbutt) need to implement the clean up operation + raise NotImplementedError() + + def _check_instance_is_running(self): + if self.instance['power_state'] != power_state.RUNNING: + raise exception.InstanceNotRunning( + instance_id=self.instance['uuid']) + + def _check_host_is_up(self, host): + try: + service = db.service_get_by_compute_host(self.context, host) + except exception.NotFound: + raise exception.ComputeServiceUnavailable(host=host) + + if not self.servicegroup_api.service_is_up(service): + raise exception.ComputeServiceUnavailable(host=host) + + def _check_requested_destination(self): + self._check_destination_is_not_source() + self._check_host_is_up(self.destination) + self._check_destination_has_enough_memory() + self._check_compatible_with_source_hypervisor(self.destination) + self._call_livem_checks_on_host(self.destination) + + def _check_destination_is_not_source(self): + if self.destination == self.source: + raise exception.UnableToMigrateToSelf( + instance_id=self.instance['uuid'], host=self.destination) + + def _check_destination_has_enough_memory(self): + avail = self._get_compute_info(self.destination)['free_ram_mb'] + mem_inst = self.instance['memory_mb'] + + if not mem_inst or avail <= mem_inst: + instance_uuid = self.instance['uuid'] + dest = self.destination + reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: " + "Lack of memory(host:%(avail)s <= " + "instance:%(mem_inst)s)") + raise exception.MigrationPreCheckError(reason=reason % dict( + instance_uuid=instance_uuid, dest=dest, avail=avail, + mem_inst=mem_inst)) + + def _get_compute_info(self, host): + service_ref = db.service_get_by_compute_host(self.context, host) + return service_ref['compute_node'][0] + + def _check_compatible_with_source_hypervisor(self, destination): + source_info = self._get_compute_info(self.source) + destination_info = self._get_compute_info(destination) + + source_type = source_info['hypervisor_type'] + 
destination_type = destination_info['hypervisor_type'] + if source_type != destination_type: + raise exception.InvalidHypervisorType() + + source_version = source_info['hypervisor_version'] + destination_version = destination_info['hypervisor_version'] + if source_version > destination_version: + raise exception.DestinationHypervisorTooOld() + + def _call_livem_checks_on_host(self, destination): + self.migrate_data = self.compute_rpcapi.\ + check_can_live_migrate_destination(self.context, self.instance, + destination, self.block_migration, self.disk_over_commit) + + def _find_destination(self): + #TODO(johngarbutt) this retry loop should be shared + ignore_hosts = [self.source] + image = self.image_service.show(self.context, + self.instance['image_ref']) + instance_type = flavors.extract_flavor(self.instance) + + host = None + while host is None: + self._check_not_over_max_attempts(ignore_hosts) + + host = self._get_candidate_destination(image, + instance_type, ignore_hosts) + try: + self._check_compatible_with_source_hypervisor(host) + self._call_livem_checks_on_host(host) + except exception.Invalid as e: + LOG.debug(_("Skipping host: %(host)s because: %(e)s") % + {"host": host, "e": e}) + ignore_hosts.append(host) + host = None + return host + + def _get_candidate_destination(self, image, instance_type, ignore_hosts): + request_spec = {'instance_properties': self.instance, + 'instance_type': instance_type, + 'instance_uuids': [self.instance['uuid']], + 'image': image} + filter_properties = {'ignore_hosts': ignore_hosts} + #TODO(johngarbutt) this should be an rpc call to scheduler + return self.select_hosts_callback(self.context, request_spec, + filter_properties)[0] + + def _check_not_over_max_attempts(self, ignore_hosts): + attempts = len(ignore_hosts) + if attempts > CONF.scheduler_max_attempts: + msg = (_('Exceeded max scheduling attempts %(max_attempts)d for ' + 'instance %(instance_uuid)s during live migration') + % {'max_attempts': attempts, + 'instance_uuid': self.instance['uuid']}) + raise exception.NoValidHost(reason=msg) diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py index 0f8f3c45ad1..f0b1701e008 100644 --- a/nova/scheduler/chance.py +++ b/nova/scheduler/chance.py @@ -25,6 +25,7 @@ from oslo.config import cfg +from nova.compute import rpcapi as compute_rpcapi from nova import exception from nova.scheduler import driver @@ -35,6 +36,10 @@ class ChanceScheduler(driver.Scheduler): """Implements Scheduler as a random node selector.""" + def __init__(self, *args, **kwargs): + super(ChanceScheduler, self).__init__(*args, **kwargs) + self.compute_rpcapi = compute_rpcapi.ComputeAPI() + def _filter_hosts(self, request_spec, hosts, filter_properties): """Filter a list of hosts based on request_spec.""" diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index d5a1eedeac5..c4265285f4e 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -25,15 +25,11 @@ from oslo.config import cfg -from nova.compute import flavors -from nova.compute import power_state -from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api from nova import db from nova import exception -from nova.image import glance from nova import notifications from nova.openstack.common import importutils from nova.openstack.common import log as logging @@ -122,9 +118,7 @@ class Scheduler(object): def __init__(self): self.host_manager = 
importutils.import_object( CONF.scheduler_host_manager) - self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.servicegroup_api = servicegroup.API() - self.image_service = glance.get_default_image_service() def update_service_capabilities(self, service_name, host, capabilities): """Process a capability update from a service node.""" @@ -168,189 +162,3 @@ def select_hosts(self, context, request_spec, filter_properties): """Must override select_hosts method for scheduler to work.""" msg = _("Driver must implement select_hosts") raise NotImplementedError(msg) - - def schedule_live_migration(self, context, instance, dest, - block_migration, disk_over_commit): - """Live migration scheduling method. - - :param context: - :param instance: instance dict - :param dest: destination host - :param block_migration: if true, block_migration. - :param disk_over_commit: if True, consider real(not virtual) - disk size. - - :return: - The host where instance is running currently. - Then scheduler send request that host. - """ - # Check we can do live migration - self._live_migration_src_check(context, instance) - - if dest is None: - # Let scheduler select a dest host, retry next best until success - # or no more valid hosts. - ignore_hosts = [instance['host']] - while dest is None: - dest = self._live_migration_dest_check(context, instance, dest, - ignore_hosts) - try: - self._live_migration_common_check(context, instance, dest) - migrate_data = self.compute_rpcapi.\ - check_can_live_migrate_destination(context, instance, - dest, - block_migration, - disk_over_commit) - except exception.Invalid: - ignore_hosts.append(dest) - dest = None - continue - else: - # Test the given dest host - self._live_migration_dest_check(context, instance, dest) - self._live_migration_common_check(context, instance, dest) - migrate_data = self.compute_rpcapi.\ - check_can_live_migrate_destination(context, instance, dest, - block_migration, - disk_over_commit) - - # Perform migration - src = instance['host'] - self.compute_rpcapi.live_migration(context, host=src, - instance=instance, dest=dest, - block_migration=block_migration, - migrate_data=migrate_data) - - def _live_migration_src_check(self, context, instance_ref): - """Live migration check routine (for src host). - - :param context: security context - :param instance_ref: nova.db.sqlalchemy.models.Instance object - - """ - # TODO(johngar) why is this not in the API layer? - # Checking instance is running. - if instance_ref['power_state'] != power_state.RUNNING: - raise exception.InstanceNotRunning( - instance_id=instance_ref['uuid']) - - # Checking src host exists and compute node - src = instance_ref['host'] - try: - service = db.service_get_by_compute_host(context, src) - except exception.NotFound: - raise exception.ComputeServiceUnavailable(host=src) - - # Checking src host is alive. - if not self.servicegroup_api.service_is_up(service): - raise exception.ComputeServiceUnavailable(host=src) - - def _live_migration_dest_check(self, context, instance_ref, dest, - ignore_hosts=None): - """Live migration check routine (for destination host). - - :param context: security context - :param instance_ref: nova.db.sqlalchemy.models.Instance object - :param dest: destination host - :param ignore_hosts: hosts that should be avoided as dest host - """ - - # If dest is not specified, have scheduler pick one. 
- if dest is None: - instance_type = flavors.extract_flavor(instance_ref) - if not instance_ref['image_ref']: - image = None - else: - image = self.image_service.show(context, - instance_ref['image_ref']) - request_spec = {'instance_properties': instance_ref, - 'instance_type': instance_type, - 'instance_uuids': [instance_ref['uuid']], - 'image': image} - filter_properties = {'ignore_hosts': ignore_hosts} - return self.select_hosts(context, request_spec, - filter_properties)[0] - - # Checking whether The host where instance is running - # and dest is not same. - src = instance_ref['host'] - if dest == src: - raise exception.UnableToMigrateToSelf( - instance_id=instance_ref['uuid'], host=dest) - - # Checking dest exists and compute node. - try: - dservice_ref = db.service_get_by_compute_host(context, dest) - except exception.NotFound: - raise exception.ComputeServiceUnavailable(host=dest) - - # Checking dest host is alive. - if not self.servicegroup_api.service_is_up(dservice_ref): - raise exception.ComputeServiceUnavailable(host=dest) - - # Check memory requirements - self._assert_compute_node_has_enough_memory(context, - instance_ref, dest) - - return dest - - def _live_migration_common_check(self, context, instance_ref, dest): - """Live migration common check routine. - - The following checks are based on - http://wiki.libvirt.org/page/TodoPreMigrationChecks - - :param context: security context - :param instance_ref: nova.db.sqlalchemy.models.Instance object - :param dest: destination host - """ - dservice_ref = self._get_compute_info(context, dest) - src = instance_ref['host'] - oservice_ref = self._get_compute_info(context, src) - - # Checking hypervisor is same. - orig_hypervisor = oservice_ref['hypervisor_type'] - dest_hypervisor = dservice_ref['hypervisor_type'] - if orig_hypervisor != dest_hypervisor: - raise exception.InvalidHypervisorType() - - # Checking hypervisor version. - orig_hypervisor = oservice_ref['hypervisor_version'] - dest_hypervisor = dservice_ref['hypervisor_version'] - if orig_hypervisor > dest_hypervisor: - raise exception.DestinationHypervisorTooOld() - - def _assert_compute_node_has_enough_memory(self, context, - instance_ref, dest): - """Checks if destination host has enough memory for live migration. 
- - - :param context: security context - :param instance_ref: nova.db.sqlalchemy.models.Instance object - :param dest: destination host - - """ - # Getting total available memory of host - avail = self._get_compute_info(context, dest)['free_ram_mb'] - - mem_inst = instance_ref['memory_mb'] - if not mem_inst or avail <= mem_inst: - instance_uuid = instance_ref['uuid'] - reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: " - "Lack of memory(host:%(avail)s <= " - "instance:%(mem_inst)s)") - raise exception.MigrationPreCheckError(reason=reason % - {'instance_uuid': instance_uuid, 'dest': dest, 'avail': avail, - 'mem_inst': mem_inst}) - - def _get_compute_info(self, context, host): - """get compute node's information specified by key - - :param context: security context - :param host: hostname(must be compute node) - :param key: column name of compute_nodes - :return: value specified by key - - """ - service_ref = db.service_get_by_compute_host(context, host) - return service_ref['compute_node'][0] diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index 08cb6a20eb5..4e5662e652a 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -24,6 +24,8 @@ from oslo.config import cfg from nova.compute import flavors +from nova.compute import rpcapi as compute_rpcapi +from nova import db from nova import exception from nova.openstack.common import log as logging from nova.openstack.common.notifier import api as notifier @@ -54,6 +56,7 @@ class FilterScheduler(driver.Scheduler): def __init__(self, *args, **kwargs): super(FilterScheduler, self).__init__(*args, **kwargs) self.options = scheduler_options.SchedulerOptions() + self.compute_rpcapi = compute_rpcapi.ComputeAPI() def schedule_run_instance(self, context, request_spec, admin_password, injected_files, @@ -377,6 +380,17 @@ def _schedule(self, context, request_spec, filter_properties, filter_properties['group_hosts'].append(chosen_host.obj.host) return selected_hosts + def _get_compute_info(self, context, dest): + """Get compute node's information + + :param context: security context + :param dest: hostname (must be compute node) + :return: dict of compute node information + + """ + service_ref = db.service_get_by_compute_host(context, dest) + return service_ref['compute_node'][0] + def _assert_compute_node_has_enough_memory(self, context, instance_ref, dest): """Checks if destination host has enough memory for live migration. 
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 5c99feb0314..3d62ff9b09b 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -30,6 +30,7 @@ from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api +from nova.conductor.tasks import live_migrate import nova.context from nova import exception from nova import manager @@ -102,9 +103,8 @@ def create_volume(self, context, volume_id, snapshot_id, def live_migration(self, context, instance, dest, block_migration, disk_over_commit): try: - return self.driver.schedule_live_migration( - context, instance, dest, - block_migration, disk_over_commit) + self._schedule_live_migration(context, instance, dest, + block_migration, disk_over_commit) except (exception.NoValidHost, exception.ComputeServiceUnavailable, exception.InvalidHypervisorType, @@ -131,6 +131,13 @@ def live_migration(self, context, instance, dest, {'vm_state': vm_states.ERROR}, context, ex, request_spec) + def _schedule_live_migration(self, context, instance, dest, + block_migration, disk_over_commit): + task = live_migrate.LiveMigrationTask(context, instance, + dest, block_migration, disk_over_commit, + self.driver.select_hosts) + return task.execute() + def run_instance(self, context, request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties): diff --git a/nova/tests/conductor/tasks/__init__.py b/nova/tests/conductor/tasks/__init__.py new file mode 100644 index 00000000000..94e731d2014 --- /dev/null +++ b/nova/tests/conductor/tasks/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/nova/tests/conductor/tasks/test_live_migrate.py b/nova/tests/conductor/tasks/test_live_migrate.py new file mode 100644 index 00000000000..c54e53b1a05 --- /dev/null +++ b/nova/tests/conductor/tasks/test_live_migrate.py @@ -0,0 +1,311 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova.compute import flavors +from nova.compute import power_state +from nova.conductor.tasks import live_migrate +from nova import db +from nova import exception +from nova import test + + +class LiveMigrationTaskTestCase(test.TestCase): + def setUp(self): + super(LiveMigrationTaskTestCase, self).setUp() + self.context = "context" + self.instance_host = "host" + self.instance_uuid = "uuid" + self.instance_image = "image_ref" + self.instance = { + "host": self.instance_host, + "uuid": self.instance_uuid, + "power_state": power_state.RUNNING, + "memory_mb": 512, + "image_ref": self.instance_image} + self.destination = "destination" + self.block_migration = "bm" + self.disk_over_commit = "doc" + self.select_hosts_callback = self._select_hosts_callback + self._generate_task() + + def _generate_task(self): + self.task = live_migrate.LiveMigrationTask(self.context, + self.instance, self.destination, self.block_migration, + self.disk_over_commit, self.select_hosts_callback) + + def _select_hosts_callback(self, *args): + return ["host1"] + + def test_execute_with_destination(self): + self.mox.StubOutWithMock(self.task, '_check_host_is_up') + self.mox.StubOutWithMock(self.task, '_check_requested_destination') + self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration') + + self.task._check_host_is_up(self.instance_host) + self.task._check_requested_destination() + self.task.compute_rpcapi.live_migration(self.context, + host=self.instance_host, + instance=self.instance, + dest=self.destination, + block_migration=self.block_migration, + migrate_data=None).AndReturn("bob") + + self.mox.ReplayAll() + self.assertEqual("bob", self.task.execute()) + + def test_execute_without_destination(self): + self.destination = None + self._generate_task() + self.assertEqual(None, self.task.destination) + + self.mox.StubOutWithMock(self.task, '_check_host_is_up') + self.mox.StubOutWithMock(self.task, '_find_destination') + self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration') + + self.task._check_host_is_up(self.instance_host) + self.task._find_destination().AndReturn("found_host") + self.task.compute_rpcapi.live_migration(self.context, + host=self.instance_host, + instance=self.instance, + dest="found_host", + block_migration=self.block_migration, + migrate_data=None).AndReturn("bob") + + self.mox.ReplayAll() + self.assertEqual("bob", self.task.execute()) + + def test_check_instance_is_running_passes(self): + self.task._check_instance_is_running() + + def test_check_instance_is_running_fails_when_shutdown(self): + self.task.instance['power_state'] = power_state.SHUTDOWN + self.assertRaises(exception.InstanceNotRunning, + self.task._check_instance_is_running) + + def test_check_instance_host_is_up(self): + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') + self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up') + + db.service_get_by_compute_host(self.context, + "host").AndReturn("service") + self.task.servicegroup_api.service_is_up("service").AndReturn(True) + + self.mox.ReplayAll() + self.task._check_host_is_up("host") + + def test_check_instance_host_is_up_fails_if_not_up(self): + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') + self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up') + + db.service_get_by_compute_host(self.context, + "host").AndReturn("service") + self.task.servicegroup_api.service_is_up("service").AndReturn(False) + + self.mox.ReplayAll() + self.assertRaises(exception.ComputeServiceUnavailable, + 
self.task._check_host_is_up, "host") + + def test_check_instance_host_is_up_fails_if_not_found(self): + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') + + db.service_get_by_compute_host(self.context, + "host").AndRaise(exception.NotFound) + + self.mox.ReplayAll() + self.assertRaises(exception.ComputeServiceUnavailable, + self.task._check_host_is_up, "host") + + def test_check_requested_destination(self): + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') + self.mox.StubOutWithMock(self.task, '_get_compute_info') + self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up') + self.mox.StubOutWithMock(self.task.compute_rpcapi, + 'check_can_live_migrate_destination') + + db.service_get_by_compute_host(self.context, + self.destination).AndReturn("service") + self.task.servicegroup_api.service_is_up("service").AndReturn(True) + hypervisor_details = { + "hypervisor_type": "a", + "hypervisor_version": 6.1, + "free_ram_mb": 513 + } + self.task._get_compute_info(self.destination)\ + .AndReturn(hypervisor_details) + self.task._get_compute_info(self.instance_host)\ + .AndReturn(hypervisor_details) + self.task._get_compute_info(self.destination)\ + .AndReturn(hypervisor_details) + + self.task.compute_rpcapi.check_can_live_migrate_destination( + self.context, self.instance, self.destination, + self.block_migration, self.disk_over_commit).AndReturn( + "migrate_data") + + self.mox.ReplayAll() + self.task._check_requested_destination() + self.assertEqual("migrate_data", self.task.migrate_data) + + def test_check_requested_destination_fails_with_same_dest(self): + self.task.destination = "same" + self.task.source = "same" + self.assertRaises(exception.UnableToMigrateToSelf, + self.task._check_requested_destination) + + def test_check_requested_destination_fails_when_destination_is_up(self): + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') + + db.service_get_by_compute_host(self.context, + self.destination).AndRaise(exception.NotFound) + + self.mox.ReplayAll() + self.assertRaises(exception.ComputeServiceUnavailable, + self.task._check_requested_destination) + + def test_check_requested_destination_fails_with_not_enough_memory(self): + self.mox.StubOutWithMock(self.task, '_check_host_is_up') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') + + self.task._check_host_is_up(self.destination) + db.service_get_by_compute_host(self.context, + self.destination).AndReturn({ + "compute_node": [{"free_ram_mb": 511}] + }) + + self.mox.ReplayAll() + self.assertRaises(exception.MigrationPreCheckError, + self.task._check_requested_destination) + + def test_check_requested_destination_fails_with_hypervisor_diff(self): + self.mox.StubOutWithMock(self.task, '_check_host_is_up') + self.mox.StubOutWithMock(self.task, + '_check_destination_has_enough_memory') + self.mox.StubOutWithMock(self.task, '_get_compute_info') + + self.task._check_host_is_up(self.destination) + self.task._check_destination_has_enough_memory() + self.task._get_compute_info(self.instance_host).AndReturn({ + "hypervisor_type": "b" + }) + self.task._get_compute_info(self.destination).AndReturn({ + "hypervisor_type": "a" + }) + + self.mox.ReplayAll() + self.assertRaises(exception.InvalidHypervisorType, + self.task._check_requested_destination) + + def test_check_requested_destination_fails_with_hypervisor_too_old(self): + self.mox.StubOutWithMock(self.task, '_check_host_is_up') + self.mox.StubOutWithMock(self.task, + '_check_destination_has_enough_memory') + self.mox.StubOutWithMock(self.task, 
'_get_compute_info') + + self.task._check_host_is_up(self.destination) + self.task._check_destination_has_enough_memory() + self.task._get_compute_info(self.instance_host).AndReturn({ + "hypervisor_type": "a", + "hypervisor_version": 7 + }) + self.task._get_compute_info(self.destination).AndReturn({ + "hypervisor_type": "a", + "hypervisor_version": 6 + }) + + self.mox.ReplayAll() + self.assertRaises(exception.DestinationHypervisorTooOld, + self.task._check_requested_destination) + + def test_find_destination_works(self): + self.mox.StubOutWithMock(self.task.image_service, 'show') + self.mox.StubOutWithMock(flavors, 'extract_flavor') + self.mox.StubOutWithMock(self.task, + '_check_compatible_with_source_hypervisor') + self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') + + self.task.image_service.show(self.context, + self.instance_image).AndReturn("image") + flavors.extract_flavor(self.instance).AndReturn("inst_type") + self.task._check_compatible_with_source_hypervisor("host1") + self.task._call_livem_checks_on_host("host1") + + self.mox.ReplayAll() + self.assertEqual("host1", self.task._find_destination()) + + def _test_find_destination_retry_hypervisor_raises(self, error): + self.mox.StubOutWithMock(self.task.image_service, 'show') + self.mox.StubOutWithMock(flavors, 'extract_flavor') + self.mox.StubOutWithMock(self.task, + '_check_compatible_with_source_hypervisor') + self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') + + self.task.image_service.show(self.context, + self.instance_image).AndReturn("image") + flavors.extract_flavor(self.instance).AndReturn("inst_type") + self.task._check_compatible_with_source_hypervisor("host1")\ + .AndRaise(error) + + self.task._check_compatible_with_source_hypervisor("host1") + self.task._call_livem_checks_on_host("host1") + + self.mox.ReplayAll() + self.assertEqual("host1", self.task._find_destination()) + + def test_find_destination_retry_with_old_hypervisor(self): + self._test_find_destination_retry_hypervisor_raises( + exception.DestinationHypervisorTooOld) + + def test_find_destination_retry_with_invalid_hypervisor_type(self): + self._test_find_destination_retry_hypervisor_raises( + exception.InvalidHypervisorType) + + def test_find_destination_retry_with_invalid_livem_checks(self): + self.mox.StubOutWithMock(self.task.image_service, 'show') + self.mox.StubOutWithMock(flavors, 'extract_flavor') + self.mox.StubOutWithMock(self.task, + '_check_compatible_with_source_hypervisor') + self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') + + self.task.image_service.show(self.context, + self.instance_image).AndReturn("image") + flavors.extract_flavor(self.instance).AndReturn("inst_type") + self.task._check_compatible_with_source_hypervisor("host1") + self.task._call_livem_checks_on_host("host1")\ + .AndRaise(exception.Invalid) + + self.task._check_compatible_with_source_hypervisor("host1") + self.task._call_livem_checks_on_host("host1") + + self.mox.ReplayAll() + self.assertEqual("host1", self.task._find_destination()) + + def test_find_destination_retry_exceeds_max(self): + self.flags(scheduler_max_attempts=1) + self.mox.StubOutWithMock(self.task.image_service, 'show') + self.mox.StubOutWithMock(flavors, 'extract_flavor') + self.mox.StubOutWithMock(self.task, + '_check_compatible_with_source_hypervisor') + self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') + + self.task.image_service.show(self.context, + self.instance_image).AndReturn("image") + 
flavors.extract_flavor(self.instance).AndReturn("inst_type") + self.task._check_compatible_with_source_hypervisor("host1")\ + .AndRaise(exception.DestinationHypervisorTooOld) + + self.mox.ReplayAll() + self.assertRaises(exception.NoValidHost, self.task._find_destination) + + def test_not_implemented_rollback(self): + self.assertRaises(NotImplementedError, self.task.rollback) diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 87ed6c6ce21..380b690797f 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -47,7 +47,7 @@ from nova.openstack.common import log as logging from nova.openstack.common import timeutils import nova.quota -from nova.scheduler import driver +from nova.scheduler import manager as scheduler_manager from nova.servicegroup import api as service_group_api from nova import test from nova.tests.api.openstack.compute.contrib import test_coverage_ext @@ -2213,23 +2213,13 @@ def image_details(self, context, **kwargs): def test_post_live_migrate_server(self): # Get api samples to server live migrate request. - def fake_live_migration_src_check(self, context, instance_ref): - """Skip live migration scheduler checks.""" + def fake_live_migration(self, context, instance, dest, + block_migration, disk_over_commit): return - def fake_live_migration_dest_check(self, context, instance_ref, dest): - """Skip live migration scheduler checks.""" - return dest - - def fake_live_migration_common(self, context, instance_ref, dest): - """Skip live migration scheduler checks.""" - return - self.stubs.Set(driver.Scheduler, '_live_migration_src_check', - fake_live_migration_src_check) - self.stubs.Set(driver.Scheduler, '_live_migration_dest_check', - fake_live_migration_dest_check) - self.stubs.Set(driver.Scheduler, '_live_migration_common_check', - fake_live_migration_common) + self.stubs.Set(scheduler_manager.SchedulerManager, + 'live_migration', + fake_live_migration) def fake_get_compute(context, host): service = dict(host=host, diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py index d6cc7808e0a..ac2e73ec7ac 100644 --- a/nova/tests/scheduler/test_filter_scheduler.py +++ b/nova/tests/scheduler/test_filter_scheduler.py @@ -25,12 +25,10 @@ from nova import context from nova import db from nova import exception -from nova.openstack.common import rpc from nova.scheduler import driver from nova.scheduler import filter_scheduler from nova.scheduler import host_manager from nova.scheduler import weights -from nova import servicegroup from nova.tests.scheduler import fakes from nova.tests.scheduler import test_scheduler @@ -393,143 +391,6 @@ def test_prep_resize_post_populates_retry(self): self.assertEqual([['host', 'node']], filter_properties['retry']['hosts']) - def test_live_migration_dest_check_service_memory_overcommit(self): - instance = self._live_migration_instance() - - # Live-migration should work since default is to overcommit memory. 
- self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(db, 'service_get_by_compute_host') - self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') - self.mox.StubOutWithMock(self.driver, '_get_compute_info') - self.mox.StubOutWithMock(self.driver, '_live_migration_common_check') - self.mox.StubOutWithMock(rpc, 'call') - self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration') - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - - self.driver._live_migration_src_check(self.context, instance) - db.service_get_by_compute_host(self.context, - dest).AndReturn('fake_service3') - self.servicegroup_api.service_is_up('fake_service3').AndReturn(True) - - self.driver._get_compute_info(self.context, dest).AndReturn( - {'memory_mb': 2048, - 'free_disk_gb': 512, - 'local_gb_used': 512, - 'free_ram_mb': 512, - 'local_gb': 1024, - 'vcpus': 4, - 'vcpus_used': 2, - 'updated_at': None}) - - self.driver._live_migration_common_check(self.context, instance, dest) - - rpc.call(self.context, "compute.fake_host2", - {"method": 'check_can_live_migrate_destination', - "namespace": None, - "args": {'instance': instance, - 'block_migration': block_migration, - 'disk_over_commit': disk_over_commit}, - "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}, - None).AndReturn({}) - - self.driver.compute_rpcapi.live_migration(self.context, - host=instance['host'], instance=instance, dest=dest, - block_migration=block_migration, migrate_data={}) - - self.mox.ReplayAll() - result = self.driver.schedule_live_migration(self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - self.assertEqual(result, None) - - def test_live_migration_assert_memory_no_overcommit(self): - # Test that memory check passes with no memory overcommit. - def fake_get(context, host): - return {'memory_mb': 2048, - 'free_disk_gb': 512, - 'local_gb_used': 512, - 'free_ram_mb': 1024, - 'local_gb': 1024, - 'vcpus': 4, - 'vcpus_used': 2, - 'updated_at': None} - - self.stubs.Set(self.driver, '_get_compute_info', fake_get) - - self.flags(ram_allocation_ratio=1.0) - instance = self._live_migration_instance() - dest = 'fake_host2' - result = self.driver._assert_compute_node_has_enough_memory( - self.context, instance, dest) - self.assertEqual(result, None) - - def test_live_migration_assert_memory_no_overcommit_lack_memory(self): - # Test that memory check fails with no memory overcommit. - def fake_get(context, host): - return {'memory_mb': 2048, - 'free_disk_gb': 512, - 'local_gb_used': 512, - 'free_ram_mb': 1023, - 'local_gb': 1024, - 'vcpus': 4, - 'vcpus_used': 2, - 'updated_at': None} - - self.stubs.Set(self.driver, '_get_compute_info', fake_get) - - self.flags(ram_allocation_ratio=1.0) - instance = self._live_migration_instance() - dest = 'fake_host2' - self.assertRaises(exception.MigrationError, - self.driver._assert_compute_node_has_enough_memory, - context, instance, dest) - - def test_live_migration_assert_memory_overcommit(self): - # Test that memory check passes with memory overcommit. 
- def fake_get(context, host): - return {'memory_mb': 2048, - 'free_disk_gb': 512, - 'local_gb_used': 512, - 'free_ram_mb': -1024, - 'local_gb': 1024, - 'vcpus': 4, - 'vcpus_used': 2, - 'updated_at': None} - - self.stubs.Set(self.driver, '_get_compute_info', fake_get) - - self.flags(ram_allocation_ratio=2.0) - instance = self._live_migration_instance() - dest = 'fake_host2' - result = self.driver._assert_compute_node_has_enough_memory( - self.context, instance, dest) - self.assertEqual(result, None) - - def test_live_migration_assert_memory_overcommit_lack_memory(self): - # Test that memory check fails with memory overcommit. - def fake_get(context, host): - return {'memory_mb': 2048, - 'free_disk_gb': 512, - 'local_gb_used': 512, - 'free_ram_mb': -1025, - 'local_gb': 1024, - 'vcpus': 4, - 'vcpus_used': 2, - 'updated_at': None} - - self.stubs.Set(self.driver, '_get_compute_info', fake_get) - - self.flags(ram_allocation_ratio=2.0) - instance = self._live_migration_instance() - dest = 'fake_host2' - self.assertRaises(exception.MigrationError, - self.driver._assert_compute_node_has_enough_memory, - self.context, instance, dest) - def test_basic_schedule_run_instances_anti_affinity(self): filter_properties = {'scheduler_hints': {'group': 'cats'}} diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 0574f6d2e21..99de672c49d 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -22,20 +22,16 @@ import mox from nova.compute import api as compute_api -from nova.compute import flavors -from nova.compute import power_state -from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api +from nova.conductor.tasks import live_migrate from nova import context from nova import db from nova import exception from nova.image import glance -from nova.openstack.common import jsonutils from nova.openstack.common.notifier import api as notifier -from nova.openstack.common import rpc from nova.openstack.common.rpc import common as rpc_common from nova.scheduler import driver from nova.scheduler import manager @@ -45,7 +41,6 @@ from nova.tests.image import fake as fake_image from nova.tests import matchers from nova.tests.scheduler import fakes -from nova import utils class SchedulerManagerTestCase(test.NoDBTestCase): @@ -220,11 +215,11 @@ def test_live_migration_schedule_novalidhost(self): block_migration = False disk_over_commit = False - self._mox_schedule_method_helper('schedule_live_migration') + self.mox.StubOutWithMock(self.manager, '_schedule_live_migration') self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') - self.manager.driver.schedule_live_migration(self.context, + self.manager._schedule_live_migration(self.context, inst, dest, block_migration, disk_over_commit).AndRaise( exception.NoValidHost(reason="")) db.instance_update_and_get_original(self.context, inst["uuid"], @@ -253,11 +248,11 @@ def test_live_migration_compute_service_notavailable(self): block_migration = False disk_over_commit = False - self._mox_schedule_method_helper('schedule_live_migration') + self.mox.StubOutWithMock(self.manager, '_schedule_live_migration') self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') - 
self.manager.driver.schedule_live_migration(self.context, + self.manager._schedule_live_migration(self.context, inst, dest, block_migration, disk_over_commit).AndRaise( exception.ComputeServiceUnavailable(host="src")) db.instance_update_and_get_original(self.context, inst["uuid"], @@ -277,6 +272,17 @@ def test_live_migration_compute_service_notavailable(self): self.context, inst, dest, block_migration, disk_over_commit) + def test_live_migrate(self): + instance = {'host': 'h'} + self.mox.StubOutClassWithMocks(live_migrate, "LiveMigrationTask") + task = live_migrate.LiveMigrationTask(self.context, instance, + "dest", "bm", "doc", self.manager.driver.select_hosts) + task.execute() + + self.mox.ReplayAll() + self.manager.live_migration(self.context, instance, "dest", + "bm", "doc") + def test_live_migration_set_vmstate_error(self): inst = {"uuid": "fake-instance-id", "vm_state": vm_states.ACTIVE, } @@ -285,11 +291,11 @@ def test_live_migration_set_vmstate_error(self): block_migration = False disk_over_commit = False - self._mox_schedule_method_helper('schedule_live_migration') + self.mox.StubOutWithMock(self.manager, '_schedule_live_migration') self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') - self.manager.driver.schedule_live_migration(self.context, + self.manager._schedule_live_migration(self.context, inst, dest, block_migration, disk_over_commit).AndRaise( ValueError) db.instance_update_and_get_original(self.context, inst["uuid"], @@ -464,482 +470,6 @@ def test_hosts_up(self): result = self.driver.hosts_up(self.context, self.topic) self.assertEqual(result, ['host2']) - def _live_migration_instance(self): - inst_type = {'memory_mb': 1024, 'root_gb': 40, 'deleted_at': None, - 'name': u'm1.medium', 'deleted': 0, 'created_at': None, - 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, - 'vcpus': 2, 'extra_specs': {}, 'swap': 0, - 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': u'3', - 'vcpu_weight': None, 'id': 1} - - sys_meta = utils.dict_to_metadata( - flavors.save_flavor_info({}, inst_type)) - return {'id': 31337, - 'uuid': 'fake_uuid', - 'name': 'fake-instance', - 'host': 'fake_host1', - 'power_state': power_state.RUNNING, - 'memory_mb': 1024, - 'root_gb': 1024, - 'ephemeral_gb': 0, - 'vm_state': '', - 'task_state': '', - 'instance_type_id': inst_type['id'], - 'image_ref': 'fake-image-ref', - 'system_metadata': sys_meta} - - def test_live_migration_basic(self): - # Test basic schedule_live_migration functionality. 
- self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check') - self.mox.StubOutWithMock(self.driver, '_live_migration_common_check') - self.mox.StubOutWithMock(self.driver.compute_rpcapi, - 'check_can_live_migrate_destination') - self.mox.StubOutWithMock(self.driver.compute_rpcapi, - 'live_migration') - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - instance = jsonutils.to_primitive(self._live_migration_instance()) - - self.driver._live_migration_src_check(self.context, instance) - self.driver._live_migration_dest_check(self.context, instance, - dest).AndReturn(dest) - self.driver._live_migration_common_check(self.context, instance, - dest) - self.driver.compute_rpcapi.check_can_live_migrate_destination( - self.context, instance, dest, block_migration, - disk_over_commit).AndReturn({}) - self.driver.compute_rpcapi.live_migration(self.context, - host=instance['host'], instance=instance, dest=dest, - block_migration=block_migration, migrate_data={}) - - self.mox.ReplayAll() - self.driver.schedule_live_migration(self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - - def test_live_migration_all_checks_pass(self): - # Test live migration when all checks pass. - - self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') - self.mox.StubOutWithMock(db, 'service_get_by_compute_host') - self.mox.StubOutWithMock(rpc, 'call') - self.mox.StubOutWithMock(self.driver.compute_rpcapi, - 'live_migration') - - dest = 'fake_host2' - block_migration = True - disk_over_commit = True - instance = jsonutils.to_primitive(self._live_migration_instance()) - - # Source checks - db.service_get_by_compute_host(self.context, - instance['host']).AndReturn('fake_service2') - self.servicegroup_api.service_is_up('fake_service2').AndReturn(True) - - # Destination checks (compute is up, enough memory, disk) - db.service_get_by_compute_host(self.context, - dest).AndReturn('fake_service3') - self.servicegroup_api.service_is_up('fake_service3').AndReturn(True) - # assert_compute_node_has_enough_memory() - db.service_get_by_compute_host(self.context, dest).AndReturn( - {'compute_node': [{'memory_mb': 2048, - 'free_disk_gb': 512, - 'local_gb_used': 512, - 'free_ram_mb': 1280, - 'local_gb': 1024, - 'vcpus': 4, - 'vcpus_used': 2, - 'updated_at': None, - 'hypervisor_version': 1}]}) - - # Common checks (same hypervisor, etc) - db.service_get_by_compute_host(self.context, dest).AndReturn( - {'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 1}]}) - db.service_get_by_compute_host(self.context, - instance['host']).AndReturn( - {'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 1, - 'cpu_info': 'fake_cpu_info'}]}) - - rpc.call(self.context, "compute.fake_host2", - {"method": 'check_can_live_migrate_destination', - "namespace": None, - "args": {'instance': instance, - 'block_migration': block_migration, - 'disk_over_commit': disk_over_commit}, - "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}, - None).AndReturn({}) - - self.driver.compute_rpcapi.live_migration(self.context, - host=instance['host'], instance=instance, dest=dest, - block_migration=block_migration, migrate_data={}) - - self.mox.ReplayAll() - result = self.driver.schedule_live_migration(self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - self.assertEqual(result, None) - - def 
test_live_migration_instance_not_running(self): - # The instance given by instance_id is not running. - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - instance = self._live_migration_instance() - instance['power_state'] = power_state.NOSTATE - - self.assertRaises(exception.InstanceNotRunning, - self.driver.schedule_live_migration, self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - - def test_live_migration_compute_src_not_exist(self): - # Raise exception when src compute node is does not exist. - - self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') - self.mox.StubOutWithMock(db, 'service_get_by_compute_host') - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - instance = self._live_migration_instance() - - # Compute down - db.service_get_by_compute_host(self.context, - instance['host']).AndRaise( - exception.ComputeHostNotFound(host='fake')) - - self.mox.ReplayAll() - self.assertRaises(exception.ComputeServiceUnavailable, - self.driver.schedule_live_migration, self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - - def test_live_migration_compute_src_not_alive(self): - # Raise exception when src compute node is not alive. - - self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') - self.mox.StubOutWithMock(db, 'service_get_by_compute_host') - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - instance = self._live_migration_instance() - - # Compute down - db.service_get_by_compute_host(self.context, - instance['host']).AndReturn('fake_service2') - self.servicegroup_api.service_is_up('fake_service2').AndReturn(False) - - self.mox.ReplayAll() - self.assertRaises(exception.ComputeServiceUnavailable, - self.driver.schedule_live_migration, self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - - def test_live_migration_compute_dest_not_exist(self): - # Raise exception when dest compute node does not exist. - - self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(db, 'service_get_by_compute_host') - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - instance = self._live_migration_instance() - - self.driver._live_migration_src_check(self.context, instance) - # Compute down - db.service_get_by_compute_host(self.context, - dest).AndRaise(exception.NotFound()) - - self.mox.ReplayAll() - self.assertRaises(exception.ComputeServiceUnavailable, - self.driver.schedule_live_migration, self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - - def test_live_migration_compute_dest_not_alive(self): - # Raise exception when dest compute node is not alive. 
- - self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(db, 'service_get_by_compute_host') - self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - instance = self._live_migration_instance() - - self.driver._live_migration_src_check(self.context, instance) - db.service_get_by_compute_host(self.context, - dest).AndReturn('fake_service3') - # Compute is down - self.servicegroup_api.service_is_up('fake_service3').AndReturn(False) - - self.mox.ReplayAll() - self.assertRaises(exception.ComputeServiceUnavailable, - self.driver.schedule_live_migration, self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - - def test_live_migration_dest_check_service_same_host(self): - # Confirms exception raises in case dest and src is same host. - - self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - block_migration = False - instance = self._live_migration_instance() - # make dest same as src - dest = instance['host'] - - self.driver._live_migration_src_check(self.context, instance) - - self.mox.ReplayAll() - self.assertRaises(exception.UnableToMigrateToSelf, - self.driver.schedule_live_migration, self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=False) - - def test_live_migration_dest_check_service_lack_memory(self): - # Confirms exception raises when dest doesn't have enough memory. - - # Flag needed to make FilterScheduler test hit memory limit since the - # default for it is to allow memory overcommit by a factor of 1.5. - self.flags(ram_allocation_ratio=1.0) - - self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(db, 'service_get_by_compute_host') - self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') - self.mox.StubOutWithMock(self.driver, '_get_compute_info') - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - instance = self._live_migration_instance() - - self.driver._live_migration_src_check(self.context, instance) - db.service_get_by_compute_host(self.context, - dest).AndReturn('fake_service3') - self.servicegroup_api.service_is_up('fake_service3').AndReturn(True) - - self.driver._get_compute_info(self.context, dest).AndReturn( - {'memory_mb': 2048, - 'free_disk_gb': 512, - 'local_gb_used': 512, - 'free_ram_mb': 512, - 'local_gb': 1024, - 'vcpus': 4, - 'vcpus_used': 2, - 'updated_at': None}) - - self.mox.ReplayAll() - self.assertRaises(exception.MigrationError, - self.driver.schedule_live_migration, self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - - def test_live_migration_different_hypervisor_type_raises(self): - # Confirm live_migration to hypervisor of different type raises. 
- self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check') - self.mox.StubOutWithMock(rpc, 'queue_get_for') - self.mox.StubOutWithMock(rpc, 'call') - self.mox.StubOutWithMock(rpc, 'cast') - self.mox.StubOutWithMock(db, 'service_get_by_compute_host') - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - instance = self._live_migration_instance() - - self.driver._live_migration_src_check(self.context, instance) - self.driver._live_migration_dest_check(self.context, instance, - dest).AndReturn(dest) - - db.service_get_by_compute_host(self.context, dest).AndReturn( - {'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 1}]}) - db.service_get_by_compute_host(self.context, - instance['host']).AndReturn( - {'compute_node': [{'hypervisor_type': 'not-xen', - 'hypervisor_version': 1}]}) - - self.mox.ReplayAll() - self.assertRaises(exception.InvalidHypervisorType, - self.driver.schedule_live_migration, self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - - def test_live_migration_dest_hypervisor_version_older_raises(self): - # Confirm live migration to older hypervisor raises. - self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check') - self.mox.StubOutWithMock(rpc, 'queue_get_for') - self.mox.StubOutWithMock(rpc, 'call') - self.mox.StubOutWithMock(rpc, 'cast') - self.mox.StubOutWithMock(db, 'service_get_by_compute_host') - - dest = 'fake_host2' - block_migration = False - disk_over_commit = False - instance = self._live_migration_instance() - - self.driver._live_migration_src_check(self.context, instance) - self.driver._live_migration_dest_check(self.context, instance, - dest).AndReturn(dest) - - db.service_get_by_compute_host(self.context, dest).AndReturn( - {'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 1}]}) - db.service_get_by_compute_host(self.context, - instance['host']).AndReturn( - {'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 2}]}) - self.mox.ReplayAll() - self.assertRaises(exception.DestinationHypervisorTooOld, - self.driver.schedule_live_migration, self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - - def test_live_migration_dest_check_auto_set_host(self): - instance = self._live_migration_instance() - - # Confirm dest is picked by scheduler if not set. - self.mox.StubOutWithMock(self.driver, 'select_hosts') - self.mox.StubOutWithMock(flavors, 'extract_flavor') - - request_spec = {'instance_properties': instance, - 'instance_type': {}, - 'instance_uuids': [instance['uuid']], - 'image': self.image_service.show(self.context, - instance['image_ref']) - } - ignore_hosts = [instance['host']] - filter_properties = {'ignore_hosts': ignore_hosts} - - flavors.extract_flavor(instance).AndReturn({}) - self.driver.select_hosts(self.context, request_spec, - filter_properties).AndReturn(['fake_host2']) - - self.mox.ReplayAll() - result = self.driver._live_migration_dest_check(self.context, instance, - None, ignore_hosts) - self.assertEqual('fake_host2', result) - - def test_live_migration_dest_check_no_image(self): - instance = self._live_migration_instance() - instance['image_ref'] = '' - - # Confirm dest is picked by scheduler if not set. 
- self.mox.StubOutWithMock(self.driver, 'select_hosts') - self.mox.StubOutWithMock(flavors, 'extract_flavor') - - request_spec = {'instance_properties': instance, - 'instance_type': {}, - 'instance_uuids': [instance['uuid']], - 'image': None - } - ignore_hosts = [instance['host']] - filter_properties = {'ignore_hosts': ignore_hosts} - - flavors.extract_flavor(instance).AndReturn({}) - self.driver.select_hosts(self.context, request_spec, - filter_properties).AndReturn(['fake_host2']) - - self.mox.ReplayAll() - result = self.driver._live_migration_dest_check(self.context, instance, - None, ignore_hosts) - self.assertEqual('fake_host2', result) - - def test_live_migration_auto_set_dest(self): - instance = self._live_migration_instance() - - # Confirm scheduler picks target host if none given. - self.mox.StubOutWithMock(flavors, 'extract_flavor') - self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(self.driver, 'select_hosts') - self.mox.StubOutWithMock(self.driver, '_live_migration_common_check') - self.mox.StubOutWithMock(rpc, 'call') - self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration') - - dest = None - block_migration = False - disk_over_commit = False - request_spec = {'instance_properties': instance, - 'instance_type': {}, - 'instance_uuids': [instance['uuid']], - 'image': self.image_service.show(self.context, - instance['image_ref']) - } - - self.driver._live_migration_src_check(self.context, instance) - - flavors.extract_flavor( - instance).MultipleTimes().AndReturn({}) - - # First selected host raises exception.InvalidHypervisorType - self.driver.select_hosts(self.context, request_spec, - {'ignore_hosts': [instance['host']]}).AndReturn(['fake_host2']) - self.driver._live_migration_common_check(self.context, instance, - 'fake_host2').AndRaise(exception.InvalidHypervisorType()) - - # Second selected host raises exception.InvalidCPUInfo - self.driver.select_hosts(self.context, request_spec, - {'ignore_hosts': [instance['host'], - 'fake_host2']}).AndReturn(['fake_host3']) - self.driver._live_migration_common_check(self.context, instance, - 'fake_host3') - rpc.call(self.context, "compute.fake_host3", - {"method": 'check_can_live_migrate_destination', - "namespace": None, - "args": {'instance': instance, - 'block_migration': block_migration, - 'disk_over_commit': disk_over_commit}, - "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}, - None).AndRaise(exception.InvalidCPUInfo(reason="")) - - # Third selected host pass all checks - self.driver.select_hosts(self.context, request_spec, - {'ignore_hosts': [instance['host'], - 'fake_host2', - 'fake_host3']}).AndReturn(['fake_host4']) - self.driver._live_migration_common_check(self.context, instance, - 'fake_host4') - rpc.call(self.context, "compute.fake_host4", - {"method": 'check_can_live_migrate_destination', - "namespace": None, - "args": {'instance': instance, - 'block_migration': block_migration, - 'disk_over_commit': disk_over_commit}, - "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}, - None).AndReturn({}) - self.driver.compute_rpcapi.live_migration(self.context, - host=instance['host'], instance=instance, dest='fake_host4', - block_migration=block_migration, migrate_data={}) - - self.mox.ReplayAll() - result = self.driver.schedule_live_migration(self.context, - instance=instance, dest=dest, - block_migration=block_migration, - disk_over_commit=disk_over_commit) - self.assertEqual(result, None) - def test_handle_schedule_error_adds_instance_fault(self): 
instance = {'uuid': 'fake-uuid'} self.mox.StubOutWithMock(db, 'instance_update_and_get_original')