diff --git a/nova/scheduler/distributed_scheduler.py b/nova/scheduler/distributed_scheduler.py index b8fbd55f..74b97f1f 100644 --- a/nova/scheduler/distributed_scheduler.py +++ b/nova/scheduler/distributed_scheduler.py @@ -124,6 +124,34 @@ class DistributedScheduler(driver.Scheduler): return instances + def schedule_prep_resize(self, context, request_spec, *args, **kwargs): + """Select a target for resize. + + Selects a target host for the instance, post-resize, and casts + the prep_resize operation to it. + """ + + # We need the new instance type ID... + instance_type_id = kwargs['instance_type_id'] + + elevated = context.elevated() + LOG.debug(_("Attempting to determine target host for resize to " + "instance type %(instance_type_id)s") % locals()) + + # Convert it to an actual instance type + instance_type = db.instance_type_get(elevated, instance_type_id)  # FIXME(review): fetched but never used — presumably meant to be stored as request_spec['instance_type'] for _schedule(); verify against the compute API caller + + # Now let's grab a possibility + hosts = self._schedule(elevated, 'compute', request_spec, + *args, **kwargs) + if not hosts: + raise exception.NoValidHost(reason=_("Could not find a suitable " + "host for the resize")) + host = hosts.pop(0) + + # Forward off to the host + driver.cast_to_host(context, 'compute', host.host, 'prep_resize', + **kwargs) + def select(self, context, request_spec, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. 
Any @@ -307,12 +335,13 @@ class DistributedScheduler(driver.Scheduler): ram_requirement_mb) # Next, tack on the host weights from the child zones - json_spec = json.dumps(request_spec) - all_zones = self._zone_get_all(elevated) - child_results = self._call_zone_method(elevated, "select", - specs=json_spec, zones=all_zones) - selected_hosts.extend(self._adjust_child_weights( - child_results, all_zones)) + if not request_spec.get('local_zone', False): + json_spec = json.dumps(request_spec) + all_zones = self._zone_get_all(elevated) + child_results = self._call_zone_method(elevated, "select", + specs=json_spec, zones=all_zones) + selected_hosts.extend(self._adjust_child_weights( + child_results, all_zones)) selected_hosts.sort(key=operator.attrgetter('weight')) return selected_hosts[:num_instances] @@ -360,6 +389,12 @@ class DistributedScheduler(driver.Scheduler): This method returns a subset of hosts, in the same format.""" selected_filters = self._choose_host_filters() + # Filter out original host + if ('original_host' in request_spec and + request_spec.get('avoid_original_host', True)): + hosts = [(h, hi) for h, hi in hosts + if h != request_spec['original_host']] + # TODO(sandy): We're only using InstanceType-based specs # currently. Later we'll need to snoop for more detailed # host filter requests. diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py index 126dbe60..511a8fa4 100644 --- a/nova/scheduler/multi.py +++ b/nova/scheduler/multi.py @@ -38,6 +38,7 @@ flags.DEFINE_string('volume_scheduler_driver', # A mapping of methods to topics so we can figure out which driver to use. 
_METHOD_MAP = {'run_instance': 'compute', 'start_instance': 'compute', + 'prep_resize': 'compute', 'create_volume': 'volume', 'create_volumes': 'volume'} diff --git a/nova/tests/scheduler/test_distributed_scheduler.py b/nova/tests/scheduler/test_distributed_scheduler.py index ad2d1b1b..80781c04 100644 --- a/nova/tests/scheduler/test_distributed_scheduler.py +++ b/nova/tests/scheduler/test_distributed_scheduler.py @@ -82,6 +82,10 @@ def fake_zone_get_all(context): ] +def fake_filter_hosts(topic, request_info, unfiltered_hosts, options): + return unfiltered_hosts + + class DistributedSchedulerTestCase(test.TestCase): """Test case for Distributed Scheduler.""" @@ -205,16 +209,11 @@ class DistributedSchedulerTestCase(test.TestCase): "compute", {}) def test_schedule_happy_day(self): - """_schedule() has no branching logic beyond basic input parameter - checking. Just make sure there's nothing glaringly wrong by doing - a happy day pass through.""" + """Make sure there's nothing glaringly wrong with _schedule() + by doing a happy day pass through.""" self.next_weight = 1.0 - def _fake_filter_hosts(topic, request_info, unfiltered_hosts, - options): - return unfiltered_hosts - def _fake_weighted_sum(functions, hosts, options): self.next_weight += 2.0 host, hostinfo = hosts[0] @@ -224,7 +223,7 @@ class DistributedSchedulerTestCase(test.TestCase): sched = ds_fakes.FakeDistributedScheduler() fake_context = context.RequestContext('user', 'project') sched.zone_manager = ds_fakes.FakeZoneManager() - self.stubs.Set(sched, '_filter_hosts', _fake_filter_hosts) + self.stubs.Set(sched, '_filter_hosts', fake_filter_hosts) self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum) self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) @@ -243,6 +242,37 @@ class DistributedSchedulerTestCase(test.TestCase): self.assertTrue(weighted_host.host != None) self.assertTrue(weighted_host.zone == None) + def 
test_schedule_local_zone(self): + """Test to make sure _schedule makes no call out to zones if + local_zone in the request spec is True.""" + + self.next_weight = 1.0 + + def _fake_weighted_sum(functions, hosts, options): + self.next_weight += 2.0 + host, hostinfo = hosts[0] + return least_cost.WeightedHost(self.next_weight, host=host, + hostinfo=hostinfo) + + sched = ds_fakes.FakeDistributedScheduler() + fake_context = context.RequestContext('user', 'project') + sched.zone_manager = ds_fakes.FakeZoneManager() + self.stubs.Set(sched, '_filter_hosts', fake_filter_hosts) + self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + + instance_type = dict(memory_mb=512, local_gb=512) + request_spec = dict(num_instances=10, instance_type=instance_type, + local_zone=True) + weighted_hosts = sched._schedule(fake_context, 'compute', + request_spec) + self.assertEquals(len(weighted_hosts), 10) + for weighted_host in weighted_hosts: + # There should be no remote hosts + self.assertTrue(weighted_host.host != None) + self.assertTrue(weighted_host.zone == None) + def test_decrypt_blob(self): """Test that the decrypt method works.""" @@ -269,3 +299,42 @@ class DistributedSchedulerTestCase(test.TestCase): self.assertEquals(weight, 1.0) hostinfo = zone_manager.HostInfo('host', free_ram_mb=1000) self.assertEquals(1000, fn(hostinfo)) + + def test_filter_hosts_avoid(self): + """Test to make sure _filter_hosts() filters original hosts if + avoid_original_host is True.""" + + def _fake_choose_host_filters(): + return [] + + sched = ds_fakes.FakeDistributedScheduler() + fake_context = context.RequestContext('user', 'project') + self.stubs.Set(sched, '_choose_host_filters', + _fake_choose_host_filters) + + hosts = [('host1', '1info'), ('host2', '2info'), ('host3', '3info')] + request_spec = dict(original_host='host2', + avoid_original_host=True) + + 
filtered = sched._filter_hosts('compute', request_spec, hosts, {}) + self.assertEqual(filtered, + [('host1', '1info'), ('host3', '3info')]) + + def test_filter_hosts_no_avoid(self): + """Test to make sure _filter_hosts() does not filter original + hosts if avoid_original_host is False.""" + + def _fake_choose_host_filters(): + return [] + + sched = ds_fakes.FakeDistributedScheduler() + fake_context = context.RequestContext('user', 'project') + self.stubs.Set(sched, '_choose_host_filters', + _fake_choose_host_filters) + + hosts = [('host1', '1info'), ('host2', '2info'), ('host3', '3info')] + request_spec = dict(original_host='host2', + avoid_original_host=False) + + filtered = sched._filter_hosts('compute', request_spec, hosts, {}) + self.assertEqual(filtered, hosts) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 397b5e26..358d4339 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -1443,6 +1443,43 @@ class ComputeAPITestCase(BaseTestCase): self.compute_api.resize(context, instance, None) self.compute.terminate_instance(context, instance_id) + def test_resize_request_spec(self): + def _fake_cast(context, args): + request_spec = args['args']['request_spec'] + self.assertEqual(request_spec['original_host'], 'host2') + self.assertEqual(request_spec['avoid_original_host'], True) + + self.stubs.Set(self.compute_api, '_cast_scheduler_message', + _fake_cast) + + context = self.context.elevated() + instance_id = self._create_instance(dict(host='host2')) + instance = db.instance_get(context, instance_id) + self.compute.run_instance(self.context, instance_id) + try: + self.compute_api.resize(context, instance, None) + finally: + self.compute.terminate_instance(context, instance_id) + + def test_resize_request_spec_noavoid(self): + def _fake_cast(context, args): + request_spec = args['args']['request_spec'] + self.assertEqual(request_spec['original_host'], 'host2') + self.assertEqual(request_spec['avoid_original_host'], 
False) + + self.stubs.Set(self.compute_api, '_cast_scheduler_message', + _fake_cast) + self.flags(allow_resize_to_same_host=True) + + context = self.context.elevated() + instance_id = self._create_instance(dict(host='host2')) + instance = db.instance_get(context, instance_id) + self.compute.run_instance(self.context, instance_id) + try: + self.compute_api.resize(context, instance, None) + finally: + self.compute.terminate_instance(context, instance_id) + def test_get_all_by_name_regexp(self): """Test searching instances by name (display_name)""" c = context.get_admin_context()