Implement schedule_prep_resize()
Implement schedule_prep_resize() in the distributed scheduler. Adds a request_spec argument to enable the current host of an instance to be excluded for resizes. Corrects bug 888236. Change-Id: Ia52415e79639275a06bef59f1e13ca64bf7243ee
This commit is contained in:
parent
15937a4160
commit
72fa94f72b
|
@ -1294,12 +1294,22 @@ class API(base.Base):
|
|||
vm_state=vm_states.RESIZING,
|
||||
task_state=task_states.RESIZE_PREP)
|
||||
|
||||
request_spec = {
|
||||
'instance_type': new_instance_type,
|
||||
'filter': None,
|
||||
'num_instances': 1,
|
||||
'original_host': instance['host'],
|
||||
'avoid_original_host': not FLAGS.allow_resize_to_same_host,
|
||||
'local_zone': True,
|
||||
}
|
||||
|
||||
self._cast_scheduler_message(context,
|
||||
{"method": "prep_resize",
|
||||
"args": {"topic": FLAGS.compute_topic,
|
||||
"instance_id": instance['uuid'],
|
||||
"update_db": False,
|
||||
"instance_type_id": new_instance_type['id']}})
|
||||
"instance_type_id": new_instance_type['id'],
|
||||
"request_spec": request_spec}})
|
||||
|
||||
@scheduler_api.reroute_compute("add_fixed_ip")
|
||||
def add_fixed_ip(self, context, instance, network_id):
|
||||
|
|
|
@ -30,7 +30,18 @@ from nova.scheduler import driver
|
|||
class ChanceScheduler(driver.Scheduler):
|
||||
"""Implements Scheduler as a random node selector."""
|
||||
|
||||
def _schedule(self, context, topic, **kwargs):
|
||||
def _filter_hosts(self, request_spec, hosts):
|
||||
"""Filter a list of hosts based on request_spec."""
|
||||
|
||||
# Filter out excluded host
|
||||
if (request_spec and 'original_host' in request_spec and
|
||||
request_spec.get('avoid_original_host', True)):
|
||||
hosts = [host for host in hosts
|
||||
if host != request_spec['original_host']]
|
||||
|
||||
return hosts
|
||||
|
||||
def _schedule(self, context, topic, request_spec, **kwargs):
|
||||
"""Picks a host that is up at random."""
|
||||
|
||||
elevated = context.elevated()
|
||||
|
@ -38,12 +49,18 @@ class ChanceScheduler(driver.Scheduler):
|
|||
if not hosts:
|
||||
msg = _("Is the appropriate service running?")
|
||||
raise exception.NoValidHost(reason=msg)
|
||||
|
||||
hosts = self._filter_hosts(request_spec, hosts)
|
||||
if not hosts:
|
||||
msg = _("Could not find another compute")
|
||||
raise exception.NoValidHost(reason=msg)
|
||||
|
||||
return hosts[int(random.random() * len(hosts))]
|
||||
|
||||
def schedule(self, context, topic, method, *_args, **kwargs):
    """Picks a host that is up at random and casts ``method`` to it.

    No request_spec is available on this generic path, so None is
    passed and no original-host filtering takes place.
    """
    chosen_host = self._schedule(context, topic, None, **kwargs)
    driver.cast_to_host(context, topic, chosen_host, method, **kwargs)
|
||||
|
||||
def schedule_run_instance(self, context, request_spec, *_args, **kwargs):
|
||||
|
@ -52,10 +69,15 @@ class ChanceScheduler(driver.Scheduler):
|
|||
num_instances = request_spec.get('num_instances', 1)
|
||||
instances = []
|
||||
for num in xrange(num_instances):
|
||||
host = self._schedule(context, 'compute', **kwargs)
|
||||
host = self._schedule(context, 'compute', request_spec, **kwargs)
|
||||
instance = self.create_instance_db_entry(elevated, request_spec)
|
||||
driver.cast_to_compute_host(context, host,
|
||||
'run_instance', instance_id=instance['id'], **kwargs)
|
||||
instances.append(driver.encode_instance(instance))
|
||||
|
||||
return instances
|
||||
|
||||
def schedule_prep_resize(self, context, request_spec, *args, **kwargs):
    """Select a target for resize.

    The request_spec lets _schedule() exclude the instance's
    current host when resizing to the same host is disallowed.
    """
    target = self._schedule(context, 'compute', request_spec, **kwargs)
    driver.cast_to_host(context, 'compute', target, 'prep_resize',
                        **kwargs)
|
||||
|
|
|
@ -124,6 +124,34 @@ class DistributedScheduler(driver.Scheduler):
|
|||
|
||||
return instances
|
||||
|
||||
def schedule_prep_resize(self, context, request_spec, *args, **kwargs):
    """Select a target for resize.

    Selects a target host for the instance, post-resize, and casts
    the prep_resize operation to it.

    :raises: exception.NoValidHost if no host can satisfy the resize.
    """
    # We need the new instance type ID...
    instance_type_id = kwargs['instance_type_id']

    elevated = context.elevated()
    LOG.debug(_("Attempting to determine target host for resize to "
                "instance type %(instance_type_id)s") % locals())

    # Convert it to an actual instance type.  NOTE(review): the result
    # is not used directly here; presumably this lookup also validates
    # that the type id exists -- confirm against db API semantics.
    instance_type = db.instance_type_get(elevated, instance_type_id)

    # Now let's grab a possibility
    hosts = self._schedule(elevated, 'compute', request_spec,
                           *args, **kwargs)
    if not hosts:
        # Bug fix: the reason string was empty, which made the raised
        # NoValidHost useless for operators debugging failed resizes.
        raise exception.NoValidHost(
                reason=_("No hosts available for resize to instance "
                         "type %s") % instance_type_id)
    host = hosts.pop(0)

    # Forward off to the host
    driver.cast_to_host(context, 'compute', host.host, 'prep_resize',
                        **kwargs)
|
||||
|
||||
def select(self, context, request_spec, *args, **kwargs):
|
||||
"""Select returns a list of weights and zone/host information
|
||||
corresponding to the best hosts to service the request. Any
|
||||
|
@ -307,12 +335,13 @@ class DistributedScheduler(driver.Scheduler):
|
|||
ram_requirement_mb)
|
||||
|
||||
# Next, tack on the host weights from the child zones
|
||||
json_spec = json.dumps(request_spec)
|
||||
all_zones = self._zone_get_all(elevated)
|
||||
child_results = self._call_zone_method(elevated, "select",
|
||||
specs=json_spec, zones=all_zones)
|
||||
selected_hosts.extend(self._adjust_child_weights(
|
||||
child_results, all_zones))
|
||||
if not request_spec.get('local_zone', False):
|
||||
json_spec = json.dumps(request_spec)
|
||||
all_zones = self._zone_get_all(elevated)
|
||||
child_results = self._call_zone_method(elevated, "select",
|
||||
specs=json_spec, zones=all_zones)
|
||||
selected_hosts.extend(self._adjust_child_weights(
|
||||
child_results, all_zones))
|
||||
selected_hosts.sort(key=operator.attrgetter('weight'))
|
||||
return selected_hosts[:num_instances]
|
||||
|
||||
|
@ -360,6 +389,12 @@ class DistributedScheduler(driver.Scheduler):
|
|||
This method returns a subset of hosts, in the same format."""
|
||||
selected_filters = self._choose_host_filters()
|
||||
|
||||
# Filter out original host
|
||||
if ('original_host' in request_spec and
|
||||
request_spec.get('avoid_original_host', True)):
|
||||
hosts = [(h, hi) for h, hi in hosts
|
||||
if h != request_spec['original_host']]
|
||||
|
||||
# TODO(sandy): We're only using InstanceType-based specs
|
||||
# currently. Later we'll need to snoop for more detailed
|
||||
# host filter requests.
|
||||
|
|
|
@ -38,6 +38,7 @@ flags.DEFINE_string('volume_scheduler_driver',
|
|||
# A mapping of methods to topics so we can figure out which driver to use.
|
||||
_METHOD_MAP = {'run_instance': 'compute',
|
||||
'start_instance': 'compute',
|
||||
'prep_resize': 'compute',
|
||||
'create_volume': 'volume',
|
||||
'create_volumes': 'volume'}
|
||||
|
||||
|
|
|
@ -0,0 +1,50 @@
|
|||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Chance Scheduler.
|
||||
"""
|
||||
|
||||
from nova import test
|
||||
from nova.scheduler import chance
|
||||
|
||||
|
||||
class ChanceSchedulerTestCase(test.TestCase):
    """Test case for Chance Scheduler."""

    def test_filter_hosts_avoid(self):
        """_filter_hosts() drops the original host when
        avoid_original_host is True."""
        sched = chance.ChanceScheduler()

        all_hosts = ['host1', 'host2', 'host3']
        spec = dict(original_host='host2',
                    avoid_original_host=True)

        self.assertEqual(sched._filter_hosts(spec, all_hosts),
                         ['host1', 'host3'])

    def test_filter_hosts_no_avoid(self):
        """_filter_hosts() keeps every host when
        avoid_original_host is False."""
        sched = chance.ChanceScheduler()

        all_hosts = ['host1', 'host2', 'host3']
        spec = dict(original_host='host2',
                    avoid_original_host=False)

        self.assertEqual(sched._filter_hosts(spec, all_hosts), all_hosts)
|
|
@ -82,6 +82,10 @@ def fake_zone_get_all(context):
|
|||
]
|
||||
|
||||
|
||||
def fake_filter_hosts(topic, request_info, unfiltered_hosts, options):
    """No-op host filter stub: passes every host through unchanged."""
    return unfiltered_hosts
|
||||
|
||||
|
||||
class DistributedSchedulerTestCase(test.TestCase):
|
||||
"""Test case for Distributed Scheduler."""
|
||||
|
||||
|
@ -205,16 +209,11 @@ class DistributedSchedulerTestCase(test.TestCase):
|
|||
"compute", {})
|
||||
|
||||
def test_schedule_happy_day(self):
|
||||
"""_schedule() has no branching logic beyond basic input parameter
|
||||
checking. Just make sure there's nothing glaringly wrong by doing
|
||||
a happy day pass through."""
|
||||
"""Make sure there's nothing glaringly wrong with _schedule()
|
||||
by doing a happy day pass through."""
|
||||
|
||||
self.next_weight = 1.0
|
||||
|
||||
def _fake_filter_hosts(topic, request_info, unfiltered_hosts,
|
||||
options):
|
||||
return unfiltered_hosts
|
||||
|
||||
def _fake_weighted_sum(functions, hosts, options):
|
||||
self.next_weight += 2.0
|
||||
host, hostinfo = hosts[0]
|
||||
|
@ -224,7 +223,7 @@ class DistributedSchedulerTestCase(test.TestCase):
|
|||
sched = ds_fakes.FakeDistributedScheduler()
|
||||
fake_context = context.RequestContext('user', 'project')
|
||||
sched.zone_manager = ds_fakes.FakeZoneManager()
|
||||
self.stubs.Set(sched, '_filter_hosts', _fake_filter_hosts)
|
||||
self.stubs.Set(sched, '_filter_hosts', fake_filter_hosts)
|
||||
self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
|
||||
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
|
||||
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
|
||||
|
@ -243,6 +242,37 @@ class DistributedSchedulerTestCase(test.TestCase):
|
|||
self.assertTrue(weighted_host.host != None)
|
||||
self.assertTrue(weighted_host.zone == None)
|
||||
|
||||
def test_schedule_local_zone(self):
    """_schedule() must make no call out to child zones when
    local_zone in the request spec is True."""
    self.next_weight = 1.0

    def _fake_weighted_sum(functions, hosts, options):
        self.next_weight += 2.0
        host, hostinfo = hosts[0]
        return least_cost.WeightedHost(self.next_weight, host=host,
                                       hostinfo=hostinfo)

    sched = ds_fakes.FakeDistributedScheduler()
    fake_context = context.RequestContext('user', 'project')
    sched.zone_manager = ds_fakes.FakeZoneManager()
    self.stubs.Set(sched, '_filter_hosts', fake_filter_hosts)
    self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
    self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
    self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)

    type_spec = dict(memory_mb=512, local_gb=512)
    spec = dict(num_instances=10, instance_type=type_spec,
                local_zone=True)
    weighted_hosts = sched._schedule(fake_context, 'compute', spec)
    self.assertEqual(len(weighted_hosts), 10)
    for weighted_host in weighted_hosts:
        # There should be no remote (child-zone) hosts in the result.
        self.assertTrue(weighted_host.host != None)
        self.assertTrue(weighted_host.zone == None)
|
||||
|
||||
def test_decrypt_blob(self):
|
||||
"""Test that the decrypt method works."""
|
||||
|
||||
|
@ -269,3 +299,42 @@ class DistributedSchedulerTestCase(test.TestCase):
|
|||
self.assertEquals(weight, 1.0)
|
||||
hostinfo = zone_manager.HostInfo('host', free_ram_mb=1000)
|
||||
self.assertEquals(1000, fn(hostinfo))
|
||||
|
||||
def test_filter_hosts_avoid(self):
    """_filter_hosts() must drop the instance's original host when
    avoid_original_host is True."""
    def _fake_choose_host_filters():
        return []

    sched = ds_fakes.FakeDistributedScheduler()
    fake_context = context.RequestContext('user', 'project')
    self.stubs.Set(sched, '_choose_host_filters',
                   _fake_choose_host_filters)

    host_list = [('host1', '1info'), ('host2', '2info'),
                 ('host3', '3info')]
    spec = dict(original_host='host2',
                avoid_original_host=True)

    result = sched._filter_hosts('compute', spec, host_list, {})
    self.assertEqual(result,
                     [('host1', '1info'), ('host3', '3info')])
|
||||
|
||||
def test_filter_hosts_no_avoid(self):
    """_filter_hosts() must keep the original host when
    avoid_original_host is False."""
    def _fake_choose_host_filters():
        return []

    sched = ds_fakes.FakeDistributedScheduler()
    fake_context = context.RequestContext('user', 'project')
    self.stubs.Set(sched, '_choose_host_filters',
                   _fake_choose_host_filters)

    host_list = [('host1', '1info'), ('host2', '2info'),
                 ('host3', '3info')]
    spec = dict(original_host='host2',
                avoid_original_host=False)

    result = sched._filter_hosts('compute', spec, host_list, {})
    self.assertEqual(result, host_list)
|
||||
|
|
|
@ -1443,6 +1443,43 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
self.compute_api.resize(context, instance, None)
|
||||
self.compute.terminate_instance(context, instance_id)
|
||||
|
||||
def test_resize_request_spec(self):
    """Resizing should build a request_spec that excludes the
    instance's current host by default."""
    def _fake_cast(_context, args):
        spec = args['args']['request_spec']
        self.assertEqual(spec['original_host'], 'host2')
        self.assertEqual(spec['avoid_original_host'], True)

    self.stubs.Set(self.compute_api, '_cast_scheduler_message',
                   _fake_cast)

    context = self.context.elevated()
    instance_id = self._create_instance(dict(host='host2'))
    instance = db.instance_get(context, instance_id)
    self.compute.run_instance(self.context, instance_id)
    try:
        self.compute_api.resize(context, instance, None)
    finally:
        # Always clean up the instance, even if the resize assertions fail.
        self.compute.terminate_instance(context, instance_id)
|
||||
|
||||
def test_resize_request_spec_noavoid(self):
    """With allow_resize_to_same_host enabled, the request_spec must
    not ask the scheduler to avoid the original host."""
    def _fake_cast(_context, args):
        spec = args['args']['request_spec']
        self.assertEqual(spec['original_host'], 'host2')
        self.assertEqual(spec['avoid_original_host'], False)

    self.stubs.Set(self.compute_api, '_cast_scheduler_message',
                   _fake_cast)
    self.flags(allow_resize_to_same_host=True)

    context = self.context.elevated()
    instance_id = self._create_instance(dict(host='host2'))
    instance = db.instance_get(context, instance_id)
    self.compute.run_instance(self.context, instance_id)
    try:
        self.compute_api.resize(context, instance, None)
    finally:
        # Always clean up the instance, even if the resize assertions fail.
        self.compute.terminate_instance(context, instance_id)
|
||||
|
||||
def test_get_all_by_name_regexp(self):
|
||||
"""Test searching instances by name (display_name)"""
|
||||
c = context.get_admin_context()
|
||||
|
|
Loading…
Reference in New Issue