Pass a list of instance UUIDs to scheduler
When the RequestSpec object was created, it was assumed that when the request was for more than one instance the scheduler would not need to know the UUIDs of the individual instances, and so it was agreed to only pass one instance UUID. If, however, we want the scheduler to be able to claim resources on the selected host, we will need to know each instance's UUID, which will be the consumer_id of its allocation. This patch adds a new RPC parameter 'instance_uuids' that will be passed to the scheduler. The next patch in the series adds the logic to the scheduler to use this new field when selecting hosts. Co-Authored-By: Sylvain Bauza <sbauza@redhat.com> Partially-Implements: blueprint placement-claims Change-Id: I44ebdb3e29db950bf2ad0e6b1dbfdecd1ca03530
This commit is contained in:
parent
a1eca94d89
commit
8c86547fa0
|
@ -557,8 +557,9 @@ class ComputeTaskManager(base.Base):
|
|||
context, image, instances)
|
||||
scheduler_utils.populate_retry(
|
||||
filter_properties, instances[0].uuid)
|
||||
instance_uuids = [instance.uuid for instance in instances]
|
||||
hosts = self._schedule_instances(
|
||||
context, request_spec, filter_properties)
|
||||
context, request_spec, filter_properties, instance_uuids)
|
||||
except Exception as exc:
|
||||
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
|
||||
for instance in instances:
|
||||
|
@ -627,14 +628,16 @@ class ComputeTaskManager(base.Base):
|
|||
block_device_mapping=bdms, node=host['nodename'],
|
||||
limits=host['limits'])
|
||||
|
||||
def _schedule_instances(self, context, request_spec, filter_properties):
|
||||
def _schedule_instances(self, context, request_spec, filter_properties,
|
||||
instance_uuids=None):
|
||||
scheduler_utils.setup_instance_group(context, request_spec,
|
||||
filter_properties)
|
||||
# TODO(sbauza): Hydrate here the object until we modify the
|
||||
# scheduler.utils methods to directly use the RequestSpec object
|
||||
spec_obj = objects.RequestSpec.from_primitives(
|
||||
context, request_spec, filter_properties)
|
||||
hosts = self.scheduler_client.select_destinations(context, spec_obj)
|
||||
hosts = self.scheduler_client.select_destinations(context, spec_obj,
|
||||
instance_uuids)
|
||||
return hosts
|
||||
|
||||
@targets_cell
|
||||
|
@ -699,7 +702,8 @@ class ComputeTaskManager(base.Base):
|
|||
scheduler_utils.populate_retry(filter_properties,
|
||||
instance.uuid)
|
||||
hosts = self._schedule_instances(
|
||||
context, request_spec, filter_properties)
|
||||
context, request_spec, filter_properties,
|
||||
[instance.uuid])
|
||||
host_state = hosts[0]
|
||||
scheduler_utils.populate_filter_properties(
|
||||
filter_properties, host_state)
|
||||
|
@ -765,7 +769,8 @@ class ComputeTaskManager(base.Base):
|
|||
request_spec = request_spec.to_legacy_request_spec_dict()
|
||||
try:
|
||||
hosts = self._schedule_instances(
|
||||
context, request_spec, filter_properties)
|
||||
context, request_spec, filter_properties,
|
||||
[instance.uuid])
|
||||
host_dict = hosts.pop(0)
|
||||
host, node, limits = (host_dict['host'],
|
||||
host_dict['nodename'],
|
||||
|
@ -915,9 +920,12 @@ class ComputeTaskManager(base.Base):
|
|||
admin_password, injected_files,
|
||||
requested_networks, block_device_mapping):
|
||||
legacy_spec = request_specs[0].to_legacy_request_spec_dict()
|
||||
# Add all the UUIDs for the instances
|
||||
instance_uuids = [spec.instance_uuid for spec in request_specs]
|
||||
try:
|
||||
hosts = self._schedule_instances(context, legacy_spec,
|
||||
request_specs[0].to_legacy_filter_properties_dict())
|
||||
request_specs[0].to_legacy_filter_properties_dict(),
|
||||
instance_uuids)
|
||||
except Exception as exc:
|
||||
LOG.exception(_LE('Failed to schedule instances'))
|
||||
self._bury_in_cell0(context, request_specs[0], exc,
|
||||
|
|
|
@ -184,7 +184,7 @@ class LiveMigrationTask(base.TaskBase):
|
|||
request_spec.ignore_hosts = attempted_hosts
|
||||
try:
|
||||
host = self.scheduler_client.select_destinations(self.context,
|
||||
request_spec)[0]['host']
|
||||
request_spec, [self.instance.uuid])[0]['host']
|
||||
except messaging.RemoteError as ex:
|
||||
# TODO(ShaoHe Feng) There maybe multi-scheduler, and the
|
||||
# scheduling algorithm is R-R, we can let other scheduler try.
|
||||
|
|
|
@ -70,7 +70,7 @@ class MigrationTask(base.TaskBase):
|
|||
cell=instance_mapping.cell_mapping)
|
||||
|
||||
hosts = self.scheduler_client.select_destinations(
|
||||
self.context, self.request_spec)
|
||||
self.context, self.request_spec, [self.instance.uuid])
|
||||
host_state = hosts[0]
|
||||
|
||||
scheduler_utils.populate_filter_properties(legacy_props,
|
||||
|
|
|
@ -55,7 +55,7 @@ class ChanceScheduler(driver.Scheduler):
|
|||
|
||||
return random.choice(hosts)
|
||||
|
||||
def select_destinations(self, context, spec_obj):
|
||||
def select_destinations(self, context, spec_obj, instance_uuids):
|
||||
"""Selects random destinations."""
|
||||
num_instances = spec_obj.num_instances
|
||||
# NOTE(timello): Returns a list of dicts with 'host', 'nodename' and
|
||||
|
|
|
@ -47,8 +47,9 @@ class SchedulerClient(object):
|
|||
'nova.scheduler.client.report.SchedulerReportClient'))
|
||||
|
||||
@utils.retry_select_destinations
|
||||
def select_destinations(self, context, spec_obj):
|
||||
return self.queryclient.select_destinations(context, spec_obj)
|
||||
def select_destinations(self, context, spec_obj, instance_uuids):
|
||||
return self.queryclient.select_destinations(context, spec_obj,
|
||||
instance_uuids)
|
||||
|
||||
def update_aggregates(self, context, aggregates):
|
||||
self.queryclient.update_aggregates(context, aggregates)
|
||||
|
|
|
@ -22,14 +22,15 @@ class SchedulerQueryClient(object):
|
|||
def __init__(self):
|
||||
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
|
||||
|
||||
def select_destinations(self, context, spec_obj):
|
||||
def select_destinations(self, context, spec_obj, instance_uuids):
|
||||
"""Returns destinations(s) best suited for this request_spec and
|
||||
filter_properties.
|
||||
|
||||
The result should be a list of dicts with 'host', 'nodename' and
|
||||
'limits' as keys.
|
||||
"""
|
||||
return self.scheduler_rpcapi.select_destinations(context, spec_obj)
|
||||
return self.scheduler_rpcapi.select_destinations(context, spec_obj,
|
||||
instance_uuids)
|
||||
|
||||
def update_aggregates(self, context, aggregates):
|
||||
"""Updates HostManager internal aggregates information.
|
||||
|
|
|
@ -55,7 +55,7 @@ class Scheduler(object):
|
|||
if self.servicegroup_api.service_is_up(service)]
|
||||
|
||||
@abc.abstractmethod
|
||||
def select_destinations(self, context, spec_obj):
|
||||
def select_destinations(self, context, spec_obj, instance_uuids):
|
||||
"""Must override select_destinations method.
|
||||
|
||||
:return: A list of dicts with 'host', 'nodename' and 'limits' as keys
|
||||
|
|
|
@ -47,14 +47,14 @@ class FilterScheduler(driver.Scheduler):
|
|||
# we split the needed methods into a separate library.
|
||||
self.scheduler_client = scheduler_client.SchedulerClient()
|
||||
|
||||
def select_destinations(self, context, spec_obj):
|
||||
def select_destinations(self, context, spec_obj, instance_uuids):
|
||||
"""Selects a filtered set of hosts and nodes."""
|
||||
self.notifier.info(
|
||||
context, 'scheduler.select_destinations.start',
|
||||
dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
|
||||
|
||||
num_instances = spec_obj.num_instances
|
||||
selected_hosts = self._schedule(context, spec_obj)
|
||||
selected_hosts = self._schedule(context, spec_obj, instance_uuids)
|
||||
|
||||
# Couldn't fulfill the request_spec
|
||||
if len(selected_hosts) < num_instances:
|
||||
|
@ -85,7 +85,7 @@ class FilterScheduler(driver.Scheduler):
|
|||
dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
|
||||
return dests
|
||||
|
||||
def _schedule(self, context, spec_obj):
|
||||
def _schedule(self, context, spec_obj, instance_uuids):
|
||||
"""Returns a list of hosts that meet the required specs,
|
||||
ordered by their fitness.
|
||||
"""
|
||||
|
|
|
@ -44,7 +44,7 @@ QUOTAS = quota.QUOTAS
|
|||
class SchedulerManager(manager.Manager):
|
||||
"""Chooses a host to run instances on."""
|
||||
|
||||
target = messaging.Target(version='4.3')
|
||||
target = messaging.Target(version='4.4')
|
||||
|
||||
_sentinel = object()
|
||||
|
||||
|
@ -82,7 +82,7 @@ class SchedulerManager(manager.Manager):
|
|||
@messaging.expected_exceptions(exception.NoValidHost)
|
||||
def select_destinations(self, ctxt,
|
||||
request_spec=None, filter_properties=None,
|
||||
spec_obj=_sentinel):
|
||||
spec_obj=_sentinel, instance_uuids=None):
|
||||
"""Returns destinations(s) best suited for this RequestSpec.
|
||||
|
||||
The result should be a list of dicts with 'host', 'nodename' and
|
||||
|
@ -95,7 +95,7 @@ class SchedulerManager(manager.Manager):
|
|||
spec_obj = objects.RequestSpec.from_primitives(ctxt,
|
||||
request_spec,
|
||||
filter_properties)
|
||||
dests = self.driver.select_destinations(ctxt, spec_obj)
|
||||
dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids)
|
||||
return jsonutils.to_primitive(dests)
|
||||
|
||||
def update_aggregates(self, ctxt, aggregates):
|
||||
|
|
|
@ -94,6 +94,8 @@ class SchedulerAPI(object):
|
|||
changes to existing methods in 4.x after that point should be done such
|
||||
that they can handle the version_cap being set to 4.3.
|
||||
|
||||
* 4.4 - Modify select_destinations() signature by providing the
|
||||
instance_uuids for the request.
|
||||
'''
|
||||
|
||||
VERSION_ALIASES = {
|
||||
|
@ -117,9 +119,13 @@ class SchedulerAPI(object):
|
|||
self.client = rpc.get_client(target, version_cap=version_cap,
|
||||
serializer=serializer)
|
||||
|
||||
def select_destinations(self, ctxt, spec_obj):
|
||||
version = '4.3'
|
||||
msg_args = {'spec_obj': spec_obj}
|
||||
def select_destinations(self, ctxt, spec_obj, instance_uuids):
|
||||
version = '4.4'
|
||||
msg_args = {'instance_uuids': instance_uuids,
|
||||
'spec_obj': spec_obj}
|
||||
if not self.client.can_send_version(version):
|
||||
del msg_args['instance_uuids']
|
||||
version = '4.3'
|
||||
if not self.client.can_send_version(version):
|
||||
del msg_args['spec_obj']
|
||||
msg_args['request_spec'] = spec_obj.to_legacy_request_spec_dict()
|
||||
|
|
|
@ -258,7 +258,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
|||
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
|
||||
self.fake_spec.reset_forced_destinations()
|
||||
self.task.scheduler_client.select_destinations(
|
||||
self.context, self.fake_spec).AndReturn(
|
||||
self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
|
||||
[{'host': 'host1'}])
|
||||
self.task._check_compatible_with_source_hypervisor("host1")
|
||||
self.task._call_livem_checks_on_host("host1")
|
||||
|
@ -297,7 +297,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
|||
self.context, fake_props,
|
||||
{'ignore_hosts': [self.instance_host]}
|
||||
)
|
||||
select_dest.assert_called_once_with(self.context, another_spec)
|
||||
select_dest.assert_called_once_with(self.context, another_spec,
|
||||
[self.instance.uuid])
|
||||
check_compat.assert_called_once_with("host1")
|
||||
call_livem_checks.assert_called_once_with("host1")
|
||||
do_test()
|
||||
|
@ -316,7 +317,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
|||
scheduler_utils.setup_instance_group(
|
||||
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
|
||||
self.task.scheduler_client.select_destinations(self.context,
|
||||
self.fake_spec).AndReturn(
|
||||
self.fake_spec, [self.instance.uuid]).AndReturn(
|
||||
[{'host': 'host1'}])
|
||||
self.task._check_compatible_with_source_hypervisor("host1")
|
||||
self.task._call_livem_checks_on_host("host1")
|
||||
|
@ -339,13 +340,13 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
|||
scheduler_utils.setup_instance_group(
|
||||
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
|
||||
self.task.scheduler_client.select_destinations(self.context,
|
||||
self.fake_spec).AndReturn(
|
||||
self.fake_spec, [self.instance.uuid]).AndReturn(
|
||||
[{'host': 'host1'}])
|
||||
self.task._check_compatible_with_source_hypervisor("host1")\
|
||||
.AndRaise(error)
|
||||
|
||||
self.task.scheduler_client.select_destinations(self.context,
|
||||
self.fake_spec).AndReturn(
|
||||
self.fake_spec, [self.instance.uuid]).AndReturn(
|
||||
[{'host': 'host2'}])
|
||||
self.task._check_compatible_with_source_hypervisor("host2")
|
||||
self.task._call_livem_checks_on_host("host2")
|
||||
|
@ -377,14 +378,14 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
|||
scheduler_utils.setup_instance_group(
|
||||
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
|
||||
self.task.scheduler_client.select_destinations(self.context,
|
||||
self.fake_spec).AndReturn(
|
||||
self.fake_spec, [self.instance.uuid]).AndReturn(
|
||||
[{'host': 'host1'}])
|
||||
self.task._check_compatible_with_source_hypervisor("host1")
|
||||
self.task._call_livem_checks_on_host("host1")\
|
||||
.AndRaise(exception.Invalid)
|
||||
|
||||
self.task.scheduler_client.select_destinations(self.context,
|
||||
self.fake_spec).AndReturn(
|
||||
self.fake_spec, [self.instance.uuid]).AndReturn(
|
||||
[{'host': 'host2'}])
|
||||
self.task._check_compatible_with_source_hypervisor("host2")
|
||||
self.task._call_livem_checks_on_host("host2")
|
||||
|
@ -408,14 +409,14 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
|||
scheduler_utils.setup_instance_group(
|
||||
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
|
||||
self.task.scheduler_client.select_destinations(self.context,
|
||||
self.fake_spec).AndReturn(
|
||||
self.fake_spec, [self.instance.uuid]).AndReturn(
|
||||
[{'host': 'host1'}])
|
||||
self.task._check_compatible_with_source_hypervisor("host1")
|
||||
self.task._call_livem_checks_on_host("host1")\
|
||||
.AndRaise(exception.MigrationPreCheckError("reason"))
|
||||
|
||||
self.task.scheduler_client.select_destinations(self.context,
|
||||
self.fake_spec).AndReturn(
|
||||
self.fake_spec, [self.instance.uuid]).AndReturn(
|
||||
[{'host': 'host2'}])
|
||||
self.task._check_compatible_with_source_hypervisor("host2")
|
||||
self.task._call_livem_checks_on_host("host2")
|
||||
|
@ -438,7 +439,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
|||
scheduler_utils.setup_instance_group(
|
||||
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
|
||||
self.task.scheduler_client.select_destinations(self.context,
|
||||
self.fake_spec).AndReturn(
|
||||
self.fake_spec, [self.instance.uuid]).AndReturn(
|
||||
[{'host': 'host1'}])
|
||||
self.task._check_compatible_with_source_hypervisor("host1")\
|
||||
.AndRaise(exception.DestinationHypervisorTooOld)
|
||||
|
@ -461,7 +462,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
|
|||
scheduler_utils.setup_instance_group(
|
||||
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
|
||||
self.task.scheduler_client.select_destinations(self.context,
|
||||
self.fake_spec).AndRaise(
|
||||
self.fake_spec, [self.instance.uuid]).AndRaise(
|
||||
exception.NoValidHost(reason=""))
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
|
|
@ -75,7 +75,7 @@ class MigrationTaskTestCase(test.NoDBTestCase):
|
|||
sig_mock.assert_called_once_with(self.context, legacy_request_spec,
|
||||
self.filter_properties)
|
||||
task.scheduler_client.select_destinations.assert_called_once_with(
|
||||
self.context, self.request_spec)
|
||||
self.context, self.request_spec, [self.instance.uuid])
|
||||
prep_resize_mock.assert_called_once_with(
|
||||
self.context, self.instance, legacy_request_spec['image'],
|
||||
self.flavor, self.hosts[0]['host'], self.reservations,
|
||||
|
|
|
@ -406,7 +406,7 @@ class _BaseTaskTestCase(object):
|
|||
'num_instances': 2}
|
||||
filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
|
||||
self.conductor_manager._schedule_instances(self.context,
|
||||
spec, filter_properties).AndReturn(
|
||||
spec, filter_properties, [uuids.fake, uuids.fake]).AndReturn(
|
||||
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
|
||||
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
|
||||
db.block_device_mapping_get_all_by_instance(self.context,
|
||||
|
@ -1025,7 +1025,7 @@ class _BaseTaskTestCase(object):
|
|||
self.conductor.unshelve_instance(self.context, instance, fake_spec)
|
||||
reset_forced_destinations.assert_called_once_with()
|
||||
sched_instances.assert_called_once_with(self.context, request_spec,
|
||||
filter_properties)
|
||||
filter_properties, [instance.uuid])
|
||||
# NOTE(sbauza): Since the instance is dehydrated when passing
|
||||
# through the RPC API, we can only assert mock.ANY for it
|
||||
unshelve_instance.assert_called_once_with(
|
||||
|
@ -1100,7 +1100,7 @@ class _BaseTaskTestCase(object):
|
|||
scheduler_utils.build_request_spec(self.context, 'fake_image',
|
||||
mox.IgnoreArg()).AndReturn('req_spec')
|
||||
self.conductor_manager._schedule_instances(self.context,
|
||||
'req_spec', filter_properties).AndReturn(
|
||||
'req_spec', filter_properties, [instance.uuid]).AndReturn(
|
||||
[{'host': 'fake_host',
|
||||
'nodename': 'fake_node',
|
||||
'limits': {}}])
|
||||
|
@ -1183,7 +1183,7 @@ class _BaseTaskTestCase(object):
|
|||
scheduler_utils.build_request_spec(self.context, None,
|
||||
mox.IgnoreArg()).AndReturn('req_spec')
|
||||
self.conductor_manager._schedule_instances(self.context,
|
||||
'req_spec', filter_properties).AndReturn(
|
||||
'req_spec', filter_properties, [instance.uuid]).AndReturn(
|
||||
[{'host': 'fake_host',
|
||||
'nodename': 'fake_node',
|
||||
'limits': {}}])
|
||||
|
@ -1230,6 +1230,7 @@ class _BaseTaskTestCase(object):
|
|||
request_spec = {}
|
||||
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
|
||||
fake_spec = objects.RequestSpec()
|
||||
inst_uuids = [inst_obj.uuid]
|
||||
with test.nested(
|
||||
mock.patch.object(self.conductor_manager.compute_rpcapi,
|
||||
'rebuild_instance'),
|
||||
|
@ -1250,7 +1251,8 @@ class _BaseTaskTestCase(object):
|
|||
**rebuild_args)
|
||||
fp_mock.assert_called_once_with(self.context, request_spec,
|
||||
filter_properties)
|
||||
select_dest_mock.assert_called_once_with(self.context, fake_spec)
|
||||
select_dest_mock.assert_called_once_with(self.context, fake_spec,
|
||||
inst_uuids)
|
||||
compute_args['host'] = expected_host
|
||||
rebuild_mock.assert_called_once_with(self.context,
|
||||
instance=inst_obj,
|
||||
|
@ -1285,7 +1287,8 @@ class _BaseTaskTestCase(object):
|
|||
**rebuild_args)
|
||||
fp_mock.assert_called_once_with(self.context, request_spec,
|
||||
filter_properties)
|
||||
select_dest_mock.assert_called_once_with(self.context, fake_spec)
|
||||
select_dest_mock.assert_called_once_with(self.context, fake_spec,
|
||||
[inst_obj.uuid])
|
||||
self.assertFalse(rebuild_mock.called)
|
||||
|
||||
@mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI,
|
||||
|
@ -1393,7 +1396,7 @@ class _BaseTaskTestCase(object):
|
|||
fp_mock.assert_called_once_with(self.context, request_spec,
|
||||
filter_properties)
|
||||
select_dest_mock.assert_called_once_with(self.context,
|
||||
augmented_spec)
|
||||
augmented_spec, [inst_obj.uuid])
|
||||
compute_args['host'] = expected_host
|
||||
rebuild_mock.assert_called_once_with(self.context,
|
||||
instance=inst_obj,
|
||||
|
@ -2179,7 +2182,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
|
|||
sig_mock.assert_called_once_with(self.context, legacy_request_spec,
|
||||
legacy_filter_props)
|
||||
select_dest_mock.assert_called_once_with(
|
||||
self.context, fake_spec)
|
||||
self.context, fake_spec, [inst_obj.uuid])
|
||||
prep_resize_mock.assert_called_once_with(
|
||||
self.context, inst_obj, legacy_request_spec['image'],
|
||||
flavor, hosts[0]['host'], [resvs],
|
||||
|
@ -2278,8 +2281,9 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
|
|||
scheduler_utils.build_request_spec(self.context, image,
|
||||
mox.IgnoreArg()).AndReturn(spec)
|
||||
filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
|
||||
inst_uuids = [inst.uuid for inst in instances]
|
||||
self.conductor_manager._schedule_instances(self.context,
|
||||
spec, filter_properties).AndReturn(
|
||||
spec, filter_properties, inst_uuids).AndReturn(
|
||||
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
|
||||
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
|
||||
instances[0].save().AndRaise(
|
||||
|
|
|
@ -16,6 +16,7 @@ from nova import context
|
|||
from nova import objects
|
||||
from nova.scheduler.client import query
|
||||
from nova import test
|
||||
from nova.tests import uuidsentinel as uuids
|
||||
|
||||
|
||||
class SchedulerQueryClientTestCase(test.NoDBTestCase):
|
||||
|
@ -32,12 +33,14 @@ class SchedulerQueryClientTestCase(test.NoDBTestCase):
|
|||
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
|
||||
def test_select_destinations(self, mock_select_destinations):
|
||||
fake_spec = objects.RequestSpec()
|
||||
fake_spec.instance_uuid = uuids.instance
|
||||
self.client.select_destinations(
|
||||
context=self.context,
|
||||
spec_obj=fake_spec
|
||||
spec_obj=fake_spec,
|
||||
instance_uuids=[fake_spec.instance_uuid]
|
||||
)
|
||||
mock_select_destinations.assert_called_once_with(
|
||||
self.context, fake_spec)
|
||||
self.context, fake_spec, [fake_spec.instance_uuid])
|
||||
|
||||
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.update_aggregates')
|
||||
def test_update_aggregates(self, mock_update_aggs):
|
||||
|
|
|
@ -167,5 +167,5 @@ class FakeHostState(host_manager.HostState):
|
|||
|
||||
class FakeScheduler(driver.Scheduler):
|
||||
|
||||
def select_destinations(self, context, request_spec, filter_properties):
|
||||
def select_destinations(self, context, spec_obj, instance_uuids):
|
||||
return []
|
||||
|
|
|
@ -84,7 +84,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
|
||||
self.assertRaises(exception.NoValidHost,
|
||||
self.driver.select_destinations,
|
||||
self.context, spec_obj)
|
||||
self.context, spec_obj, [spec_obj.instance_uuid])
|
||||
|
||||
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
|
||||
return_value={'numa_topology': None,
|
||||
|
@ -101,7 +101,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
|
||||
def _test_select_destinations(self, spec_obj):
|
||||
return self.driver.select_destinations(
|
||||
self.context, spec_obj)
|
||||
self.context, spec_obj, [spec_obj.instance_uuid])
|
||||
|
||||
def _get_fake_request_spec(self):
|
||||
# NOTE(sbauza): Prevent to stub the Flavor.get_by_id call just by
|
||||
|
@ -175,8 +175,8 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
a = timeutils.utcnow()
|
||||
|
||||
for x in range(requests):
|
||||
self.driver.select_destinations(
|
||||
self.context, spec_obj)
|
||||
self.driver.select_destinations(self.context, spec_obj,
|
||||
[spec_obj.instance_uuid])
|
||||
|
||||
b = timeutils.utcnow()
|
||||
c = b - a
|
||||
|
@ -222,7 +222,8 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
uuids.cell1: host_states_cell1,
|
||||
uuids.cell2: host_states_cell2,
|
||||
}
|
||||
d = self.driver.select_destinations(self.context, spec_obj)
|
||||
d = self.driver.select_destinations(self.context, spec_obj,
|
||||
[spec_obj.instance_uuid])
|
||||
self.assertIn(d[0]['host'], [hs.host for hs in host_states_cell2])
|
||||
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ from nova import exception
|
|||
from nova import objects
|
||||
from nova.scheduler import chance
|
||||
from nova.tests.unit.scheduler import test_scheduler
|
||||
from nova.tests import uuidsentinel as uuids
|
||||
|
||||
|
||||
class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
||||
|
@ -62,7 +63,8 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
_return_hosts)
|
||||
|
||||
spec_obj = objects.RequestSpec(num_instances=2, ignore_hosts=None)
|
||||
dests = self.driver.select_destinations(self.context, spec_obj)
|
||||
dests = self.driver.select_destinations(self.context, spec_obj,
|
||||
[uuids.instance1, uuids.instance2])
|
||||
|
||||
self.assertEqual(2, len(dests))
|
||||
(host, node) = (dests[0]['host'], dests[0]['nodename'])
|
||||
|
@ -89,6 +91,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
_return_no_host)
|
||||
|
||||
spec_obj = objects.RequestSpec(num_instances=1)
|
||||
spec_obj.instance_uuid = uuids.instance
|
||||
self.assertRaises(exception.NoValidHost,
|
||||
self.driver.select_destinations, self.context,
|
||||
spec_obj)
|
||||
spec_obj, [spec_obj.instance_uuid])
|
||||
|
|
|
@ -21,6 +21,7 @@ from nova.scheduler import client as scheduler_client
|
|||
from nova.scheduler.client import query as scheduler_query_client
|
||||
from nova.scheduler.client import report as scheduler_report_client
|
||||
from nova import test
|
||||
from nova.tests import uuidsentinel as uuids
|
||||
"""Tests for Scheduler Client."""
|
||||
|
||||
|
||||
|
@ -38,12 +39,15 @@ class SchedulerClientTestCase(test.NoDBTestCase):
|
|||
'select_destinations')
|
||||
def test_select_destinations(self, mock_select_destinations):
|
||||
fake_spec = objects.RequestSpec()
|
||||
fake_spec.instance_uuid = uuids.instance
|
||||
self.assertIsNone(self.client.queryclient.instance)
|
||||
|
||||
self.client.select_destinations('ctxt', fake_spec)
|
||||
self.client.select_destinations('ctxt', fake_spec,
|
||||
[fake_spec.instance_uuid])
|
||||
|
||||
self.assertIsNotNone(self.client.queryclient.instance)
|
||||
mock_select_destinations.assert_called_once_with('ctxt', fake_spec)
|
||||
mock_select_destinations.assert_called_once_with('ctxt', fake_spec,
|
||||
[fake_spec.instance_uuid])
|
||||
|
||||
@mock.patch.object(scheduler_query_client.SchedulerQueryClient,
|
||||
'select_destinations',
|
||||
|
@ -51,7 +55,8 @@ class SchedulerClientTestCase(test.NoDBTestCase):
|
|||
def test_select_destinations_timeout(self, mock_select_destinations):
|
||||
# check if the scheduler service times out properly
|
||||
fake_spec = objects.RequestSpec()
|
||||
fake_args = ['ctxt', fake_spec]
|
||||
fake_spec.instance_uuid = uuids.instance
|
||||
fake_args = ['ctxt', fake_spec, [fake_spec.instance_uuid]]
|
||||
self.assertRaises(messaging.MessagingTimeout,
|
||||
self.client.select_destinations, *fake_args)
|
||||
mock_select_destinations.assert_has_calls([mock.call(*fake_args)] * 2)
|
||||
|
@ -62,7 +67,8 @@ class SchedulerClientTestCase(test.NoDBTestCase):
|
|||
def test_select_destinations_timeout_once(self, mock_select_destinations):
|
||||
# scenario: the scheduler service times out & recovers after failure
|
||||
fake_spec = objects.RequestSpec()
|
||||
fake_args = ['ctxt', fake_spec]
|
||||
fake_spec.instance_uuid = uuids.instance
|
||||
fake_args = ['ctxt', fake_spec, [fake_spec.instance_uuid]]
|
||||
self.client.select_destinations(*fake_args)
|
||||
mock_select_destinations.assert_has_calls([mock.call(*fake_args)] * 2)
|
||||
|
||||
|
|
|
@ -86,7 +86,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
with mock.patch.object(self.driver.host_manager,
|
||||
'get_filtered_hosts') as mock_get_hosts:
|
||||
mock_get_hosts.side_effect = fake_get_filtered_hosts
|
||||
weighed_hosts = self.driver._schedule(self.context, spec_obj)
|
||||
weighed_hosts = self.driver._schedule(self.context, spec_obj,
|
||||
[uuids.instance])
|
||||
|
||||
self.assertEqual(len(weighed_hosts), 10)
|
||||
for weighed_host in weighed_hosts:
|
||||
|
@ -156,7 +157,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
with mock.patch.object(self.driver.host_manager,
|
||||
'get_filtered_hosts') as mock_get_hosts:
|
||||
mock_get_hosts.side_effect = fake_get_filtered_hosts
|
||||
hosts = self.driver._schedule(self.context, spec_obj)
|
||||
hosts = self.driver._schedule(self.context, spec_obj,
|
||||
[uuids.instance])
|
||||
|
||||
# one host should be chosen
|
||||
self.assertEqual(len(hosts), 1)
|
||||
|
@ -200,7 +202,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
with mock.patch.object(self.driver.host_manager,
|
||||
'get_filtered_hosts') as mock_get_hosts:
|
||||
mock_get_hosts.side_effect = fake_get_filtered_hosts
|
||||
hosts = self.driver._schedule(self.context, spec_obj)
|
||||
hosts = self.driver._schedule(self.context, spec_obj,
|
||||
[uuids.instance])
|
||||
|
||||
# one host should be chosen
|
||||
self.assertEqual(len(hosts), 1)
|
||||
|
@ -255,7 +258,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
with mock.patch.object(self.driver.host_manager,
|
||||
'get_filtered_hosts') as mock_get_hosts:
|
||||
mock_get_hosts.side_effect = fake_get_filtered_hosts
|
||||
hosts = self.driver._schedule(self.context, spec_obj)
|
||||
hosts = self.driver._schedule(self.context, spec_obj,
|
||||
[uuids.instance])
|
||||
|
||||
# one host should be chosen
|
||||
self.assertEqual(1, len(hosts))
|
||||
|
@ -311,10 +315,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
numa_topology=None,
|
||||
instance_group=None)
|
||||
|
||||
instance_uuids = [uuids.instance]
|
||||
with mock.patch.object(self.driver.host_manager,
|
||||
'get_filtered_hosts') as mock_get_hosts:
|
||||
mock_get_hosts.side_effect = fake_get_filtered_hosts
|
||||
dests = self.driver.select_destinations(self.context, spec_obj)
|
||||
dests = self.driver.select_destinations(self.context, spec_obj,
|
||||
instance_uuids)
|
||||
|
||||
(host, node) = (dests[0]['host'], dests[0]['nodename'])
|
||||
self.assertEqual(host, selected_hosts[0])
|
||||
|
@ -332,7 +338,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
spec_obj = objects.RequestSpec(num_instances=1,
|
||||
instance_uuid=uuids.instance)
|
||||
|
||||
self.driver.select_destinations(self.context, spec_obj)
|
||||
self.driver.select_destinations(self.context, spec_obj,
|
||||
[uuids.instance])
|
||||
|
||||
expected = [
|
||||
mock.call(self.context, 'scheduler.select_destinations.start',
|
||||
|
@ -346,7 +353,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
mock_schedule.return_value = []
|
||||
self.assertRaises(exception.NoValidHost,
|
||||
self.driver.select_destinations, self.context,
|
||||
objects.RequestSpec(num_instances=1))
|
||||
objects.RequestSpec(num_instances=1), [uuids.instance])
|
||||
|
||||
def test_select_destinations_no_valid_host_not_enough(self):
|
||||
# Tests that we have fewer hosts available than number of instances
|
||||
|
@ -356,7 +363,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
return_value=consumed_hosts):
|
||||
try:
|
||||
self.driver.select_destinations(
|
||||
self.context, objects.RequestSpec(num_instances=3))
|
||||
self.context, objects.RequestSpec(num_instances=3),
|
||||
[uuids.instance])
|
||||
self.fail('Expected NoValidHost to be raised.')
|
||||
except exception.NoValidHost as e:
|
||||
# Make sure that we provided a reason why NoValidHost.
|
||||
|
|
|
@ -23,6 +23,7 @@ from nova import context
|
|||
from nova import objects
|
||||
from nova.scheduler import rpcapi as scheduler_rpcapi
|
||||
from nova import test
|
||||
from nova.tests import uuidsentinel as uuids
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
@ -73,7 +74,17 @@ class SchedulerRpcAPITestCase(test.NoDBTestCase):
|
|||
def test_select_destinations(self):
|
||||
fake_spec = objects.RequestSpec()
|
||||
self._test_scheduler_api('select_destinations', rpc_method='call',
|
||||
spec_obj=fake_spec,
|
||||
expected_args={'spec_obj': fake_spec,
|
||||
'instance_uuids': [uuids.instance]},
|
||||
spec_obj=fake_spec, instance_uuids=[uuids.instance],
|
||||
version='4.4')
|
||||
|
||||
def test_select_destinations_4_3(self):
|
||||
self.flags(scheduler='4.3', group='upgrade_levels')
|
||||
fake_spec = objects.RequestSpec()
|
||||
self._test_scheduler_api('select_destinations', rpc_method='call',
|
||||
expected_args={'spec_obj': fake_spec},
|
||||
spec_obj=fake_spec, instance_uuids=[uuids.instance],
|
||||
version='4.3')
|
||||
|
||||
@mock.patch.object(objects.RequestSpec, 'to_legacy_filter_properties_dict')
|
||||
|
@ -87,7 +98,7 @@ class SchedulerRpcAPITestCase(test.NoDBTestCase):
|
|||
self._test_scheduler_api('select_destinations', rpc_method='call',
|
||||
expected_args={'request_spec': 'fake_request_spec',
|
||||
'filter_properties': 'fake_prop'},
|
||||
spec_obj=fake_spec,
|
||||
spec_obj=fake_spec, instance_uuids=[uuids.instance],
|
||||
version='4.0')
|
||||
|
||||
def test_update_aggregates(self):
|
||||
|
|
|
@ -31,6 +31,7 @@ from nova import servicegroup
|
|||
from nova import test
|
||||
from nova.tests.unit import fake_server_actions
|
||||
from nova.tests.unit.scheduler import fakes
|
||||
from nova.tests import uuidsentinel as uuids
|
||||
|
||||
|
||||
class SchedulerManagerInitTestCase(test.NoDBTestCase):
|
||||
|
@ -90,22 +91,35 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
|
|||
self.assertIsInstance(manager.driver, self.driver_cls)
|
||||
|
||||
def test_select_destination(self):
|
||||
fake_spec = objects.RequestSpec()
|
||||
fake_spec.instance_uuid = uuids.instance
|
||||
with mock.patch.object(self.manager.driver, 'select_destinations'
|
||||
) as select_destinations:
|
||||
self.manager.select_destinations(None, spec_obj=fake_spec,
|
||||
instance_uuids=[fake_spec.instance_uuid])
|
||||
select_destinations.assert_called_once_with(None, fake_spec,
|
||||
[fake_spec.instance_uuid])
|
||||
|
||||
def test_select_destination_with_4_3_client(self):
|
||||
fake_spec = objects.RequestSpec()
|
||||
with mock.patch.object(self.manager.driver, 'select_destinations'
|
||||
) as select_destinations:
|
||||
self.manager.select_destinations(None, spec_obj=fake_spec)
|
||||
select_destinations.assert_called_once_with(None, fake_spec)
|
||||
select_destinations.assert_called_once_with(None, fake_spec, None)
|
||||
|
||||
# TODO(sbauza): Remove that test once the API v4 is removed
|
||||
@mock.patch.object(objects.RequestSpec, 'from_primitives')
|
||||
def test_select_destination_with_old_client(self, from_primitives):
|
||||
fake_spec = objects.RequestSpec()
|
||||
fake_spec.instance_uuid = uuids.instance
|
||||
from_primitives.return_value = fake_spec
|
||||
with mock.patch.object(self.manager.driver, 'select_destinations'
|
||||
) as select_destinations:
|
||||
self.manager.select_destinations(None, request_spec='fake_spec',
|
||||
filter_properties='fake_props')
|
||||
select_destinations.assert_called_once_with(None, fake_spec)
|
||||
filter_properties='fake_props',
|
||||
instance_uuids=[fake_spec.instance_uuid])
|
||||
select_destinations.assert_called_once_with(None, fake_spec,
|
||||
[fake_spec.instance_uuid])
|
||||
|
||||
def test_update_aggregates(self):
|
||||
with mock.patch.object(self.manager.driver.host_manager,
|
||||
|
|
Loading…
Reference in New Issue