Modify select_destinations() to return objects and alts

This changes select_destinations() on the scheduler side to optionally
return Selection objects and alternates. The RPC signature doesn't
change in this patch, though, so everything on the conductor side
remains unchanged. The next patch in the series will make the actual RPC
change.
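
To make the new result shape concrete, here is a minimal, self-contained sketch; the Selection class below is a stand-in for nova.objects.Selection, not the real object:

    # Stand-in for nova.objects.Selection, for illustration only.
    class Selection(object):
        def __init__(self, service_host, nodename, limits=None):
            self.service_host = service_host
            self.nodename = nodename
            self.limits = limits

        def to_dict(self):
            return {'host': self.service_host, 'nodename': self.nodename,
                    'limits': self.limits or {}}

    # New-style result: one inner list per instance; element 0 is the chosen
    # host, any further elements are alternates.
    selections = [[Selection('host1', 'node1'), Selection('host2', 'node2')]]

    # Old-style result, still produced while the RPC side is unchanged: a
    # flat list of dicts, one per instance, with the alternates dropped.
    legacy = [sel[0].to_dict() for sel in selections]
    assert legacy == [{'host': 'host1', 'nodename': 'node1', 'limits': {}}]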

Blueprint: return-alternate-hosts

Change-Id: I03b95a2106624c2ea24835814ca38e954ec7a997
Ed Leafe 2017-09-26 23:12:11 +00:00
parent 526934eafd
commit e7152eef7b
17 changed files with 412 additions and 118 deletions

nova/exception.py

@ -1776,6 +1776,12 @@ class LiveMigrationWithOldNovaNotSupported(NovaException):
"upgrade to be complete before it is available.")
class SelectionObjectsWithOldRPCVersionNotSupported(NovaException):
msg_fmt = _("Requests for Selection objects with alternates are not "
"supported in select_destinations() before RPC version 4.5; "
"version %(version)s requested.")
class LiveMigrationURINotAvailable(NovaException):
msg_fmt = _('No live migration URI configured and no default available '
'for "%(virt_type)s" hypervisor virtualization type.')
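
The new exception's message is interpolated with the requested version in the usual NovaException msg_fmt way; a standalone sketch of just the string handling:

    # In Nova the interpolation happens inside NovaException.__init__();
    # this reproduces only the formatting step.
    msg_fmt = ("Requests for Selection objects with alternates are not "
               "supported in select_destinations() before RPC version 4.5; "
               "version %(version)s requested.")
    print(msg_fmt % {'version': '4.4'})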

nova/scheduler/chance.py

@ -51,7 +51,8 @@ class ChanceScheduler(driver.Scheduler):
hosts = [host for host in hosts if host not in ignore_hosts]
return hosts
def _schedule(self, context, topic, spec_obj, instance_uuids):
def _schedule(self, context, topic, spec_obj, instance_uuids,
return_alternates=False):
"""Picks a host that is up at random."""
elevated = context.elevated()
@ -87,7 +88,10 @@ class ChanceScheduler(driver.Scheduler):
# We can't return dupes as alternates, since alternates are used when
# building to the selected host fails.
alts_per_instance = min(len(hosts), CONF.scheduler.max_attempts)
if return_alternates:
alts_per_instance = min(len(hosts), CONF.scheduler.max_attempts)
else:
alts_per_instance = 0
for sel_host in selected_hosts:
selection = objects.Selection.from_host_state(sel_host)
sel_plus_alts = [selection]
@ -105,8 +109,10 @@ class ChanceScheduler(driver.Scheduler):
def select_destinations(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None):
"""Selects random destinations. Returns a list of HostState objects."""
allocation_request_version=None, return_alternates=False):
"""Selects random destinations. Returns a list of list of Selection
objects.
"""
num_instances = spec_obj.num_instances
# TODO(danms): This needs to be extended to support multiple cells
# and limiting the destination scope to a single requested cell
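
The alternate-count logic above reduces to the following standalone sketch, with max_attempts standing in for CONF.scheduler.max_attempts:

    # Alternates may not repeat hosts, so the per-instance list is capped
    # both by the number of distinct hosts and by the retry budget.
    def alts_per_instance(num_hosts, max_attempts, return_alternates):
        if return_alternates:
            return min(num_hosts, max_attempts)
        return 0

    assert alts_per_instance(10, 3, return_alternates=True) == 3
    assert alts_per_instance(2, 3, return_alternates=True) == 2
    assert alts_per_instance(10, 3, return_alternates=False) == 0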

nova/scheduler/client/__init__.py

@ -47,9 +47,10 @@ class SchedulerClient(object):
'nova.scheduler.client.report.SchedulerReportClient'))
@utils.retry_select_destinations
def select_destinations(self, context, spec_obj, instance_uuids):
def select_destinations(self, context, spec_obj, instance_uuids,
return_objects=False, return_alternates=False):
return self.queryclient.select_destinations(context, spec_obj,
instance_uuids)
instance_uuids, return_objects, return_alternates)
def update_aggregates(self, context, aggregates):
self.queryclient.update_aggregates(context, aggregates)

nova/scheduler/client/query.py

@ -22,15 +22,24 @@ class SchedulerQueryClient(object):
def __init__(self):
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
def select_destinations(self, context, spec_obj, instance_uuids):
def select_destinations(self, context, spec_obj, instance_uuids,
return_objects=False, return_alternates=False):
"""Returns destinations(s) best suited for this request_spec and
filter_properties.
The result should be a list of dicts with 'host', 'nodename' and
'limits' as keys.
When return_objects is False, the result will be the "old-style" list
of dicts with 'host', 'nodename' and 'limits' as keys. The value of
return_alternates is ignored.
When return_objects is True, the result will be a list of lists of
Selection objects, with one list per instance. Each instance's list
will contain a Selection representing the selected (and claimed) host,
and, if return_alternates is True, zero or more Selection objects that
represent alternate hosts. The number of alternates returned depends on
the configuration setting `CONF.scheduler.max_attempts`.
"""
return self.scheduler_rpcapi.select_destinations(context, spec_obj,
instance_uuids)
instance_uuids, return_objects, return_alternates)
def update_aggregates(self, context, aggregates):
"""Updates HostManager internal aggregates information.

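The select_destinations() docstring above describes two result shapes; the following is a hedged, runnable sketch of that contract, with a stub standing in for the real RPC round trip:

    # FakeQueryClient stubs the RPC round trip; the real path goes through
    # scheduler_rpcapi.SchedulerAPI.select_destinations().
    class FakeQueryClient(object):
        def select_destinations(self, context, spec_obj, instance_uuids,
                                return_objects=False,
                                return_alternates=False):
            if not return_objects:
                # Legacy shape: one dict per instance; return_alternates
                # is ignored here.
                return [{'host': 'h1', 'nodename': 'n1', 'limits': {}}]
            # Strings stand in for Selection objects in this sketch.
            picks = ['selection-h1']           # the chosen (claimed) host
            if return_alternates:
                picks.append('selection-h2')   # zero or more alternates
            return [picks]                     # one inner list per instance

    client = FakeQueryClient()
    dests = client.select_destinations(None, None, ['uuid'],
                                       return_objects=True,
                                       return_alternates=True)
    chosen, alternates = dests[0][0], dests[0][1:]
    assert chosen == 'selection-h1' and alternates == ['selection-h2']
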
nova/scheduler/driver.py

@ -63,9 +63,8 @@ class Scheduler(object):
@abc.abstractmethod
def select_destinations(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None):
"""Returns a list of Selection objects that have been chosen by the
scheduler driver, one for each requested instance
(spec_obj.num_instances)
allocation_request_version=None, return_alternates=False):
"""Returns a list of lists of Selection objects that have been chosen
by the scheduler driver, one for each requested instance.
"""
return []

nova/scheduler/filter_scheduler.py

@ -47,9 +47,9 @@ class FilterScheduler(driver.Scheduler):
def select_destinations(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None):
allocation_request_version=None, return_alternates=False):
"""Returns a list of lists of Selection objects, which represent the
selected hosts and alternates for each instance.
hosts and (optionally) alternates for each instance.
:param context: The RequestContext object
:param spec_obj: The RequestSpec object
@ -76,6 +76,11 @@ class FilterScheduler(driver.Scheduler):
resources.
:param allocation_request_version: The microversion used to request the
allocations.
:param return_alternates: When True, zero or more alternate hosts are
returned with each selected host. The number
of alternates is determined by the
configuration option
`CONF.scheduler.max_attempts`.
"""
self.notifier.info(
context, 'scheduler.select_destinations.start',
@ -83,7 +88,7 @@ class FilterScheduler(driver.Scheduler):
host_selections = self._schedule(context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version)
allocation_request_version, return_alternates)
self.notifier.info(
context, 'scheduler.select_destinations.end',
dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
@ -91,7 +96,7 @@ class FilterScheduler(driver.Scheduler):
def _schedule(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None):
allocation_request_version=None, return_alternates=False):
"""Returns a list of lists of Selection objects.
:param context: The RequestContext object
@ -118,6 +123,11 @@ class FilterScheduler(driver.Scheduler):
resources.
:param allocation_request_version: The microversion used to request the
allocations.
:param return_alternates: When True, zero or more alternate hosts are
returned with each selected host. The number
of alternates is determined by the
configuration option
`CONF.scheduler.max_attempts`.
"""
elevated = context.elevated()
@ -148,7 +158,7 @@ class FilterScheduler(driver.Scheduler):
# is based on CONF.scheduler.max_attempts; note that if there are not
# enough filtered hosts to provide the full number of alternates, the
# list of hosts may be shorter than this amount.
num_alts = CONF.scheduler.max_attempts
num_alts = CONF.scheduler.max_attempts if return_alternates else 0
if (instance_uuids is None or
not self.USES_ALLOCATION_CANDIDATES or
@ -162,8 +172,6 @@ class FilterScheduler(driver.Scheduler):
# is None, that indicates an older conductor, so we need to return
# the objects without alternates. They will be converted back to
# the older dict format representing HostState objects.
if instance_uuids is None:
num_alts = 0
return self._legacy_find_hosts(num_instances, spec_obj, hosts,
num_alts)
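
Sketched standalone, the driver builds one list per instance: the chosen host first, then up to num_alts distinct other filtered hosts. This illustrates the shape of the result, not the driver's actual algorithm; hosts are plain strings here rather than HostState/Selection objects:

    def build_host_lists(chosen_hosts, filtered_hosts, num_alts):
        results = []
        for chosen in chosen_hosts:
            row = [chosen]
            for host in filtered_hosts:
                if len(row) > num_alts:
                    break
                if host not in row:
                    row.append(host)
            results.append(row)
        return results

    assert build_host_lists(['h1'], ['h1', 'h2', 'h3', 'h4'], 2) == \
        [['h1', 'h2', 'h3']]
    assert build_host_lists(['h1'], ['h1', 'h2'], 0) == [['h1']]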

nova/scheduler/manager.py

@ -48,7 +48,7 @@ QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
target = messaging.Target(version='4.4')
target = messaging.Target(version='4.5')
_sentinel = object()
@ -82,13 +82,30 @@ class SchedulerManager(manager.Manager):
self.driver.run_periodic_tasks(context)
@messaging.expected_exceptions(exception.NoValidHost)
def select_destinations(self, ctxt,
request_spec=None, filter_properties=None,
spec_obj=_sentinel, instance_uuids=None):
def select_destinations(self, ctxt, request_spec=None,
filter_properties=None, spec_obj=_sentinel, instance_uuids=None,
return_objects=False, return_alternates=False):
"""Returns destinations(s) best suited for this RequestSpec.
The result should be a list of dicts with 'host', 'nodename' and
'limits' as keys.
Starting in Queens, this method returns a list of lists of Selection
objects, with one list for each requested instance. Each instance's
list will have its first element be the Selection object representing
the chosen host for the instance, and if return_alternates is True,
zero or more alternate objects that could also satisfy the request. The
number of alternates is determined by the configuration option
`CONF.scheduler.max_attempts`.
The ability of a calling method to handle this format of returned
destinations is indicated by a True value in the parameter
`return_objects`. However, there may still be some older conductors in
a deployment that have not been updated to Queens, and in that case
return_objects will be False, and the result will be a list of dicts
with 'host', 'nodename' and 'limits' as keys. When return_objects is
False, the value of return_alternates has no effect. The reason there
are two kwarg parameters return_objects and return_alternates is so we
can differentiate between callers that understand the Selection object
format but *don't* want to get alternate hosts, as is the case with the
conductors that handle certain move operations.
"""
LOG.debug("Starting to schedule for instances: %s", instance_uuids)
@ -127,15 +144,22 @@ class SchedulerManager(manager.Manager):
rp_uuid = rr['resource_provider']['uuid']
alloc_reqs_by_rp_uuid[rp_uuid].append(ar)
# Only return alternates if both return_objects and return_alternates
# are True.
return_alternates = return_alternates and return_objects
selections = self.driver.select_destinations(ctxt, spec_obj,
instance_uuids, alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version)
# We don't want to change the return value in this patch, as it
# involves an RPC change. So convert the list of lists of Selection
# objects to a list of host state dicts, which is what the calling
# method expects.
selection_dicts = [sel[0].to_dict() for sel in selections]
return jsonutils.to_primitive(selection_dicts)
allocation_request_version, return_alternates)
# If `return_objects` is False, we need to convert the selections to
# the older format, which is a list of host state dicts.
# NOTE(edleafe): since the RPC calling side is not yet updated in this
# patch, return_objects will always be False. This prevents sending the
# new Selection objects back until a later patch where the calling RPC
# will be changed.
if not return_objects:
selection_dicts = [sel[0].to_dict() for sel in selections]
return jsonutils.to_primitive(selection_dicts)
return selections
def update_aggregates(self, ctxt, aggregates):
"""Updates HostManager internal aggregates information.

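The gating in select_destinations() above reduces to a small truth table: alternates are only requested from the driver when the caller can also consume Selection objects, because the legacy dict format has no way to carry them. A sketch:

    def effective_return_alternates(return_objects, return_alternates):
        return return_alternates and return_objects

    assert effective_return_alternates(True, True) is True
    assert effective_return_alternates(False, True) is False
    assert effective_return_alternates(True, False) is False
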
nova/scheduler/rpcapi.py

@ -19,6 +19,7 @@ Client side of the scheduler manager RPC API.
import oslo_messaging as messaging
import nova.conf
from nova import exception as exc
from nova.objects import base as objects_base
from nova import profiler
from nova import rpc
@ -97,6 +98,13 @@ class SchedulerAPI(object):
* 4.4 - Modify select_destinations() signature by providing the
instance_uuids for the request.
... Pike supports message version 4.4. So any changes to existing
methods in 4.x after that point should be done such
that they can handle the version_cap being set to 4.4.
* 4.5 - Modify select_destinations() to optionally return a list of
lists of Selection objects, along with zero or more alternates.
'''
VERSION_ALIASES = {
@ -121,10 +129,22 @@ class SchedulerAPI(object):
self.client = rpc.get_client(target, version_cap=version_cap,
serializer=serializer)
def select_destinations(self, ctxt, spec_obj, instance_uuids):
version = '4.4'
def select_destinations(self, ctxt, spec_obj, instance_uuids,
return_objects=False, return_alternates=False):
# Modify the parameters if an older version is requested
version = '4.5'
msg_args = {'instance_uuids': instance_uuids,
'spec_obj': spec_obj}
'spec_obj': spec_obj,
'return_objects': return_objects,
'return_alternates': return_alternates}
if not self.client.can_send_version(version):
if msg_args['return_objects'] or msg_args['return_alternates']:
# The client is requesting an RPC version we can't support.
raise exc.SelectionObjectsWithOldRPCVersionNotSupported(
version=self.client.version_cap)
del msg_args['return_objects']
del msg_args['return_alternates']
version = '4.4'
if not self.client.can_send_version(version):
del msg_args['instance_uuids']
version = '4.3'
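
The negotiation above can be read as this standalone sketch; can_send stands in for self.client.can_send_version(), and UnsupportedRPC for the new exception:

    class UnsupportedRPC(Exception):
        """Stand-in for SelectionObjectsWithOldRPCVersionNotSupported."""

    def negotiate(can_send, msg_args):
        version = '4.5'
        if not can_send(version):
            if msg_args['return_objects'] or msg_args['return_alternates']:
                # The caller asked for behavior the pinned RPC can't express.
                raise UnsupportedRPC()
            del msg_args['return_objects']
            del msg_args['return_alternates']
            version = '4.4'
        if not can_send(version):
            del msg_args['instance_uuids']
            version = '4.3'
        return version, msg_args

    args = {'instance_uuids': ['u'], 'return_objects': False,
            'return_alternates': False}
    version, args = negotiate(lambda v: v <= '4.4', args)
    assert version == '4.4' and 'return_objects' not in args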

nova/scheduler/utils.py

@ -483,20 +483,21 @@ def build_filter_properties(scheduler_hints, forced_host,
return filter_properties
def populate_filter_properties(filter_properties, host_state):
def populate_filter_properties(filter_properties, selection):
"""Add additional information to the filter properties after a node has
been selected by the scheduling process.
"""
if isinstance(host_state, dict):
if isinstance(selection, dict):
# TODO(edleafe): remove support for dicts
host = host_state['host']
nodename = host_state['nodename']
limits = host_state['limits']
host = selection['host']
nodename = selection['nodename']
limits = selection['limits']
else:
host = host_state.host
nodename = host_state.nodename
limits = host_state.limits
host = selection.service_host
nodename = selection.nodename
limits = selection.limits if "limits" in selection else {}
# 'limits' can also be None, so handle that as an empty dict
limits = limits or {}
# Adds a retry entry for the selected compute host and node:
_add_retry_host(filter_properties, host, nodename)
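
A runnable sketch of the dual-format handling above; FakeSelection stands in for nova.objects.Selection, on which "limits" in obj tests whether the field is set:

    class FakeSelection(object):
        def __init__(self, service_host, nodename, limits=None):
            self.service_host = service_host
            self.nodename = nodename
            self.limits = limits

        def __contains__(self, field):
            # Approximates o.vo's "field in obj" check for set fields.
            return getattr(self, field, None) is not None

    def extract(selection):
        if isinstance(selection, dict):
            # Old-style dict input; normalized the same way in this sketch.
            return (selection['host'], selection['nodename'],
                    selection['limits'] or {})
        limits = selection.limits if "limits" in selection else {}
        return selection.service_host, selection.nodename, limits or {}

    assert extract(FakeSelection('h1', 'n1')) == ('h1', 'n1', {})
    assert extract({'host': 'h1', 'nodename': 'n1',
                    'limits': None}) == ('h1', 'n1', {})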

nova/tests/unit/scheduler/client/test_query.py

@ -32,6 +32,20 @@ class SchedulerQueryClientTestCase(test.NoDBTestCase):
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
def test_select_destinations(self, mock_select_destinations):
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
self.client.select_destinations(
context=self.context,
spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid],
return_objects=True,
return_alternates=True,
)
mock_select_destinations.assert_called_once_with(self.context,
fake_spec, [fake_spec.instance_uuid], True, True)
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
def test_select_destinations_old_call(self, mock_select_destinations):
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
self.client.select_destinations(
@ -39,8 +53,8 @@ class SchedulerQueryClientTestCase(test.NoDBTestCase):
spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid]
)
mock_select_destinations.assert_called_once_with(
self.context, fake_spec, [fake_spec.instance_uuid])
mock_select_destinations.assert_called_once_with(self.context,
fake_spec, [fake_spec.instance_uuid], False, False)
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.update_aggregates')
def test_update_aggregates(self, mock_update_aggs):

nova/tests/unit/scheduler/test_caching_scheduler.py

@ -283,7 +283,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_group=None)
dests = self.driver._schedule(self.context, spec_obj,
instance_uuids, None, None)
instance_uuids, None, None, return_alternates=True)
# There should be max_attempts hosts per instance (1 selected, 2 alts)
self.assertEqual(total_returned, len(dests[0]))
self.assertEqual(total_returned, len(dests[1]))

nova/tests/unit/scheduler/test_chance_scheduler.py

@ -100,7 +100,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
expected = attempts
self.flags(max_attempts=attempts, group="scheduler")
selected_hosts = self.driver._schedule(self.context, "compute",
spec_obj, [spec_obj.instance_uuid])
spec_obj, [spec_obj.instance_uuid], return_alternates=True)
self.assertEqual(1, len(selected_hosts))
for host_list in selected_hosts:
self.assertEqual(expected, len(host_list))
@ -112,7 +112,18 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
expected = len(hosts)
self.flags(max_attempts=attempts, group="scheduler")
selected_hosts = self.driver._schedule(self.context, "compute",
spec_obj, [spec_obj.instance_uuid])
spec_obj, [spec_obj.instance_uuid], return_alternates=True)
self.assertEqual(1, len(selected_hosts))
for host_list in selected_hosts:
self.assertEqual(expected, len(host_list))
# Now verify that if we pass False for return_alternates, we only
# get one host in the host_list.
attempts = 5
expected = 1
self.flags(max_attempts=attempts, group="scheduler")
selected_hosts = self.driver._schedule(self.context, "compute",
spec_obj, [spec_obj.instance_uuid], return_alternates=False)
self.assertEqual(1, len(selected_hosts))
for host_list in selected_hosts:
self.assertEqual(expected, len(host_list))
@ -131,7 +142,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
attempts = 2
self.flags(max_attempts=attempts, group="scheduler")
selected_hosts = self.driver._schedule(self.context, "compute",
spec_obj, instance_uuids)
spec_obj, instance_uuids, return_alternates=True)
self.assertEqual(num_instances, len(selected_hosts))
for host_list in selected_hosts:
self.assertEqual(attempts, len(host_list))

nova/tests/unit/scheduler/test_client.py

@ -47,7 +47,7 @@ class SchedulerClientTestCase(test.NoDBTestCase):
self.assertIsNotNone(self.client.queryclient.instance)
mock_select_destinations.assert_called_once_with('ctxt', fake_spec,
[fake_spec.instance_uuid])
[fake_spec.instance_uuid], False, False)
@mock.patch.object(scheduler_query_client.SchedulerQueryClient,
'select_destinations',
@ -56,7 +56,8 @@ class SchedulerClientTestCase(test.NoDBTestCase):
# check if the scheduler service times out properly
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
fake_args = ['ctxt', fake_spec, [fake_spec.instance_uuid]]
fake_args = ['ctxt', fake_spec, [fake_spec.instance_uuid], False,
False]
self.assertRaises(messaging.MessagingTimeout,
self.client.select_destinations, *fake_args)
mock_select_destinations.assert_has_calls([mock.call(*fake_args)] * 2)
@ -68,7 +69,8 @@ class SchedulerClientTestCase(test.NoDBTestCase):
# scenario: the scheduler service times out & recovers after failure
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
fake_args = ['ctxt', fake_spec, [fake_spec.instance_uuid]]
fake_args = ['ctxt', fake_spec, [fake_spec.instance_uuid], False,
False]
self.client.select_destinations(*fake_args)
mock_select_destinations.assert_has_calls([mock.call(*fake_args)] * 2)

nova/tests/unit/scheduler/test_filter_scheduler.py

@ -184,51 +184,27 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
project_id=uuids.project_id,
instance_group=None)
host_state1 = mock.Mock(spec=host_manager.HostState,
host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
cell_uuid=uuids.cell, limits={})
host_state2 = mock.Mock(spec=host_manager.HostState,
host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
cell_uuid=uuids.cell, limits={})
host_state3 = mock.Mock(spec=host_manager.HostState,
host="fake_host3", nodename="fake_node3", uuid=uuids.cn3,
cell_uuid=uuids.cell, limits={})
all_host_states = [host_state1, host_state2, host_state3]
host_state = mock.Mock(spec=host_manager.HostState,
host="fake_host", nodename="fake_node", uuid=uuids.cn1,
cell_uuid=uuids.cell1, limits={})
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
mock_claim.return_value = True
instance_uuids = [uuids.instance]
fake_alloc1 = {"allocations": [
fake_alloc = {"allocations": [
{"resource_provider": {"uuid": uuids.cn1},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc2 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn2},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc3 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn3},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc1],
uuids.cn2: [fake_alloc2], uuids.cn3: [fake_alloc3]}
alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc]}
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries)
sel1 = objects.Selection.from_host_state(host_state1)
sel2 = objects.Selection.from_host_state(host_state2,
allocation_request=fake_alloc2)
sel3 = objects.Selection.from_host_state(host_state3,
allocation_request=fake_alloc3)
expected_selection = [[sel1, sel2, sel3]]
expected_selection = [[objects.Selection.from_host_state(host_state)]]
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
@ -241,7 +217,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(expected_selection, selected_hosts)
# Ensure that we have consumed the resources on the chosen host states
host_state1.consume_from_request.assert_called_once_with(spec_obj)
host_state.consume_from_request.assert_called_once_with(spec_obj)
def test_schedule_successful_claim(self):
self._test_schedule_successful_claim()
@ -357,6 +333,134 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# Ensure we cleaned up the first successfully-claimed instance
mock_cleanup.assert_called_once_with([uuids.instance1])
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_selection_alloc_requests_for_alts(self, mock_get_hosts,
mock_get_all_states, mock_claim):
spec_obj = objects.RequestSpec(
num_instances=1,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
host_state0 = mock.Mock(spec=host_manager.HostState,
host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
cell_uuid=uuids.cell, limits={})
host_state1 = mock.Mock(spec=host_manager.HostState,
host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
cell_uuid=uuids.cell, limits={})
host_state2 = mock.Mock(spec=host_manager.HostState,
host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
cell_uuid=uuids.cell, limits={})
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
fake_alloc0 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn0},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc1 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn1},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc2 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn2},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
alloc_reqs_by_rp_uuid = {uuids.cn0: [fake_alloc0],
uuids.cn1: [fake_alloc1], uuids.cn2: [fake_alloc2]}
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries,
return_alternates=True)
sel0 = objects.Selection.from_host_state(host_state0)
sel1 = objects.Selection.from_host_state(host_state1,
allocation_request=fake_alloc1)
sel2 = objects.Selection.from_host_state(host_state2,
allocation_request=fake_alloc2)
expected_selection = [[sel0, sel1, sel2]]
self.assertEqual(expected_selection, selected_hosts)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_selection_alloc_requests_no_alts(self, mock_get_hosts,
mock_get_all_states, mock_claim):
spec_obj = objects.RequestSpec(
num_instances=1,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
host_state0 = mock.Mock(spec=host_manager.HostState,
host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
cell_uuid=uuids.cell, limits={})
host_state1 = mock.Mock(spec=host_manager.HostState,
host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
cell_uuid=uuids.cell, limits={})
host_state2 = mock.Mock(spec=host_manager.HostState,
host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
cell_uuid=uuids.cell, limits={})
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
fake_alloc0 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn0},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc1 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn1},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc2 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn2},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
alloc_reqs_by_rp_uuid = {uuids.cn0: [fake_alloc0],
uuids.cn1: [fake_alloc1], uuids.cn2: [fake_alloc2]}
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries,
return_alternates=False)
sel0 = objects.Selection.from_host_state(host_state0)
expected_selection = [[sel0]]
self.assertEqual(expected_selection, selected_hosts)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
@ -633,16 +737,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
host_state = host_manager.HostState('host', 'node', uuids.cell)
host_state.limits['vcpu'] = 5
selection = objects.Selection(service_host="host", nodename="node",
cell_uuid=uuids.cell)
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
selection)
self.assertEqual(['host', 'node'],
filter_properties['retry']['hosts'][0])
self.assertEqual({'vcpu': 5}, host_state.limits)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_schedule')
def test_select_destinations_match_num_instances(self, mock_schedule):
@ -666,7 +767,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
mock_schedule.assert_called_once_with(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version)
mock.sentinel.p_sums, mock.sentinel.ar_version, False)
self.assertEqual([[fake_selection]], dests)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
@ -694,7 +795,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
mock_schedule.assert_called_once_with(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version)
mock.sentinel.p_sums, mock.sentinel.ar_version, False)
self.assertEqual([[fake_selection]], dests)
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
@ -772,7 +873,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_group=None)
dests = self.driver._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None)
instance_uuids, alloc_reqs, None, return_alternates=True)
self.assertEqual(num_instances, len(dests))
# Filtering and weighing hosts should be called num_instances + 1 times
# unless num_instances == 1.
@ -842,7 +943,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_group=None)
dests = self.driver._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None)
instance_uuids, alloc_reqs, None, return_alternates=True)
# There should be max_attempts hosts per instance (1 selected, 2 alts)
self.assertEqual(total_returned, len(dests[0]))
self.assertEqual(total_returned, len(dests[1]))
@ -892,7 +993,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_group=None)
dests = self.driver._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None)
instance_uuids, alloc_reqs, None, return_alternates=True)
self.assertEqual(num_instances, len(dests))
selected_hosts = [dest[0] for dest in dests]
# The number returned for each destination should be the lesser of the

nova/tests/unit/scheduler/test_rpcapi.py

@ -20,6 +20,7 @@ import mock
from oslo_config import cfg
from nova import context
from nova import exception as exc
from nova import objects
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
@ -76,9 +77,19 @@ class SchedulerRpcAPITestCase(test.NoDBTestCase):
fake_spec = objects.RequestSpec()
self._test_scheduler_api('select_destinations', rpc_method='call',
expected_args={'spec_obj': fake_spec,
'instance_uuids': [uuids.instance]},
'instance_uuids': [uuids.instance], 'return_objects': True,
'return_alternates': True},
spec_obj=fake_spec, instance_uuids=[uuids.instance],
version='4.4')
return_objects=True, return_alternates=True, version='4.5')
def test_select_destinations_4_4(self):
self.flags(scheduler='4.4', group='upgrade_levels')
fake_spec = objects.RequestSpec()
self._test_scheduler_api('select_destinations', rpc_method='call',
expected_args={'spec_obj': fake_spec,
'instance_uuids': [uuids.instance]}, spec_obj=fake_spec,
instance_uuids=[uuids.instance], return_objects=False,
return_alternates=False, version='4.4')
def test_select_destinations_4_3(self):
self.flags(scheduler='4.3', group='upgrade_levels')
@ -86,7 +97,22 @@ class SchedulerRpcAPITestCase(test.NoDBTestCase):
self._test_scheduler_api('select_destinations', rpc_method='call',
expected_args={'spec_obj': fake_spec},
spec_obj=fake_spec, instance_uuids=[uuids.instance],
version='4.3')
return_alternates=False, version='4.3')
def test_select_destinations_old_with_new_params(self):
self.flags(scheduler='4.4', group='upgrade_levels')
fake_spec = objects.RequestSpec()
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = scheduler_rpcapi.SchedulerAPI()
self.assertRaises(exc.SelectionObjectsWithOldRPCVersionNotSupported,
rpcapi.select_destinations, ctxt, fake_spec, ['fake_uuids'],
return_objects=True, return_alternates=True)
self.assertRaises(exc.SelectionObjectsWithOldRPCVersionNotSupported,
rpcapi.select_destinations, ctxt, fake_spec, ['fake_uuids'],
return_objects=False, return_alternates=True)
self.assertRaises(exc.SelectionObjectsWithOldRPCVersionNotSupported,
rpcapi.select_destinations, ctxt, fake_spec, ['fake_uuids'],
return_objects=True, return_alternates=False)
@mock.patch.object(objects.RequestSpec, 'to_legacy_filter_properties_dict')
@mock.patch.object(objects.RequestSpec, 'to_legacy_request_spec_dict')

nova/tests/unit/scheduler/test_scheduler.py

@ -123,9 +123,67 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, fake_version)
mock.sentinel.p_sums, fake_version, False)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
# Now call select_destinations() with True values for the params
# introduced in RPC version 4.5
select_destinations.reset_mock()
self.manager.select_destinations(None, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid],
return_objects=True, return_alternates=True)
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, fake_version, True)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocation_candidates')
def test_select_destination_return_objects(self, mock_get_ac,
mock_rfrs):
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
fake_version = "9.42"
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums, fake_version)
mock_get_ac.return_value = place_res
expected_alloc_reqs_by_rp_uuid = {
cn.uuid: [fakes.ALLOC_REQS[x]]
for x, cn in enumerate(fakes.COMPUTE_NODES)
}
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
sel_obj = objects.Selection(service_host="fake_host",
nodename="fake_node", compute_node_uuid=uuids.compute_node,
cell_uuid=uuids.cell, limits=None)
select_destinations.return_value = [[sel_obj]]
# Pass True; should get the Selection object back.
dests = self.manager.select_destinations(None, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid],
return_objects=True, return_alternates=True)
sel_host = dests[0][0]
self.assertIsInstance(sel_host, objects.Selection)
# Since both return_objects and return_alternates are True, the
# driver should have been called with True for return_alternates.
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, fake_version, True)
# Now pass False for return objects, but keep return_alternates as
# True. Verify that the manager converted the Selection object back
# to a dict.
select_destinations.reset_mock()
dests = self.manager.select_destinations(None, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid],
return_objects=False, return_alternates=True)
sel_host = dests[0]
self.assertIsInstance(sel_host, dict)
# Even though return_alternates was passed as True, since
# return_objects was False, the driver should have been called with
# return_alternates as False.
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, fake_version, False)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocation_candidates')
@ -184,7 +242,8 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
) as select_destinations:
self.manager.select_destinations(None, spec_obj=fake_spec)
select_destinations.assert_called_once_with(None, fake_spec, None,
expected_alloc_reqs_by_rp_uuid, mock.sentinel.p_sums, "42.0")
expected_alloc_reqs_by_rp_uuid, mock.sentinel.p_sums, "42.0",
False)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
# TODO(sbauza): Remove that test once the API v4 is removed
@ -210,7 +269,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, "42.0")
mock.sentinel.p_sums, "42.0", False)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
def test_update_aggregates(self):

nova/tests/unit/scheduler/test_scheduler_utils.py

@ -146,10 +146,11 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
self.assertNotIn('forced_host', filt_props)
self.assertNotIn('forced_node', filt_props)
def _test_populate_filter_props(self, host_state_obj=True,
def _test_populate_filter_props(self, selection_obj=True,
with_retry=True,
force_hosts=None,
force_nodes=None):
force_nodes=None,
no_limits=None):
if force_hosts is None:
force_hosts = []
if force_nodes is None:
@ -168,30 +169,33 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
else:
filter_properties = dict()
if host_state_obj:
class host_state(object):
host = 'fake-host'
nodename = 'fake-node'
limits = 'fake-limits'
if no_limits:
fake_limits = None
else:
host_state = dict(host='fake-host',
nodename='fake-node',
limits='fake-limits')
fake_limits = objects.SchedulerLimits(vcpu=1, disk_gb=2,
memory_mb=3, numa_topology=None)
selection = objects.Selection(service_host="fake-host",
nodename="fake-node", limits=fake_limits)
if not selection_obj:
selection = selection.to_dict()
fake_limits = fake_limits.to_dict()
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
selection)
enable_retry_force_hosts = not force_hosts or len(force_hosts) > 1
enable_retry_force_nodes = not force_nodes or len(force_nodes) > 1
if with_retry or enable_retry_force_hosts or enable_retry_force_nodes:
# So we can check for 2 hosts
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
selection)
if force_hosts:
expected_limits = None
elif no_limits:
expected_limits = {}
else:
expected_limits = 'fake-limits'
expected_limits = fake_limits
self.assertEqual(expected_limits,
filter_properties.get('limits'))
@ -207,7 +211,7 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
self._test_populate_filter_props()
def test_populate_filter_props_host_dict(self):
self._test_populate_filter_props(host_state_obj=False)
self._test_populate_filter_props(selection_obj=False)
def test_populate_filter_props_no_retry(self):
self._test_populate_filter_props(with_retry=False)
@ -226,6 +230,9 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
self._test_populate_filter_props(force_nodes=['force-node1',
'force-node2'])
def test_populate_filter_props_no_limits(self):
self._test_populate_filter_props(no_limits=True)
def test_populate_retry_exception_at_max_attempts(self):
self.flags(max_attempts=2, group='scheduler')
msg = 'The exception text was preserved!'