Merge "add dict of allocation requests to select_dests()"

Authored by Jenkins on 2017-07-20 11:00:00 +00:00; committed by Gerrit Code Review
commit f6515e7383
8 changed files with 145 additions and 45 deletions


@@ -59,7 +59,7 @@ class ChanceScheduler(driver.Scheduler):
return random.choice(hosts)
def select_destinations(self, context, spec_obj, instance_uuids,
provider_summaries):
alloc_reqs_by_rp_uuid, provider_summaries):
"""Selects random destinations."""
num_instances = spec_obj.num_instances
# NOTE(timello): Returns a list of dicts with 'host', 'nodename' and


@@ -41,9 +41,33 @@ class FilterScheduler(driver.Scheduler):
self.notifier = rpc.get_notifier('scheduler')
def select_destinations(self, context, spec_obj, instance_uuids,
provider_summaries):
alloc_reqs_by_rp_uuid, provider_summaries):
"""Returns a sorted list of HostState objects that satisfy the
supplied request_spec.
:param context: The RequestContext object
:param spec_obj: The RequestSpec object
:param instance_uuids: List of UUIDs, one for each value of the spec
object's num_instances attribute
:param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider
UUID, of the allocation requests that may
be used to claim resources against
matched hosts. If None, indicates either
the placement API wasn't reachable or
that there were no allocation requests
returned by the placement API. If the
latter, the provider_summaries will be an
empty dict, not None.
:param provider_summaries: Optional dict, keyed by resource provider
UUID, of information that will be used by
the filters/weighers in selecting matching
hosts for a request. If None, indicates that
the scheduler driver should grab all compute
node information locally and that the
Placement API is not used. If an empty dict,
indicates the Placement API returned no
potential matches for the requested
resources.
"""
self.notifier.info(
context, 'scheduler.select_destinations.start',
@@ -51,7 +75,7 @@ class FilterScheduler(driver.Scheduler):
num_instances = spec_obj.num_instances
selected_hosts = self._schedule(context, spec_obj, instance_uuids,
provider_summaries)
alloc_reqs_by_rp_uuid, provider_summaries)
# Couldn't fulfill the request_spec
if len(selected_hosts) < num_instances:
@@ -79,9 +103,34 @@ class FilterScheduler(driver.Scheduler):
dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
return selected_hosts
def _schedule(self, context, spec_obj, instance_uuids, provider_summaries):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
def _schedule(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries):
"""Returns a list of hosts that meet the required specs, ordered by
their fitness.
:param context: The RequestContext object
:param spec_obj: The RequestSpec object
:param instance_uuids: List of UUIDs, one for each value of the spec
object's num_instances attribute
:param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider
UUID, of the allocation requests that may
be used to claim resources against
matched hosts. If None, indicates either
the placement API wasn't reachable or
that there were no allocation requests
returned by the placement API. If the
latter, the provider_summaries will be an
empty dict, not None.
:param provider_summaries: Optional dict, keyed by resource provider
UUID, of information that will be used by
the filters/weighers in selecting matching
hosts for a request. If None, indicates that
the scheduler driver should grab all compute
node information locally and that the
Placement API is not used. If an empty dict,
indicates the Placement API returned no
potential matches for the requested
resources.
"""
elevated = context.elevated()
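
The docstrings added above distinguish three states for the two new arguments. As a standalone illustration, with a made-up UUID and resource figures that are not taken from the change itself, the combinations look roughly like this (note that for the "no candidates" case the manager hunk later in this change currently passes None for provider_summaries as well, per its TODO):

RP_UUID = 'c7f60b00-0000-0000-0000-000000000001'  # hypothetical resource provider (compute node) UUID

# 1. Placement unreachable: the driver grabs all compute node information
#    locally, "the old way".
alloc_reqs_by_rp_uuid, provider_summaries = None, None

# 2. Placement reachable but no candidates: the allocation requests are None
#    while provider_summaries is an empty dict, per the docstring.
alloc_reqs_by_rp_uuid, provider_summaries = None, {}

# 3. Candidates available: allocation requests grouped per resource provider
#    UUID, plus one summary entry per provider for the filters/weighers
#    (the summary value shape here is illustrative only).
alloc_reqs_by_rp_uuid = {
    RP_UUID: [
        {'allocations': [
            {'resource_provider': {'uuid': RP_UUID},
             'resources': {'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 512}},
        ]},
    ],
}
provider_summaries = {
    RP_UUID: {'resources': {'VCPU': {'capacity': 4, 'used': 1}}},
}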


@@ -19,6 +19,8 @@
Scheduler Service
"""
import collections
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
@@ -116,28 +118,40 @@ class SchedulerManager(manager.Manager):
request_spec,
filter_properties)
resources = utils.resources_from_request_spec(spec_obj)
alloc_reqs, p_sums = None, None
alloc_reqs_by_rp_uuid, provider_summaries = None, None
if self.driver.USES_ALLOCATION_CANDIDATES:
res = self.placement_client.get_allocation_candidates(resources)
# We have to handle the case that we failed to connect to the
# Placement service and the safe_connect decorator on
# get_allocation_candidates returns None.
alloc_reqs, p_sums = res if res is not None else (None, None)
if res is None:
# We have to handle the case that we failed to connect to the
# Placement service and the safe_connect decorator on
# get_allocation_candidates returns None.
alloc_reqs, provider_summaries = None, None
else:
alloc_reqs, provider_summaries = res
if not alloc_reqs:
LOG.debug("Got no allocation candidates from the Placement "
"API. This may be a temporary occurrence as compute "
"nodes start up and begin reporting inventory to "
"the Placement service.")
# TODO(jaypipes): Setting p_sums to None triggers the scheduler
# to load all compute nodes to do scheduling "the old way".
# Really, we should raise NoValidHosts here, but all functional
# tests will fall over if we do that without changing the
# PlacementFixture to load compute node inventory into the
# placement database before starting functional tests.
p_sums = None
# TODO(jaypipes): Setting provider_summaries to None triggers
# the scheduler to load all compute nodes to do scheduling "the
# old way". Really, we should raise NoValidHosts here, but all
# functional tests will fall over if we do that without
# changing the PlacementFixture to load compute node inventory
# into the placement database before starting functional tests.
provider_summaries = None
else:
# Build a dict of lists of allocation requests, keyed by
# provider UUID, so that when we attempt to claim resources for
# a host, we can grab an allocation request easily
alloc_reqs_by_rp_uuid = collections.defaultdict(list)
for ar in alloc_reqs:
for rr in ar['allocations']:
rp_uuid = rr['resource_provider']['uuid']
alloc_reqs_by_rp_uuid[rp_uuid].append(ar)
dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
p_sums)
alloc_reqs_by_rp_uuid, provider_summaries)
dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
return jsonutils.to_primitive(dest_dicts)
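
The else branch added above files each allocation request under every resource provider UUID it references, so that when the scheduler later claims resources against a chosen host it can look up a ready-made allocation request by that host's provider UUID. A minimal standalone sketch of that grouping, using made-up UUIDs and request bodies shaped like the ALLOC_REQS fixture added elsewhere in this change:

import collections
import pprint

CN1 = 'aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa'  # hypothetical compute node UUIDs
CN2 = 'bbbbbbbb-bbbb-4bbb-bbbb-bbbbbbbbbbbb'

alloc_reqs = [
    {'allocations': [{'resource_provider': {'uuid': CN1},
                      'resources': {'VCPU': 1, 'MEMORY_MB': 512}}]},
    {'allocations': [{'resource_provider': {'uuid': CN2},
                      'resources': {'VCPU': 1, 'MEMORY_MB': 512}}]},
]

# Same loop as in the manager: group each request under every provider UUID
# it touches.
alloc_reqs_by_rp_uuid = collections.defaultdict(list)
for ar in alloc_reqs:
    for rr in ar['allocations']:
        alloc_reqs_by_rp_uuid[rr['resource_provider']['uuid']].append(ar)

pprint.pprint(dict(alloc_reqs_by_rp_uuid))
# {CN1: [first request], CN2: [second request]}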


@@ -127,6 +127,23 @@ COMPUTE_NODES = [
host='fake', hypervisor_hostname='fake-hyp'),
]
ALLOC_REQS = [
{
'allocations': [
{
'resource_provider': {
'uuid': cn.uuid,
},
'resources': {
'VCPU': 1,
'MEMORY_MB': 512,
'DISK_GB': 512,
},
},
]
} for cn in COMPUTE_NODES
]
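
Each entry generated above is a single allocation request that places all of its resources on one compute node. A standalone sketch, using a minimal stand-in for the compute node objects rather than the real fixture, of the shape the comprehension produces and of the expected_alloc_reqs_by_rp_uuid mapping that the scheduler-manager tests later in this change derive from it:

import uuid

class FakeComputeNode(object):
    """Minimal stand-in for the objects in COMPUTE_NODES."""
    def __init__(self):
        self.uuid = str(uuid.uuid4())

COMPUTE_NODES = [FakeComputeNode(), FakeComputeNode()]

ALLOC_REQS = [
    {
        'allocations': [
            {
                'resource_provider': {'uuid': cn.uuid},
                'resources': {'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 512},
            },
        ],
    } for cn in COMPUTE_NODES
]

# Because each request references exactly one provider, grouping by provider
# UUID yields a one-element list per compute node, which is exactly what the
# manager tests assert via expected_alloc_reqs_by_rp_uuid.
expected_alloc_reqs_by_rp_uuid = {
    cn.uuid: [ALLOC_REQS[x]] for x, cn in enumerate(COMPUTE_NODES)
}
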
RESOURCE_PROVIDERS = [
dict(
uuid=uuidsentinel.rp1,


@@ -87,7 +87,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations,
self.context, spec_obj, [spec_obj.instance_uuid],
{})
{}, {})
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
@@ -103,13 +103,14 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(result[0].host, fake_host.host)
def _test_select_destinations(self, spec_obj):
p_sums = {}
provider_summaries = {}
for cell_hosts in self.driver.all_host_states.values():
for hs in cell_hosts:
p_sums[hs.uuid] = hs
provider_summaries[hs.uuid] = hs
return self.driver.select_destinations(
self.context, spec_obj, [spec_obj.instance_uuid], p_sums)
self.context, spec_obj, [spec_obj.instance_uuid], {},
provider_summaries)
def _get_fake_request_spec(self):
# NOTE(sbauza): Prevent to stub the Flavor.get_by_id call just by
@@ -179,14 +180,14 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = self._get_fake_host_state(x)
host_states.append(host_state)
self.driver.all_host_states = {uuids.cell: host_states}
p_sums = {hs.uuid: hs for hs in host_states}
provider_summaries = {hs.uuid: hs for hs in host_states}
def run_test():
a = timeutils.utcnow()
for x in range(requests):
self.driver.select_destinations(self.context, spec_obj,
[spec_obj.instance_uuid], p_sums)
[spec_obj.instance_uuid], {}, provider_summaries)
b = timeutils.utcnow()
c = b - a
@@ -232,12 +233,12 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
uuids.cell1: host_states_cell1,
uuids.cell2: host_states_cell2,
}
p_sums = {
provider_summaries = {
cn.uuid: cn for cn in host_states_cell1 + host_states_cell2
}
d = self.driver.select_destinations(self.context, spec_obj,
[spec_obj.instance_uuid], p_sums)
[spec_obj.instance_uuid], {}, provider_summaries)
self.assertIn(d[0].host, [hs.host for hs in host_states_cell2])


@@ -64,7 +64,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
spec_obj = objects.RequestSpec(num_instances=2, ignore_hosts=None)
dests = self.driver.select_destinations(self.context, spec_obj,
[uuids.instance1, uuids.instance2],
[uuids.instance1, uuids.instance2], {},
mock.sentinel.p_sums)
self.assertEqual(2, len(dests))
@@ -95,5 +95,5 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
spec_obj.instance_uuid = uuids.instance
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations, self.context,
spec_obj, [spec_obj.instance_uuid],
spec_obj, [spec_obj.instance_uuid], {},
mock.sentinel.p_sums)


@@ -57,7 +57,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_uuids = [uuids.instance]
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj,
instance_uuids, mock.sentinel.provider_summaries)
instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.provider_summaries)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
@@ -105,8 +106,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
getattr(uuids, 'instance%d' % x) for x in range(num_instances)
]
ctx = mock.Mock()
self.driver._schedule(ctx, spec_obj,
instance_uuids, mock.sentinel.provider_summaries)
self.driver._schedule(ctx, spec_obj, instance_uuids,
mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.provider_summaries)
# Check that _get_sorted_hosts() is called twice and that the
# second time, we pass it the hosts that were returned from
@@ -262,10 +264,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
mock_schedule.return_value = [mock.sentinel.hs1]
dests = self.driver.select_destinations(self.context, spec_obj,
mock.sentinel.instance_uuids, mock.sentinel.p_sums)
mock.sentinel.instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
mock_schedule.assert_called_once_with(self.context, spec_obj,
mock.sentinel.instance_uuids, mock.sentinel.p_sums)
mock.sentinel.instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
self.assertEqual([mock.sentinel.hs1], dests)
@@ -290,7 +294,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations, self.context, spec_obj,
mock.sentinel.instance_uuids, mock.sentinel.p_sums)
mock.sentinel.instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
# Verify that the host state object has been marked as not updated so
# it's picked up in the next pull from the DB for compute node objects
@@ -309,7 +314,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_uuid=uuids.instance)
self.driver.select_destinations(self.context, spec_obj,
[uuids.instance], {})
[uuids.instance], {}, None)
expected = [
mock.call(self.context, 'scheduler.select_destinations.start',


@@ -99,14 +99,19 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
def test_select_destination(self, mock_get_ac, mock_rfrs):
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
place_res = (mock.sentinel.alloc_reqs, mock.sentinel.p_sums)
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums)
mock_get_ac.return_value = place_res
expected_alloc_reqs_by_rp_uuid = {
cn.uuid: [fakes.ALLOC_REQS[x]]
for x, cn in enumerate(fakes.COMPUTE_NODES)
}
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], mock.sentinel.p_sums)
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@@ -127,7 +132,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.manager.select_destinations(None, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], None)
[fake_spec.instance_uuid], None, None)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@@ -149,7 +154,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.context, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(
self.context, fake_spec, [fake_spec.instance_uuid], None)
self.context, fake_spec, [fake_spec.instance_uuid], None, None)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@@ -171,7 +176,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.manager.select_destinations(None, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], None)
[fake_spec.instance_uuid], None, None)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@@ -179,13 +184,17 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
'get_allocation_candidates')
def test_select_destination_with_4_3_client(self, mock_get_ac, mock_rfrs):
fake_spec = objects.RequestSpec()
place_res = (mock.sentinel.alloc_reqs, mock.sentinel.p_sums)
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums)
mock_get_ac.return_value = place_res
expected_alloc_reqs_by_rp_uuid = {
cn.uuid: [fakes.ALLOC_REQS[x]]
for x, cn in enumerate(fakes.COMPUTE_NODES)
}
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, spec_obj=fake_spec)
select_destinations.assert_called_once_with(None, fake_spec, None,
mock.sentinel.p_sums)
expected_alloc_reqs_by_rp_uuid, mock.sentinel.p_sums)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
# TODO(sbauza): Remove that test once the API v4 is removed
@@ -198,15 +207,20 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
from_primitives.return_value = fake_spec
place_res = (mock.sentinel.alloc_reqs, mock.sentinel.p_sums)
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums)
mock_get_ac.return_value = place_res
expected_alloc_reqs_by_rp_uuid = {
cn.uuid: [fakes.ALLOC_REQS[x]]
for x, cn in enumerate(fakes.COMPUTE_NODES)
}
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, request_spec='fake_spec',
filter_properties='fake_props',
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], mock.sentinel.p_sums)
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
def test_update_aggregates(self):