placement: scheduler uses allocation candidates

This patch replaces the scheduler's use of GET
/resource_providers?resources=XXX with GET
/allocation_candidates?resources=XXX.

In doing so, we move the interaction with the placement API out of the
scheduler driver interface and up into the scheduler manager. This
allows us to make fewer changes to the underlying HostManager and
SchedulerDriver interfaces and isolate communication with the placement
API in a single place.

The provider_summaries part of the response from GET
/allocation_candidates is used to generate the UUIDs that winnow the
number of compute nodes retrieved by the filter scheduler during
scheduling. Following patches will add support for actually doing the
claim from the scheduler against one or more resource providers by
examining the allocation_requests part of the HTTP response and picking
one that contains the host the scheduler selected during its _schedule()
loop.
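
For illustration, a GET /allocation_candidates response at Placement
microversion 1.10 has roughly the shape sketched below (the values and UUIDs
are made up); the scheduler manager only needs the provider_summaries keys to
build the list of candidate compute node UUIDs:

    # Hypothetical 1.10 response body, written as a Python dict:
    candidates = {
        'allocation_requests': [
            {'allocations': [
                {'resource_provider': {'uuid': 'cn1-uuid'},
                 'resources': {'VCPU': 1, 'MEMORY_MB': 512}},
            ]},
        ],
        'provider_summaries': {
            'cn1-uuid': {'resources': {
                'VCPU': {'capacity': 16, 'used': 1},
                'MEMORY_MB': {'capacity': 8192, 'used': 512},
            }},
        },
    }
    # The provider_summaries keys are the candidate compute node UUIDs.
    compute_uuids = list(candidates['provider_summaries'].keys())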

Change-Id: I1c0bd2987dcbc38f23b71db2bc8e3267f85168c8
blueprint: placement-allocation-requests
Jay Pipes 2017-06-21 16:19:55 -04:00
parent dcea3ff687
commit 48268c73e3
21 changed files with 250 additions and 117 deletions


@ -195,6 +195,11 @@ Pike (16.0.0)
* The ``nova.scheduler.filter_scheduler.FilterScheduler`` in Pike will
no longer fall back to not using the Placement Service, even if older
computes are running in the deployment.
* The scheduler now requests allocation candidates from the Placement service
during scheduling. The allocation candidates information was introduced in
the Placement API 1.10 microversion, so you should upgrade the placement
service **before** the Nova scheduler service so that the scheduler can take
advantage of the allocation candidate information.
REST API


@ -53,6 +53,8 @@ class CachingScheduler(filter_scheduler.FilterScheduler):
refreshed.
"""
USES_ALLOCATION_CANDIDATES = False
def __init__(self, *args, **kwargs):
super(CachingScheduler, self).__init__(*args, **kwargs)
self.all_host_states = None
@ -65,7 +67,7 @@ class CachingScheduler(filter_scheduler.FilterScheduler):
# fetch the list of hosts.
self.all_host_states = self._get_up_hosts(elevated)
def _get_all_host_states(self, context, spec_obj):
def _get_all_host_states(self, context, spec_obj, provider_summaries):
"""Called from the filter scheduler, in a template pattern."""
if self.all_host_states is None:
# NOTE(johngarbutt) We only get here when a scheduler request


@ -32,6 +32,8 @@ CONF = nova.conf.CONF
class ChanceScheduler(driver.Scheduler):
"""Implements Scheduler as a random node selector."""
USES_ALLOCATION_CANDIDATES = False
def _filter_hosts(self, hosts, spec_obj):
"""Filter a list of hosts based on RequestSpec."""
@ -55,7 +57,8 @@ class ChanceScheduler(driver.Scheduler):
return random.choice(hosts)
def select_destinations(self, context, spec_obj, instance_uuids):
def select_destinations(self, context, spec_obj, instance_uuids,
provider_summaries):
"""Selects random destinations."""
num_instances = spec_obj.num_instances
# NOTE(timello): Returns a list of dicts with 'host', 'nodename' and


@ -264,37 +264,47 @@ class SchedulerReportClient(object):
url,
endpoint_filter=self.ks_filter, raise_exc=False, **kwargs)
# TODO(sbauza): Change that poor interface into passing a rich versioned
# object that would provide the ResourceProvider requirements.
@safe_connect
def get_filtered_resource_providers(self, filters):
"""Returns a list of ResourceProviders matching the requirements
expressed by the filters argument, which can include a dict named
'resources' where amounts are keyed by resource class names.
def get_allocation_candidates(self, resources):
"""Returns a tuple of (allocation_requests, provider_summaries).
eg. filters = {'resources': {'VCPU': 1}}
The allocation requests are a collection of potential JSON objects that
can be passed to the PUT /allocations/{consumer_uuid} Placement REST
API to claim resources against one or more resource providers that meet
the requested resource constraints.
The provider summaries value is a dict, keyed by resource provider UUID, of
inventory and capacity information for any resource provider involved
in the allocation requests.
:returns: A tuple with a list of allocation request dicts and a dict of
provider information or (None, None) if the request failed
:param resources: A dict, keyed by resource class name, of requested
amounts of those resources
"""
resources = filters.pop("resources", None)
if resources:
resource_query = ",".join(sorted("%s:%s" % (rc, amount)
for (rc, amount) in resources.items()))
filters['resources'] = resource_query
resp = self.get("/resource_providers?%s" % parse.urlencode(filters),
version='1.4')
resource_query = ",".join(
sorted("%s:%s" % (rc, amount)
for (rc, amount) in resources.items()))
qs_params = {
'resources': resource_query,
}
url = "/allocation_candidates?%s" % parse.urlencode(qs_params)
resp = self.get(url, version='1.10')
if resp.status_code == 200:
data = resp.json()
return data.get('resource_providers', [])
else:
msg = _LE("Failed to retrieve filtered list of resource providers "
"from placement API for filters %(filters)s. "
"Got %(status_code)d: %(err_text)s.")
args = {
'filters': filters,
'status_code': resp.status_code,
'err_text': resp.text,
}
LOG.error(msg, args)
return None
return data['allocation_requests'], data['provider_summaries']
msg = ("Failed to retrieve allocation candidates from placement API "
"for filters %(resources)s. Got %(status_code)d: %(err_text)s.")
args = {
'resources': resources,
'status_code': resp.status_code,
'err_text': resp.text,
}
LOG.error(msg, args)
return None, None
@safe_connect
def _get_provider_aggregates(self, rp_uuid):
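
As a minimal usage sketch, assuming a scheduler-side caller with access to the
report client (the resource amounts below are arbitrary), the new method can
be driven like this:

    # Hedged example; SchedulerClient/reportclient are used the same way
    # elsewhere in this patch, everything else here is illustrative.
    from nova.scheduler import client as scheduler_client

    reportclient = scheduler_client.SchedulerClient().reportclient
    resources = {'VCPU': 1, 'MEMORY_MB': 2048}
    alloc_reqs, p_sums = reportclient.get_allocation_candidates(resources)
    if alloc_reqs is None:
        # Non-200 from placement, e.g. the service does not speak 1.10 yet.
        print('allocation candidates request failed')
    elif not alloc_reqs:
        # 200 response, but no providers could satisfy the requested amounts.
        print('no allocation candidates found')
    else:
        print('candidate providers: %s' % list(p_sums.keys()))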


@ -35,6 +35,12 @@ CONF = nova.conf.CONF
class Scheduler(object):
"""The base class that all Scheduler classes should inherit from."""
USES_ALLOCATION_CANDIDATES = True
"""Indicates that the scheduler driver calls the Placement API for
allocation candidates and uses those allocation candidates in its
decision-making.
"""
def __init__(self):
self.host_manager = driver.DriverManager(
"nova.scheduler.host_manager",
@ -55,7 +61,8 @@ class Scheduler(object):
if self.servicegroup_api.service_is_up(service)]
@abc.abstractmethod
def select_destinations(self, context, spec_obj, instance_uuids):
def select_destinations(self, context, spec_obj, instance_uuids,
provider_summaries):
"""Returns a list of HostState objects that have been chosen by the
scheduler driver, one for each requested instance
(spec_obj.num_instances)


@ -28,10 +28,7 @@ import nova.conf
from nova import exception
from nova.i18n import _
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import driver
from nova.scheduler import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@ -42,12 +39,9 @@ class FilterScheduler(driver.Scheduler):
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
self.notifier = rpc.get_notifier('scheduler')
# TODO(sbauza): It seems weird that we load a scheduler client for
# the FilterScheduler but it will be the PlacementClient later on once
# we split the needed methods into a separate library.
self.scheduler_client = scheduler_client.SchedulerClient()
def select_destinations(self, context, spec_obj, instance_uuids):
def select_destinations(self, context, spec_obj, instance_uuids,
provider_summaries):
"""Returns a sorted list of HostState objects that satisfy the
supplied request_spec.
"""
@ -56,7 +50,8 @@ class FilterScheduler(driver.Scheduler):
dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
num_instances = spec_obj.num_instances
selected_hosts = self._schedule(context, spec_obj, instance_uuids)
selected_hosts = self._schedule(context, spec_obj, instance_uuids,
provider_summaries)
# Couldn't fulfill the request_spec
if len(selected_hosts) < num_instances:
@ -84,7 +79,7 @@ class FilterScheduler(driver.Scheduler):
dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
return [host.obj for host in selected_hosts]
def _schedule(self, context, spec_obj, instance_uuids):
def _schedule(self, context, spec_obj, instance_uuids, provider_summaries):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
@ -98,7 +93,8 @@ class FilterScheduler(driver.Scheduler):
# Note: remember, we are using an iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
hosts = self._get_all_host_states(elevated, spec_obj)
hosts = self._get_all_host_states(elevated, spec_obj,
provider_summaries)
selected_hosts = []
num_instances = spec_obj.num_instances
@ -134,19 +130,15 @@ class FilterScheduler(driver.Scheduler):
spec_obj.instance_group.obj_reset_changes(['hosts'])
return selected_hosts
def _get_all_host_states(self, context, spec_obj):
def _get_all_host_states(self, context, spec_obj, provider_summaries):
"""Template method, so a subclass can implement caching."""
resources = utils.resources_from_request_spec(spec_obj)
filters = {'resources': resources}
reportclient = self.scheduler_client.reportclient
rps = reportclient.get_filtered_resource_providers(filters)
# NOTE(sbauza): In case the Placement service is not running yet or
# when returning an exception, we wouldn't get any resource providers.
# If so, let's return an empty list so _schedule would raise a
# NoValidHosts.
if not rps:
return []
compute_uuids = [rp['uuid'] for rp in rps]
# NOTE(jaypipes): None is treated differently from an empty dict. We
# pass None when we want to grab all compute nodes (for instance, when
# using the caching scheduler). We pass an empty dict when the Placement
# API found no providers that match the requested constraints.
compute_uuids = None
if provider_summaries is not None:
compute_uuids = list(provider_summaries.keys())
return self.host_manager.get_host_states_by_uuids(context,
compute_uuids,
spec_obj)
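
To restate the None-versus-empty-dict distinction above as a runnable sketch
(the helper name is made up for illustration):

    # None  -> no winnowing: load every compute node (caching scheduler, or a
    #          placement service that could not supply candidates).
    # {}    -> placement found nothing: return no hosts, so
    #          select_destinations() ends up raising NoValidHost.
    # {...} -> only the listed resource providers are considered.
    def compute_uuids_from_summaries(provider_summaries):
        if provider_summaries is None:
            return None
        return list(provider_summaries.keys())

    assert compute_uuids_from_summaries(None) is None
    assert compute_uuids_from_summaries({}) == []
    assert compute_uuids_from_summaries({'cn1': {}}) == ['cn1']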


@ -109,6 +109,7 @@ class HostState(object):
def __init__(self, host, node, cell_uuid):
self.host = host
self.nodename = node
self.uuid = None
self._lock_name = (host, node)
# Mutable available resources.
@ -196,6 +197,8 @@ class HostState(object):
return
all_ram_mb = compute.memory_mb
self.uuid = compute.uuid
# Assume virtual size is all consumed by instances if use qcow2 disk.
free_gb = compute.free_disk_gb
least_gb = compute.disk_available_least


@ -32,6 +32,8 @@ from nova import manager
from nova import objects
from nova.objects import host_mapping as host_mapping_obj
from nova import quota
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils
LOG = logging.getLogger(__name__)
@ -57,6 +59,8 @@ class SchedulerManager(manager.Manager):
_sentinel = object()
def __init__(self, scheduler_driver=None, *args, **kwargs):
client = scheduler_client.SchedulerClient()
self.placement_client = client.reportclient
if not scheduler_driver:
scheduler_driver = CONF.scheduler.driver
self.driver = driver.DriverManager(
@ -103,7 +107,26 @@ class SchedulerManager(manager.Manager):
spec_obj = objects.RequestSpec.from_primitives(ctxt,
request_spec,
filter_properties)
dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids)
resources = utils.resources_from_request_spec(spec_obj)
alloc_reqs, p_sums = None, None
if self.driver.USES_ALLOCATION_CANDIDATES:
res = self.placement_client.get_allocation_candidates(resources)
alloc_reqs, p_sums = res
if not alloc_reqs:
LOG.debug("Got no allocation candidates from the Placement "
"API. This may be a temporary occurrence as compute "
"nodes start up and begin reporting inventory to "
"the Placement service.")
# TODO(jaypipes): Setting p_sums to None triggers the scheduler
# to load all compute nodes to do scheduling "the old way".
# Really, we should raise NoValidHosts here, but all functional
# tests will fall over if we do that without changing the
# PlacementFixture to load compute node inventory into the
# placement database before starting functional tests.
p_sums = None
dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
p_sums)
dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
return jsonutils.to_primitive(dest_dicts)
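
The allocation_requests half of the tuple is carried but not yet consumed
here; per the commit message, follow-up patches will use it to claim
resources. A hedged sketch of what that selection step could look like (the
helper name and structure are illustrative, not the final API):

    # Pick the first allocation request that allocates against the resource
    # provider backing the host chosen by the driver's _schedule() loop.
    def _pick_allocation_request(alloc_reqs, selected_rp_uuid):
        for ar in alloc_reqs:
            rp_uuids = set(alloc['resource_provider']['uuid']
                           for alloc in ar['allocations'])
            if selected_rp_uuid in rp_uuids:
                return ar
        return None
    # The chosen request body would then be PUT to
    # /allocations/{consumer_uuid} to claim the resources.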


@ -80,6 +80,7 @@ class _IntegratedTestBase(test.TestCase):
self._setup_services()
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.useFixture(nova_fixtures.PlacementFixture())
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)


@ -80,6 +80,7 @@ class NotificationSampleTestBase(test.TestCase,
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.useFixture(nova_fixtures.PlacementFixture())
self.start_service('conductor')
self.start_service('scheduler')


@ -34,6 +34,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
super(TestSerialConsoleLiveMigrate, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
# Replace libvirt with fakelibvirt


@ -46,6 +46,7 @@ class TestListServersIpFilter(test.TestCase):
self.start_service('consoleauth')
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.useFixture(nova_fixtures.PlacementFixture())
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']


@ -39,6 +39,7 @@ class ServersPreSchedulingTestCase(test.TestCase):
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NoopConductorFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))


@ -352,47 +352,40 @@ class TestProviderOperations(SchedulerReportClientTestCase):
mock.sentinel.name,
)
def test_get_filtered_resource_providers(self):
uuid = uuids.compute_node
def test_get_allocation_candidates(self):
resp_mock = mock.Mock(status_code=200)
json_data = {
'resource_providers': [
{'uuid': uuid,
'name': uuid,
'generation': 42}
],
'allocation_requests': mock.sentinel.alloc_reqs,
'provider_summaries': mock.sentinel.p_sums,
}
filters = {'resources': {'VCPU': 1, 'MEMORY_MB': 1024}}
resources = {'VCPU': 1, 'MEMORY_MB': 1024}
resp_mock.json.return_value = json_data
self.ks_sess_mock.get.return_value = resp_mock
result = self.client.get_filtered_resource_providers(filters)
alloc_reqs, p_sums = self.client.get_allocation_candidates(resources)
expected_provider_dict = dict(
uuid=uuid,
name=uuid,
generation=42,
)
expected_url = '/resource_providers?%s' % parse.urlencode(
expected_url = '/allocation_candidates?%s' % parse.urlencode(
{'resources': 'MEMORY_MB:1024,VCPU:1'})
self.ks_sess_mock.get.assert_called_once_with(
expected_url, endpoint_filter=mock.ANY, raise_exc=False,
headers={'OpenStack-API-Version': 'placement 1.4'})
self.assertEqual(expected_provider_dict, result[0])
headers={'OpenStack-API-Version': 'placement 1.10'})
self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
self.assertEqual(mock.sentinel.p_sums, p_sums)
def test_get_filtered_resource_providers_not_found(self):
def test_get_allocation_candidates_not_found(self):
# Ensure get_allocation_candidates() returns (None, None) when the
# placement API does not return a 200 for the request
resp_mock = mock.Mock(status_code=404)
self.ks_sess_mock.get.return_value = resp_mock
result = self.client.get_filtered_resource_providers({'foo': 'bar'})
res = self.client.get_allocation_candidates({'foo': 'bar'})
expected_url = '/resource_providers?foo=bar'
expected_url = '/allocation_candidates?resources=foo%3Abar'
self.ks_sess_mock.get.assert_called_once_with(
expected_url, endpoint_filter=mock.ANY, raise_exc=False,
headers={'OpenStack-API-Version': 'placement 1.4'})
self.assertIsNone(result)
headers={'OpenStack-API-Version': 'placement 1.10'})
self.assertIsNone(res[0])
self.assertIsNone(res[1])
def test_get_resource_provider_found(self):
# Ensure _get_resource_provider() returns a dict of resource provider


@ -73,6 +73,7 @@ NUMA_TOPOLOGIES_W_HT = [
COMPUTE_NODES = [
objects.ComputeNode(
uuid=uuidsentinel.cn1,
id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=None, free_ram_mb=512, vcpus_used=1,
free_disk_gb=512, local_gb_used=0,
@ -84,6 +85,7 @@ COMPUTE_NODES = [
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
objects.ComputeNode(
uuid=uuidsentinel.cn2,
id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_gb=1024, local_gb_used=0,
@ -95,6 +97,7 @@ COMPUTE_NODES = [
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
objects.ComputeNode(
uuid=uuidsentinel.cn3,
id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
free_disk_gb=3072, local_gb_used=0,
@ -106,6 +109,7 @@ COMPUTE_NODES = [
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
objects.ComputeNode(
uuid=uuidsentinel.cn4,
id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_gb=8888, local_gb_used=0,
@ -118,6 +122,7 @@ COMPUTE_NODES = [
disk_allocation_ratio=1.0),
# Broken entry
objects.ComputeNode(
uuid=uuidsentinel.cn5,
id=5, local_gb=1024, memory_mb=1024, vcpus=1,
host='fake', hypervisor_hostname='fake-hyp'),
]


@ -49,7 +49,8 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
def test_get_all_host_states_returns_cached_value(self, mock_up_hosts):
self.driver.all_host_states = {uuids.cell: []}
self.driver._get_all_host_states(self.context, None)
self.driver._get_all_host_states(self.context, None,
mock.sentinel.provider_uuids)
self.assertFalse(mock_up_hosts.called)
self.assertEqual({uuids.cell: []}, self.driver.all_host_states)
@ -60,7 +61,8 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = self._get_fake_host_state()
mock_up_hosts.return_value = {uuids.cell: [host_state]}
result = self.driver._get_all_host_states(self.context, None)
result = self.driver._get_all_host_states(self.context, None,
mock.sentinel.provider_uuids)
self.assertTrue(mock_up_hosts.called)
self.assertEqual({uuids.cell: [host_state]},
@ -84,7 +86,8 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations,
self.context, spec_obj, [spec_obj.instance_uuid])
self.context, spec_obj, [spec_obj.instance_uuid],
{})
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
@ -100,8 +103,13 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(result[0].host, fake_host.host)
def _test_select_destinations(self, spec_obj):
p_sums = {}
for cell_hosts in self.driver.all_host_states.values():
for hs in cell_hosts:
p_sums[hs.uuid] = hs
return self.driver.select_destinations(
self.context, spec_obj, [spec_obj.instance_uuid])
self.context, spec_obj, [spec_obj.instance_uuid], p_sums)
def _get_fake_request_spec(self):
# NOTE(sbauza): Prevent to stub the Flavor.get_by_id call just by
@ -140,6 +148,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
'host_%s' % index,
'node_%s' % index,
uuids.cell)
host_state.uuid = getattr(uuids, 'host_%s' % index)
host_state.free_ram_mb = 50000
host_state.total_usable_ram_mb = 50000
host_state.free_disk_mb = 4096
@ -170,13 +179,14 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = self._get_fake_host_state(x)
host_states.append(host_state)
self.driver.all_host_states = {uuids.cell: host_states}
p_sums = {hs.uuid: hs for hs in host_states}
def run_test():
a = timeutils.utcnow()
for x in range(requests):
self.driver.select_destinations(self.context, spec_obj,
[spec_obj.instance_uuid])
[spec_obj.instance_uuid], p_sums)
b = timeutils.utcnow()
c = b - a
@ -222,8 +232,12 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
uuids.cell1: host_states_cell1,
uuids.cell2: host_states_cell2,
}
p_sums = {
cn.uuid: cn for cn in host_states_cell1 + host_states_cell2
}
d = self.driver.select_destinations(self.context, spec_obj,
[spec_obj.instance_uuid])
[spec_obj.instance_uuid], p_sums)
self.assertIn(d[0].host, [hs.host for hs in host_states_cell2])


@ -64,7 +64,8 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
spec_obj = objects.RequestSpec(num_instances=2, ignore_hosts=None)
dests = self.driver.select_destinations(self.context, spec_obj,
[uuids.instance1, uuids.instance2])
[uuids.instance1, uuids.instance2],
mock.sentinel.p_sums)
self.assertEqual(2, len(dests))
(host, node) = (dests[0].host, dests[0].nodename)
@ -94,4 +95,5 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
spec_obj.instance_uuid = uuids.instance
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations, self.context,
spec_obj, [spec_obj.instance_uuid])
spec_obj, [spec_obj.instance_uuid],
mock.sentinel.p_sums)


@ -44,16 +44,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
@mock.patch('nova.objects.InstanceList.get_by_host')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_filtered_resource_providers',
return_value=fakes.RESOURCE_PROVIDERS)
@mock.patch('nova.objects.Service.get_minimum_version',
return_value=objects.service.SERVICE_VERSION)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_schedule_happy_day(self, mock_get_extra, mock_get_mv,
mock_get_rps, mock_get_all,
mock_get_all,
mock_by_host, mock_get_by_binary):
"""Make sure there's nothing glaringly wrong with _schedule()
by doing a happy day pass through.
@ -86,8 +83,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
with mock.patch.object(self.driver.host_manager,
'get_filtered_hosts') as mock_get_hosts:
mock_get_hosts.side_effect = fake_get_filtered_hosts
provider_uuids = {cn.uuid: cn for cn in fakes.COMPUTE_NODES}
weighed_hosts = self.driver._schedule(self.context, spec_obj,
[uuids.instance])
[uuids.instance], provider_uuids)
self.assertEqual(len(weighed_hosts), 10)
for weighed_host in weighed_hosts:
@ -125,16 +123,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
@mock.patch('nova.objects.InstanceList.get_by_host')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_filtered_resource_providers',
return_value=fakes.RESOURCE_PROVIDERS)
@mock.patch('nova.objects.Service.get_minimum_version',
return_value=objects.service.SERVICE_VERSION)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_schedule_host_pool(self, mock_get_extra, mock_get_mv,
mock_get_rps, mock_get_all,
mock_get_all,
mock_by_host, mock_get_by_binary):
"""Make sure the host_subset_size property works properly."""
@ -157,8 +152,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
with mock.patch.object(self.driver.host_manager,
'get_filtered_hosts') as mock_get_hosts:
mock_get_hosts.side_effect = fake_get_filtered_hosts
provider_uuids = {cn.uuid: cn for cn in fakes.COMPUTE_NODES}
hosts = self.driver._schedule(self.context, spec_obj,
[uuids.instance])
[uuids.instance], provider_uuids)
# one host should be chosen
self.assertEqual(len(hosts), 1)
@ -168,16 +164,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
@mock.patch('nova.objects.InstanceList.get_by_host')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_filtered_resource_providers',
return_value=fakes.RESOURCE_PROVIDERS)
@mock.patch('nova.objects.Service.get_minimum_version',
return_value=objects.service.SERVICE_VERSION)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_schedule_large_host_pool(self, mock_get_extra, mock_get_mv,
mock_get_rps, mock_get_all,
mock_get_all,
mock_by_host, mock_get_by_binary):
"""Hosts should still be chosen if pool size
is larger than number of filtered hosts.
@ -202,8 +195,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
with mock.patch.object(self.driver.host_manager,
'get_filtered_hosts') as mock_get_hosts:
mock_get_hosts.side_effect = fake_get_filtered_hosts
provider_uuids = {cn.uuid: cn for cn in fakes.COMPUTE_NODES}
hosts = self.driver._schedule(self.context, spec_obj,
[uuids.instance])
[uuids.instance], provider_uuids)
# one host should be chosen
self.assertEqual(len(hosts), 1)
@ -213,16 +207,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
return_value=fakes.SERVICES)
@mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_filtered_resource_providers',
return_value=fakes.RESOURCE_PROVIDERS)
@mock.patch('nova.objects.Service.get_minimum_version',
return_value=objects.service.SERVICE_VERSION)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_schedule_chooses_best_host(self, mock_get_extra, mock_get_mv,
mock_get_rps, mock_cn_get_all,
mock_cn_get_all,
mock_get_by_binary,
mock_get_inst_info):
"""If host_subset_size is 1, the largest host with greatest weight
@ -258,8 +249,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
with mock.patch.object(self.driver.host_manager,
'get_filtered_hosts') as mock_get_hosts:
mock_get_hosts.side_effect = fake_get_filtered_hosts
provider_uuids = {cn.uuid: cn for cn in fakes.COMPUTE_NODES}
hosts = self.driver._schedule(self.context, spec_obj,
[uuids.instance])
[uuids.instance], provider_uuids)
# one host should be chosen
self.assertEqual(1, len(hosts))
@ -271,14 +263,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
@mock.patch('nova.objects.InstanceList.get_by_host')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_filtered_resource_providers',
return_value=fakes.RESOURCE_PROVIDERS)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_select_destinations(self, mock_get_extra,
mock_get_rps, mock_get_all,
mock_get_all,
mock_by_host, mock_get_by_binary):
"""select_destinations is basically a wrapper around _schedule().
@ -319,8 +308,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
with mock.patch.object(self.driver.host_manager,
'get_filtered_hosts') as mock_get_hosts:
mock_get_hosts.side_effect = fake_get_filtered_hosts
p_sums = {
cn.uuid: cn
for cn in fakes.COMPUTE_NODES
}
dests = self.driver.select_destinations(self.context, spec_obj,
instance_uuids)
instance_uuids, p_sums)
(host, node) = (dests[0].host, dests[0].nodename)
self.assertEqual(host, selected_hosts[0])
@ -339,7 +332,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_uuid=uuids.instance)
self.driver.select_destinations(self.context, spec_obj,
[uuids.instance])
[uuids.instance], {})
expected = [
mock.call(self.context, 'scheduler.select_destinations.start',
@ -353,7 +346,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
mock_schedule.return_value = []
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations, self.context,
objects.RequestSpec(num_instances=1), [uuids.instance])
objects.RequestSpec(num_instances=1), [uuids.instance], {})
def test_select_destinations_no_valid_host_not_enough(self):
# Tests that we have fewer hosts available than number of instances
@ -364,7 +357,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
try:
self.driver.select_destinations(
self.context, objects.RequestSpec(num_instances=3),
[uuids.instance])
[uuids.instance], {})
self.fail('Expected NoValidHost to be raised.')
except exception.NoValidHost as e:
# Make sure that we provided a reason why NoValidHost.


@ -1078,6 +1078,7 @@ class HostStateTestCase(test.NoDBTestCase):
hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
compute = objects.ComputeNode(
uuid=uuids.cn1,
stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
disk_available_least=None,
@ -1121,6 +1122,7 @@ class HostStateTestCase(test.NoDBTestCase):
hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
compute = objects.ComputeNode(
uuid=uuids.cn1,
stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
disk_available_least=None,
@ -1154,6 +1156,7 @@ class HostStateTestCase(test.NoDBTestCase):
hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
compute = objects.ComputeNode(
uuid=uuids.cn1,
stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
disk_available_least=None,
@ -1313,6 +1316,7 @@ class HostStateTestCase(test.NoDBTestCase):
]
hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
compute = objects.ComputeNode(
uuid=uuids.cn1,
metrics=jsonutils.dumps(metrics),
memory_mb=0, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,


@ -90,36 +90,99 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
manager = self.manager
self.assertIsInstance(manager.driver, self.driver_cls)
def test_select_destination(self):
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocation_candidates')
def test_select_destination(self, mock_get_ac, mock_rfrs):
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
place_res = (mock.sentinel.alloc_reqs, mock.sentinel.p_sums)
mock_get_ac.return_value = place_res
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid])
[fake_spec.instance_uuid], mock.sentinel.p_sums)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
def test_select_destination_with_4_3_client(self):
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocation_candidates')
def test_select_destination_old_placement(self, mock_get_ac, mock_rfrs):
"""Tests that we will pass None for the provider_summaries parameter to
the scheduler driver select_destinations() method when the scheduler
report client's get_allocation_candidates() returns (None, None), as it
would if the placement service has not been upgraded before the scheduler.
"""
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
place_res = (None, None)
mock_get_ac.return_value = place_res
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], None)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocation_candidates')
def test_select_destination_no_candidates(self, mock_get_ac, mock_rfrs):
"""Tests that we will pass None for the provider_summaries parameter to
the scheduler driver select_destinations() method when the scheduler
report client's get_allocation_candidates() returns ([], {}), as it
would if the placement service has not yet had compute nodes populate
inventory.
"""
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
place_res = ([], {})
mock_get_ac.return_value = place_res
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, spec_obj=fake_spec,
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], None)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocation_candidates')
def test_select_destination_with_4_3_client(self, mock_get_ac, mock_rfrs):
fake_spec = objects.RequestSpec()
place_res = (mock.sentinel.alloc_reqs, mock.sentinel.p_sums)
mock_get_ac.return_value = place_res
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, spec_obj=fake_spec)
select_destinations.assert_called_once_with(None, fake_spec, None)
select_destinations.assert_called_once_with(None, fake_spec, None,
mock.sentinel.p_sums)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
# TODO(sbauza): Remove that test once the API v4 is removed
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'get_allocation_candidates')
@mock.patch.object(objects.RequestSpec, 'from_primitives')
def test_select_destination_with_old_client(self, from_primitives):
def test_select_destination_with_old_client(self, from_primitives,
mock_get_ac, mock_rfrs):
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
from_primitives.return_value = fake_spec
place_res = (mock.sentinel.alloc_reqs, mock.sentinel.p_sums)
mock_get_ac.return_value = place_res
with mock.patch.object(self.manager.driver, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, request_spec='fake_spec',
filter_properties='fake_props',
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid])
[fake_spec.instance_uuid], mock.sentinel.p_sums)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
def test_update_aggregates(self):
with mock.patch.object(self.manager.driver.host_manager,


@ -0,0 +1,9 @@
---
upgrade:
- |
The scheduler now requests allocation candidates from the Placement
service during scheduling. The allocation candidates information
was introduced in the Placement API 1.10 microversion, so you should
upgrade the placement service before the Nova scheduler service so that
the scheduler can take advantage of the allocation candidate
information.