Return Selection objects from the scheduler driver

This changes the value returned by the scheduler driver's
select_destinations() to a list of lists of Selection objects. It does
not yet change the value returned by the scheduler manager to the
conductor; that will be done in the next patch in the series, since
that requires an RPC change.

During review it was noticed that the signature of the abstract
scheduler driver class was not updated when the 'alloc_reqs_by_rp_uuid'
parameter was added in e041fddeb0, so I've updated it here to make all
driver signatures the same.

Blueprint: return-alternate-hosts

Change-Id: I9f864455c69e1355a3cf06d7ba8b98fa3bcf619c
Author: EdLeafe, 2017-07-21 21:16:42 +00:00 (committed by Ed Leafe)
Parent: 638e8d3254
Commit: b33cfe8bb8
10 changed files with 343 additions and 201 deletions
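For orientation before the diffs (not part of the commit), here is a minimal sketch of the data shape the driver now returns, using a hypothetical FakeSelection stand-in for nova.objects.Selection: one inner list per requested instance, holding the chosen host's Selection followed by zero or more alternates, while the manager still flattens it to one entry per instance until the RPC change lands.

    from dataclasses import dataclass
    from typing import List, Optional


    @dataclass
    class FakeSelection:
        """Illustrative stand-in for nova.objects.Selection."""
        service_host: str
        nodename: str
        cell_uuid: str
        allocation_request: Optional[str] = None
        allocation_request_version: Optional[str] = None


    # One inner list per instance: [chosen, alternate1, ..., alternateN].
    selections: List[List[FakeSelection]] = [
        [FakeSelection("host1", "node1", "cell-a"),   # chosen for instance 1
         FakeSelection("host2", "node2", "cell-a")],  # alternate for instance 1
        [FakeSelection("host3", "node3", "cell-a")],  # chosen for instance 2
    ]

    # Until the follow-up RPC change, the scheduler manager flattens this back
    # to one (converted) entry per instance before returning to the conductor.
    chosen_only = [per_instance[0] for per_instance in selections]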


@ -27,6 +27,7 @@ from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.scheduler import driver
CONF = nova.conf.CONF
@ -64,8 +65,8 @@ class ChanceScheduler(driver.Scheduler):
msg = _("Could not find another compute")
raise exception.NoValidHost(reason=msg)
# Note that we don't claim in the chance scheduler
num_instances = len(instance_uuids)
selected_host_lists = []
# If possible, we'd like to return distinct hosts for each instance.
# But when there are fewer available hosts than requested instances, we
# will need to return some duplicates.
@ -75,11 +76,21 @@ class ChanceScheduler(driver.Scheduler):
selected_hosts = [random.choice(hosts)
for i in range(num_instances)]
# This is the overall list of values to be returned. There will be one
# item per instance, and that item will be a list of Selection objects
# representing the selected host and zero or more alternates.
# NOTE(edleafe): in a multi-cell environment, this can return
# alternates from different cells. When support for multiple cells is
# implemented in select_destinations, this will have to be updated to
# restrict alternates to come from the same cell.
selections_to_return = []
# We can't return dupes as alternates, since alternates are used when
# building to the selected host fails.
alts_per_instance = min(len(hosts), CONF.scheduler.max_attempts)
for sel_host in selected_hosts:
sel_plus_alts = [sel_host]
selection = objects.Selection.from_host_state(sel_host)
sel_plus_alts = [selection]
while len(sel_plus_alts) < alts_per_instance:
candidate = random.choice(hosts)
if (candidate not in sel_plus_alts) and (
@ -87,32 +98,21 @@ class ChanceScheduler(driver.Scheduler):
# We don't want to include a selected host as an alternate,
# as it will have a high likelihood of not having enough
# resources left after it has an instance built on it.
sel_plus_alts.append(candidate)
selected_host_lists.append(sel_plus_alts)
return selected_host_lists
alt_select = objects.Selection.from_host_state(candidate)
sel_plus_alts.append(alt_select)
selections_to_return.append(sel_plus_alts)
return selections_to_return
def select_destinations(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries):
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None):
"""Selects random destinations. Returns a list of HostState objects."""
num_instances = spec_obj.num_instances
# TODO(danms): This needs to be extended to support multiple cells
# and limiting the destination scope to a single requested cell
dests = []
host_cls = self.host_manager.host_state_cls
host_lists = self._schedule(context, compute_rpcapi.RPC_TOPIC,
host_selections = self._schedule(context, compute_rpcapi.RPC_TOPIC,
spec_obj, instance_uuids)
for idx in range(len(instance_uuids)):
host_list = host_lists[idx]
host_states = [host_cls(host, None, None)
for host in host_list]
dests.append(host_states)
if len(dests) < num_instances:
if len(host_selections) < num_instances:
reason = _('There are not enough hosts available.')
raise exception.NoValidHost(reason=reason)
# Don't change the return value in this patch. A later patch in this
# series will change all the method signatures to accept the new return
# data structure. This temporary value mimics the current return value
# of a list of hosts, one per instance.
temp_ret = [dest[0] for dest in dests]
return temp_ret
return host_selections

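As a rough, standalone sketch of the alternate-picking logic in the chance-scheduler hunk above (plain strings instead of HostState/Selection objects, a shuffled pool rather than nova's retry-with-random.choice loop, and an invented helper name): each instance keeps its randomly chosen host plus distinct alternates, and no other instance's chosen host is ever reused as an alternate.

    import random


    def chance_pick_with_alternates(hosts, num_instances, max_attempts):
        """Return one list per instance: [chosen, alt1, ...] (illustration only)."""
        chosen = [random.choice(hosts) for _ in range(num_instances)]
        # Alternates can't duplicate each other, since they are used when the
        # build on the chosen host fails.
        alts_per_instance = min(len(hosts), max_attempts)
        results = []
        for sel_host in chosen:
            entry = [sel_host]
            # Candidates exclude every chosen host: a host that is about to
            # get an instance built on it is a poor alternate.
            pool = [h for h in hosts if h not in chosen]
            random.shuffle(pool)
            entry.extend(pool[:alts_per_instance - 1])
            results.append(entry)
        return results


    print(chance_pick_with_alternates(["host%d" % i for i in range(6)], 2, 3))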

@ -308,7 +308,8 @@ class SchedulerReportClient(object):
@safe_connect
def get_allocation_candidates(self, resources):
"""Returns a tuple of (allocation_requests, provider_summaries).
"""Returns a tuple of (allocation_requests, provider_summaries,
allocation_request_version).
The allocation_requests are a collection of potential JSON objects that
can be passed to the PUT /allocations/{consumer_uuid} Placement REST
@ -319,8 +320,10 @@ class SchedulerReportClient(object):
inventory and capacity information for any resource provider involved
in the allocation_requests.
:returns: A tuple with a list of allocation_request dicts and a dict of
provider information or (None, None) if the request failed
:returns: A tuple with a list of allocation_request dicts, a dict of
provider information, and the microversion used to request
this data from placement, or (None, None, None) if the
request failed
:param nova.scheduler.utils.ResourceRequest resources:
A ResourceRequest object representing the requested resources and
@ -339,11 +342,13 @@ class SchedulerReportClient(object):
'resources': resource_query,
}
version = '1.10'
url = "/allocation_candidates?%s" % parse.urlencode(qs_params)
resp = self.get(url, version='1.10')
resp = self.get(url, version=version)
if resp.status_code == 200:
data = resp.json()
return data['allocation_requests'], data['provider_summaries']
return (data['allocation_requests'], data['provider_summaries'],
version)
msg = ("Failed to retrieve allocation candidates from placement API "
"for filters %(resources)s. Got %(status_code)d: %(err_text)s.")
@ -353,7 +358,7 @@ class SchedulerReportClient(object):
'err_text': resp.text,
}
LOG.error(msg, args)
return None, None
return None, None, None
@safe_connect
def _get_provider_aggregates(self, rp_uuid):
@ -982,7 +987,7 @@ class SchedulerReportClient(object):
@safe_connect
@retries
def claim_resources(self, consumer_uuid, alloc_request, project_id,
user_id):
user_id, allocation_request_version=None):
"""Creates allocation records for the supplied instance UUID against
the supplied resource providers.
@ -1002,8 +1007,13 @@ class SchedulerReportClient(object):
placement's PUT /allocations API
:param project_id: The project_id associated with the allocations.
:param user_id: The user_id associated with the allocations.
:param allocation_request_version: The microversion used to request the
allocations.
:returns: True if the allocations were created, False otherwise.
"""
# Older clients might not send the allocation_request_version, so
# default to 1.10
allocation_request_version = allocation_request_version or '1.10'
# Ensure we don't change the supplied alloc request since it's used in
# a loop within the scheduler against multiple instance claims
ar = copy.deepcopy(alloc_request)
@ -1022,7 +1032,7 @@ class SchedulerReportClient(object):
payload['project_id'] = project_id
payload['user_id'] = user_id
r = self.put(url, payload, version='1.10')
r = self.put(url, payload, version=allocation_request_version)
if r.status_code != 204:
# NOTE(jaypipes): Yes, it sucks doing string comparison like this
# but we have no error codes, only error messages.

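A hypothetical caller-side sketch of the new report-client contract shown above: get_allocation_candidates() now returns a three-element tuple whose last item is the Placement microversion (all three elements are None on failure), and claim_resources() accepts that version back, defaulting to '1.10' when it is absent. Everything except those two client methods is assumed for illustration.

    def claim_first_candidate(report_client, resources, consumer_uuid,
                              project_id, user_id):
        """Illustration only: claim against the first allocation candidate."""
        alloc_reqs, provider_summaries, version = (
            report_client.get_allocation_candidates(resources))
        if not alloc_reqs:
            # (None, None, None) means the request to Placement failed; an
            # empty list means there were no candidates.
            return False
        # Pass the microversion used to fetch the candidates back when
        # claiming; claim_resources() falls back to '1.10' if version is None.
        return report_client.claim_resources(
            consumer_uuid, alloc_reqs[0], project_id, user_id,
            allocation_request_version=version)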

@ -62,8 +62,9 @@ class Scheduler(object):
@abc.abstractmethod
def select_destinations(self, context, spec_obj, instance_uuids,
provider_summaries):
"""Returns a list of HostState objects that have been chosen by the
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None):
"""Returns a list of Selection objects that have been chosen by the
scheduler driver, one for each requested instance
(spec_obj.num_instances)
"""


@ -27,6 +27,7 @@ from six.moves import range
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova import rpc
from nova.scheduler import client
from nova.scheduler import driver
@ -45,14 +46,10 @@ class FilterScheduler(driver.Scheduler):
self.placement_client = scheduler_client.reportclient
def select_destinations(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries):
"""Returns a list of sorted lists of HostState objects (1 for each
instance) that would satisfy the supplied request_spec. Each of those
lists consist of [chosen_host, alternate1, ..., alternateN], where the
'chosen_host' has already had its resources claimed in Placement,
followed by zero or more alternates. The alternates are hosts that can
satisfy the request, and are included so that if the build for the
chosen host fails, the cell conductor can retry.
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None):
"""Returns a list of lists of Selection objects, which represent the
selected hosts and alternates for each instance.
:param context: The RequestContext object
:param spec_obj: The RequestSpec object
@ -77,28 +74,25 @@ class FilterScheduler(driver.Scheduler):
indicates the Placement API returned no
potential matches for the requested
resources.
:param allocation_request_version: The microversion used to request the
allocations.
"""
self.notifier.info(
context, 'scheduler.select_destinations.start',
dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
selected_host_lists = self._schedule(context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries)
host_selections = self._schedule(context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version)
self.notifier.info(
context, 'scheduler.select_destinations.end',
dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
# NOTE(edleafe) - In this patch we only create the lists of [chosen,
# alt1, alt2, etc.]. In a later patch we will change what we return, so
# for this patch just return the selected hosts.
selected_hosts = [sel_host[0] for sel_host in selected_host_lists]
return selected_hosts
return host_selections
def _schedule(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries):
"""Returns a list of hosts that meet the required specs, ordered by
their fitness.
These hosts will have already had their resources claimed in Placement.
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None):
"""Returns a list of lists of Selection objects.
:param context: The RequestContext object
:param spec_obj: The RequestSpec object
@ -122,6 +116,8 @@ class FilterScheduler(driver.Scheduler):
indicates the Placement API returned no
potential matches for the requested
resources.
:param allocation_request_version: The microversion used to request the
allocations.
"""
elevated = context.elevated()
@ -152,7 +148,7 @@ class FilterScheduler(driver.Scheduler):
# is based on CONF.scheduler.max_attempts; note that if there are not
# enough filtered hosts to provide the full number of alternates, the
# list of hosts may be shorter than this amount.
num_to_return = CONF.scheduler.max_attempts
num_alts = CONF.scheduler.max_attempts
if (instance_uuids is None or
not self.USES_ALLOCATION_CANDIDATES or
@ -164,13 +160,12 @@ class FilterScheduler(driver.Scheduler):
# placement API, alloc_reqs_by_rp_uuid will be None, so we skip
# claiming in that case as well. In the case where instance_uuids
# is None, that indicates an older conductor, so we need to return
# the older-style HostState objects without alternates.
# NOTE(edleafe): moving this logic into a separate method, as this
# method is already way too long. It will also make it easier to
# clean up once we no longer have to worry about older conductors.
include_alternates = (instance_uuids is not None)
# the objects without alternates. They will be converted back to
# the older dict format representing HostState objects.
if instance_uuids is None:
num_alts = 0
return self._legacy_find_hosts(num_instances, spec_obj, hosts,
num_to_return, include_alternates)
num_alts)
# A list of the instance UUIDs that were successfully claimed against
# in the placement API. If we are not able to successfully claim for
@ -199,13 +194,16 @@ class FilterScheduler(driver.Scheduler):
for host in hosts:
cn_uuid = host.uuid
if cn_uuid not in alloc_reqs_by_rp_uuid:
LOG.debug("Found host state %s that wasn't in "
"allocation_requests. Skipping.", cn_uuid)
msg = ("A host state with uuid = '%s' that did not have a "
"matching allocation_request was encountered while "
"scheduling. This host was skipped.")
LOG.debug(msg, cn_uuid)
continue
alloc_reqs = alloc_reqs_by_rp_uuid[cn_uuid]
if self._claim_resources(elevated, spec_obj, instance_uuid,
alloc_reqs):
alloc_reqs,
allocation_request_version=allocation_request_version):
claimed_host = host
break
@ -233,7 +231,8 @@ class FilterScheduler(driver.Scheduler):
# We have selected and claimed hosts for each instance. Now we need to
# find alternates for each host.
selections_to_return = self._get_alternate_hosts(
claimed_hosts, spec_obj, hosts, num, num_to_return)
claimed_hosts, spec_obj, hosts, num, num_alts,
alloc_reqs_by_rp_uuid, allocation_request_version)
return selections_to_return
def _ensure_sufficient_hosts(self, hosts, required_count,
@ -274,7 +273,8 @@ class FilterScheduler(driver.Scheduler):
for uuid in instance_uuids:
self.placement_client.delete_allocation_for_instance(uuid)
def _claim_resources(self, ctx, spec_obj, instance_uuid, alloc_reqs):
def _claim_resources(self, ctx, spec_obj, instance_uuid, alloc_reqs,
allocation_request_version=None):
"""Given an instance UUID (representing the consumer of resources), the
HostState object for the host that was chosen for the instance, and a
list of allocation_request JSON objects, attempt to claim resources for
@ -295,6 +295,8 @@ class FilterScheduler(driver.Scheduler):
(along with the project and user ID to the placement
API's PUT /allocations/{consumer_uuid} call to claim
resources for the instance
:param allocation_request_version: The microversion used to request the
allocations.
"""
if utils.request_is_rebuild(spec_obj):
@ -324,10 +326,10 @@ class FilterScheduler(driver.Scheduler):
alloc_req = alloc_reqs[0]
return self.placement_client.claim_resources(instance_uuid,
alloc_req, project_id, user_id)
alloc_req, project_id, user_id,
allocation_request_version=allocation_request_version)
def _legacy_find_hosts(self, num_instances, spec_obj, hosts,
num_to_return, include_alternates):
def _legacy_find_hosts(self, num_instances, spec_obj, hosts, num_alts):
"""Some schedulers do not do claiming, or we can sometimes not be able
to if the Placement service is not reachable. Additionally, we may be
working with older conductors that don't pass in instance_uuids.
@ -335,11 +337,9 @@ class FilterScheduler(driver.Scheduler):
# The list of hosts selected for each instance
selected_hosts = []
# This is the overall list of values to be returned. There will be one
# item per instance, and when 'include_alternates' is True, that item
# will be a list of HostState objects representing the selected host
# along with alternates from the same cell. When 'include_alternates'
# is False, the return value will be a list of HostState objects, with
# one per requested instance.
# item per instance, and each item will be a list of Selection objects
# representing the selected host along with zero or more alternates
# from the same cell.
selections_to_return = []
for num in range(num_instances):
@ -356,12 +356,9 @@ class FilterScheduler(driver.Scheduler):
# raise a NoValidHost exception.
self._ensure_sufficient_hosts(selected_hosts, num_instances)
if include_alternates:
selections_to_return = self._get_alternate_hosts(
selected_hosts, spec_obj, hosts, num, num_to_return)
return selections_to_return
# No alternatives but we still need to return a list of lists of hosts
return [[host] for host in selected_hosts]
selections_to_return = self._get_alternate_hosts(selected_hosts,
spec_obj, hosts, num, num_alts)
return selections_to_return
@staticmethod
def _consume_selected_host(selected_host, spec_obj):
@ -373,7 +370,8 @@ class FilterScheduler(driver.Scheduler):
spec_obj.instance_group.obj_reset_changes(['hosts'])
def _get_alternate_hosts(self, selected_hosts, spec_obj, hosts, index,
num_to_return):
num_alts, alloc_reqs_by_rp_uuid=None,
allocation_request_version=None):
# We only need to filter/weigh the hosts again if we're dealing with
# more than one instance since the single selected host will get
# filtered out of the list of alternates below.
@ -383,13 +381,14 @@ class FilterScheduler(driver.Scheduler):
# hosts again to get an accurate count for alternates.
hosts = self._get_sorted_hosts(spec_obj, hosts, index)
# This is the overall list of values to be returned. There will be one
# item per instance, and that item will be a list of HostState objects
# item per instance, and each item will be a list of Selection objects
# representing the selected host along with alternates from the same
# cell.
selections_to_return = []
for selected_host in selected_hosts:
# This is the list of hosts for one particular instance.
selected_plus_alts = [selected_host]
selection = objects.Selection.from_host_state(selected_host)
selected_plus_alts = [selection]
cell_uuid = selected_host.cell_uuid
# This will populate the alternates with many of the same unclaimed
# hosts. This is OK, as it should be rare for a build to fail. And
@ -399,10 +398,32 @@ class FilterScheduler(driver.Scheduler):
# will have had its resources reduced and will have a much lower
# chance of being able to fit another instance on it.
for host in hosts:
if len(selected_plus_alts) >= num_to_return:
if len(selected_plus_alts) >= num_alts:
break
if host.cell_uuid == cell_uuid and host not in selected_hosts:
selected_plus_alts.append(host)
if alloc_reqs_by_rp_uuid is not None:
alt_uuid = host.uuid
if alt_uuid not in alloc_reqs_by_rp_uuid:
msg = ("A host state with uuid = '%s' that did "
"not have a matching allocation_request "
"was encountered while scheduling. This "
"host was skipped.")
LOG.debug(msg, alt_uuid)
continue
# TODO(jaypipes): Loop through all allocation_requests
# instead of just trying the first one. For now, since
# we'll likely want to order the allocation_requests in
# the future based on information in the provider
# summaries, we'll just try to claim resources using
# the first allocation_request
alloc_req = alloc_reqs_by_rp_uuid[alt_uuid][0]
alt_selection = (
objects.Selection.from_host_state(host, alloc_req,
allocation_request_version))
else:
alt_selection = objects.Selection.from_host_state(host)
selected_plus_alts.append(alt_selection)
selections_to_return.append(selected_plus_alts)
return selections_to_return

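The following standalone sketch condenses the alternate-building pass of _get_alternate_hosts() above, with assumed FakeHost/FakeSelection stand-ins instead of HostState and nova.objects.Selection: alternates must come from the chosen host's cell, must not be another instance's chosen host, and carry the first allocation_request known for their resource provider when a request map is supplied.

    from dataclasses import dataclass
    from typing import Dict, List, Optional


    @dataclass
    class FakeHost:
        """Illustrative stand-in for a HostState."""
        host: str
        uuid: str
        cell_uuid: str


    @dataclass
    class FakeSelection:
        """Illustrative stand-in for nova.objects.Selection."""
        service_host: str
        cell_uuid: str
        allocation_request: Optional[dict] = None
        allocation_request_version: Optional[str] = None


    def build_alternates(chosen_hosts: List[FakeHost],
                         weighed_hosts: List[FakeHost],
                         num_alts: int,
                         alloc_reqs_by_rp_uuid: Optional[Dict[str, list]] = None,
                         allocation_request_version: Optional[str] = None
                         ) -> List[List[FakeSelection]]:
        """One inner list per instance: [chosen, alternate1, ...]."""
        selections_to_return = []
        for chosen in chosen_hosts:
            entry = [FakeSelection(chosen.host, chosen.cell_uuid)]
            for host in weighed_hosts:
                if len(entry) >= num_alts:
                    break
                # Alternates stay in the chosen host's cell and never reuse a
                # host that was already chosen for another instance.
                if host.cell_uuid != chosen.cell_uuid or host in chosen_hosts:
                    continue
                if alloc_reqs_by_rp_uuid is not None:
                    reqs = alloc_reqs_by_rp_uuid.get(host.uuid)
                    if not reqs:
                        # No matching allocation_request; skip this host.
                        continue
                    entry.append(FakeSelection(host.host, host.cell_uuid,
                                               reqs[0],
                                               allocation_request_version))
                else:
                    entry.append(FakeSelection(host.host, host.cell_uuid))
            selections_to_return.append(entry)
        return selections_to_return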

@ -45,19 +45,23 @@ CONF = nova.conf.CONF
QUOTAS = quota.QUOTAS
def _host_state_obj_to_dict(host_state):
limits = dict(host_state.limits)
def _selection_obj_to_dict(selection):
if selection.limits is not None:
limits = selection.limits.to_dict()
else:
limits = {}
# The NUMATopologyFilter can set 'numa_topology' in the limits dict
# to a NUMATopologyLimits object which we need to convert to a primitive
# before this hits jsonutils.to_primitive(). We only check for that known
# case specifically as we don't care about handling out of tree filters
# or drivers injecting non-serializable things in the limits dict.
if 'numa_topology' in limits:
numa_limit = limits.get("numa_topology")
if numa_limit is not None:
limits['numa_topology'] = limits['numa_topology'].obj_to_primitive()
return {
'host': host_state.host,
'nodename': host_state.nodename,
'limits': limits
'host': selection.service_host,
'nodename': selection.nodename,
'limits': limits,
}
@ -122,9 +126,11 @@ class SchedulerManager(manager.Manager):
# We have to handle the case that we failed to connect to the
# Placement service and the safe_connect decorator on
# get_allocation_candidates returns None.
alloc_reqs, provider_summaries = None, None
alloc_reqs, provider_summaries, allocation_request_version = (
None, None, None)
else:
alloc_reqs, provider_summaries = res
(alloc_reqs, provider_summaries,
allocation_request_version) = res
if not alloc_reqs:
LOG.debug("Got no allocation candidates from the Placement "
"API. This may be a temporary occurrence as compute "
@ -141,10 +147,16 @@ class SchedulerManager(manager.Manager):
rp_uuid = rr['resource_provider']['uuid']
alloc_reqs_by_rp_uuid[rp_uuid].append(ar)
dests = self.driver.select_destinations(ctxt, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries)
dest_dicts = [_host_state_obj_to_dict(d) for d in dests]
return jsonutils.to_primitive(dest_dicts)
selections = self.driver.select_destinations(ctxt, spec_obj,
instance_uuids, alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version)
# We don't want to change the return value in this patch, as it
# involves an RPC change. So convert the list of lists of Selection
# objects to a list of host state dicts, which is what the calling
# method expects.
selected = [sel[0] for sel in selections]
selection_dicts = [_selection_obj_to_dict(claim) for claim in selected]
return jsonutils.to_primitive(selection_dicts)
def update_aggregates(self, ctxt, aggregates):
"""Updates HostManager internal aggregates information.

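A minimal sketch of the conversion the new _selection_obj_to_dict() above performs, written against a duck-typed argument rather than real Selection/SchedulerLimits objects: limits come from limits.to_dict() (or an empty dict when limits is None), and a non-None 'numa_topology' entry is serialized with obj_to_primitive() before the result is handed to jsonutils.to_primitive().

    def selection_to_dict(selection):
        """Illustration only: 'selection' is any object exposing service_host,
        nodename and an optional 'limits' object with a to_dict() method."""
        limits = selection.limits.to_dict() if selection.limits is not None else {}
        numa_limit = limits.get("numa_topology")
        if numa_limit is not None:
            # NUMATopologyLimits is a versioned object; turn it into a
            # primitive so the dict survives JSON serialization.
            limits["numa_topology"] = numa_limit.obj_to_primitive()
        return {
            "host": selection.service_host,
            "nodename": selection.nodename,
            "limits": limits,
        }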

@ -1217,7 +1217,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
resp_mock.json.return_value = json_data
self.ks_adap_mock.get.return_value = resp_mock
alloc_reqs, p_sums = self.client.get_allocation_candidates(resources)
alloc_reqs, p_sums, allocation_request_version = \
self.client.get_allocation_candidates(resources)
expected_url = '/allocation_candidates?%s' % parse.urlencode(
{'resources': 'MEMORY_MB:1024,VCPU:1'})


@ -100,7 +100,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
result = self._test_select_destinations(spec_obj)
self.assertEqual(1, len(result))
self.assertEqual(result[0].host, fake_host.host)
self.assertEqual(result[0][0].service_host, fake_host.host)
def _test_select_destinations(self, spec_obj):
provider_summaries = {}
@ -239,7 +239,8 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
d = self.driver.select_destinations(self.context, spec_obj,
[spec_obj.instance_uuid], {}, provider_summaries)
self.assertIn(d[0].host, [hs.host for hs in host_states_cell2])
self.assertIn(d[0][0].service_host,
[hs.host for hs in host_states_cell2])
@mock.patch("nova.scheduler.host_manager.HostState.consume_from_request")
@mock.patch("nova.scheduler.caching_scheduler.CachingScheduler."


@ -20,10 +20,22 @@ import mock
from nova import exception
from nova import objects
from nova.scheduler import chance
from nova.scheduler import host_manager
from nova.tests.unit.scheduler import test_scheduler
from nova.tests import uuidsentinel as uuids
def _generate_fake_hosts(num):
hosts = []
for i in range(num):
fake_host_state = host_manager.HostState("host%s" % i, "fake_node",
uuids.cell)
fake_host_state.uuid = getattr(uuids, "host%s" % i)
fake_host_state.limits = {}
hosts.append(fake_host_state)
return hosts
class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Chance Scheduler."""
@ -53,7 +65,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
@mock.patch("nova.scheduler.chance.ChanceScheduler.hosts_up")
def test_select_destinations(self, mock_hosts_up):
mock_hosts_up.return_value = ['host1', 'host2', 'host3', 'host4']
mock_hosts_up.return_value = _generate_fake_hosts(4)
spec_obj = objects.RequestSpec(num_instances=2, ignore_hosts=None)
dests = self.driver.select_destinations(self.context, spec_obj,
[uuids.instance1, uuids.instance2], {},
@ -67,7 +79,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
@mock.patch("nova.scheduler.chance.ChanceScheduler.hosts_up")
def test_select_destinations_no_valid_host(self, mock_hosts_up,
mock_filter):
mock_hosts_up.return_value = ['host1', 'host2', 'host3', 'host4']
mock_hosts_up.return_value = _generate_fake_hosts(4)
mock_filter.return_value = []
spec_obj = objects.RequestSpec(num_instances=1)
@ -79,7 +91,7 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
@mock.patch("nova.scheduler.chance.ChanceScheduler.hosts_up")
def test_schedule_success_single_instance(self, mock_hosts_up):
hosts = ["host%s" % i for i in range(20)]
hosts = _generate_fake_hosts(20)
mock_hosts_up.return_value = hosts
spec_obj = objects.RequestSpec(num_instances=1, ignore_hosts=None)
spec_obj.instance_uuid = uuids.instance
@ -87,10 +99,10 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
attempts = 2
expected = attempts
self.flags(max_attempts=attempts, group="scheduler")
result = self.driver._schedule(self.context, "compute", spec_obj,
[spec_obj.instance_uuid])
self.assertEqual(1, len(result))
for host_list in result:
selected_hosts = self.driver._schedule(self.context, "compute",
spec_obj, [spec_obj.instance_uuid])
self.assertEqual(1, len(selected_hosts))
for host_list in selected_hosts:
self.assertEqual(expected, len(host_list))
# Now set max_attempts to a number larger than the available hosts. It
@ -99,15 +111,15 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
attempts = len(hosts) + 1
expected = len(hosts)
self.flags(max_attempts=attempts, group="scheduler")
result = self.driver._schedule(self.context, "compute", spec_obj,
[spec_obj.instance_uuid])
self.assertEqual(1, len(result))
for host_list in result:
selected_hosts = self.driver._schedule(self.context, "compute",
spec_obj, [spec_obj.instance_uuid])
self.assertEqual(1, len(selected_hosts))
for host_list in selected_hosts:
self.assertEqual(expected, len(host_list))
@mock.patch("nova.scheduler.chance.ChanceScheduler.hosts_up")
def test_schedule_success_multiple_instances(self, mock_hosts_up):
hosts = ["host%s" % i for i in range(20)]
hosts = _generate_fake_hosts(20)
mock_hosts_up.return_value = hosts
num_instances = 4
spec_obj = objects.RequestSpec(num_instances=num_instances,
@ -118,10 +130,10 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
# Set the max_attempts to 2
attempts = 2
self.flags(max_attempts=attempts, group="scheduler")
result = self.driver._schedule(self.context, "compute", spec_obj,
instance_uuids)
self.assertEqual(num_instances, len(result))
for host_list in result:
selected_hosts = self.driver._schedule(self.context, "compute",
spec_obj, instance_uuids)
self.assertEqual(num_instances, len(selected_hosts))
for host_list in selected_hosts:
self.assertEqual(attempts, len(host_list))
# Verify that none of the selected hosts appear as alternates
# Set the max_attempts to 5 to get 4 alternates per instance


@ -17,6 +17,7 @@ Tests For Filter Scheduler.
"""
import mock
from oslo_serialization import jsonutils
from nova import exception
from nova import objects
@ -31,6 +32,26 @@ from nova.tests.unit.scheduler import test_scheduler
from nova.tests import uuidsentinel as uuids
fake_numa_limit = objects.NUMATopologyLimits(cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0)
fake_limit = {"memory_mb": 1024, "disk_gb": 100, "vcpus": 2,
"numa_topology": fake_numa_limit}
fake_limit_obj = objects.SchedulerLimits.from_dict(fake_limit)
fake_alloc = {"allocations": [
{"resource_provider": {"uuid": uuids.compute_node},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc_version = "1.23"
json_alloc = jsonutils.dumps(fake_alloc)
fake_selection = objects.Selection(service_host="fake_host",
nodename="fake_node", compute_node_uuid=uuids.compute_node,
cell_uuid=uuids.cell, limits=fake_limit_obj,
allocation_request=json_alloc,
allocation_request_version=fake_alloc_version)
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
@ -67,24 +88,26 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
project_id=uuids.project_id,
instance_group=None)
host_state = mock.Mock(spec=host_manager.HostState,
host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell)
host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
limits={})
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
instance_uuids = None
instance_uuids = [uuids.instance]
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj,
instance_uuids, None, mock.sentinel.provider_summaries)
selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids,
None, mock.sentinel.provider_summaries)
expected_hosts = [[objects.Selection.from_host_state(host_state)]]
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
self.assertEqual(len(selected_hosts), 1)
self.assertEqual([[host_state]], selected_hosts)
self.assertEqual(expected_hosts, selected_hosts)
# Ensure that we have consumed the resources on the chosen host states
host_state.consume_from_request.assert_called_once_with(spec_obj)
@ -116,7 +139,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_group=None)
host_state = mock.Mock(spec=host_manager.HostState,
host=mock.sentinel.host, uuid=uuids.cn1)
host="fake_host", nodename="fake_node", uuid=uuids.cn1,
limits={}, cell_uuid=uuids.cell)
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
@ -133,7 +157,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
self.assertEqual(len(selected_hosts), 1)
self.assertEqual([[host_state]], selected_hosts)
expected_host = objects.Selection.from_host_state(host_state)
self.assertEqual([[expected_host]], selected_hosts)
# Ensure that we have consumed the resources on the chosen host states
host_state.consume_from_request.assert_called_once_with(spec_obj)
@ -159,33 +184,64 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
project_id=uuids.project_id,
instance_group=None)
host_state = mock.Mock(spec=host_manager.HostState,
host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1)
all_host_states = [host_state]
host_state1 = mock.Mock(spec=host_manager.HostState,
host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
cell_uuid=uuids.cell, limits={})
host_state2 = mock.Mock(spec=host_manager.HostState,
host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
cell_uuid=uuids.cell, limits={})
host_state3 = mock.Mock(spec=host_manager.HostState,
host="fake_host3", nodename="fake_node3", uuid=uuids.cn3,
cell_uuid=uuids.cell, limits={})
all_host_states = [host_state1, host_state2, host_state3]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
mock_claim.return_value = True
instance_uuids = [uuids.instance]
alloc_reqs_by_rp_uuid = {
uuids.cn1: [mock.sentinel.alloc_req],
}
fake_alloc1 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn1},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc2 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn2},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc3 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn3},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc1],
uuids.cn2: [fake_alloc2], uuids.cn3: [fake_alloc3]}
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries)
sel1 = objects.Selection.from_host_state(host_state1)
sel2 = objects.Selection.from_host_state(host_state2,
allocation_request=fake_alloc2)
sel3 = objects.Selection.from_host_state(host_state3,
allocation_request=fake_alloc3)
expected_selection = [[sel1, sel2, sel3]]
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
mock_get_hosts.assert_called()
mock_claim.assert_called_once_with(ctx.elevated.return_value, spec_obj,
uuids.instance, [mock.sentinel.alloc_req])
uuids.instance, alloc_reqs_by_rp_uuid[uuids.cn1],
allocation_request_version=None)
self.assertEqual(len(selected_hosts), 1)
self.assertEqual([[host_state]], selected_hosts)
self.assertEqual(expected_selection, selected_hosts)
# Ensure that we have consumed the resources on the chosen host states
host_state.consume_from_request.assert_called_once_with(spec_obj)
host_state1.consume_from_request.assert_called_once_with(spec_obj)
def test_schedule_successful_claim(self):
self._test_schedule_successful_claim()
@ -229,22 +285,24 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_uuids = [uuids.instance]
alloc_reqs_by_rp_uuid = {
uuids.cn1: [mock.sentinel.alloc_req],
uuids.cn1: {"allocations": [mock.sentinel.alloc_req]},
}
ctx = mock.Mock()
fake_version = "1.99"
self.assertRaises(exception.NoValidHost, self.driver._schedule, ctx,
spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
mock.sentinel.provider_summaries)
mock.sentinel.provider_summaries,
allocation_request_version=fake_version)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
mock_claim.assert_called_once_with(ctx.elevated.return_value, spec_obj,
uuids.instance, [mock.sentinel.alloc_req])
uuids.instance, alloc_reqs_by_rp_uuid[uuids.cn1],
allocation_request_version=fake_version)
mock_cleanup.assert_not_called()
# Ensure that we have consumed the resources on the chosen host states
self.assertFalse(host_state.consume_from_request.called)
@ -272,7 +330,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_group=None)
host_state = mock.Mock(spec=host_manager.HostState,
host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1)
host="fake_host", nodename="fake_node", uuid=uuids.cn1,
cell_uuid=uuids.cell1, limits={}, updated='fake')
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.side_effect = [
@ -283,9 +342,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
mock_claim.return_value = True
instance_uuids = [uuids.instance1, uuids.instance2]
alloc_reqs_by_rp_uuid = {
uuids.cn1: [mock.sentinel.alloc_req],
}
fake_alloc = {"allocations": [
{"resource_provider": {"uuid": uuids.cn1},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc]}
ctx = mock.Mock()
self.assertRaises(exception.NoValidHost, self.driver._schedule, ctx,
spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
@ -319,16 +382,18 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance_group=ig)
hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
uuid=uuids.cn1, cell_uuid=uuids.cell1)
nodename="node1", limits={}, uuid=uuids.cn1,
cell_uuid=uuids.cell1)
hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
uuid=uuids.cn2, cell_uuid=uuids.cell2)
nodename="node2", limits={}, uuid=uuids.cn2,
cell_uuid=uuids.cell2)
all_host_states = [hs1, hs2]
mock_get_all_states.return_value = all_host_states
mock_claim.return_value = True
alloc_reqs_by_rp_uuid = {
uuids.cn1: [mock.sentinel.alloc_req_cn1],
uuids.cn2: [mock.sentinel.alloc_req_cn2],
uuids.cn1: {"allocations": ["fake_cn1_alloc"]},
uuids.cn2: {"allocations": ["fake_cn2_alloc"]},
}
# Simulate host 1 and host 2 being randomly returned first by
@ -346,10 +411,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# Check that we called _claim_resources() for both the first and second
# host state
claim_calls = [
mock.call(ctx.elevated.return_value, spec_obj,
uuids.instance0, [mock.sentinel.alloc_req_cn2]),
mock.call(ctx.elevated.return_value, spec_obj,
uuids.instance1, [mock.sentinel.alloc_req_cn1]),
mock.call(ctx.elevated.return_value, spec_obj, uuids.instance0,
alloc_reqs_by_rp_uuid[uuids.cn2],
allocation_request_version=None),
mock.call(ctx.elevated.return_value, spec_obj, uuids.instance1,
alloc_reqs_by_rp_uuid[uuids.cn1],
allocation_request_version=None),
]
mock_claim.assert_has_calls(claim_calls)
@ -535,7 +602,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
pc = self.placement_client
pc.claim_resources.return_value = True
pc.claim_resources.assert_called_once_with(uuids.instance,
mock.sentinel.alloc_req, uuids.project_id, uuids.user_id)
mock.sentinel.alloc_req, uuids.project_id, uuids.user_id,
allocation_request_version=None)
self.assertTrue(res)
@mock.patch('nova.scheduler.utils.request_is_rebuild')
@ -591,17 +659,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
project_id=uuids.project_id,
num_instances=1)
mock_schedule.return_value = [[mock.sentinel.hs1]]
mock_schedule.return_value = [[fake_selection]]
dests = self.driver.select_destinations(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
mock.sentinel.p_sums, mock.sentinel.ar_version)
mock_schedule.assert_called_once_with(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
self.assertEqual([mock.sentinel.hs1], dests)
mock.sentinel.p_sums, mock.sentinel.ar_version)
self.assertEqual([[fake_selection]], dests)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_schedule')
@ -621,19 +687,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
project_id=uuids.project_id,
num_instances=2)
host_state = mock.Mock(spec=host_manager.HostState,
cell_uuid=uuids.cell)
mock_schedule.return_value = [[host_state]]
mock_schedule.return_value = [[fake_selection]]
dests = self.driver.select_destinations(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
mock.sentinel.p_sums, mock.sentinel.ar_version)
mock_schedule.assert_called_once_with(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
self.assertEqual([host_state], dests)
mock.sentinel.p_sums, mock.sentinel.ar_version)
self.assertEqual([[fake_selection]], dests)
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_claim_resources", return_value=True)
@ -689,7 +751,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
uuids.cell)
hs.uuid = getattr(uuids, host_name)
all_host_states.append(hs)
alloc_reqs[hs.uuid] = {}
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
mock_sorted.return_value = all_host_states
@ -755,7 +817,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
cell_uuid)
hs.uuid = getattr(uuids, host_name)
all_host_states.append(hs)
alloc_reqs[hs.uuid] = {}
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
# There are two instances so _get_sorted_hosts is called once per
@ -809,7 +871,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
uuids.cell)
hs.uuid = getattr(uuids, host_name)
all_host_states.append(hs)
alloc_reqs[hs.uuid] = {}
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
mock_sorted.return_value = all_host_states
@ -852,7 +914,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
@mock.patch.object(filter_scheduler.FilterScheduler, '_schedule')
def test_select_destinations_notifications(self, mock_schedule):
mock_schedule.return_value = [[mock.Mock()]]
mock_schedule.return_value = ([[mock.Mock()]], [[mock.Mock()]])
with mock.patch.object(self.driver.notifier, 'info') as mock_info:
expected = {'num_instances': 1,


@ -110,7 +110,8 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
def test_select_destination(self, mock_get_ac, mock_rfrs):
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums)
fake_version = "9.42"
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums, fake_version)
mock_get_ac.return_value = place_res
expected_alloc_reqs_by_rp_uuid = {
cn.uuid: [fakes.ALLOC_REQS[x]]
@ -122,7 +123,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
mock.sentinel.p_sums, fake_version)
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@ -147,7 +148,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
report client's get_allocation_candidates() returns None, None, None as
it would if the placement service hasn't been upgraded before the
scheduler.
"""
place_res = (None, None)
place_res = (None, None, None)
self._test_select_destination(place_res)
def test_select_destination_placement_connect_fails(self):
@ -165,7 +166,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
would if placement service hasn't yet had compute nodes populate
inventory.
"""
place_res = ([], {})
place_res = ([], {}, None)
self._test_select_destination(place_res)
@mock.patch('nova.scheduler.utils.resources_from_request_spec')
@ -173,7 +174,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
'get_allocation_candidates')
def test_select_destination_with_4_3_client(self, mock_get_ac, mock_rfrs):
fake_spec = objects.RequestSpec()
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums)
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums, "42.0")
mock_get_ac.return_value = place_res
expected_alloc_reqs_by_rp_uuid = {
cn.uuid: [fakes.ALLOC_REQS[x]]
@ -183,7 +184,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
) as select_destinations:
self.manager.select_destinations(None, spec_obj=fake_spec)
select_destinations.assert_called_once_with(None, fake_spec, None,
expected_alloc_reqs_by_rp_uuid, mock.sentinel.p_sums)
expected_alloc_reqs_by_rp_uuid, mock.sentinel.p_sums, "42.0")
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
# TODO(sbauza): Remove that test once the API v4 is removed
@ -196,7 +197,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
fake_spec = objects.RequestSpec()
fake_spec.instance_uuid = uuids.instance
from_primitives.return_value = fake_spec
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums)
place_res = (fakes.ALLOC_REQS, mock.sentinel.p_sums, "42.0")
mock_get_ac.return_value = place_res
expected_alloc_reqs_by_rp_uuid = {
cn.uuid: [fakes.ALLOC_REQS[x]]
@ -209,7 +210,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
instance_uuids=[fake_spec.instance_uuid])
select_destinations.assert_called_once_with(None, fake_spec,
[fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums)
mock.sentinel.p_sums, "42.0")
mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
def test_update_aggregates(self):
@ -266,36 +267,57 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
cell_mapping=cm2)]
self.manager._discover_hosts_in_cells(mock.sentinel.context)
def test_host_state_obj_to_dict_numa_topology_limits_conversion(self):
"""Tests that _host_state_obj_to_dict properly converts a
NUMATopologyLimits object in the HostState.limits if found and
that other unexpected objects aren't converted.
def test_selection_obj_to_dict(self):
"""Tests that _selection_obj_to_dict() method properly converts a
Selection object to the corresponding dict.
"""
host_state = host_manager.HostState(
'fake-host', 'fake-node', uuids.cell_uuid)
# The NUMATopologyFilter sets host_state.limits['numa_topology'] to
# a NUMATopologyLimits object which is what we want to verify gets
# converted to a primitive in _host_state_obj_to_dict.
numa_limits = objects.NUMATopologyLimits(
cpu_allocation_ratio=CONF.cpu_allocation_ratio,
ram_allocation_ratio=CONF.ram_allocation_ratio)
host_state.limits['numa_topology'] = numa_limits
# Set some other unexpected object to assert we don't convert it.
ignored_limits = objects.SchedulerLimits()
host_state.limits['ignored'] = ignored_limits
result = manager._host_state_obj_to_dict(host_state)
fake_numa_limit = objects.numa.NUMATopologyLimits(
cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0)
fake_limit = {"memory_mb": 1024, "disk_gb": 100, "vcpus": 2,
"numa_topology": fake_numa_limit}
fake_limit_obj = objects.SchedulerLimits.from_dict(fake_limit)
sel_obj = objects.Selection(service_host="fakehost",
nodename="fakenode", compute_node_uuid=uuids.host,
cell_uuid=uuids.cell, limits=fake_limit_obj,
allocation_request="fake", allocation_request_version="99.9")
expected = {
'host': 'fake-host',
'nodename': 'fake-node',
'limits': {
'numa_topology': numa_limits.obj_to_primitive(),
'ignored': ignored_limits
}
}
'host': 'fakehost',
'nodename': 'fakenode',
'limits': {
'disk_gb': 100,
'memory_mb': 1024,
'numa_topology': {
'nova_object.changes': [
'cpu_allocation_ratio',
'ram_allocation_ratio'],
'nova_object.data': {
'cpu_allocation_ratio': 1.0,
'ram_allocation_ratio': 1.0},
'nova_object.name': 'NUMATopologyLimits',
'nova_object.namespace': 'nova',
'nova_object.version': '1.0'}}}
result = manager._selection_obj_to_dict(sel_obj)
self.assertDictEqual(expected, result)
def test_selection_obj_to_dict_no_numa(self):
"""Tests that _selection_obj_to_dict() method properly converts a
Selection object to the corresponding dict when the numa_topology field
is None.
"""
fake_limit = {"memory_mb": 1024, "disk_gb": 100, "vcpus": 2,
"numa_topology": None}
fake_limit_obj = objects.SchedulerLimits.from_dict(fake_limit)
sel_obj = objects.Selection(service_host="fakehost",
nodename="fakenode", compute_node_uuid=uuids.host,
cell_uuid=uuids.cell, limits=fake_limit_obj,
allocation_request="fake", allocation_request_version="99.9")
expected = {"host": "fakehost",
"nodename": "fakenode",
"limits": {
"disk_gb": 100,
"memory_mb": 1024}}
result = manager._selection_obj_to_dict(sel_obj)
self.assertDictEqual(expected, result)
# Make sure the original limits weren't changed.
self.assertIsInstance(host_state.limits['numa_topology'],
objects.NUMATopologyLimits)
class SchedulerInitTestCase(test.NoDBTestCase):