From 665299949494d78d88ff8c22caa2a4f3978c054b Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 29 Jun 2021 12:31:22 +0100 Subject: [PATCH] scheduler: Merge driver into manager There's only one driver now, which means there isn't really a driver at all. Move the code into the manager altogether and avoid a useless layer of abstraction. Change-Id: I609df5b707e05ea70c8a738701423ca751682575 Signed-off-by: Stephen Finucane --- .../testing/eventlet-profiling.rst | 27 +- nova/scheduler/driver.py | 505 ------- nova/scheduler/manager.py | 572 +++++++- .../functional/libvirt/test_numa_servers.py | 2 +- .../libvirt/test_pci_sriov_servers.py | 2 +- .../regressions/test_bug_1781710.py | 8 +- nova/tests/functional/test_servers.py | 2 +- nova/tests/unit/scheduler/test_driver.py | 1126 ---------------- nova/tests/unit/scheduler/test_manager.py | 1173 ++++++++++++++++- 9 files changed, 1670 insertions(+), 1747 deletions(-) delete mode 100644 nova/scheduler/driver.py delete mode 100644 nova/tests/unit/scheduler/test_driver.py diff --git a/doc/source/contributor/testing/eventlet-profiling.rst b/doc/source/contributor/testing/eventlet-profiling.rst index 460778aea298..96c58ba9112f 100644 --- a/doc/source/contributor/testing/eventlet-profiling.rst +++ b/doc/source/contributor/testing/eventlet-profiling.rst @@ -100,20 +100,20 @@ profiling and benchmarking scenarios so not all changes are relevant here): [notifications] notification_format = unversioned -Change the code in ``nova/scheduler/driver.py`` as follows to start the -profiler at the start of ``select_destinations`` call and to dump the +Change the code in ``nova/scheduler/manager.py`` as follows to start the +profiler at the start of the ``_select_destinations`` call and to dump the statistics at the end. For example: .. code-block:: diff - diff --git nova/scheduler/driver.py nova/scheduler/driver.py - index 555236e8a1..efa84b5a47 100644 - --- nova/scheduler/driver.py - +++ nova/scheduler/driver.py - @@ -95,6 +95,10 @@ class SchedulerDriver: - determined by the configuration option - `CONF.scheduler.max_attempts`. - """ + diff --git nova/scheduler/manager.py nova/scheduler/manager.py + index 9cee6b3bfc..4859b21fb1 100644 + --- nova/scheduler/manager.py + +++ nova/scheduler/manager.py + @@ -237,6 +237,10 @@ class SchedulerManager(manager.Manager): + alloc_reqs_by_rp_uuid, provider_summaries, + allocation_request_version=None, return_alternates=False, + ): + from eventlet.green import profile + pr = profile.Profile() + pr.start() @@ -121,15 +121,14 @@ statistics at the end. For example: self.notifier.info( context, 'scheduler.select_destinations.start', {'request_spec': spec_obj.to_legacy_request_spec_dict()}) - @@ -114,6 +118,10 @@ class SchedulerDriver: - context=context, request_spec=spec_obj, + @@ -260,6 +264,9 @@ class SchedulerManager(manager.Manager): action=fields_obj.NotificationAction.SELECT_DESTINATIONS, phase=fields_obj.NotificationPhase.END) - + + + pr.stop() + pr.dump_stats('/tmp/select_destinations/%s.prof' % ':'.join(instance_uuids)) + - return host_selections + return selections def _schedule( diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py deleted file mode 100644 index cc1df85bf18a..000000000000 --- a/nova/scheduler/driver.py +++ /dev/null @@ -1,505 +0,0 @@ -# Copyright (c) 2010 OpenStack Foundation -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Driver for the nova-scheduler service. - -You can customize this scheduler by specifying your own host filters and -weighers. -""" - -import random - -from oslo_log import log as logging - -from nova.compute import utils as compute_utils -import nova.conf -from nova import exception -from nova.i18n import _ -from nova import objects -from nova.objects import fields as fields_obj -from nova import rpc -from nova.scheduler.client import report -from nova.scheduler import host_manager -from nova.scheduler import utils -from nova import servicegroup - -CONF = nova.conf.CONF -LOG = logging.getLogger(__name__) - - -class SchedulerDriver: - """The scheduler driver. - - Filters and weighs compute hosts to determine the best host to schedule an - instance to. - """ - - def __init__(self): - self.host_manager = host_manager.HostManager() - self.servicegroup_api = servicegroup.API() - self.notifier = rpc.get_notifier('scheduler') - self.placement_client = report.SchedulerReportClient() - - def select_destinations( - self, context, spec_obj, instance_uuids, - alloc_reqs_by_rp_uuid, provider_summaries, - allocation_request_version=None, return_alternates=False, - ): - """Returns a list of lists of Selection objects that have been chosen - by the scheduler driver, one for each requested instance. - - :param context: The RequestContext object - :param spec_obj: The RequestSpec object - :param instance_uuids: List of UUIDs, one for each value of the spec - object's num_instances attribute - :param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider - UUID, of the allocation_requests that may be used to claim - resources against matched hosts. If None, indicates either the - placement API wasn't reachable or that there were no - allocation_requests returned by the placement API. If the latter, - the provider_summaries will be an empty dict, not None. - :param provider_summaries: Optional dict, keyed by resource provider - UUID, of information that will be used by the filters/weighers in - selecting matching hosts for a request. If None, indicates that the - scheduler driver should grab all compute node information locally - and that the Placement API is not used. If an empty dict, indicates - the Placement API returned no potential matches for the requested - resources. - :param allocation_request_version: The microversion used to request the - allocations. - :param return_alternates: When True, zero or more alternate hosts are - returned with each selected host. The number of alternates is - determined by the configuration option - `CONF.scheduler.max_attempts`. 
- """ - self.notifier.info( - context, 'scheduler.select_destinations.start', - {'request_spec': spec_obj.to_legacy_request_spec_dict()}) - compute_utils.notify_about_scheduler_action( - context=context, request_spec=spec_obj, - action=fields_obj.NotificationAction.SELECT_DESTINATIONS, - phase=fields_obj.NotificationPhase.START) - - host_selections = self._schedule( - context, spec_obj, instance_uuids, - alloc_reqs_by_rp_uuid, provider_summaries, - allocation_request_version, return_alternates) - self.notifier.info( - context, 'scheduler.select_destinations.end', - {'request_spec': spec_obj.to_legacy_request_spec_dict()}) - compute_utils.notify_about_scheduler_action( - context=context, request_spec=spec_obj, - action=fields_obj.NotificationAction.SELECT_DESTINATIONS, - phase=fields_obj.NotificationPhase.END) - return host_selections - - def _schedule( - self, context, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, - provider_summaries, allocation_request_version=None, - return_alternates=False - ): - """Returns a list of lists of Selection objects. - - :param context: The RequestContext object - :param spec_obj: The RequestSpec object - :param instance_uuids: List of instance UUIDs to place or move. - :param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider - UUID, of the allocation_requests that may be used to claim - resources against matched hosts. If None, indicates either the - placement API wasn't reachable or that there were no - allocation_requests returned by the placement API. If the latter, - the provider_summaries will be an empty dict, not None. - :param provider_summaries: Optional dict, keyed by resource provider - UUID, of information that will be used by the filters/weighers in - selecting matching hosts for a request. If None, indicates that the - scheduler driver should grab all compute node information locally - and that the Placement API is not used. If an empty dict, indicates - the Placement API returned no potential matches for the requested - resources. - :param allocation_request_version: The microversion used to request the - allocations. - :param return_alternates: When True, zero or more alternate hosts are - returned with each selected host. The number of alternates is - determined by the configuration option - `CONF.scheduler.max_attempts`. - """ - elevated = context.elevated() - - # Find our local list of acceptable hosts by repeatedly - # filtering and weighing our options. Each time we choose a - # host, we virtually consume resources on it so subsequent - # selections can adjust accordingly. - - # Note: remember, we are using a generator-iterator here. So only - # traverse this list once. This can bite you if the hosts - # are being scanned in a filter or weighing function. - hosts = self._get_all_host_states( - elevated, spec_obj, provider_summaries) - - # NOTE(sbauza): The RequestSpec.num_instances field contains the number - # of instances created when the RequestSpec was used to first boot some - # instances. This is incorrect when doing a move or resize operation, - # so prefer the length of instance_uuids unless it is None. - num_instances = (len(instance_uuids) if instance_uuids - else spec_obj.num_instances) - - # For each requested instance, we want to return a host whose resources - # for the instance have been claimed, along with zero or more - # alternates. 
These alternates will be passed to the cell that the - # selected host is in, so that if for some reason the build fails, the - # cell conductor can retry building the instance on one of these - # alternates instead of having to simply fail. The number of alternates - # is based on CONF.scheduler.max_attempts; note that if there are not - # enough filtered hosts to provide the full number of alternates, the - # list of hosts may be shorter than this amount. - num_alts = CONF.scheduler.max_attempts - 1 if return_alternates else 0 - - if instance_uuids is None or alloc_reqs_by_rp_uuid is None: - # If there was a problem communicating with the - # placement API, alloc_reqs_by_rp_uuid will be None, so we skip - # claiming in that case as well. In the case where instance_uuids - # is None, that indicates an older conductor, so we need to return - # the objects without alternates. They will be converted back to - # the older dict format representing HostState objects. - # TODO(stephenfin): Remove this when we bump scheduler the RPC API - # version to 5.0 - return self._legacy_find_hosts( - context, num_instances, spec_obj, hosts, num_alts, - instance_uuids=instance_uuids) - - # A list of the instance UUIDs that were successfully claimed against - # in the placement API. If we are not able to successfully claim for - # all involved instances, we use this list to remove those allocations - # before returning - claimed_instance_uuids = [] - - # The list of hosts that have been selected (and claimed). - claimed_hosts = [] - - for num, instance_uuid in enumerate(instance_uuids): - # In a multi-create request, the first request spec from the list - # is passed to the scheduler and that request spec's instance_uuid - # might not be the same as the instance we're processing, so we - # update the instance_uuid in that case before passing the request - # spec to filters since at least one filter - # (ServerGroupAntiAffinityFilter) depends on that information being - # accurate. - spec_obj.instance_uuid = instance_uuid - # Reset the field so it's not persisted accidentally. - spec_obj.obj_reset_changes(['instance_uuid']) - - hosts = self._get_sorted_hosts(spec_obj, hosts, num) - if not hosts: - # NOTE(jaypipes): If we get here, that means not all instances - # in instance_uuids were able to be matched to a selected host. - # Any allocations will be cleaned up in the - # _ensure_sufficient_hosts() call. - break - - # Attempt to claim the resources against one or more resource - # providers, looping over the sorted list of possible hosts - # looking for an allocation_request that contains that host's - # resource provider UUID - claimed_host = None - for host in hosts: - cn_uuid = host.uuid - if cn_uuid not in alloc_reqs_by_rp_uuid: - msg = ("A host state with uuid = '%s' that did not have a " - "matching allocation_request was encountered while " - "scheduling. This host was skipped.") - LOG.debug(msg, cn_uuid) - continue - - alloc_reqs = alloc_reqs_by_rp_uuid[cn_uuid] - # TODO(jaypipes): Loop through all allocation_requests instead - # of just trying the first one. 
For now, since we'll likely - # want to order the allocation_requests in the future based on - # information in the provider summaries, we'll just try to - # claim resources using the first allocation_request - alloc_req = alloc_reqs[0] - if utils.claim_resources( - elevated, self.placement_client, spec_obj, instance_uuid, - alloc_req, - allocation_request_version=allocation_request_version, - ): - claimed_host = host - break - - if claimed_host is None: - # We weren't able to claim resources in the placement API - # for any of the sorted hosts identified. So, clean up any - # successfully-claimed resources for prior instances in - # this request and return an empty list which will cause - # select_destinations() to raise NoValidHost - LOG.debug("Unable to successfully claim against any host.") - break - - claimed_instance_uuids.append(instance_uuid) - claimed_hosts.append(claimed_host) - - # Now consume the resources so the filter/weights will change for - # the next instance. - self._consume_selected_host( - claimed_host, spec_obj, instance_uuid=instance_uuid) - - # Check if we were able to fulfill the request. If not, this call will - # raise a NoValidHost exception. - self._ensure_sufficient_hosts( - context, claimed_hosts, num_instances, claimed_instance_uuids) - - # We have selected and claimed hosts for each instance. Now we need to - # find alternates for each host. - return self._get_alternate_hosts( - claimed_hosts, spec_obj, hosts, num, num_alts, - alloc_reqs_by_rp_uuid, allocation_request_version) - - def _ensure_sufficient_hosts( - self, context, hosts, required_count, claimed_uuids=None, - ): - """Checks that we have selected a host for each requested instance. If - not, log this failure, remove allocations for any claimed instances, - and raise a NoValidHost exception. - """ - if len(hosts) == required_count: - # We have enough hosts. - return - - if claimed_uuids: - self._cleanup_allocations(context, claimed_uuids) - - # NOTE(Rui Chen): If multiple creates failed, set the updated time - # of selected HostState to None so that these HostStates are - # refreshed according to database in next schedule, and release - # the resource consumed by instance in the process of selecting - # host. - for host in hosts: - host.updated = None - - # Log the details but don't put those into the reason since - # we don't want to give away too much information about our - # actual environment. - LOG.debug( - 'There are %(hosts)d hosts available but ' - '%(required_count)d instances requested to build.', - {'hosts': len(hosts), 'required_count': required_count}) - reason = _('There are not enough hosts available.') - raise exception.NoValidHost(reason=reason) - - def _cleanup_allocations(self, context, instance_uuids): - """Removes allocations for the supplied instance UUIDs.""" - if not instance_uuids: - return - - LOG.debug("Cleaning up allocations for %s", instance_uuids) - for uuid in instance_uuids: - self.placement_client.delete_allocation_for_instance(context, uuid) - - def _legacy_find_hosts( - self, context, num_instances, spec_obj, hosts, num_alts, - instance_uuids=None, - ): - """Find hosts without invoking placement. - - We may not be able to claim if the Placement service is not reachable. - Additionally, we may be working with older conductors that don't pass - in instance_uuids. 
- """ - # The list of hosts selected for each instance - selected_hosts = [] - - for num in range(num_instances): - instance_uuid = instance_uuids[num] if instance_uuids else None - if instance_uuid: - # Update the RequestSpec.instance_uuid before sending it to - # the filters in case we're doing a multi-create request, but - # don't persist the change. - spec_obj.instance_uuid = instance_uuid - spec_obj.obj_reset_changes(['instance_uuid']) - - hosts = self._get_sorted_hosts(spec_obj, hosts, num) - if not hosts: - # No hosts left, so break here, and the - # _ensure_sufficient_hosts() call below will handle this. - break - - selected_host = hosts[0] - selected_hosts.append(selected_host) - self._consume_selected_host( - selected_host, spec_obj, instance_uuid=instance_uuid) - - # Check if we were able to fulfill the request. If not, this call will - # raise a NoValidHost exception. - self._ensure_sufficient_hosts(context, selected_hosts, num_instances) - - # This the overall list of values to be returned. There will be one - # item per instance, and each item will be a list of Selection objects - # representing the selected host along with zero or more alternates - # from the same cell. - return self._get_alternate_hosts( - selected_hosts, spec_obj, hosts, num, num_alts) - - @staticmethod - def _consume_selected_host(selected_host, spec_obj, instance_uuid=None): - LOG.debug( - "Selected host: %(host)s", {'host': selected_host}, - instance_uuid=instance_uuid) - selected_host.consume_from_request(spec_obj) - # If we have a server group, add the selected host to it for the - # (anti-)affinity filters to filter out hosts for subsequent instances - # in a multi-create request. - if spec_obj.instance_group is not None: - spec_obj.instance_group.hosts.append(selected_host.host) - # hosts has to be not part of the updates when saving - spec_obj.instance_group.obj_reset_changes(['hosts']) - # The ServerGroupAntiAffinityFilter also relies on - # HostState.instances being accurate within a multi-create request. - if instance_uuid and instance_uuid not in selected_host.instances: - # Set a stub since ServerGroupAntiAffinityFilter only cares - # about the keys. - selected_host.instances[instance_uuid] = objects.Instance( - uuid=instance_uuid) - - def _get_alternate_hosts( - self, selected_hosts, spec_obj, hosts, index, num_alts, - alloc_reqs_by_rp_uuid=None, allocation_request_version=None, - ): - # We only need to filter/weigh the hosts again if we're dealing with - # more than one instance and are going to be picking alternates. - if index > 0 and num_alts > 0: - # The selected_hosts have all had resources 'claimed' via - # _consume_selected_host, so we need to filter/weigh and sort the - # hosts again to get an accurate count for alternates. - hosts = self._get_sorted_hosts(spec_obj, hosts, index) - - # This is the overall list of values to be returned. There will be one - # item per instance, and each item will be a list of Selection objects - # representing the selected host along with alternates from the same - # cell. - selections_to_return = [] - for selected_host in selected_hosts: - # This is the list of hosts for one particular instance. 
- if alloc_reqs_by_rp_uuid: - selected_alloc_req = alloc_reqs_by_rp_uuid.get( - selected_host.uuid)[0] - else: - selected_alloc_req = None - - selection = objects.Selection.from_host_state( - selected_host, allocation_request=selected_alloc_req, - allocation_request_version=allocation_request_version) - selected_plus_alts = [selection] - cell_uuid = selected_host.cell_uuid - - # This will populate the alternates with many of the same unclaimed - # hosts. This is OK, as it should be rare for a build to fail. And - # if there are not enough hosts to fully populate the alternates, - # it's fine to return fewer than we'd like. Note that we exclude - # any claimed host from consideration as an alternate because it - # will have had its resources reduced and will have a much lower - # chance of being able to fit another instance on it. - for host in hosts: - if len(selected_plus_alts) >= num_alts + 1: - break - - if host.cell_uuid == cell_uuid and host not in selected_hosts: - if alloc_reqs_by_rp_uuid is not None: - alt_uuid = host.uuid - if alt_uuid not in alloc_reqs_by_rp_uuid: - msg = ("A host state with uuid = '%s' that did " - "not have a matching allocation_request " - "was encountered while scheduling. This " - "host was skipped.") - LOG.debug(msg, alt_uuid) - continue - - # TODO(jaypipes): Loop through all allocation_requests - # instead of just trying the first one. For now, since - # we'll likely want to order the allocation_requests in - # the future based on information in the provider - # summaries, we'll just try to claim resources using - # the first allocation_request - alloc_req = alloc_reqs_by_rp_uuid[alt_uuid][0] - alt_selection = objects.Selection.from_host_state( - host, alloc_req, allocation_request_version) - else: - alt_selection = objects.Selection.from_host_state(host) - selected_plus_alts.append(alt_selection) - - selections_to_return.append(selected_plus_alts) - - return selections_to_return - - def _get_sorted_hosts(self, spec_obj, host_states, index): - """Returns a list of HostState objects that match the required - scheduling constraints for the request spec object and have been sorted - according to the weighers. - """ - filtered_hosts = self.host_manager.get_filtered_hosts(host_states, - spec_obj, index) - - LOG.debug("Filtered %(hosts)s", {'hosts': filtered_hosts}) - - if not filtered_hosts: - return [] - - weighed_hosts = self.host_manager.get_weighed_hosts( - filtered_hosts, spec_obj) - if CONF.filter_scheduler.shuffle_best_same_weighed_hosts: - # NOTE(pas-ha) Randomize best hosts, relying on weighed_hosts - # being already sorted by weight in descending order. - # This decreases possible contention and rescheduling attempts - # when there is a large number of hosts having the same best - # weight, especially so when host_subset_size is 1 (default) - best_hosts = [ - w for w in weighed_hosts - if w.weight == weighed_hosts[0].weight - ] - random.shuffle(best_hosts) - weighed_hosts = best_hosts + weighed_hosts[len(best_hosts):] - - # Log the weighed hosts before stripping off the wrapper class so that - # the weight value gets logged. - LOG.debug("Weighed %(hosts)s", {'hosts': weighed_hosts}) - # Strip off the WeighedHost wrapper class... - weighed_hosts = [h.obj for h in weighed_hosts] - - # We randomize the first element in the returned list to alleviate - # congestion where the same host is consistently selected among - # numerous potential hosts for similar request specs. 
- host_subset_size = CONF.filter_scheduler.host_subset_size - if host_subset_size < len(weighed_hosts): - weighed_subset = weighed_hosts[0:host_subset_size] - else: - weighed_subset = weighed_hosts - - chosen_host = random.choice(weighed_subset) - weighed_hosts.remove(chosen_host) - return [chosen_host] + weighed_hosts - - def _get_all_host_states(self, context, spec_obj, provider_summaries): - """Template method, so a subclass can implement caching.""" - # The provider_summaries variable will be an empty dict when the - # Placement API found no providers that match the requested - # constraints, which in turn makes compute_uuids an empty list and - # get_host_states_by_uuids will return an empty generator-iterator - # also, which will eventually result in a NoValidHost error. - compute_uuids = None - if provider_summaries is not None: - compute_uuids = list(provider_summaries.keys()) - return self.host_manager.get_host_states_by_uuids( - context, compute_uuids, spec_obj) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index fa2a96e4cee9..9cee6b3bfcd8 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -20,27 +20,31 @@ Scheduler Service """ import collections +import random from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_service import periodic_task +from nova.compute import utils as compute_utils import nova.conf from nova import exception +from nova.i18n import _ from nova import manager from nova import objects +from nova.objects import fields as fields_obj from nova.objects import host_mapping as host_mapping_obj from nova import quota +from nova import rpc from nova.scheduler.client import report -from nova.scheduler import driver +from nova.scheduler import host_manager from nova.scheduler import request_filter from nova.scheduler import utils - - -LOG = logging.getLogger(__name__) +from nova import servicegroup CONF = nova.conf.CONF +LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS @@ -48,19 +52,23 @@ HOST_MAPPING_EXISTS_WARNING = False class SchedulerManager(manager.Manager): - """Chooses a host to run instances on.""" + """Chooses a host to run instances on. + + Filters and weighs compute hosts to determine the best host to schedule an + instance to. 
+ """ target = messaging.Target(version='4.5') _sentinel = object() def __init__(self, *args, **kwargs): + self.host_manager = host_manager.HostManager() + self.servicegroup_api = servicegroup.API() + self.notifier = rpc.get_notifier('scheduler') self.placement_client = report.SchedulerReportClient() - self.driver = driver.SchedulerDriver() - super(SchedulerManager, self).__init__( - service_name='scheduler', *args, **kwargs - ) + super().__init__(service_name='scheduler', *args, **kwargs) @periodic_task.periodic_task( spacing=CONF.scheduler.discover_hosts_in_cells_interval, @@ -70,15 +78,22 @@ class SchedulerManager(manager.Manager): try: host_mappings = host_mapping_obj.discover_hosts(context) if host_mappings: - LOG.info('Discovered %(count)i new hosts: %(hosts)s', - {'count': len(host_mappings), - 'hosts': ','.join(['%s:%s' % (hm.cell_mapping.name, - hm.host) - for hm in host_mappings])}) + LOG.info( + 'Discovered %(count)i new hosts: %(hosts)s', + { + 'count': len(host_mappings), + 'hosts': ','.join([ + '%s:%s' % (hm.cell_mapping.name, hm.host) + for hm in host_mappings + ]), + }, + ) except exception.HostMappingExists as exp: - msg = ('This periodic task should only be enabled on a single ' - 'scheduler to prevent collisions between multiple ' - 'schedulers: %s' % str(exp)) + msg = ( + 'This periodic task should only be enabled on a single ' + 'scheduler to prevent collisions between multiple ' + 'schedulers: %s' % str(exp) + ) if not HOST_MAPPING_EXISTS_WARNING: LOG.warning(msg) HOST_MAPPING_EXISTS_WARNING = True @@ -93,12 +108,14 @@ class SchedulerManager(manager.Manager): # NOTE(mriedem): Similarly there is a host-to-cell cache which should # be reset if a host is deleted from a cell and "discovered" in another # cell. - self.driver.host_manager.refresh_cells_caches() + self.host_manager.refresh_cells_caches() @messaging.expected_exceptions(exception.NoValidHost) - def select_destinations(self, ctxt, request_spec=None, - filter_properties=None, spec_obj=_sentinel, instance_uuids=None, - return_objects=False, return_alternates=False): + def select_destinations( + self, context, request_spec=None, + filter_properties=None, spec_obj=_sentinel, instance_uuids=None, + return_objects=False, return_alternates=False, + ): """Returns destinations(s) best suited for this RequestSpec. Starting in Queens, this method returns a list of lists of Selection @@ -126,24 +143,23 @@ class SchedulerManager(manager.Manager): # TODO(sbauza): Change the method signature to only accept a spec_obj # argument once API v5 is provided. 
if spec_obj is self._sentinel: - spec_obj = objects.RequestSpec.from_primitives(ctxt, - request_spec, - filter_properties) + spec_obj = objects.RequestSpec.from_primitives( + context, request_spec, filter_properties) is_rebuild = utils.request_is_rebuild(spec_obj) alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \ = None, None, None if not is_rebuild: try: - request_filter.process_reqspec(ctxt, spec_obj) + request_filter.process_reqspec(context, spec_obj) except exception.RequestFilterFailed as e: raise exception.NoValidHost(reason=e.message) resources = utils.resources_from_request_spec( - ctxt, spec_obj, self.driver.host_manager, + context, spec_obj, self.host_manager, enable_pinning_translate=True) - res = self.placement_client.get_allocation_candidates(ctxt, - resources) + res = self.placement_client.get_allocation_candidates( + context, resources) if res is None: # We have to handle the case that we failed to connect to the # Placement service and the safe_connect decorator on @@ -163,15 +179,19 @@ class SchedulerManager(manager.Manager): # weed out hosts that are actually using new style configuration # but simply don't have enough free PCPUs (or any PCPUs). # TODO(stephenfin): Remove when we drop support for 'vcpu_pin_set' - if (resources.cpu_pinning_requested and - not CONF.workarounds.disable_fallback_pcpu_query): - LOG.debug('Requesting fallback allocation candidates with ' - 'VCPU instead of PCPU') + if ( + resources.cpu_pinning_requested and + not CONF.workarounds.disable_fallback_pcpu_query + ): + LOG.debug( + 'Requesting fallback allocation candidates with ' + 'VCPU instead of PCPU' + ) resources = utils.resources_from_request_spec( - ctxt, spec_obj, self.driver.host_manager, + context, spec_obj, self.host_manager, enable_pinning_translate=False) res = self.placement_client.get_allocation_candidates( - ctxt, resources) + context, resources) if res: # merge the allocation requests and provider summaries from # the two requests together @@ -181,42 +201,472 @@ class SchedulerManager(manager.Manager): provider_summaries.update(provider_summaries_fallback) if not alloc_reqs: - LOG.info("Got no allocation candidates from the Placement " - "API. This could be due to insufficient resources " - "or a temporary occurrence as compute nodes start " - "up.") + LOG.info( + "Got no allocation candidates from the Placement API. " + "This could be due to insufficient resources or a " + "temporary occurrence as compute nodes start up." + ) raise exception.NoValidHost(reason="") - else: - # Build a dict of lists of allocation requests, keyed by - # provider UUID, so that when we attempt to claim resources for - # a host, we can grab an allocation request easily - alloc_reqs_by_rp_uuid = collections.defaultdict(list) - for ar in alloc_reqs: - for rp_uuid in ar['allocations']: - alloc_reqs_by_rp_uuid[rp_uuid].append(ar) + + # Build a dict of lists of allocation requests, keyed by + # provider UUID, so that when we attempt to claim resources for + # a host, we can grab an allocation request easily + alloc_reqs_by_rp_uuid = collections.defaultdict(list) + for ar in alloc_reqs: + for rp_uuid in ar['allocations']: + alloc_reqs_by_rp_uuid[rp_uuid].append(ar) # Only return alternates if both return_objects and return_alternates # are True. 
return_alternates = return_alternates and return_objects - selections = self.driver.select_destinations(ctxt, spec_obj, - instance_uuids, alloc_reqs_by_rp_uuid, provider_summaries, - allocation_request_version, return_alternates) + + selections = self._select_destinations( + context, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, + provider_summaries, allocation_request_version, return_alternates) + # If `return_objects` is False, we need to convert the selections to # the older format, which is a list of host state dicts. if not return_objects: selection_dicts = [sel[0].to_dict() for sel in selections] return jsonutils.to_primitive(selection_dicts) + return selections + def _select_destinations( + self, context, spec_obj, instance_uuids, + alloc_reqs_by_rp_uuid, provider_summaries, + allocation_request_version=None, return_alternates=False, + ): + self.notifier.info( + context, 'scheduler.select_destinations.start', + {'request_spec': spec_obj.to_legacy_request_spec_dict()}) + compute_utils.notify_about_scheduler_action( + context=context, request_spec=spec_obj, + action=fields_obj.NotificationAction.SELECT_DESTINATIONS, + phase=fields_obj.NotificationPhase.START) + + # Only return alternates if both return_objects and return_alternates + # are True. + selections = self._schedule( + context, spec_obj, instance_uuids, + alloc_reqs_by_rp_uuid, provider_summaries, + allocation_request_version, return_alternates) + + self.notifier.info( + context, 'scheduler.select_destinations.end', + {'request_spec': spec_obj.to_legacy_request_spec_dict()}) + compute_utils.notify_about_scheduler_action( + context=context, request_spec=spec_obj, + action=fields_obj.NotificationAction.SELECT_DESTINATIONS, + phase=fields_obj.NotificationPhase.END) + + return selections + + def _schedule( + self, context, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, + provider_summaries, allocation_request_version=None, + return_alternates=False + ): + """Returns a list of lists of Selection objects. + + :param context: The RequestContext object + :param spec_obj: The RequestSpec object + :param instance_uuids: List of instance UUIDs to place or move. + :param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider + UUID, of the allocation_requests that may be used to claim + resources against matched hosts. If None, indicates either the + placement API wasn't reachable or that there were no + allocation_requests returned by the placement API. If the latter, + the provider_summaries will be an empty dict, not None. + :param provider_summaries: Optional dict, keyed by resource provider + UUID, of information that will be used by the filters/weighers in + selecting matching hosts for a request. If None, indicates that + we should grab all compute node information locally + and that the Placement API is not used. If an empty dict, indicates + the Placement API returned no potential matches for the requested + resources. + :param allocation_request_version: The microversion used to request the + allocations. + :param return_alternates: When True, zero or more alternate hosts are + returned with each selected host. The number of alternates is + determined by the configuration option + `CONF.scheduler.max_attempts`. + """ + elevated = context.elevated() + + # Find our local list of acceptable hosts by repeatedly + # filtering and weighing our options. Each time we choose a + # host, we virtually consume resources on it so subsequent + # selections can adjust accordingly. 
+ + # Note: remember, we are using a generator-iterator here. So only + # traverse this list once. This can bite you if the hosts + # are being scanned in a filter or weighing function. + hosts = self._get_all_host_states( + elevated, spec_obj, provider_summaries) + + # NOTE(sbauza): The RequestSpec.num_instances field contains the number + # of instances created when the RequestSpec was used to first boot some + # instances. This is incorrect when doing a move or resize operation, + # so prefer the length of instance_uuids unless it is None. + num_instances = (len(instance_uuids) if instance_uuids + else spec_obj.num_instances) + + # For each requested instance, we want to return a host whose resources + # for the instance have been claimed, along with zero or more + # alternates. These alternates will be passed to the cell that the + # selected host is in, so that if for some reason the build fails, the + # cell conductor can retry building the instance on one of these + # alternates instead of having to simply fail. The number of alternates + # is based on CONF.scheduler.max_attempts; note that if there are not + # enough filtered hosts to provide the full number of alternates, the + # list of hosts may be shorter than this amount. + num_alts = CONF.scheduler.max_attempts - 1 if return_alternates else 0 + + if instance_uuids is None or alloc_reqs_by_rp_uuid is None: + # If there was a problem communicating with the + # placement API, alloc_reqs_by_rp_uuid will be None, so we skip + # claiming in that case as well. In the case where instance_uuids + # is None, that indicates an older conductor, so we need to return + # the objects without alternates. They will be converted back to + # the older dict format representing HostState objects. + # TODO(stephenfin): Remove this when we bump scheduler the RPC API + # version to 5.0 + return self._legacy_find_hosts( + context, num_instances, spec_obj, hosts, num_alts, + instance_uuids=instance_uuids) + + # A list of the instance UUIDs that were successfully claimed against + # in the placement API. If we are not able to successfully claim for + # all involved instances, we use this list to remove those allocations + # before returning + claimed_instance_uuids = [] + + # The list of hosts that have been selected (and claimed). + claimed_hosts = [] + + for num, instance_uuid in enumerate(instance_uuids): + # In a multi-create request, the first request spec from the list + # is passed to the scheduler and that request spec's instance_uuid + # might not be the same as the instance we're processing, so we + # update the instance_uuid in that case before passing the request + # spec to filters since at least one filter + # (ServerGroupAntiAffinityFilter) depends on that information being + # accurate. + spec_obj.instance_uuid = instance_uuid + # Reset the field so it's not persisted accidentally. + spec_obj.obj_reset_changes(['instance_uuid']) + + hosts = self._get_sorted_hosts(spec_obj, hosts, num) + if not hosts: + # NOTE(jaypipes): If we get here, that means not all instances + # in instance_uuids were able to be matched to a selected host. + # Any allocations will be cleaned up in the + # _ensure_sufficient_hosts() call. 
+ break + + # Attempt to claim the resources against one or more resource + # providers, looping over the sorted list of possible hosts + # looking for an allocation_request that contains that host's + # resource provider UUID + claimed_host = None + for host in hosts: + cn_uuid = host.uuid + if cn_uuid not in alloc_reqs_by_rp_uuid: + msg = ("A host state with uuid = '%s' that did not have a " + "matching allocation_request was encountered while " + "scheduling. This host was skipped.") + LOG.debug(msg, cn_uuid) + continue + + alloc_reqs = alloc_reqs_by_rp_uuid[cn_uuid] + # TODO(jaypipes): Loop through all allocation_requests instead + # of just trying the first one. For now, since we'll likely + # want to order the allocation_requests in the future based on + # information in the provider summaries, we'll just try to + # claim resources using the first allocation_request + alloc_req = alloc_reqs[0] + if utils.claim_resources( + elevated, self.placement_client, spec_obj, instance_uuid, + alloc_req, + allocation_request_version=allocation_request_version, + ): + claimed_host = host + break + + if claimed_host is None: + # We weren't able to claim resources in the placement API + # for any of the sorted hosts identified. So, clean up any + # successfully-claimed resources for prior instances in + # this request and return an empty list which will cause + # select_destinations() to raise NoValidHost + LOG.debug("Unable to successfully claim against any host.") + break + + claimed_instance_uuids.append(instance_uuid) + claimed_hosts.append(claimed_host) + + # Now consume the resources so the filter/weights will change for + # the next instance. + self._consume_selected_host( + claimed_host, spec_obj, instance_uuid=instance_uuid) + + # Check if we were able to fulfill the request. If not, this call will + # raise a NoValidHost exception. + self._ensure_sufficient_hosts( + context, claimed_hosts, num_instances, claimed_instance_uuids) + + # We have selected and claimed hosts for each instance. Now we need to + # find alternates for each host. + return self._get_alternate_hosts( + claimed_hosts, spec_obj, hosts, num, num_alts, + alloc_reqs_by_rp_uuid, allocation_request_version) + + def _ensure_sufficient_hosts( + self, context, hosts, required_count, claimed_uuids=None, + ): + """Checks that we have selected a host for each requested instance. If + not, log this failure, remove allocations for any claimed instances, + and raise a NoValidHost exception. + """ + if len(hosts) == required_count: + # We have enough hosts. + return + + if claimed_uuids: + self._cleanup_allocations(context, claimed_uuids) + + # NOTE(Rui Chen): If multiple creates failed, set the updated time + # of selected HostState to None so that these HostStates are + # refreshed according to database in next schedule, and release + # the resource consumed by instance in the process of selecting + # host. + for host in hosts: + host.updated = None + + # Log the details but don't put those into the reason since + # we don't want to give away too much information about our + # actual environment. 
+ LOG.debug( + 'There are %(hosts)d hosts available but ' + '%(required_count)d instances requested to build.', + {'hosts': len(hosts), 'required_count': required_count}) + reason = _('There are not enough hosts available.') + raise exception.NoValidHost(reason=reason) + + def _cleanup_allocations(self, context, instance_uuids): + """Removes allocations for the supplied instance UUIDs.""" + if not instance_uuids: + return + + LOG.debug("Cleaning up allocations for %s", instance_uuids) + for uuid in instance_uuids: + self.placement_client.delete_allocation_for_instance(context, uuid) + + def _legacy_find_hosts( + self, context, num_instances, spec_obj, hosts, num_alts, + instance_uuids=None, + ): + """Find hosts without invoking placement. + + We may not be able to claim if the Placement service is not reachable. + Additionally, we may be working with older conductors that don't pass + in instance_uuids. + """ + # The list of hosts selected for each instance + selected_hosts = [] + + for num in range(num_instances): + instance_uuid = instance_uuids[num] if instance_uuids else None + if instance_uuid: + # Update the RequestSpec.instance_uuid before sending it to + # the filters in case we're doing a multi-create request, but + # don't persist the change. + spec_obj.instance_uuid = instance_uuid + spec_obj.obj_reset_changes(['instance_uuid']) + + hosts = self._get_sorted_hosts(spec_obj, hosts, num) + if not hosts: + # No hosts left, so break here, and the + # _ensure_sufficient_hosts() call below will handle this. + break + + selected_host = hosts[0] + selected_hosts.append(selected_host) + self._consume_selected_host( + selected_host, spec_obj, instance_uuid=instance_uuid) + + # Check if we were able to fulfill the request. If not, this call will + # raise a NoValidHost exception. + self._ensure_sufficient_hosts(context, selected_hosts, num_instances) + + # This the overall list of values to be returned. There will be one + # item per instance, and each item will be a list of Selection objects + # representing the selected host along with zero or more alternates + # from the same cell. + return self._get_alternate_hosts( + selected_hosts, spec_obj, hosts, num, num_alts) + + @staticmethod + def _consume_selected_host(selected_host, spec_obj, instance_uuid=None): + LOG.debug( + "Selected host: %(host)s", {'host': selected_host}, + instance_uuid=instance_uuid) + selected_host.consume_from_request(spec_obj) + # If we have a server group, add the selected host to it for the + # (anti-)affinity filters to filter out hosts for subsequent instances + # in a multi-create request. + if spec_obj.instance_group is not None: + spec_obj.instance_group.hosts.append(selected_host.host) + # hosts has to be not part of the updates when saving + spec_obj.instance_group.obj_reset_changes(['hosts']) + # The ServerGroupAntiAffinityFilter also relies on + # HostState.instances being accurate within a multi-create request. + if instance_uuid and instance_uuid not in selected_host.instances: + # Set a stub since ServerGroupAntiAffinityFilter only cares + # about the keys. + selected_host.instances[instance_uuid] = objects.Instance( + uuid=instance_uuid) + + def _get_alternate_hosts( + self, selected_hosts, spec_obj, hosts, index, num_alts, + alloc_reqs_by_rp_uuid=None, allocation_request_version=None, + ): + # We only need to filter/weigh the hosts again if we're dealing with + # more than one instance and are going to be picking alternates. 
+ if index > 0 and num_alts > 0: + # The selected_hosts have all had resources 'claimed' via + # _consume_selected_host, so we need to filter/weigh and sort the + # hosts again to get an accurate count for alternates. + hosts = self._get_sorted_hosts(spec_obj, hosts, index) + + # This is the overall list of values to be returned. There will be one + # item per instance, and each item will be a list of Selection objects + # representing the selected host along with alternates from the same + # cell. + selections_to_return = [] + for selected_host in selected_hosts: + # This is the list of hosts for one particular instance. + if alloc_reqs_by_rp_uuid: + selected_alloc_req = alloc_reqs_by_rp_uuid.get( + selected_host.uuid)[0] + else: + selected_alloc_req = None + + selection = objects.Selection.from_host_state( + selected_host, allocation_request=selected_alloc_req, + allocation_request_version=allocation_request_version) + selected_plus_alts = [selection] + cell_uuid = selected_host.cell_uuid + + # This will populate the alternates with many of the same unclaimed + # hosts. This is OK, as it should be rare for a build to fail. And + # if there are not enough hosts to fully populate the alternates, + # it's fine to return fewer than we'd like. Note that we exclude + # any claimed host from consideration as an alternate because it + # will have had its resources reduced and will have a much lower + # chance of being able to fit another instance on it. + for host in hosts: + if len(selected_plus_alts) >= num_alts + 1: + break + + if host.cell_uuid == cell_uuid and host not in selected_hosts: + if alloc_reqs_by_rp_uuid is not None: + alt_uuid = host.uuid + if alt_uuid not in alloc_reqs_by_rp_uuid: + msg = ("A host state with uuid = '%s' that did " + "not have a matching allocation_request " + "was encountered while scheduling. This " + "host was skipped.") + LOG.debug(msg, alt_uuid) + continue + + # TODO(jaypipes): Loop through all allocation_requests + # instead of just trying the first one. For now, since + # we'll likely want to order the allocation_requests in + # the future based on information in the provider + # summaries, we'll just try to claim resources using + # the first allocation_request + alloc_req = alloc_reqs_by_rp_uuid[alt_uuid][0] + alt_selection = objects.Selection.from_host_state( + host, alloc_req, allocation_request_version) + else: + alt_selection = objects.Selection.from_host_state(host) + selected_plus_alts.append(alt_selection) + + selections_to_return.append(selected_plus_alts) + + return selections_to_return + + def _get_sorted_hosts(self, spec_obj, host_states, index): + """Returns a list of HostState objects that match the required + scheduling constraints for the request spec object and have been sorted + according to the weighers. + """ + filtered_hosts = self.host_manager.get_filtered_hosts(host_states, + spec_obj, index) + + LOG.debug("Filtered %(hosts)s", {'hosts': filtered_hosts}) + + if not filtered_hosts: + return [] + + weighed_hosts = self.host_manager.get_weighed_hosts( + filtered_hosts, spec_obj) + if CONF.filter_scheduler.shuffle_best_same_weighed_hosts: + # NOTE(pas-ha) Randomize best hosts, relying on weighed_hosts + # being already sorted by weight in descending order. 
+ # This decreases possible contention and rescheduling attempts + # when there is a large number of hosts having the same best + # weight, especially so when host_subset_size is 1 (default) + best_hosts = [ + w for w in weighed_hosts + if w.weight == weighed_hosts[0].weight + ] + random.shuffle(best_hosts) + weighed_hosts = best_hosts + weighed_hosts[len(best_hosts):] + + # Log the weighed hosts before stripping off the wrapper class so that + # the weight value gets logged. + LOG.debug("Weighed %(hosts)s", {'hosts': weighed_hosts}) + # Strip off the WeighedHost wrapper class... + weighed_hosts = [h.obj for h in weighed_hosts] + + # We randomize the first element in the returned list to alleviate + # congestion where the same host is consistently selected among + # numerous potential hosts for similar request specs. + host_subset_size = CONF.filter_scheduler.host_subset_size + if host_subset_size < len(weighed_hosts): + weighed_subset = weighed_hosts[0:host_subset_size] + else: + weighed_subset = weighed_hosts + + chosen_host = random.choice(weighed_subset) + weighed_hosts.remove(chosen_host) + return [chosen_host] + weighed_hosts + + def _get_all_host_states(self, context, spec_obj, provider_summaries): + """Template method, so a subclass can implement caching.""" + # The provider_summaries variable will be an empty dict when the + # Placement API found no providers that match the requested + # constraints, which in turn makes compute_uuids an empty list and + # get_host_states_by_uuids will return an empty generator-iterator + # also, which will eventually result in a NoValidHost error. + compute_uuids = None + if provider_summaries is not None: + compute_uuids = list(provider_summaries.keys()) + return self.host_manager.get_host_states_by_uuids( + context, compute_uuids, spec_obj) + def update_aggregates(self, ctxt, aggregates): """Updates HostManager internal aggregates information. :param aggregates: Aggregate(s) to update :type aggregates: :class:`nova.objects.Aggregate` - or :class:`nova.objects.AggregateList` + or :class:`nova.objects.AggregateList` """ # NOTE(sbauza): We're dropping the user context now as we don't need it - self.driver.host_manager.update_aggregates(aggregates) + self.host_manager.update_aggregates(aggregates) def delete_aggregate(self, ctxt, aggregate): """Deletes HostManager internal information about a specific aggregate. @@ -225,25 +675,25 @@ class SchedulerManager(manager.Manager): :type aggregate: :class:`nova.objects.Aggregate` """ # NOTE(sbauza): We're dropping the user context now as we don't need it - self.driver.host_manager.delete_aggregate(aggregate) + self.host_manager.delete_aggregate(aggregate) def update_instance_info(self, context, host_name, instance_info): """Receives information about changes to a host's instances, and - updates the driver's HostManager with that information. + updates the HostManager with that information. """ - self.driver.host_manager.update_instance_info(context, host_name, - instance_info) + self.host_manager.update_instance_info( + context, host_name, instance_info) def delete_instance_info(self, context, host_name, instance_uuid): """Receives information about the deletion of one of a host's - instances, and updates the driver's HostManager with that information. + instances, and updates the HostManager with that information. 
""" - self.driver.host_manager.delete_instance_info(context, host_name, - instance_uuid) + self.host_manager.delete_instance_info( + context, host_name, instance_uuid) def sync_instance_info(self, context, host_name, instance_uuids): """Receives a sync request from a host, and passes it on to the - driver's HostManager. + HostManager. """ - self.driver.host_manager.sync_instance_info(context, host_name, - instance_uuids) + self.host_manager.sync_instance_info( + context, host_name, instance_uuids) diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py index d946dcc2020d..a33d482dcd06 100644 --- a/nova/tests/functional/libvirt/test_numa_servers.py +++ b/nova/tests/functional/libvirt/test_numa_servers.py @@ -42,7 +42,7 @@ class NUMAServersTestBase(base.ServersTestBase): # Mock the 'NUMATopologyFilter' filter, as most tests need to inspect # this - host_manager = self.scheduler.manager.driver.host_manager + host_manager = self.scheduler.manager.host_manager numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter'] host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes) _p = mock.patch('nova.scheduler.filters' diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py index 7f4f0b9648ac..0280c3f0f281 100644 --- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py +++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py @@ -54,7 +54,7 @@ class _PCIServersTestBase(base.ServersTestBase): # Mock the 'PciPassthroughFilter' filter, as most tests need to inspect # this - host_manager = self.scheduler.manager.driver.host_manager + host_manager = self.scheduler.manager.host_manager pci_filter_class = host_manager.filter_cls_map['PciPassthroughFilter'] host_pass_mock = mock.Mock(wraps=pci_filter_class().host_passes) self.mock_filter = self.useFixture(fixtures.MockPatch( diff --git a/nova/tests/functional/regressions/test_bug_1781710.py b/nova/tests/functional/regressions/test_bug_1781710.py index 59f95c9d9935..32f54420935c 100644 --- a/nova/tests/functional/regressions/test_bug_1781710.py +++ b/nova/tests/functional/regressions/test_bug_1781710.py @@ -10,7 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. -from nova.scheduler import driver as scheduler_driver +from nova.scheduler import manager as scheduler_manager from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional import fixtures as func_fixtures @@ -71,11 +71,11 @@ class AntiAffinityMultiCreateRequest(test.TestCase, group = self.api.post_server_groups( {'name': 'test group', 'policy': 'anti-affinity'}) - # Stub out Scheduler._get_alternate_hosts so we can assert what + # Stub out SchedulerManager._get_alternate_hosts so we can assert what # is coming back for alternate hosts is what we'd expect after the # initial hosts are selected for each instance. original_get_alternate_hosts = ( - scheduler_driver.SchedulerDriver._get_alternate_hosts) + scheduler_manager.SchedulerManager._get_alternate_hosts) def stub_get_alternate_hosts(*a, **kw): # Intercept the result so we can assert there are no alternates. 
@@ -96,7 +96,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase, return selections_to_return self.stub_out( - 'nova.scheduler.driver.SchedulerDriver._get_alternate_hosts', + 'nova.scheduler.manager.SchedulerManager._get_alternate_hosts', stub_get_alternate_hosts) # Now create two servers in that group. diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py index d20de6385f3a..8a54b0522bae 100644 --- a/nova/tests/functional/test_servers.py +++ b/nova/tests/functional/test_servers.py @@ -3654,7 +3654,7 @@ class ServerDeleteBuildTests(integrated_helpers.ProviderUsageBaseTestCase): networks='none') with test.nested( - mock.patch('nova.scheduler.driver.SchedulerDriver' + mock.patch('nova.scheduler.manager.SchedulerManager' '._ensure_sufficient_hosts'), mock.patch('nova.conductor.manager.ComputeTaskManager' '._bury_in_cell0'), diff --git a/nova/tests/unit/scheduler/test_driver.py b/nova/tests/unit/scheduler/test_driver.py deleted file mode 100644 index e6dbfcc1db46..000000000000 --- a/nova/tests/unit/scheduler/test_driver.py +++ /dev/null @@ -1,1126 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_serialization import jsonutils -from oslo_utils.fixture import uuidsentinel as uuids - -from nova import context -from nova import exception -from nova import objects -from nova.scheduler import driver as scheduler_driver -from nova.scheduler import host_manager -from nova.scheduler import utils as scheduler_utils -from nova.scheduler import weights -from nova import servicegroup -from nova import test # noqa - - -fake_numa_limit = objects.NUMATopologyLimits(cpu_allocation_ratio=1.0, - ram_allocation_ratio=1.0) -fake_limit = {"memory_mb": 1024, "disk_gb": 100, "vcpus": 2, - "numa_topology": fake_numa_limit} -fake_limit_obj = objects.SchedulerLimits.from_dict(fake_limit) -fake_alloc = {"allocations": [ - {"resource_provider": {"uuid": uuids.compute_node}, - "resources": {"VCPU": 1, - "MEMORY_MB": 1024, - "DISK_GB": 100} - }]} -fake_alloc_version = "1.23" -json_alloc = jsonutils.dumps(fake_alloc) -fake_selection = objects.Selection(service_host="fake_host", - nodename="fake_node", compute_node_uuid=uuids.compute_node, - cell_uuid=uuids.cell, limits=fake_limit_obj, - allocation_request=json_alloc, - allocation_request_version=fake_alloc_version) - - -class SchedulerTestCase(test.NoDBTestCase): - """Test case for scheduler driver.""" - - @mock.patch.object(host_manager.HostManager, '_init_instance_info', - new=mock.Mock()) - @mock.patch.object(host_manager.HostManager, '_init_aggregates', - new=mock.Mock()) - @mock.patch('nova.scheduler.client.report.SchedulerReportClient', - autospec=True) - @mock.patch('nova.scheduler.client.query.SchedulerQueryClient', - autospec=True) - def setUp(self, mock_sch_query, mock_sch_report): - super().setUp() - - self.driver = scheduler_driver.SchedulerDriver() - self.context = context.RequestContext('fake_user', 'fake_project') - self.topic = 
'fake_topic' - self.servicegroup_api = servicegroup.API() - - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - def test_schedule_placement_bad_comms( - self, mock_get_hosts, mock_get_all_states, mock_claim, - ): - """If there was a problem communicating with the Placement service, - alloc_reqs_by_rp_uuid will be None and we need to avoid trying to claim - in the Placement API. - """ - spec_obj = objects.RequestSpec( - num_instances=1, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor"), - project_id=uuids.project_id, - instance_group=None, instance_uuid=uuids.instance) - # Reset the RequestSpec changes so they don't interfere with the - # assertion at the end of the test. - spec_obj.obj_reset_changes(recursive=True) - - host_state = mock.Mock(spec=host_manager.HostState, host="fake_host", - uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node", - limits={}, aggregates=[]) - all_host_states = [host_state] - mock_get_all_states.return_value = all_host_states - - visited_instances = set([]) - - def fake_get_sorted_hosts(_spec_obj, host_states, index): - # Keep track of which instances are passed to the filters. - visited_instances.add(_spec_obj.instance_uuid) - return all_host_states - - mock_get_hosts.side_effect = fake_get_sorted_hosts - - instance_uuids = [uuids.instance] - ctx = mock.Mock() - selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids, - None, mock.sentinel.provider_summaries) - - expected_hosts = [[objects.Selection.from_host_state(host_state)]] - mock_get_all_states.assert_called_once_with( - ctx.elevated.return_value, spec_obj, - mock.sentinel.provider_summaries) - mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0) - - self.assertEqual(len(selected_hosts), 1) - self.assertEqual(expected_hosts, selected_hosts) - - # Ensure that we have consumed the resources on the chosen host states - host_state.consume_from_request.assert_called_once_with(spec_obj) - - # And ensure we never called claim_resources() - self.assertFalse(mock_claim.called) - - # Make sure that the RequestSpec.instance_uuid is not dirty. - self.assertEqual(sorted(instance_uuids), sorted(visited_instances)) - self.assertEqual(0, len(spec_obj.obj_what_changed()), - spec_obj.obj_what_changed()) - - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - def test_schedule_old_conductor( - self, mock_get_hosts, mock_get_all_states, mock_claim, - ): - """Old conductor can call scheduler without the instance_uuids - parameter. When this happens, we need to ensure we do not attempt to - claim resources in the placement API since obviously we need instance - UUIDs to perform those claims. 
- """ - group = objects.InstanceGroup(hosts=[]) - spec_obj = objects.RequestSpec( - num_instances=1, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor"), - project_id=uuids.project_id, - instance_group=group) - - host_state = mock.Mock(spec=host_manager.HostState, - host="fake_host", nodename="fake_node", uuid=uuids.cn1, - limits={}, cell_uuid=uuids.cell, instances={}, aggregates=[]) - all_host_states = [host_state] - mock_get_all_states.return_value = all_host_states - mock_get_hosts.return_value = all_host_states - - instance_uuids = None - ctx = mock.Mock() - selected_hosts = self.driver._schedule(ctx, spec_obj, - instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid, - mock.sentinel.provider_summaries) - - mock_get_all_states.assert_called_once_with( - ctx.elevated.return_value, spec_obj, - mock.sentinel.provider_summaries) - mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0) - - self.assertEqual(len(selected_hosts), 1) - expected_host = objects.Selection.from_host_state(host_state) - self.assertEqual([[expected_host]], selected_hosts) - - # Ensure that we have consumed the resources on the chosen host states - host_state.consume_from_request.assert_called_once_with(spec_obj) - - # And ensure we never called claim_resources() - self.assertFalse(mock_claim.called) - # And that the host is added to the server group but there are no - # instances tracked in the host_state. - self.assertIn(host_state.host, group.hosts) - self.assertEqual(0, len(host_state.instances)) - - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - def _test_schedule_successful_claim( - self, mock_get_hosts, mock_get_all_states, mock_claim, num_instances=1, - ): - spec_obj = objects.RequestSpec( - num_instances=num_instances, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor"), - project_id=uuids.project_id, - instance_group=None) - - host_state = mock.Mock(spec=host_manager.HostState, - host="fake_host", nodename="fake_node", uuid=uuids.cn1, - cell_uuid=uuids.cell1, limits={}, aggregates=[]) - all_host_states = [host_state] - mock_get_all_states.return_value = all_host_states - mock_get_hosts.return_value = all_host_states - mock_claim.return_value = True - - instance_uuids = [uuids.instance] - fake_alloc = {"allocations": [ - {"resource_provider": {"uuid": uuids.cn1}, - "resources": {"VCPU": 1, - "MEMORY_MB": 1024, - "DISK_GB": 100} - }]} - alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc]} - ctx = mock.Mock() - selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids, - alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries) - - sel_obj = objects.Selection.from_host_state(host_state, - allocation_request=fake_alloc) - expected_selection = [[sel_obj]] - mock_get_all_states.assert_called_once_with( - ctx.elevated.return_value, spec_obj, - mock.sentinel.provider_summaries) - mock_get_hosts.assert_called() - mock_claim.assert_called_once_with(ctx.elevated.return_value, - self.driver.placement_client, spec_obj, uuids.instance, - alloc_reqs_by_rp_uuid[uuids.cn1][0], - allocation_request_version=None) - - self.assertEqual(len(selected_hosts), 1) - self.assertEqual(expected_selection, selected_hosts) - - # Ensure that we have consumed the resources 
on the chosen host states - host_state.consume_from_request.assert_called_once_with(spec_obj) - - def test_schedule_successful_claim(self): - self._test_schedule_successful_claim() - - def test_schedule_old_reqspec_and_move_operation(self): - """This test is for verifying that in case of a move operation with an - original RequestSpec created for 3 concurrent instances, we only verify - the instance that is moved. - """ - self._test_schedule_successful_claim(num_instances=3) - - @mock.patch('nova.scheduler.driver.SchedulerDriver._cleanup_allocations') - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - def test_schedule_unsuccessful_claim( - self, mock_get_hosts, mock_get_all_states, mock_claim, mock_cleanup, - ): - """Tests that we return an empty list if we are unable to successfully - claim resources for the instance - """ - spec_obj = objects.RequestSpec( - num_instances=1, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor"), - project_id=uuids.project_id, - instance_group=None) - - host_state = mock.Mock(spec=host_manager.HostState, - host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1) - all_host_states = [host_state] - mock_get_all_states.return_value = all_host_states - mock_get_hosts.return_value = all_host_states - mock_claim.return_value = False - - instance_uuids = [uuids.instance] - alloc_reqs_by_rp_uuid = { - uuids.cn1: [{"allocations": mock.sentinel.alloc_req}], - } - ctx = mock.Mock() - fake_version = "1.99" - self.assertRaises(exception.NoValidHost, self.driver._schedule, ctx, - spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, - mock.sentinel.provider_summaries, - allocation_request_version=fake_version) - - mock_get_all_states.assert_called_once_with( - ctx.elevated.return_value, spec_obj, - mock.sentinel.provider_summaries) - mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0) - mock_claim.assert_called_once_with(ctx.elevated.return_value, - self.driver.placement_client, spec_obj, uuids.instance, - alloc_reqs_by_rp_uuid[uuids.cn1][0], - allocation_request_version=fake_version) - - mock_cleanup.assert_not_called() - # Ensure that we have consumed the resources on the chosen host states - self.assertFalse(host_state.consume_from_request.called) - - @mock.patch('nova.scheduler.driver.SchedulerDriver._cleanup_allocations') - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - def test_schedule_not_all_instance_clean_claimed( - self, mock_get_hosts, mock_get_all_states, mock_claim, mock_cleanup, - ): - """Tests that we clean up previously-allocated instances if not all - instances could be scheduled - """ - spec_obj = objects.RequestSpec( - num_instances=2, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor"), - project_id=uuids.project_id, - instance_group=None) - - host_state = mock.Mock(spec=host_manager.HostState, - host="fake_host", nodename="fake_node", uuid=uuids.cn1, - cell_uuid=uuids.cell1, limits={}, updated='fake') - all_host_states = [host_state] - mock_get_all_states.return_value = all_host_states - mock_get_hosts.side_effect = [ - 
all_host_states, # first instance: return all the hosts (only one) - [], # second: act as if no more hosts that meet criteria - all_host_states, # the final call when creating alternates - ] - mock_claim.return_value = True - - instance_uuids = [uuids.instance1, uuids.instance2] - fake_alloc = {"allocations": [ - {"resource_provider": {"uuid": uuids.cn1}, - "resources": {"VCPU": 1, - "MEMORY_MB": 1024, - "DISK_GB": 100} - }]} - alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc]} - ctx = mock.Mock() - self.assertRaises(exception.NoValidHost, self.driver._schedule, ctx, - spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, - mock.sentinel.provider_summaries) - - # Ensure we cleaned up the first successfully-claimed instance - mock_cleanup.assert_called_once_with(ctx, [uuids.instance1]) - - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - def test_selection_alloc_requests_for_alts( - self, mock_get_hosts, mock_get_all_states, mock_claim, - ): - spec_obj = objects.RequestSpec( - num_instances=1, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1), - project_id=uuids.project_id, - instance_group=None) - - host_state0 = mock.Mock(spec=host_manager.HostState, - host="fake_host0", nodename="fake_node0", uuid=uuids.cn0, - cell_uuid=uuids.cell, limits={}, aggregates=[]) - host_state1 = mock.Mock(spec=host_manager.HostState, - host="fake_host1", nodename="fake_node1", uuid=uuids.cn1, - cell_uuid=uuids.cell, limits={}, aggregates=[]) - host_state2 = mock.Mock(spec=host_manager.HostState, - host="fake_host2", nodename="fake_node2", uuid=uuids.cn2, - cell_uuid=uuids.cell, limits={}, aggregates=[]) - all_host_states = [host_state0, host_state1, host_state2] - mock_get_all_states.return_value = all_host_states - mock_get_hosts.return_value = all_host_states - mock_claim.return_value = True - - instance_uuids = [uuids.instance0] - fake_alloc0 = {"allocations": [ - {"resource_provider": {"uuid": uuids.cn0}, - "resources": {"VCPU": 1, - "MEMORY_MB": 1024, - "DISK_GB": 100} - }]} - fake_alloc1 = {"allocations": [ - {"resource_provider": {"uuid": uuids.cn1}, - "resources": {"VCPU": 1, - "MEMORY_MB": 1024, - "DISK_GB": 100} - }]} - fake_alloc2 = {"allocations": [ - {"resource_provider": {"uuid": uuids.cn2}, - "resources": {"VCPU": 1, - "MEMORY_MB": 1024, - "DISK_GB": 100} - }]} - alloc_reqs_by_rp_uuid = {uuids.cn0: [fake_alloc0], - uuids.cn1: [fake_alloc1], uuids.cn2: [fake_alloc2]} - ctx = mock.Mock() - selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids, - alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries, - return_alternates=True) - - sel0 = objects.Selection.from_host_state(host_state0, - allocation_request=fake_alloc0) - sel1 = objects.Selection.from_host_state(host_state1, - allocation_request=fake_alloc1) - sel2 = objects.Selection.from_host_state(host_state2, - allocation_request=fake_alloc2) - expected_selection = [[sel0, sel1, sel2]] - self.assertEqual(expected_selection, selected_hosts) - - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - def test_selection_alloc_requests_no_alts( - self, mock_get_hosts, mock_get_all_states, mock_claim, - ): - spec_obj = objects.RequestSpec( - num_instances=1, - flavor=objects.Flavor(memory_mb=512, - 
root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1), - project_id=uuids.project_id, - instance_group=None) - - host_state0 = mock.Mock(spec=host_manager.HostState, - host="fake_host0", nodename="fake_node0", uuid=uuids.cn0, - cell_uuid=uuids.cell, limits={}, aggregates=[]) - host_state1 = mock.Mock(spec=host_manager.HostState, - host="fake_host1", nodename="fake_node1", uuid=uuids.cn1, - cell_uuid=uuids.cell, limits={}, aggregates=[]) - host_state2 = mock.Mock(spec=host_manager.HostState, - host="fake_host2", nodename="fake_node2", uuid=uuids.cn2, - cell_uuid=uuids.cell, limits={}, aggregates=[]) - all_host_states = [host_state0, host_state1, host_state2] - mock_get_all_states.return_value = all_host_states - mock_get_hosts.return_value = all_host_states - mock_claim.return_value = True - - instance_uuids = [uuids.instance0] - fake_alloc0 = {"allocations": [ - {"resource_provider": {"uuid": uuids.cn0}, - "resources": {"VCPU": 1, - "MEMORY_MB": 1024, - "DISK_GB": 100} - }]} - fake_alloc1 = {"allocations": [ - {"resource_provider": {"uuid": uuids.cn1}, - "resources": {"VCPU": 1, - "MEMORY_MB": 1024, - "DISK_GB": 100} - }]} - fake_alloc2 = {"allocations": [ - {"resource_provider": {"uuid": uuids.cn2}, - "resources": {"VCPU": 1, - "MEMORY_MB": 1024, - "DISK_GB": 100} - }]} - alloc_reqs_by_rp_uuid = {uuids.cn0: [fake_alloc0], - uuids.cn1: [fake_alloc1], uuids.cn2: [fake_alloc2]} - ctx = mock.Mock() - selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids, - alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries, - return_alternates=False) - - sel0 = objects.Selection.from_host_state(host_state0, - allocation_request=fake_alloc0) - expected_selection = [[sel0]] - self.assertEqual(expected_selection, selected_hosts) - - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - def test_schedule_instance_group( - self, mock_get_hosts, mock_get_all_states, mock_claim, - ): - """Test that since the request spec object contains an instance group - object, that upon choosing a host in the primary schedule loop, - that we update the request spec's instance group information - """ - num_instances = 2 - ig = objects.InstanceGroup(hosts=[]) - spec_obj = objects.RequestSpec( - num_instances=num_instances, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor"), - project_id=uuids.project_id, - instance_group=ig, instance_uuid=uuids.instance0) - # Reset the RequestSpec changes so they don't interfere with the - # assertion at the end of the test. 
- spec_obj.obj_reset_changes(recursive=True) - - hs1 = mock.Mock(spec=host_manager.HostState, host='host1', - nodename="node1", limits={}, uuid=uuids.cn1, - cell_uuid=uuids.cell1, instances={}, aggregates=[]) - hs2 = mock.Mock(spec=host_manager.HostState, host='host2', - nodename="node2", limits={}, uuid=uuids.cn2, - cell_uuid=uuids.cell2, instances={}, aggregates=[]) - all_host_states = [hs1, hs2] - mock_get_all_states.return_value = all_host_states - mock_claim.return_value = True - - alloc_reqs_by_rp_uuid = { - uuids.cn1: [{"allocations": "fake_cn1_alloc"}], - uuids.cn2: [{"allocations": "fake_cn2_alloc"}], - } - - # Simulate host 1 and host 2 being randomly returned first by - # _get_sorted_hosts() in the two iterations for each instance in - # num_instances - visited_instances = set([]) - - def fake_get_sorted_hosts(_spec_obj, host_states, index): - # Keep track of which instances are passed to the filters. - visited_instances.add(_spec_obj.instance_uuid) - if index % 2: - return [hs1, hs2] - return [hs2, hs1] - mock_get_hosts.side_effect = fake_get_sorted_hosts - instance_uuids = [ - getattr(uuids, 'instance%d' % x) for x in range(num_instances) - ] - ctx = mock.Mock() - self.driver._schedule(ctx, spec_obj, instance_uuids, - alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries) - - # Check that we called claim_resources() for both the first and second - # host state - claim_calls = [ - mock.call(ctx.elevated.return_value, self.driver.placement_client, - spec_obj, uuids.instance0, - alloc_reqs_by_rp_uuid[uuids.cn2][0], - allocation_request_version=None), - mock.call(ctx.elevated.return_value, self.driver.placement_client, - spec_obj, uuids.instance1, - alloc_reqs_by_rp_uuid[uuids.cn1][0], - allocation_request_version=None), - ] - mock_claim.assert_has_calls(claim_calls) - - # Check that _get_sorted_hosts() is called twice and that the - # second time, we pass it the hosts that were returned from - # _get_sorted_hosts() the first time - sorted_host_calls = [ - mock.call(spec_obj, all_host_states, 0), - mock.call(spec_obj, [hs2, hs1], 1), - ] - mock_get_hosts.assert_has_calls(sorted_host_calls) - - # The instance group object should have both host1 and host2 in its - # instance group hosts list and there should not be any "changes" to - # save in the instance group object - self.assertEqual(['host2', 'host1'], ig.hosts) - self.assertEqual({}, ig.obj_get_changes()) - # Assert that we updated HostState.instances for each host. - self.assertIn(uuids.instance0, hs2.instances) - self.assertIn(uuids.instance1, hs1.instances) - # Make sure that the RequestSpec.instance_uuid is not dirty. 
- self.assertEqual(sorted(instance_uuids), sorted(visited_instances)) - self.assertEqual(0, len(spec_obj.obj_what_changed()), - spec_obj.obj_what_changed()) - - @mock.patch('nova.scheduler.driver.LOG.debug') - @mock.patch('random.choice', side_effect=lambda x: x[1]) - @mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts') - @mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts') - def test_get_sorted_hosts(self, mock_filt, mock_weighed, mock_rand, debug): - """Tests the call that returns a sorted list of hosts by calling the - host manager's filtering and weighing routines - """ - self.flags(host_subset_size=2, group='filter_scheduler') - hs1 = mock.Mock(spec=host_manager.HostState, host='host1', - cell_uuid=uuids.cell1) - hs2 = mock.Mock(spec=host_manager.HostState, host='host2', - cell_uuid=uuids.cell2) - all_host_states = [hs1, hs2] - - mock_weighed.return_value = [ - weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0), - ] - - # Make sure that when logging the weighed hosts we are logging them - # with the WeighedHost wrapper class rather than the HostState objects. - def fake_debug(message, *args, **kwargs): - if message.startswith('Weighed'): - self.assertEqual(1, len(args)) - for weighed_host in args[0]['hosts']: - self.assertIsInstance(weighed_host, weights.WeighedHost) - debug.side_effect = fake_debug - - results = self.driver._get_sorted_hosts(mock.sentinel.spec, - all_host_states, mock.sentinel.index) - debug.assert_called() - - mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec, - mock.sentinel.index) - - mock_weighed.assert_called_once_with(mock_filt.return_value, - mock.sentinel.spec) - - # We override random.choice() to pick the **second** element of the - # returned weighed hosts list, which is the host state #2. 
This tests - # the code path that combines the randomly-chosen host with the - # remaining list of weighed host state objects - self.assertEqual([hs2, hs1], results) - - @mock.patch('random.choice', side_effect=lambda x: x[0]) - @mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts') - @mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts') - def test_get_sorted_hosts_subset_less_than_num_weighed(self, mock_filt, - mock_weighed, mock_rand): - """Tests that when we have >1 weighed hosts but a host subset size of - 1, that we always pick the first host in the weighed host - """ - self.flags(host_subset_size=1, group='filter_scheduler') - hs1 = mock.Mock(spec=host_manager.HostState, host='host1', - cell_uuid=uuids.cell1) - hs2 = mock.Mock(spec=host_manager.HostState, host='host2', - cell_uuid=uuids.cell2) - all_host_states = [hs1, hs2] - - mock_weighed.return_value = [ - weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0), - ] - - results = self.driver._get_sorted_hosts(mock.sentinel.spec, - all_host_states, mock.sentinel.index) - - mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec, - mock.sentinel.index) - - mock_weighed.assert_called_once_with(mock_filt.return_value, - mock.sentinel.spec) - - # We should be randomly selecting only from a list of one host state - mock_rand.assert_called_once_with([hs1]) - self.assertEqual([hs1, hs2], results) - - @mock.patch('random.choice', side_effect=lambda x: x[0]) - @mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts') - @mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts') - def test_get_sorted_hosts_subset_greater_than_num_weighed(self, mock_filt, - mock_weighed, mock_rand): - """Hosts should still be chosen if host subset size is larger than - number of weighed hosts. - """ - self.flags(host_subset_size=20, group='filter_scheduler') - hs1 = mock.Mock(spec=host_manager.HostState, host='host1', - cell_uuid=uuids.cell1) - hs2 = mock.Mock(spec=host_manager.HostState, host='host2', - cell_uuid=uuids.cell2) - all_host_states = [hs1, hs2] - - mock_weighed.return_value = [ - weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0), - ] - - results = self.driver._get_sorted_hosts(mock.sentinel.spec, - all_host_states, mock.sentinel.index) - - mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec, - mock.sentinel.index) - - mock_weighed.assert_called_once_with(mock_filt.return_value, - mock.sentinel.spec) - - # We overrode random.choice() to return the first element in the list, - # so even though we had a host_subset_size greater than the number of - # weighed hosts (2), we just random.choice() on the entire set of - # weighed hosts and thus return [hs1, hs2] - self.assertEqual([hs1, hs2], results) - - @mock.patch('random.shuffle', side_effect=lambda x: x.reverse()) - @mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts') - @mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts') - def test_get_sorted_hosts_shuffle_top_equal(self, mock_filt, mock_weighed, - mock_shuffle): - """Tests that top best weighed hosts are shuffled when enabled. 
- """ - self.flags(host_subset_size=1, group='filter_scheduler') - self.flags(shuffle_best_same_weighed_hosts=True, - group='filter_scheduler') - hs1 = mock.Mock(spec=host_manager.HostState, host='host1') - hs2 = mock.Mock(spec=host_manager.HostState, host='host2') - hs3 = mock.Mock(spec=host_manager.HostState, host='host3') - hs4 = mock.Mock(spec=host_manager.HostState, host='host4') - all_host_states = [hs1, hs2, hs3, hs4] - - mock_weighed.return_value = [ - weights.WeighedHost(hs1, 1.0), - weights.WeighedHost(hs2, 1.0), - weights.WeighedHost(hs3, 0.5), - weights.WeighedHost(hs4, 0.5), - ] - - results = self.driver._get_sorted_hosts(mock.sentinel.spec, - all_host_states, mock.sentinel.index) - - mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec, - mock.sentinel.index) - - mock_weighed.assert_called_once_with(mock_filt.return_value, - mock.sentinel.spec) - - # We override random.shuffle() to reverse the list, thus the - # head of the list should become [host#2, host#1] - # (as the host_subset_size is 1) and the tail should stay the same. - self.assertEqual([hs2, hs1, hs3, hs4], results) - - def test_cleanup_allocations(self): - instance_uuids = [] - # Check we don't do anything if there's no instance UUIDs to cleanup - # allocations for - pc = self.driver.placement_client - - self.driver._cleanup_allocations(self.context, instance_uuids) - self.assertFalse(pc.delete_allocation_for_instance.called) - - instance_uuids = [uuids.instance1, uuids.instance2] - self.driver._cleanup_allocations(self.context, instance_uuids) - - exp_calls = [mock.call(self.context, uuids.instance1), - mock.call(self.context, uuids.instance2)] - pc.delete_allocation_for_instance.assert_has_calls(exp_calls) - - def test_add_retry_host(self): - retry = dict(num_attempts=1, hosts=[]) - filter_properties = dict(retry=retry) - host = "fakehost" - node = "fakenode" - - scheduler_utils._add_retry_host(filter_properties, host, node) - - hosts = filter_properties['retry']['hosts'] - self.assertEqual(1, len(hosts)) - self.assertEqual([host, node], hosts[0]) - - def test_post_select_populate(self): - # Test addition of certain filter props after a node is selected. - retry = {'hosts': [], 'num_attempts': 1} - filter_properties = {'retry': retry} - - selection = objects.Selection(service_host="host", nodename="node", - cell_uuid=uuids.cell) - scheduler_utils.populate_filter_properties(filter_properties, - selection) - self.assertEqual(['host', 'node'], - filter_properties['retry']['hosts'][0]) - - @mock.patch('nova.scheduler.driver.SchedulerDriver._schedule') - def test_select_destinations_match_num_instances(self, mock_schedule): - """Tests that the select_destinations() method returns the list of - hosts from the _schedule() method when the number of returned hosts - equals the number of instance UUIDs passed in. 
- """ - spec_obj = objects.RequestSpec( - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor"), - project_id=uuids.project_id, - num_instances=1, - image=None, - numa_topology=None, - pci_requests=None, - instance_uuid=uuids.instance_id) - - mock_schedule.return_value = [[fake_selection]] - dests = self.driver.select_destinations(self.context, spec_obj, - [mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid, - mock.sentinel.p_sums, mock.sentinel.ar_version) - - mock_schedule.assert_called_once_with(self.context, spec_obj, - [mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid, - mock.sentinel.p_sums, mock.sentinel.ar_version, False) - self.assertEqual([[fake_selection]], dests) - - @mock.patch('nova.scheduler.driver.SchedulerDriver._schedule') - def test_select_destinations_for_move_ops(self, mock_schedule): - """Tests that the select_destinations() method verifies the number of - hosts returned from the _schedule() method against the number of - instance UUIDs passed as a parameter and not against the RequestSpec - num_instances field since the latter could be wrong in case of a move - operation. - """ - spec_obj = objects.RequestSpec( - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor"), - project_id=uuids.project_id, - num_instances=2, - image=None, - numa_topology=None, - pci_requests=None, - instance_uuid=uuids.instance_id) - - mock_schedule.return_value = [[fake_selection]] - dests = self.driver.select_destinations(self.context, spec_obj, - [mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid, - mock.sentinel.p_sums, mock.sentinel.ar_version) - - mock_schedule.assert_called_once_with(self.context, spec_obj, - [mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid, - mock.sentinel.p_sums, mock.sentinel.ar_version, False) - self.assertEqual([[fake_selection]], dests) - - @mock.patch('nova.scheduler.utils.claim_resources', return_value=True) - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - def test_schedule_fewer_num_instances( - self, mock_get_hosts, mock_get_all_states, mock_claim, - ): - """Tests that the _schedule() method properly handles - resetting host state objects and raising NoValidHost when there are not - enough hosts available. 
- """ - spec_obj = objects.RequestSpec( - num_instances=2, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor"), - project_id=uuids.project_id, - instance_uuid=uuids.instance_id, - instance_group=None) - - host_state = mock.Mock(spec=host_manager.HostState, host="fake_host", - uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node", - limits={}, updated="Not None") - all_host_states = [host_state] - mock_get_all_states.return_value = all_host_states - mock_get_hosts.side_effect = [all_host_states, []] - - instance_uuids = [uuids.inst1, uuids.inst2] - fake_allocs_by_rp = {uuids.cn1: [{}]} - - self.assertRaises(exception.NoValidHost, self.driver._schedule, - self.context, spec_obj, instance_uuids, fake_allocs_by_rp, - mock.sentinel.p_sums) - self.assertIsNone(host_state.updated) - - @mock.patch("nova.scheduler.host_manager.HostState.consume_from_request") - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - def _test_alternates_returned( - self, mock_get_all_hosts, mock_sorted, mock_claim, mock_consume, - num_instances=2, num_alternates=2, - ): - all_host_states = [] - alloc_reqs = {} - for num in range(10): - host_name = "host%s" % num - hs = host_manager.HostState(host_name, "node%s" % num, - uuids.cell) - hs.uuid = getattr(uuids, host_name) - all_host_states.append(hs) - alloc_reqs[hs.uuid] = [{}] - - mock_get_all_hosts.return_value = all_host_states - mock_sorted.return_value = all_host_states - mock_claim.return_value = True - total_returned = num_alternates + 1 - self.flags(max_attempts=total_returned, group="scheduler") - instance_uuids = [getattr(uuids, "inst%s" % num) - for num in range(num_instances)] - - spec_obj = objects.RequestSpec( - num_instances=num_instances, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1), - project_id=uuids.project_id, - instance_group=None) - - dests = self.driver._schedule(self.context, spec_obj, - instance_uuids, alloc_reqs, None, return_alternates=True) - self.assertEqual(num_instances, len(dests)) - # Filtering and weighing hosts should be called num_instances + 1 times - # unless we're not getting alternates, and then just num_instances - self.assertEqual(num_instances + 1 - if num_alternates > 0 and num_instances > 1 - else num_instances, - mock_sorted.call_count, - 'Unexpected number of calls to filter hosts for %s ' - 'instances.' % num_instances) - selected_hosts = [dest[0] for dest in dests] - for dest in dests: - self.assertEqual(total_returned, len(dest)) - # Verify that there are no duplicates among a destination - self.assertEqual(len(dest), len(set(dest))) - # Verify that none of the selected hosts appear in the alternates. 
- for alt in dest[1:]: - self.assertNotIn(alt, selected_hosts) - - def test_alternates_returned(self): - self._test_alternates_returned(num_instances=1, num_alternates=1) - self._test_alternates_returned(num_instances=3, num_alternates=0) - self._test_alternates_returned(num_instances=1, num_alternates=4) - self._test_alternates_returned(num_instances=2, num_alternates=3) - self._test_alternates_returned(num_instances=8, num_alternates=8) - - @mock.patch("nova.scheduler.host_manager.HostState.consume_from_request") - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - def test_alternates_same_cell( - self, mock_get_all_hosts, mock_sorted, mock_claim, mock_consume, - ): - """Tests getting alternates plus claims where the hosts are spread - across two cells. - """ - all_host_states = [] - alloc_reqs = {} - for num in range(10): - host_name = "host%s" % num - cell_uuid = uuids.cell1 if num % 2 else uuids.cell2 - hs = host_manager.HostState(host_name, "node%s" % num, - cell_uuid) - hs.uuid = getattr(uuids, host_name) - all_host_states.append(hs) - alloc_reqs[hs.uuid] = [{}] - - mock_get_all_hosts.return_value = all_host_states - # There are two instances so _get_sorted_hosts is called once per - # instance and then once again before picking alternates. - mock_sorted.side_effect = [all_host_states, - list(reversed(all_host_states)), - all_host_states] - mock_claim.return_value = True - total_returned = 3 - self.flags(max_attempts=total_returned, group="scheduler") - instance_uuids = [uuids.inst1, uuids.inst2] - num_instances = len(instance_uuids) - - spec_obj = objects.RequestSpec( - num_instances=num_instances, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1), - project_id=uuids.project_id, - instance_group=None) - - dests = self.driver._schedule(self.context, spec_obj, - instance_uuids, alloc_reqs, None, return_alternates=True) - # There should be max_attempts hosts per instance (1 selected, 2 alts) - self.assertEqual(total_returned, len(dests[0])) - self.assertEqual(total_returned, len(dests[1])) - # Verify that the two selected hosts are not in the same cell. 
- self.assertNotEqual(dests[0][0].cell_uuid, dests[1][0].cell_uuid) - for dest in dests: - selected_host = dest[0] - selected_cell_uuid = selected_host.cell_uuid - for alternate in dest[1:]: - self.assertEqual(alternate.cell_uuid, selected_cell_uuid) - - @mock.patch("nova.scheduler.host_manager.HostState.consume_from_request") - @mock.patch('nova.scheduler.utils.claim_resources') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_sorted_hosts') - @mock.patch('nova.scheduler.driver.SchedulerDriver._get_all_host_states') - def _test_not_enough_alternates( - self, mock_get_all_hosts, mock_sorted, mock_claim, mock_consume, - num_hosts, max_attempts, - ): - all_host_states = [] - alloc_reqs = {} - for num in range(num_hosts): - host_name = "host%s" % num - hs = host_manager.HostState(host_name, "node%s" % num, - uuids.cell) - hs.uuid = getattr(uuids, host_name) - all_host_states.append(hs) - alloc_reqs[hs.uuid] = [{}] - - mock_get_all_hosts.return_value = all_host_states - mock_sorted.return_value = all_host_states - mock_claim.return_value = True - # Set the total returned to more than the number of available hosts - self.flags(max_attempts=max_attempts, group="scheduler") - instance_uuids = [uuids.inst1, uuids.inst2] - num_instances = len(instance_uuids) - - spec_obj = objects.RequestSpec( - num_instances=num_instances, - flavor=objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1), - project_id=uuids.project_id, - instance_group=None) - - dests = self.driver._schedule(self.context, spec_obj, - instance_uuids, alloc_reqs, None, return_alternates=True) - self.assertEqual(num_instances, len(dests)) - selected_hosts = [dest[0] for dest in dests] - # The number returned for each destination should be the less of the - # number of available host and the max_attempts setting. - expected_number = min(num_hosts, max_attempts) - for dest in dests: - self.assertEqual(expected_number, len(dest)) - # Verify that there are no duplicates among a destination - self.assertEqual(len(dest), len(set(dest))) - # Verify that none of the selected hosts appear in the alternates. 
- for alt in dest[1:]: - self.assertNotIn(alt, selected_hosts) - - def test_not_enough_alternates(self): - self._test_not_enough_alternates(num_hosts=100, max_attempts=5) - self._test_not_enough_alternates(num_hosts=5, max_attempts=5) - self._test_not_enough_alternates(num_hosts=3, max_attempts=5) - self._test_not_enough_alternates(num_hosts=20, max_attempts=5) - - @mock.patch('nova.compute.utils.notify_about_scheduler_action') - @mock.patch.object(scheduler_driver.SchedulerDriver, '_schedule') - def test_select_destinations_notifications( - self, mock_schedule, mock_notify, - ): - mock_schedule.return_value = ([[mock.Mock()]], [[mock.Mock()]]) - - with mock.patch.object(self.driver.notifier, 'info') as mock_info: - flavor = objects.Flavor(memory_mb=512, - root_gb=512, - ephemeral_gb=0, - swap=0, - vcpus=1, - disabled=False, - is_public=True, - name="small_flavor") - expected = {'num_instances': 1, - 'instance_properties': { - 'uuid': uuids.instance, - 'ephemeral_gb': 0, - 'memory_mb': 512, - 'vcpus': 1, - 'root_gb': 512}, - 'instance_type': flavor, - 'image': {}} - spec_obj = objects.RequestSpec(num_instances=1, - flavor=flavor, - instance_uuid=uuids.instance) - - self.driver.select_destinations(self.context, spec_obj, - [uuids.instance], {}, None) - - expected = [ - mock.call(self.context, 'scheduler.select_destinations.start', - dict(request_spec=expected)), - mock.call(self.context, 'scheduler.select_destinations.end', - dict(request_spec=expected))] - self.assertEqual(expected, mock_info.call_args_list) - - mock_notify.assert_has_calls([ - mock.call(context=self.context, request_spec=spec_obj, - action='select_destinations', phase='start'), - mock.call(context=self.context, request_spec=spec_obj, - action='select_destinations', phase='end')]) - - def test_get_all_host_states_provider_summaries_is_none(self): - """Tests that HostManager.get_host_states_by_uuids is called with - compute_uuids being None when the incoming provider_summaries is None. - """ - with mock.patch.object(self.driver.host_manager, - 'get_host_states_by_uuids') as get_host_states: - self.driver._get_all_host_states( - mock.sentinel.ctxt, mock.sentinel.spec_obj, None) - # Make sure get_host_states_by_uuids was called with - # compute_uuids being None. - get_host_states.assert_called_once_with( - mock.sentinel.ctxt, None, mock.sentinel.spec_obj) - - def test_get_all_host_states_provider_summaries_is_empty(self): - """Tests that HostManager.get_host_states_by_uuids is called with - compute_uuids being [] when the incoming provider_summaries is {}. - """ - with mock.patch.object(self.driver.host_manager, - 'get_host_states_by_uuids') as get_host_states: - self.driver._get_all_host_states( - mock.sentinel.ctxt, mock.sentinel.spec_obj, {}) - # Make sure get_host_states_by_uuids was called with - # compute_uuids being []. 
- get_host_states.assert_called_once_with( - mock.sentinel.ctxt, [], mock.sentinel.spec_obj) diff --git a/nova/tests/unit/scheduler/test_manager.py b/nova/tests/unit/scheduler/test_manager.py index 9b3033597dce..5c15bcbce033 100644 --- a/nova/tests/unit/scheduler/test_manager.py +++ b/nova/tests/unit/scheduler/test_manager.py @@ -19,6 +19,7 @@ Tests For Scheduler import mock import oslo_messaging as messaging +from oslo_serialization import jsonutils from oslo_utils.fixture import uuidsentinel as uuids from nova import context @@ -26,23 +27,49 @@ from nova import exception from nova import objects from nova.scheduler import host_manager from nova.scheduler import manager +from nova.scheduler import utils as scheduler_utils +from nova.scheduler import weights +from nova import servicegroup from nova import test from nova.tests.unit import fake_server_actions from nova.tests.unit.scheduler import fakes +fake_numa_limit = objects.NUMATopologyLimits(cpu_allocation_ratio=1.0, + ram_allocation_ratio=1.0) +fake_limit = {"memory_mb": 1024, "disk_gb": 100, "vcpus": 2, + "numa_topology": fake_numa_limit} +fake_limit_obj = objects.SchedulerLimits.from_dict(fake_limit) +fake_alloc = {"allocations": [ + {"resource_provider": {"uuid": uuids.compute_node}, + "resources": {"VCPU": 1, + "MEMORY_MB": 1024, + "DISK_GB": 100} + }]} +fake_alloc_version = "1.23" +json_alloc = jsonutils.dumps(fake_alloc) +fake_selection = objects.Selection(service_host="fake_host", + nodename="fake_node", compute_node_uuid=uuids.compute_node, + cell_uuid=uuids.cell, limits=fake_limit_obj, + allocation_request=json_alloc, + allocation_request_version=fake_alloc_version) + + class SchedulerManagerTestCase(test.NoDBTestCase): """Test case for scheduler manager.""" manager_cls = manager.SchedulerManager - @mock.patch.object(host_manager.HostManager, '_init_instance_info') - @mock.patch.object(host_manager.HostManager, '_init_aggregates') - def setUp(self, mock_init_agg, mock_init_inst): - super(SchedulerManagerTestCase, self).setUp() + @mock.patch.object( + host_manager.HostManager, '_init_instance_info', new=mock.Mock()) + @mock.patch.object( + host_manager.HostManager, '_init_aggregates', new=mock.Mock()) + def setUp(self): + super().setUp() self.manager = self.manager_cls() self.context = context.RequestContext('fake_user', 'fake_project') self.topic = 'fake_topic' + self.servicegroup_api = servicegroup.API() self.fake_args = (1, 2, 3) self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} fake_server_actions.stub_out_action_events(self) @@ -64,8 +91,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): cn.uuid: [fake_alloc_reqs[x]] for x, cn in enumerate(fakes.COMPUTE_NODES) } - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: + with mock.patch.object( + self.manager, '_select_destinations', + ) as select_destinations: self.manager.select_destinations(self.context, spec_obj=fake_spec, instance_uuids=[fake_spec.instance_uuid]) mock_process.assert_called_once_with(self.context, fake_spec) @@ -104,8 +132,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): cn.uuid: [fake_alloc_reqs[x]] for x, cn in enumerate(fakes.COMPUTE_NODES) } - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: + with mock.patch.object( + self.manager, '_select_destinations', + ) as select_destinations: sel_obj = objects.Selection(service_host="fake_host", nodename="fake_node", compute_node_uuid=uuids.compute_node, cell_uuid=uuids.cell, limits=None) @@ -118,7 +147,7 @@ 
class SchedulerManagerTestCase(test.NoDBTestCase): self.assertIsInstance(sel_host, objects.Selection) mock_process.assert_called_once_with(None, fake_spec) # Since both return_objects and return_alternates are True, the - # driver should have been called with True for return_alternates. + # method should have been called with True for return_alternates. select_destinations.assert_called_once_with(None, fake_spec, [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid, mock_p_sums, fake_version, True) @@ -133,7 +162,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase): sel_host = dests[0] self.assertIsInstance(sel_host, dict) # Even though return_alternates was passed as True, since - # return_objects was False, the driver should have been called with + # return_objects was False, the method should have been called with # return_alternates as False. select_destinations.assert_called_once_with(None, fake_spec, [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid, @@ -150,8 +179,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): place_res = get_allocation_candidates_response mock_get_ac.return_value = place_res mock_rfrs.return_value.cpu_pinning_requested = False - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: + with mock.patch.object( + self.manager, '_select_destinations', + ) as select_destinations: self.assertRaises(messaging.rpc.dispatcher.ExpectedException, self.manager.select_destinations, self.context, spec_obj=fake_spec, @@ -196,8 +226,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): fake_spec = objects.RequestSpec( scheduler_hints={'_nova_check_type': ['rebuild']}) fake_spec.instance_uuid = uuids.instance - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: + with mock.patch.object( + self.manager, '_select_destinations', + ) as select_destinations: self.manager.select_destinations(self.context, spec_obj=fake_spec, instance_uuids=[fake_spec.instance_uuid]) select_destinations.assert_called_once_with( @@ -223,8 +254,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): cn.uuid: [fake_alloc_reqs[x]] for x, cn in enumerate(fakes.COMPUTE_NODES) } - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: + with mock.patch.object( + self.manager, '_select_destinations', + ) as select_destinations: self.manager.select_destinations(self.context, spec_obj=fake_spec) mock_process.assert_called_once_with(self.context, fake_spec) select_destinations.assert_called_once_with(self.context, @@ -270,8 +302,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): for x, cn in enumerate(fakes.COMPUTE_NODES) } - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: + with mock.patch.object( + self.manager, '_select_destinations', + ) as select_destinations: self.manager.select_destinations(self.context, spec_obj=fake_spec) select_destinations.assert_called_once_with(self.context, fake_spec, None, expected_alloc_reqs_by_rp_uuid, @@ -321,8 +354,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): cn.uuid: [fake_alloc_reqs[x]] for x, cn in enumerate(fakes.COMPUTE_NODES) } - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: + with mock.patch.object( + self.manager, '_select_destinations', + ) as select_destinations: self.manager.select_destinations( self.context, request_spec='fake_spec', filter_properties='fake_props', @@ -335,23 +369,1091 @@ class 
SchedulerManagerTestCase(test.NoDBTestCase): mock_get_ac.assert_called_once_with( self.context, mock_rfrs.return_value) + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + def test_schedule_placement_bad_comms( + self, mock_get_hosts, mock_get_all_states, mock_claim, + ): + """If there was a problem communicating with the Placement service, + alloc_reqs_by_rp_uuid will be None and we need to avoid trying to claim + in the Placement API. + """ + spec_obj = objects.RequestSpec( + num_instances=1, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor"), + project_id=uuids.project_id, + instance_group=None, instance_uuid=uuids.instance) + # Reset the RequestSpec changes so they don't interfere with the + # assertion at the end of the test. + spec_obj.obj_reset_changes(recursive=True) + + host_state = mock.Mock(spec=host_manager.HostState, host="fake_host", + uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node", + limits={}, aggregates=[]) + all_host_states = [host_state] + mock_get_all_states.return_value = all_host_states + + visited_instances = set([]) + + def fake_get_sorted_hosts(_spec_obj, host_states, index): + # Keep track of which instances are passed to the filters. + visited_instances.add(_spec_obj.instance_uuid) + return all_host_states + + mock_get_hosts.side_effect = fake_get_sorted_hosts + + instance_uuids = [uuids.instance] + ctx = mock.Mock() + selected_hosts = self.manager._schedule(ctx, spec_obj, instance_uuids, + None, mock.sentinel.provider_summaries) + + expected_hosts = [[objects.Selection.from_host_state(host_state)]] + mock_get_all_states.assert_called_once_with( + ctx.elevated.return_value, spec_obj, + mock.sentinel.provider_summaries) + mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0) + + self.assertEqual(len(selected_hosts), 1) + self.assertEqual(expected_hosts, selected_hosts) + + # Ensure that we have consumed the resources on the chosen host states + host_state.consume_from_request.assert_called_once_with(spec_obj) + + # And ensure we never called claim_resources() + self.assertFalse(mock_claim.called) + + # Make sure that the RequestSpec.instance_uuid is not dirty. + self.assertEqual(sorted(instance_uuids), sorted(visited_instances)) + self.assertEqual(0, len(spec_obj.obj_what_changed()), + spec_obj.obj_what_changed()) + + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + def test_schedule_old_conductor( + self, mock_get_hosts, mock_get_all_states, mock_claim, + ): + """Old conductor can call scheduler without the instance_uuids + parameter. When this happens, we need to ensure we do not attempt to + claim resources in the placement API since obviously we need instance + UUIDs to perform those claims. 
+ """ + group = objects.InstanceGroup(hosts=[]) + spec_obj = objects.RequestSpec( + num_instances=1, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor"), + project_id=uuids.project_id, + instance_group=group) + + host_state = mock.Mock(spec=host_manager.HostState, + host="fake_host", nodename="fake_node", uuid=uuids.cn1, + limits={}, cell_uuid=uuids.cell, instances={}, aggregates=[]) + all_host_states = [host_state] + mock_get_all_states.return_value = all_host_states + mock_get_hosts.return_value = all_host_states + + instance_uuids = None + ctx = mock.Mock() + selected_hosts = self.manager._schedule(ctx, spec_obj, + instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid, + mock.sentinel.provider_summaries) + + mock_get_all_states.assert_called_once_with( + ctx.elevated.return_value, spec_obj, + mock.sentinel.provider_summaries) + mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0) + + self.assertEqual(len(selected_hosts), 1) + expected_host = objects.Selection.from_host_state(host_state) + self.assertEqual([[expected_host]], selected_hosts) + + # Ensure that we have consumed the resources on the chosen host states + host_state.consume_from_request.assert_called_once_with(spec_obj) + + # And ensure we never called claim_resources() + self.assertFalse(mock_claim.called) + # And that the host is added to the server group but there are no + # instances tracked in the host_state. + self.assertIn(host_state.host, group.hosts) + self.assertEqual(0, len(host_state.instances)) + + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + def _test_schedule_successful_claim( + self, mock_get_hosts, mock_get_all_states, mock_claim, num_instances=1, + ): + spec_obj = objects.RequestSpec( + num_instances=num_instances, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor"), + project_id=uuids.project_id, + instance_group=None) + + host_state = mock.Mock(spec=host_manager.HostState, + host="fake_host", nodename="fake_node", uuid=uuids.cn1, + cell_uuid=uuids.cell1, limits={}, aggregates=[]) + all_host_states = [host_state] + mock_get_all_states.return_value = all_host_states + mock_get_hosts.return_value = all_host_states + mock_claim.return_value = True + + instance_uuids = [uuids.instance] + fake_alloc = {"allocations": [ + {"resource_provider": {"uuid": uuids.cn1}, + "resources": {"VCPU": 1, + "MEMORY_MB": 1024, + "DISK_GB": 100} + }]} + alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc]} + ctx = mock.Mock() + selected_hosts = self.manager._schedule(ctx, spec_obj, instance_uuids, + alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries) + + sel_obj = objects.Selection.from_host_state(host_state, + allocation_request=fake_alloc) + expected_selection = [[sel_obj]] + mock_get_all_states.assert_called_once_with( + ctx.elevated.return_value, spec_obj, + mock.sentinel.provider_summaries) + mock_get_hosts.assert_called() + mock_claim.assert_called_once_with(ctx.elevated.return_value, + self.manager.placement_client, spec_obj, uuids.instance, + alloc_reqs_by_rp_uuid[uuids.cn1][0], + allocation_request_version=None) + + self.assertEqual(len(selected_hosts), 1) + self.assertEqual(expected_selection, selected_hosts) + + # Ensure that we have consumed the 
resources on the chosen host states + host_state.consume_from_request.assert_called_once_with(spec_obj) + + def test_schedule_successful_claim(self): + self._test_schedule_successful_claim() + + def test_schedule_old_reqspec_and_move_operation(self): + """This test is for verifying that in case of a move operation with an + original RequestSpec created for 3 concurrent instances, we only verify + the instance that is moved. + """ + self._test_schedule_successful_claim(num_instances=3) + + @mock.patch('nova.scheduler.manager.SchedulerManager._cleanup_allocations') + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + def test_schedule_unsuccessful_claim( + self, mock_get_hosts, mock_get_all_states, mock_claim, mock_cleanup, + ): + """Tests that we return an empty list if we are unable to successfully + claim resources for the instance + """ + spec_obj = objects.RequestSpec( + num_instances=1, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor"), + project_id=uuids.project_id, + instance_group=None) + + host_state = mock.Mock(spec=host_manager.HostState, + host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1) + all_host_states = [host_state] + mock_get_all_states.return_value = all_host_states + mock_get_hosts.return_value = all_host_states + mock_claim.return_value = False + + instance_uuids = [uuids.instance] + alloc_reqs_by_rp_uuid = { + uuids.cn1: [{"allocations": mock.sentinel.alloc_req}], + } + ctx = mock.Mock() + fake_version = "1.99" + self.assertRaises(exception.NoValidHost, self.manager._schedule, ctx, + spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, + mock.sentinel.provider_summaries, + allocation_request_version=fake_version) + + mock_get_all_states.assert_called_once_with( + ctx.elevated.return_value, spec_obj, + mock.sentinel.provider_summaries) + mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0) + mock_claim.assert_called_once_with(ctx.elevated.return_value, + self.manager.placement_client, spec_obj, uuids.instance, + alloc_reqs_by_rp_uuid[uuids.cn1][0], + allocation_request_version=fake_version) + + mock_cleanup.assert_not_called() + # Ensure that we have consumed the resources on the chosen host states + self.assertFalse(host_state.consume_from_request.called) + + @mock.patch('nova.scheduler.manager.SchedulerManager._cleanup_allocations') + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + def test_schedule_not_all_instance_clean_claimed( + self, mock_get_hosts, mock_get_all_states, mock_claim, mock_cleanup, + ): + """Tests that we clean up previously-allocated instances if not all + instances could be scheduled + """ + spec_obj = objects.RequestSpec( + num_instances=2, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor"), + project_id=uuids.project_id, + instance_group=None) + + host_state = mock.Mock(spec=host_manager.HostState, + host="fake_host", nodename="fake_node", uuid=uuids.cn1, + cell_uuid=uuids.cell1, limits={}, updated='fake') + all_host_states = [host_state] + mock_get_all_states.return_value = all_host_states + 
mock_get_hosts.side_effect = [ + all_host_states, # first instance: return all the hosts (only one) + [], # second: act as if no more hosts that meet criteria + all_host_states, # the final call when creating alternates + ] + mock_claim.return_value = True + + instance_uuids = [uuids.instance1, uuids.instance2] + fake_alloc = {"allocations": [ + {"resource_provider": {"uuid": uuids.cn1}, + "resources": {"VCPU": 1, + "MEMORY_MB": 1024, + "DISK_GB": 100} + }]} + alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc]} + ctx = mock.Mock() + self.assertRaises(exception.NoValidHost, self.manager._schedule, ctx, + spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, + mock.sentinel.provider_summaries) + + # Ensure we cleaned up the first successfully-claimed instance + mock_cleanup.assert_called_once_with(ctx, [uuids.instance1]) + + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + def test_selection_alloc_requests_for_alts( + self, mock_get_hosts, mock_get_all_states, mock_claim, + ): + spec_obj = objects.RequestSpec( + num_instances=1, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1), + project_id=uuids.project_id, + instance_group=None) + + host_state0 = mock.Mock(spec=host_manager.HostState, + host="fake_host0", nodename="fake_node0", uuid=uuids.cn0, + cell_uuid=uuids.cell, limits={}, aggregates=[]) + host_state1 = mock.Mock(spec=host_manager.HostState, + host="fake_host1", nodename="fake_node1", uuid=uuids.cn1, + cell_uuid=uuids.cell, limits={}, aggregates=[]) + host_state2 = mock.Mock(spec=host_manager.HostState, + host="fake_host2", nodename="fake_node2", uuid=uuids.cn2, + cell_uuid=uuids.cell, limits={}, aggregates=[]) + all_host_states = [host_state0, host_state1, host_state2] + mock_get_all_states.return_value = all_host_states + mock_get_hosts.return_value = all_host_states + mock_claim.return_value = True + + instance_uuids = [uuids.instance0] + fake_alloc0 = {"allocations": [ + {"resource_provider": {"uuid": uuids.cn0}, + "resources": {"VCPU": 1, + "MEMORY_MB": 1024, + "DISK_GB": 100} + }]} + fake_alloc1 = {"allocations": [ + {"resource_provider": {"uuid": uuids.cn1}, + "resources": {"VCPU": 1, + "MEMORY_MB": 1024, + "DISK_GB": 100} + }]} + fake_alloc2 = {"allocations": [ + {"resource_provider": {"uuid": uuids.cn2}, + "resources": {"VCPU": 1, + "MEMORY_MB": 1024, + "DISK_GB": 100} + }]} + alloc_reqs_by_rp_uuid = {uuids.cn0: [fake_alloc0], + uuids.cn1: [fake_alloc1], uuids.cn2: [fake_alloc2]} + ctx = mock.Mock() + selected_hosts = self.manager._schedule(ctx, spec_obj, instance_uuids, + alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries, + return_alternates=True) + + sel0 = objects.Selection.from_host_state(host_state0, + allocation_request=fake_alloc0) + sel1 = objects.Selection.from_host_state(host_state1, + allocation_request=fake_alloc1) + sel2 = objects.Selection.from_host_state(host_state2, + allocation_request=fake_alloc2) + expected_selection = [[sel0, sel1, sel2]] + self.assertEqual(expected_selection, selected_hosts) + + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + def test_selection_alloc_requests_no_alts( + self, mock_get_hosts, mock_get_all_states, mock_claim, + ): + spec_obj = objects.RequestSpec( + num_instances=1, + 
flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1), + project_id=uuids.project_id, + instance_group=None) + + host_state0 = mock.Mock(spec=host_manager.HostState, + host="fake_host0", nodename="fake_node0", uuid=uuids.cn0, + cell_uuid=uuids.cell, limits={}, aggregates=[]) + host_state1 = mock.Mock(spec=host_manager.HostState, + host="fake_host1", nodename="fake_node1", uuid=uuids.cn1, + cell_uuid=uuids.cell, limits={}, aggregates=[]) + host_state2 = mock.Mock(spec=host_manager.HostState, + host="fake_host2", nodename="fake_node2", uuid=uuids.cn2, + cell_uuid=uuids.cell, limits={}, aggregates=[]) + all_host_states = [host_state0, host_state1, host_state2] + mock_get_all_states.return_value = all_host_states + mock_get_hosts.return_value = all_host_states + mock_claim.return_value = True + + instance_uuids = [uuids.instance0] + fake_alloc0 = {"allocations": [ + {"resource_provider": {"uuid": uuids.cn0}, + "resources": {"VCPU": 1, + "MEMORY_MB": 1024, + "DISK_GB": 100} + }]} + fake_alloc1 = {"allocations": [ + {"resource_provider": {"uuid": uuids.cn1}, + "resources": {"VCPU": 1, + "MEMORY_MB": 1024, + "DISK_GB": 100} + }]} + fake_alloc2 = {"allocations": [ + {"resource_provider": {"uuid": uuids.cn2}, + "resources": {"VCPU": 1, + "MEMORY_MB": 1024, + "DISK_GB": 100} + }]} + alloc_reqs_by_rp_uuid = {uuids.cn0: [fake_alloc0], + uuids.cn1: [fake_alloc1], uuids.cn2: [fake_alloc2]} + ctx = mock.Mock() + selected_hosts = self.manager._schedule(ctx, spec_obj, instance_uuids, + alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries, + return_alternates=False) + + sel0 = objects.Selection.from_host_state(host_state0, + allocation_request=fake_alloc0) + expected_selection = [[sel0]] + self.assertEqual(expected_selection, selected_hosts) + + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + def test_schedule_instance_group( + self, mock_get_hosts, mock_get_all_states, mock_claim, + ): + """Test that since the request spec object contains an instance group + object, that upon choosing a host in the primary schedule loop, + that we update the request spec's instance group information + """ + num_instances = 2 + ig = objects.InstanceGroup(hosts=[]) + spec_obj = objects.RequestSpec( + num_instances=num_instances, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor"), + project_id=uuids.project_id, + instance_group=ig, instance_uuid=uuids.instance0) + # Reset the RequestSpec changes so they don't interfere with the + # assertion at the end of the test. 
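+        # (obj_reset_changes(recursive=True) clears change tracking on the
+        # spec and its nested objects, so obj_what_changed() at the end of
+        # the test only reflects fields dirtied by _schedule() itself.)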
+ spec_obj.obj_reset_changes(recursive=True) + + hs1 = mock.Mock(spec=host_manager.HostState, host='host1', + nodename="node1", limits={}, uuid=uuids.cn1, + cell_uuid=uuids.cell1, instances={}, aggregates=[]) + hs2 = mock.Mock(spec=host_manager.HostState, host='host2', + nodename="node2", limits={}, uuid=uuids.cn2, + cell_uuid=uuids.cell2, instances={}, aggregates=[]) + all_host_states = [hs1, hs2] + mock_get_all_states.return_value = all_host_states + mock_claim.return_value = True + + alloc_reqs_by_rp_uuid = { + uuids.cn1: [{"allocations": "fake_cn1_alloc"}], + uuids.cn2: [{"allocations": "fake_cn2_alloc"}], + } + + # Simulate host 1 and host 2 being randomly returned first by + # _get_sorted_hosts() in the two iterations for each instance in + # num_instances + visited_instances = set([]) + + def fake_get_sorted_hosts(_spec_obj, host_states, index): + # Keep track of which instances are passed to the filters. + visited_instances.add(_spec_obj.instance_uuid) + if index % 2: + return [hs1, hs2] + return [hs2, hs1] + mock_get_hosts.side_effect = fake_get_sorted_hosts + instance_uuids = [ + getattr(uuids, 'instance%d' % x) for x in range(num_instances) + ] + ctx = mock.Mock() + self.manager._schedule(ctx, spec_obj, instance_uuids, + alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries) + + # Check that we called claim_resources() for both the first and second + # host state + claim_calls = [ + mock.call(ctx.elevated.return_value, self.manager.placement_client, + spec_obj, uuids.instance0, + alloc_reqs_by_rp_uuid[uuids.cn2][0], + allocation_request_version=None), + mock.call(ctx.elevated.return_value, self.manager.placement_client, + spec_obj, uuids.instance1, + alloc_reqs_by_rp_uuid[uuids.cn1][0], + allocation_request_version=None), + ] + mock_claim.assert_has_calls(claim_calls) + + # Check that _get_sorted_hosts() is called twice and that the + # second time, we pass it the hosts that were returned from + # _get_sorted_hosts() the first time + sorted_host_calls = [ + mock.call(spec_obj, all_host_states, 0), + mock.call(spec_obj, [hs2, hs1], 1), + ] + mock_get_hosts.assert_has_calls(sorted_host_calls) + + # The instance group object should have both host1 and host2 in its + # instance group hosts list and there should not be any "changes" to + # save in the instance group object + self.assertEqual(['host2', 'host1'], ig.hosts) + self.assertEqual({}, ig.obj_get_changes()) + # Assert that we updated HostState.instances for each host. + self.assertIn(uuids.instance0, hs2.instances) + self.assertIn(uuids.instance1, hs1.instances) + # Make sure that the RequestSpec.instance_uuid is not dirty. 
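+        # (the visited_instances set was filled in by fake_get_sorted_hosts
+        # above; checking it confirms that each instance's UUID was
+        # substituted into the spec before it was passed to the filters.)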
+ self.assertEqual(sorted(instance_uuids), sorted(visited_instances)) + self.assertEqual(0, len(spec_obj.obj_what_changed()), + spec_obj.obj_what_changed()) + + @mock.patch('nova.scheduler.manager.LOG.debug') + @mock.patch('random.choice', side_effect=lambda x: x[1]) + @mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts') + @mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts') + def test_get_sorted_hosts(self, mock_filt, mock_weighed, mock_rand, debug): + """Tests the call that returns a sorted list of hosts by calling the + host manager's filtering and weighing routines + """ + self.flags(host_subset_size=2, group='filter_scheduler') + hs1 = mock.Mock(spec=host_manager.HostState, host='host1', + cell_uuid=uuids.cell1) + hs2 = mock.Mock(spec=host_manager.HostState, host='host2', + cell_uuid=uuids.cell2) + all_host_states = [hs1, hs2] + + mock_weighed.return_value = [ + weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0), + ] + + # Make sure that when logging the weighed hosts we are logging them + # with the WeighedHost wrapper class rather than the HostState objects. + def fake_debug(message, *args, **kwargs): + if message.startswith('Weighed'): + self.assertEqual(1, len(args)) + for weighed_host in args[0]['hosts']: + self.assertIsInstance(weighed_host, weights.WeighedHost) + debug.side_effect = fake_debug + + results = self.manager._get_sorted_hosts(mock.sentinel.spec, + all_host_states, mock.sentinel.index) + debug.assert_called() + + mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec, + mock.sentinel.index) + + mock_weighed.assert_called_once_with(mock_filt.return_value, + mock.sentinel.spec) + + # We override random.choice() to pick the **second** element of the + # returned weighed hosts list, which is the host state #2. 
+        # This tests the code path that combines the randomly-chosen host
+        # with the remaining list of weighed host state objects
+        self.assertEqual([hs2, hs1], results)
+
+    @mock.patch('random.choice', side_effect=lambda x: x[0])
+    @mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
+    @mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
+    def test_get_sorted_hosts_subset_less_than_num_weighed(self, mock_filt,
+            mock_weighed, mock_rand):
+        """Tests that when we have more than one weighed host but a host
+        subset size of 1, we always pick the first host in the weighed host
+        list.
+        """
+        self.flags(host_subset_size=1, group='filter_scheduler')
+        hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
+            cell_uuid=uuids.cell1)
+        hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
+            cell_uuid=uuids.cell2)
+        all_host_states = [hs1, hs2]
+
+        mock_weighed.return_value = [
+            weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0),
+        ]
+
+        results = self.manager._get_sorted_hosts(mock.sentinel.spec,
+            all_host_states, mock.sentinel.index)
+
+        mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
+            mock.sentinel.index)
+
+        mock_weighed.assert_called_once_with(mock_filt.return_value,
+            mock.sentinel.spec)
+
+        # We should be randomly selecting only from a list of one host state
+        mock_rand.assert_called_once_with([hs1])
+        self.assertEqual([hs1, hs2], results)
+
+    @mock.patch('random.choice', side_effect=lambda x: x[0])
+    @mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
+    @mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
+    def test_get_sorted_hosts_subset_greater_than_num_weighed(self, mock_filt,
+            mock_weighed, mock_rand):
+        """Hosts should still be chosen if the host subset size is larger
+        than the number of weighed hosts.
+        """
+        self.flags(host_subset_size=20, group='filter_scheduler')
+        hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
+            cell_uuid=uuids.cell1)
+        hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
+            cell_uuid=uuids.cell2)
+        all_host_states = [hs1, hs2]
+
+        mock_weighed.return_value = [
+            weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0),
+        ]
+
+        results = self.manager._get_sorted_hosts(mock.sentinel.spec,
+            all_host_states, mock.sentinel.index)
+
+        mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
+            mock.sentinel.index)
+
+        mock_weighed.assert_called_once_with(mock_filt.return_value,
+            mock.sentinel.spec)
+
+        # We overrode random.choice() to return the first element in the
+        # list, so even though host_subset_size was greater than the number
+        # of weighed hosts (2), we simply call random.choice() on the entire
+        # set of weighed hosts and thus return [hs1, hs2]
+        self.assertEqual([hs1, hs2], results)
+
+    @mock.patch('random.shuffle', side_effect=lambda x: x.reverse())
+    @mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
+    @mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
+    def test_get_sorted_hosts_shuffle_top_equal(self, mock_filt, mock_weighed,
+            mock_shuffle):
+        """Tests that the top equally weighed hosts are shuffled when enabled.
+ """ + self.flags(host_subset_size=1, group='filter_scheduler') + self.flags(shuffle_best_same_weighed_hosts=True, + group='filter_scheduler') + hs1 = mock.Mock(spec=host_manager.HostState, host='host1') + hs2 = mock.Mock(spec=host_manager.HostState, host='host2') + hs3 = mock.Mock(spec=host_manager.HostState, host='host3') + hs4 = mock.Mock(spec=host_manager.HostState, host='host4') + all_host_states = [hs1, hs2, hs3, hs4] + + mock_weighed.return_value = [ + weights.WeighedHost(hs1, 1.0), + weights.WeighedHost(hs2, 1.0), + weights.WeighedHost(hs3, 0.5), + weights.WeighedHost(hs4, 0.5), + ] + + results = self.manager._get_sorted_hosts(mock.sentinel.spec, + all_host_states, mock.sentinel.index) + + mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec, + mock.sentinel.index) + + mock_weighed.assert_called_once_with(mock_filt.return_value, + mock.sentinel.spec) + + # We override random.shuffle() to reverse the list, thus the + # head of the list should become [host#2, host#1] + # (as the host_subset_size is 1) and the tail should stay the same. + self.assertEqual([hs2, hs1, hs3, hs4], results) + + @mock.patch( + 'nova.scheduler.client.report.SchedulerReportClient' + '.delete_allocation_for_instance') + def test_cleanup_allocations(self, mock_delete_alloc): + # Check we don't do anything if there's no instance UUIDs to cleanup + # allocations for + + instance_uuids = [] + self.manager._cleanup_allocations(self.context, instance_uuids) + mock_delete_alloc.assert_not_called() + + instance_uuids = [uuids.instance1, uuids.instance2] + self.manager._cleanup_allocations(self.context, instance_uuids) + mock_delete_alloc.assert_has_calls([ + mock.call(self.context, uuids.instance1), + mock.call(self.context, uuids.instance2) + ]) + + def test_add_retry_host(self): + retry = dict(num_attempts=1, hosts=[]) + filter_properties = dict(retry=retry) + host = "fakehost" + node = "fakenode" + + scheduler_utils._add_retry_host(filter_properties, host, node) + + hosts = filter_properties['retry']['hosts'] + self.assertEqual(1, len(hosts)) + self.assertEqual([host, node], hosts[0]) + + def test_post_select_populate(self): + # Test addition of certain filter props after a node is selected. + retry = {'hosts': [], 'num_attempts': 1} + filter_properties = {'retry': retry} + + selection = objects.Selection(service_host="host", nodename="node", + cell_uuid=uuids.cell) + scheduler_utils.populate_filter_properties(filter_properties, + selection) + self.assertEqual(['host', 'node'], + filter_properties['retry']['hosts'][0]) + + @mock.patch('nova.scheduler.manager.SchedulerManager._schedule') + def test_select_destinations_match_num_instances(self, mock_schedule): + """Tests that the select_destinations() method returns the list of + hosts from the _schedule() method when the number of returned hosts + equals the number of instance UUIDs passed in. 
+ """ + spec_obj = objects.RequestSpec( + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor"), + project_id=uuids.project_id, + num_instances=1, + image=None, + numa_topology=None, + pci_requests=None, + instance_uuid=uuids.instance_id) + + mock_schedule.return_value = [[fake_selection]] + dests = self.manager._select_destinations( + self.context, spec_obj, + [mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid, + mock.sentinel.p_sums, mock.sentinel.ar_version) + + mock_schedule.assert_called_once_with(self.context, spec_obj, + [mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid, + mock.sentinel.p_sums, mock.sentinel.ar_version, False) + self.assertEqual([[fake_selection]], dests) + + @mock.patch('nova.scheduler.manager.SchedulerManager._schedule') + def test_select_destinations_for_move_ops(self, mock_schedule): + """Tests that the select_destinations() method verifies the number of + hosts returned from the _schedule() method against the number of + instance UUIDs passed as a parameter and not against the RequestSpec + num_instances field since the latter could be wrong in case of a move + operation. + """ + spec_obj = objects.RequestSpec( + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor"), + project_id=uuids.project_id, + num_instances=2, + image=None, + numa_topology=None, + pci_requests=None, + instance_uuid=uuids.instance_id) + + mock_schedule.return_value = [[fake_selection]] + dests = self.manager._select_destinations( + self.context, spec_obj, + [mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid, + mock.sentinel.p_sums, mock.sentinel.ar_version) + + mock_schedule.assert_called_once_with(self.context, spec_obj, + [mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid, + mock.sentinel.p_sums, mock.sentinel.ar_version, False) + self.assertEqual([[fake_selection]], dests) + + @mock.patch('nova.scheduler.utils.claim_resources', return_value=True) + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + def test_schedule_fewer_num_instances( + self, mock_get_hosts, mock_get_all_states, mock_claim, + ): + """Tests that the _schedule() method properly handles + resetting host state objects and raising NoValidHost when there are not + enough hosts available. 
+ """ + spec_obj = objects.RequestSpec( + num_instances=2, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor"), + project_id=uuids.project_id, + instance_uuid=uuids.instance_id, + instance_group=None) + + host_state = mock.Mock(spec=host_manager.HostState, host="fake_host", + uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node", + limits={}, updated="Not None") + all_host_states = [host_state] + mock_get_all_states.return_value = all_host_states + mock_get_hosts.side_effect = [all_host_states, []] + + instance_uuids = [uuids.inst1, uuids.inst2] + fake_allocs_by_rp = {uuids.cn1: [{}]} + + self.assertRaises(exception.NoValidHost, self.manager._schedule, + self.context, spec_obj, instance_uuids, fake_allocs_by_rp, + mock.sentinel.p_sums) + self.assertIsNone(host_state.updated) + + @mock.patch("nova.scheduler.host_manager.HostState.consume_from_request") + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + def _test_alternates_returned( + self, mock_get_all_hosts, mock_sorted, mock_claim, mock_consume, + num_instances=2, num_alternates=2, + ): + all_host_states = [] + alloc_reqs = {} + for num in range(10): + host_name = "host%s" % num + hs = host_manager.HostState(host_name, "node%s" % num, + uuids.cell) + hs.uuid = getattr(uuids, host_name) + all_host_states.append(hs) + alloc_reqs[hs.uuid] = [{}] + + mock_get_all_hosts.return_value = all_host_states + mock_sorted.return_value = all_host_states + mock_claim.return_value = True + total_returned = num_alternates + 1 + self.flags(max_attempts=total_returned, group="scheduler") + instance_uuids = [getattr(uuids, "inst%s" % num) + for num in range(num_instances)] + + spec_obj = objects.RequestSpec( + num_instances=num_instances, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1), + project_id=uuids.project_id, + instance_group=None) + + dests = self.manager._schedule(self.context, spec_obj, + instance_uuids, alloc_reqs, None, return_alternates=True) + self.assertEqual(num_instances, len(dests)) + # Filtering and weighing hosts should be called num_instances + 1 times + # unless we're not getting alternates, and then just num_instances + self.assertEqual(num_instances + 1 + if num_alternates > 0 and num_instances > 1 + else num_instances, + mock_sorted.call_count, + 'Unexpected number of calls to filter hosts for %s ' + 'instances.' % num_instances) + selected_hosts = [dest[0] for dest in dests] + for dest in dests: + self.assertEqual(total_returned, len(dest)) + # Verify that there are no duplicates among a destination + self.assertEqual(len(dest), len(set(dest))) + # Verify that none of the selected hosts appear in the alternates. 
+ for alt in dest[1:]: + self.assertNotIn(alt, selected_hosts) + + def test_alternates_returned(self): + self._test_alternates_returned(num_instances=1, num_alternates=1) + self._test_alternates_returned(num_instances=3, num_alternates=0) + self._test_alternates_returned(num_instances=1, num_alternates=4) + self._test_alternates_returned(num_instances=2, num_alternates=3) + self._test_alternates_returned(num_instances=8, num_alternates=8) + + @mock.patch("nova.scheduler.host_manager.HostState.consume_from_request") + @mock.patch('nova.scheduler.utils.claim_resources') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts') + @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states') + def test_alternates_same_cell( + self, mock_get_all_hosts, mock_sorted, mock_claim, mock_consume, + ): + """Tests getting alternates plus claims where the hosts are spread + across two cells. + """ + all_host_states = [] + alloc_reqs = {} + for num in range(10): + host_name = "host%s" % num + cell_uuid = uuids.cell1 if num % 2 else uuids.cell2 + hs = host_manager.HostState(host_name, "node%s" % num, + cell_uuid) + hs.uuid = getattr(uuids, host_name) + all_host_states.append(hs) + alloc_reqs[hs.uuid] = [{}] + + mock_get_all_hosts.return_value = all_host_states + # There are two instances so _get_sorted_hosts is called once per + # instance and then once again before picking alternates. + mock_sorted.side_effect = [all_host_states, + list(reversed(all_host_states)), + all_host_states] + mock_claim.return_value = True + total_returned = 3 + self.flags(max_attempts=total_returned, group="scheduler") + instance_uuids = [uuids.inst1, uuids.inst2] + num_instances = len(instance_uuids) + + spec_obj = objects.RequestSpec( + num_instances=num_instances, + flavor=objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1), + project_id=uuids.project_id, + instance_group=None) + + dests = self.manager._schedule(self.context, spec_obj, + instance_uuids, alloc_reqs, None, return_alternates=True) + # There should be max_attempts hosts per instance (1 selected, 2 alts) + self.assertEqual(total_returned, len(dests[0])) + self.assertEqual(total_returned, len(dests[1])) + # Verify that the two selected hosts are not in the same cell. 
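+        # (the reversed host ordering returned for the second instance
+        # guarantees its selected host comes from the other cell)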
+        self.assertNotEqual(dests[0][0].cell_uuid, dests[1][0].cell_uuid)
+        for dest in dests:
+            selected_host = dest[0]
+            selected_cell_uuid = selected_host.cell_uuid
+            for alternate in dest[1:]:
+                self.assertEqual(alternate.cell_uuid, selected_cell_uuid)
+
+    @mock.patch("nova.scheduler.host_manager.HostState.consume_from_request")
+    @mock.patch('nova.scheduler.utils.claim_resources')
+    @mock.patch('nova.scheduler.manager.SchedulerManager._get_sorted_hosts')
+    @mock.patch('nova.scheduler.manager.SchedulerManager._get_all_host_states')
+    def _test_not_enough_alternates(
+        self, mock_get_all_hosts, mock_sorted, mock_claim, mock_consume,
+        num_hosts, max_attempts,
+    ):
+        all_host_states = []
+        alloc_reqs = {}
+        for num in range(num_hosts):
+            host_name = "host%s" % num
+            hs = host_manager.HostState(host_name, "node%s" % num,
+                uuids.cell)
+            hs.uuid = getattr(uuids, host_name)
+            all_host_states.append(hs)
+            alloc_reqs[hs.uuid] = [{}]
+
+        mock_get_all_hosts.return_value = all_host_states
+        mock_sorted.return_value = all_host_states
+        mock_claim.return_value = True
+        # The max_attempts option caps how many hosts (the selected host plus
+        # its alternates) can be returned for each instance.
+        self.flags(max_attempts=max_attempts, group="scheduler")
+        instance_uuids = [uuids.inst1, uuids.inst2]
+        num_instances = len(instance_uuids)
+
+        spec_obj = objects.RequestSpec(
+            num_instances=num_instances,
+            flavor=objects.Flavor(memory_mb=512,
+                                  root_gb=512,
+                                  ephemeral_gb=0,
+                                  swap=0,
+                                  vcpus=1),
+            project_id=uuids.project_id,
+            instance_group=None)
+
+        dests = self.manager._schedule(self.context, spec_obj,
+            instance_uuids, alloc_reqs, None, return_alternates=True)
+        self.assertEqual(num_instances, len(dests))
+        selected_hosts = [dest[0] for dest in dests]
+        # The number returned for each destination should be the lesser of
+        # the number of available hosts and the max_attempts setting.
+        expected_number = min(num_hosts, max_attempts)
+        for dest in dests:
+            self.assertEqual(expected_number, len(dest))
+            # Verify that there are no duplicates among a destination
+            self.assertEqual(len(dest), len(set(dest)))
+            # Verify that none of the selected hosts appear in the alternates.
+ for alt in dest[1:]: + self.assertNotIn(alt, selected_hosts) + + def test_not_enough_alternates(self): + self._test_not_enough_alternates(num_hosts=100, max_attempts=5) + self._test_not_enough_alternates(num_hosts=5, max_attempts=5) + self._test_not_enough_alternates(num_hosts=3, max_attempts=5) + self._test_not_enough_alternates(num_hosts=20, max_attempts=5) + + @mock.patch('nova.compute.utils.notify_about_scheduler_action') + @mock.patch.object(manager.SchedulerManager, '_schedule') + def test_select_destinations_notifications( + self, mock_schedule, mock_notify, + ): + mock_schedule.return_value = ([[mock.Mock()]], [[mock.Mock()]]) + + with mock.patch.object(self.manager.notifier, 'info') as mock_info: + flavor = objects.Flavor(memory_mb=512, + root_gb=512, + ephemeral_gb=0, + swap=0, + vcpus=1, + disabled=False, + is_public=True, + name="small_flavor") + expected = {'num_instances': 1, + 'instance_properties': { + 'uuid': uuids.instance, + 'ephemeral_gb': 0, + 'memory_mb': 512, + 'vcpus': 1, + 'root_gb': 512}, + 'instance_type': flavor, + 'image': {}} + spec_obj = objects.RequestSpec(num_instances=1, + flavor=flavor, + instance_uuid=uuids.instance) + + self.manager._select_destinations( + self.context, spec_obj, [uuids.instance], {}, None) + + expected = [ + mock.call(self.context, 'scheduler.select_destinations.start', + dict(request_spec=expected)), + mock.call(self.context, 'scheduler.select_destinations.end', + dict(request_spec=expected))] + self.assertEqual(expected, mock_info.call_args_list) + + mock_notify.assert_has_calls([ + mock.call( + context=self.context, request_spec=spec_obj, + action='select_destinations', phase='start', + ), + mock.call( + context=self.context, request_spec=spec_obj, + action='select_destinations', phase='end', + ), + ]) + + def test_get_all_host_states_provider_summaries_is_none(self): + """Tests that HostManager.get_host_states_by_uuids is called with + compute_uuids being None when the incoming provider_summaries is None. + """ + with mock.patch.object(self.manager.host_manager, + 'get_host_states_by_uuids') as get_host_states: + self.manager._get_all_host_states( + mock.sentinel.ctxt, mock.sentinel.spec_obj, None) + # Make sure get_host_states_by_uuids was called with + # compute_uuids being None. + get_host_states.assert_called_once_with( + mock.sentinel.ctxt, None, mock.sentinel.spec_obj) + + def test_get_all_host_states_provider_summaries_is_empty(self): + """Tests that HostManager.get_host_states_by_uuids is called with + compute_uuids being [] when the incoming provider_summaries is {}. + """ + with mock.patch.object(self.manager.host_manager, + 'get_host_states_by_uuids') as get_host_states: + self.manager._get_all_host_states( + mock.sentinel.ctxt, mock.sentinel.spec_obj, {}) + # Make sure get_host_states_by_uuids was called with + # compute_uuids being []. 
+ get_host_states.assert_called_once_with( + mock.sentinel.ctxt, [], mock.sentinel.spec_obj) + def test_update_aggregates(self): - with mock.patch.object(self.manager.driver.host_manager, - 'update_aggregates' - ) as update_aggregates: + with mock.patch.object( + self.manager.host_manager, 'update_aggregates', + ) as update_aggregates: self.manager.update_aggregates(None, aggregates='agg') update_aggregates.assert_called_once_with('agg') def test_delete_aggregate(self): - with mock.patch.object(self.manager.driver.host_manager, - 'delete_aggregate' - ) as delete_aggregate: + with mock.patch.object( + self.manager.host_manager, 'delete_aggregate', + ) as delete_aggregate: self.manager.delete_aggregate(None, aggregate='agg') delete_aggregate.assert_called_once_with('agg') def test_update_instance_info(self): - with mock.patch.object(self.manager.driver.host_manager, - 'update_instance_info') as mock_update: + with mock.patch.object( + self.manager.host_manager, 'update_instance_info', + ) as mock_update: self.manager.update_instance_info(mock.sentinel.context, mock.sentinel.host_name, mock.sentinel.instance_info) @@ -360,8 +1462,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): mock.sentinel.instance_info) def test_delete_instance_info(self): - with mock.patch.object(self.manager.driver.host_manager, - 'delete_instance_info') as mock_delete: + with mock.patch.object( + self.manager.host_manager, 'delete_instance_info', + ) as mock_delete: self.manager.delete_instance_info(mock.sentinel.context, mock.sentinel.host_name, mock.sentinel.instance_uuid) @@ -370,8 +1473,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): mock.sentinel.instance_uuid) def test_sync_instance_info(self): - with mock.patch.object(self.manager.driver.host_manager, - 'sync_instance_info') as mock_sync: + with mock.patch.object( + self.manager.host_manager, 'sync_instance_info', + ) as mock_sync: self.manager.sync_instance_info(mock.sentinel.context, mock.sentinel.host_name, mock.sentinel.instance_uuids) @@ -380,8 +1484,9 @@ class SchedulerManagerTestCase(test.NoDBTestCase): mock.sentinel.instance_uuids) def test_reset(self): - with mock.patch.object(self.manager.driver.host_manager, - 'refresh_cells_caches') as mock_refresh: + with mock.patch.object( + self.manager.host_manager, 'refresh_cells_caches', + ) as mock_refresh: self.manager.reset() mock_refresh.assert_called_once_with()