# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
|
|
The FilterScheduler is for creating instances locally.
|
|
You can customize this scheduler by specifying your own Host Filters and
|
|
Weighing Functions.
|
|
"""

import random

from oslo_log import log as logging
from six.moves import range

import nova.conf
from nova import exception
from nova.i18n import _
from nova import rpc
from nova.scheduler import driver

CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)


class FilterScheduler(driver.Scheduler):
    """Scheduler that can be used for filtering and weighing."""
    def __init__(self, *args, **kwargs):
        super(FilterScheduler, self).__init__(*args, **kwargs)
        self.notifier = rpc.get_notifier('scheduler')

    def select_destinations(self, context, spec_obj, instance_uuids,
                            alloc_reqs_by_rp_uuid, provider_summaries):
        """Returns a sorted list of HostState objects that satisfy the
        supplied request_spec.

        :param context: The RequestContext object
        :param spec_obj: The RequestSpec object
        :param instance_uuids: List of UUIDs, one for each value of the spec
                               object's num_instances attribute
        :param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource
                                      provider UUID, of the allocation
                                      requests that may be used to claim
                                      resources against matched hosts. If
                                      None, indicates either the placement
                                      API wasn't reachable or that there were
                                      no allocation requests returned by the
                                      placement API. If the latter, the
                                      provider_summaries will be an empty
                                      dict, not None.
        :param provider_summaries: Optional dict, keyed by resource provider
                                   UUID, of information that will be used by
                                   the filters/weighers in selecting matching
                                   hosts for a request. If None, indicates
                                   that the scheduler driver should grab all
                                   compute node information locally and that
                                   the Placement API is not used. If an empty
                                   dict, indicates the Placement API returned
                                   no potential matches for the requested
                                   resources.
        """
        self.notifier.info(
            context, 'scheduler.select_destinations.start',
            dict(request_spec=spec_obj.to_legacy_request_spec_dict()))

        num_instances = spec_obj.num_instances
        selected_hosts = self._schedule(context, spec_obj, instance_uuids,
            alloc_reqs_by_rp_uuid, provider_summaries)

        # Couldn't fulfill the request_spec
        if len(selected_hosts) < num_instances:
            # NOTE(Rui Chen): If multiple creates failed, set the updated
            # time of the selected HostStates to None so that they are
            # refreshed from the database on the next scheduling pass,
            # releasing the resources consumed during host selection.
            for host in selected_hosts:
                host.updated = None

            # Log the details but don't put those into the reason since
            # we don't want to give away too much information about our
            # actual environment.
            LOG.debug('There are %(hosts)d hosts available but '
                      '%(num_instances)d instances requested to build.',
                      {'hosts': len(selected_hosts),
                       'num_instances': num_instances})

            reason = _('There are not enough hosts available.')
            raise exception.NoValidHost(reason=reason)

        self.notifier.info(
            context, 'scheduler.select_destinations.end',
            dict(request_spec=spec_obj.to_legacy_request_spec_dict()))
        return selected_hosts

    def _schedule(self, context, spec_obj, instance_uuids,
                  alloc_reqs_by_rp_uuid, provider_summaries):
        """Returns a list of hosts that meet the required specs, ordered by
        their fitness.

        :param context: The RequestContext object
        :param spec_obj: The RequestSpec object
        :param instance_uuids: List of UUIDs, one for each value of the spec
                               object's num_instances attribute
        :param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource
                                      provider UUID, of the allocation
                                      requests that may be used to claim
                                      resources against matched hosts. If
                                      None, indicates either the placement
                                      API wasn't reachable or that there were
                                      no allocation requests returned by the
                                      placement API. If the latter, the
                                      provider_summaries will be an empty
                                      dict, not None.
        :param provider_summaries: Optional dict, keyed by resource provider
                                   UUID, of information that will be used by
                                   the filters/weighers in selecting matching
                                   hosts for a request. If None, indicates
                                   that the scheduler driver should grab all
                                   compute node information locally and that
                                   the Placement API is not used. If an empty
                                   dict, indicates the Placement API returned
                                   no potential matches for the requested
                                   resources.
        """
        elevated = context.elevated()

        # Find our local list of acceptable hosts by repeatedly
        # filtering and weighing our options. Each time we choose a
        # host, we virtually consume resources on it so subsequent
        # selections can adjust accordingly.

        # Note: remember, we are using an iterator here, so only
        # traverse this list once. This can bite you if the hosts
        # are being scanned in a filter or weighing function.
        hosts = self._get_all_host_states(elevated, spec_obj,
            provider_summaries)

        selected_hosts = []
        num_instances = spec_obj.num_instances
        for num in range(num_instances):
            hosts = self._get_sorted_hosts(spec_obj, hosts, num)
            if not hosts:
                break

            chosen_host = hosts[0]

            LOG.debug("Selected host: %(host)s", {'host': chosen_host})
            selected_hosts.append(chosen_host)

            # Now consume the resources so the filter/weights
            # will change for the next instance.
            chosen_host.consume_from_request(spec_obj)
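            # If this request is part of a server group, record the chosen
            # host on the group so that, on later loop iterations, the
            # affinity/anti-affinity filters can account for hosts already
            # picked for this request.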
            if spec_obj.instance_group is not None:
                spec_obj.instance_group.hosts.append(chosen_host.host)
                # The 'hosts' field must not be part of the updates when
                # the instance group is saved.
                spec_obj.instance_group.obj_reset_changes(['hosts'])
        return selected_hosts

    def _get_sorted_hosts(self, spec_obj, host_states, index):
        """Returns a list of HostState objects that match the required
        scheduling constraints for the request spec object and have been
        sorted according to the weighers.
        """
        filtered_hosts = self.host_manager.get_filtered_hosts(host_states,
            spec_obj, index)

        LOG.debug("Filtered %(hosts)s", {'hosts': filtered_hosts})

        if not filtered_hosts:
            return []

        weighed_hosts = self.host_manager.get_weighed_hosts(filtered_hosts,
            spec_obj)
        # Strip off the WeighedHost wrapper class...
        weighed_hosts = [h.obj for h in weighed_hosts]

        LOG.debug("Weighed %(hosts)s", {'hosts': weighed_hosts})

        # We randomize the first element in the returned list to alleviate
        # congestion where the same host is consistently selected among
        # numerous potential hosts for similar request specs.
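        # Worked example (illustrative values): with host_subset_size = 3 and
        # weighed_hosts = [A, B, C, D] (best first), we choose one of A, B or
        # C at random, say B, and return [B, A, C, D]. Only the winner moves
        # to the front; the remaining hosts keep their weighed order.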
        host_subset_size = CONF.filter_scheduler.host_subset_size
        if host_subset_size < len(weighed_hosts):
            weighed_subset = weighed_hosts[0:host_subset_size]
        else:
            weighed_subset = weighed_hosts
        chosen_host = random.choice(weighed_subset)
        weighed_hosts.remove(chosen_host)
        return [chosen_host] + weighed_hosts

    def _get_all_host_states(self, context, spec_obj, provider_summaries):
        """Template method, so a subclass can implement caching."""
        # NOTE(jaypipes): None is treated differently from an empty dict. We
        # pass None when we want to grab all compute nodes (for instance,
        # when using the caching scheduler). We pass an empty dict when the
        # Placement API found no providers that match the requested
        # constraints.
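        # Concretely: provider_summaries=None -> compute_uuids=None, and the
        # host manager loads every compute node; provider_summaries={} ->
        # compute_uuids=[], no hosts are returned, and the request will fail
        # with NoValidHost.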
        compute_uuids = None
        if provider_summaries is not None:
            compute_uuids = list(provider_summaries.keys())
        return self.host_manager.get_host_states_by_uuids(context,
                                                          compute_uuids,
                                                          spec_obj)