Remove ChanceScheduler

ChanceScheduler was deprecated in Pike [1] and can now be removed.

[1] https://review.openstack.org/#/c/492210/

Change-Id: I44f9c1cabf9fc64b1a6903236bc88f5ed8619e9e
Author: zhangyangyang (2018-06-19 14:36:12 +08:00)
Committed-by: Stephen Finucane
Parent: c587a85acd
Commit: 92a459331f
7 changed files with 10 additions and 303 deletions


@@ -917,14 +917,6 @@ For example:
ram_weight_multiplier = 1.0
offset_weight_multiplier = 1.0
Chance scheduler
~~~~~~~~~~~~~~~~
As an administrator, you work with the filter scheduler. However, the Compute
service also uses the Chance Scheduler,
``nova.scheduler.chance.ChanceScheduler``, which randomly selects from lists of
filtered hosts.
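Before this change, that driver could be selected in ``nova.conf`` (a
minimal sketch, using the entry point name that this commit removes)::

    [scheduler]
    driver = chance_scheduler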
Utilization aware scheduling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~


@@ -37,7 +37,6 @@ Other options are:
* 'caching_scheduler' which aggressively caches the system state for better
  individual scheduler performance at the risk of more retries when running
  multiple schedulers. [DEPRECATED]
* 'chance_scheduler' which simply picks a host at random. [DEPRECATED]
* 'fake_scheduler' which is used for testing.
Possible values:
@@ -46,7 +45,6 @@ Possible values:
* filter_scheduler
* caching_scheduler
* chance_scheduler
* fake_scheduler
* You may also set this to the entry point name of a custom scheduler driver,
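For example, the driver is selected in the ``[scheduler]`` section of
``nova.conf``; a minimal sketch, shown with ``filter_scheduler``, which is
the default (the manager test below asserts as much)::

    [scheduler]
    # Entry point name of the scheduler driver to load; filter_scheduler
    # is the default and the recommended choice after this removal.
    driver = filter_scheduler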


@@ -1,124 +0,0 @@
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Chance (Random) Scheduler implementation
"""

import random

from oslo_log import log as logging

from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.scheduler import driver

CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)


class ChanceScheduler(driver.Scheduler):
    """Implements Scheduler as a random node selector."""

    USES_ALLOCATION_CANDIDATES = False

    def __init__(self, *args, **kwargs):
        super(ChanceScheduler, self).__init__(*args, **kwargs)
        LOG.warning('ChanceScheduler is deprecated in Pike and will be '
                    'removed in a subsequent release.')

    def _filter_hosts(self, hosts, spec_obj):
        """Filter a list of hosts based on RequestSpec."""
        ignore_hosts = spec_obj.ignore_hosts or []
        hosts = [host for host in hosts if host not in ignore_hosts]
        return hosts

    def _schedule(self, context, topic, spec_obj, instance_uuids,
                  return_alternates=False):
        """Picks a host that is up at random."""
        elevated = context.elevated()
        hosts = self.hosts_up(elevated, topic)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)

        hosts = self._filter_hosts(hosts, spec_obj)
        if not hosts:
            msg = _("Could not find another compute")
            raise exception.NoValidHost(reason=msg)

        # Note that we don't claim in the chance scheduler
        num_instances = len(instance_uuids)
        # If possible, we'd like to return distinct hosts for each instance.
        # But when there are fewer available hosts than requested instances, we
        # will need to return some duplicates.
        if len(hosts) >= num_instances:
            selected_hosts = random.sample(hosts, num_instances)
        else:
            selected_hosts = [random.choice(hosts)
                              for i in range(num_instances)]

        # This is the overall list of values to be returned. There will be one
        # item per instance, and that item will be a list of Selection objects
        # representing the selected host and zero or more alternates.
        # NOTE(edleafe): in a multi-cell environment, this can return
        # alternates from different cells. When support for multiple cells is
        # implemented in select_destinations, this will have to be updated to
        # restrict alternates to come from the same cell.
        selections_to_return = []

        # We can't return dupes as alternates, since alternates are used when
        # building to the selected host fails.
        if return_alternates:
            alts_per_instance = min(len(hosts), CONF.scheduler.max_attempts)
        else:
            alts_per_instance = 0
        for sel_host in selected_hosts:
            selection = objects.Selection.from_host_state(sel_host)
            sel_plus_alts = [selection]
            while len(sel_plus_alts) < alts_per_instance:
                candidate = random.choice(hosts)
                if (candidate not in sel_plus_alts) and (
                        candidate not in selected_hosts):
                    # We don't want to include a selected host as an alternate,
                    # as it will have a high likelihood of not having enough
                    # resources left after it has an instance built on it.
                    alt_select = objects.Selection.from_host_state(candidate)
                    sel_plus_alts.append(alt_select)
            selections_to_return.append(sel_plus_alts)
        return selections_to_return

    def select_destinations(self, context, spec_obj, instance_uuids,
            alloc_reqs_by_rp_uuid, provider_summaries,
            allocation_request_version=None, return_alternates=False):
        """Selects random destinations. Returns a list of list of Selection
        objects.
        """
        num_instances = spec_obj.num_instances
        # TODO(danms): This needs to be extended to support multiple cells
        # and limiting the destination scope to a single requested cell
        host_selections = self._schedule(context, compute_rpcapi.RPC_TOPIC,
                                         spec_obj, instance_uuids)
        if len(host_selections) < num_instances:
            reason = _('There are not enough hosts available.')
            raise exception.NoValidHost(reason=reason)
        return host_selections


@@ -1,158 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Chance Scheduler.
"""

import mock

from nova import exception
from nova import objects
from nova.scheduler import chance
from nova.scheduler import host_manager
from nova.tests.unit.scheduler import test_scheduler
from nova.tests import uuidsentinel as uuids


def _generate_fake_hosts(num):
    hosts = []
    for i in range(num):
        fake_host_state = host_manager.HostState("host%s" % i, "fake_node",
                                                 uuids.cell)
        fake_host_state.uuid = getattr(uuids, "host%s" % i)
        fake_host_state.limits = {}
        hosts.append(fake_host_state)
    return hosts


class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
    """Test case for Chance Scheduler."""

    driver_cls = chance.ChanceScheduler

    def test_filter_hosts_avoid(self):
        """Test to make sure _filter_hosts() filters original hosts if
        avoid_original_host is True.
        """
        hosts = ['host1', 'host2', 'host3']
        spec_obj = objects.RequestSpec(ignore_hosts=['host2'])
        filtered = self.driver._filter_hosts(hosts, spec_obj=spec_obj)
        self.assertEqual(filtered, ['host1', 'host3'])

    def test_filter_hosts_no_avoid(self):
        """Test to make sure _filter_hosts() does not filter original
        hosts if avoid_original_host is False.
        """
        hosts = ['host1', 'host2', 'host3']
        spec_obj = objects.RequestSpec(ignore_hosts=[])
        filtered = self.driver._filter_hosts(hosts, spec_obj=spec_obj)
        self.assertEqual(filtered, hosts)

    @mock.patch("nova.scheduler.chance.ChanceScheduler.hosts_up")
    def test_select_destinations(self, mock_hosts_up):
        mock_hosts_up.return_value = _generate_fake_hosts(4)
        spec_obj = objects.RequestSpec(num_instances=2, ignore_hosts=None)
        dests = self.driver.select_destinations(self.context, spec_obj,
                [uuids.instance1, uuids.instance2], {},
                mock.sentinel.provider_summaries)
        self.assertEqual(2, len(dests))
        # Test that different hosts were returned
        self.assertIsNot(dests[0], dests[1])

    @mock.patch("nova.scheduler.chance.ChanceScheduler._filter_hosts")
    @mock.patch("nova.scheduler.chance.ChanceScheduler.hosts_up")
    def test_select_destinations_no_valid_host(self, mock_hosts_up,
                                               mock_filter):
        mock_hosts_up.return_value = _generate_fake_hosts(4)
        mock_filter.return_value = []
        spec_obj = objects.RequestSpec(num_instances=1)
        spec_obj.instance_uuid = uuids.instance
        self.assertRaises(exception.NoValidHost,
                          self.driver.select_destinations, self.context,
                          spec_obj, [spec_obj.instance_uuid], {},
                          mock.sentinel.provider_summaries)

    @mock.patch("nova.scheduler.chance.ChanceScheduler.hosts_up")
    def test_schedule_success_single_instance(self, mock_hosts_up):
        hosts = _generate_fake_hosts(20)
        mock_hosts_up.return_value = hosts
        spec_obj = objects.RequestSpec(num_instances=1, ignore_hosts=None)
        spec_obj.instance_uuid = uuids.instance
        # Set the max_attempts to 2
        attempts = 2
        expected = attempts
        self.flags(max_attempts=attempts, group="scheduler")
        selected_hosts = self.driver._schedule(self.context, "compute",
                spec_obj, [spec_obj.instance_uuid], return_alternates=True)
        self.assertEqual(1, len(selected_hosts))
        for host_list in selected_hosts:
            self.assertEqual(expected, len(host_list))

        # Now set max_attempts to a number larger than the available hosts. It
        # should return a host_list containing only as many hosts as there are
        # to choose from.
        attempts = len(hosts) + 1
        expected = len(hosts)
        self.flags(max_attempts=attempts, group="scheduler")
        selected_hosts = self.driver._schedule(self.context, "compute",
                spec_obj, [spec_obj.instance_uuid], return_alternates=True)
        self.assertEqual(1, len(selected_hosts))
        for host_list in selected_hosts:
            self.assertEqual(expected, len(host_list))

        # Now verify that if we pass False for return_alternates, that we only
        # get one host in the host_list.
        attempts = 5
        expected = 1
        self.flags(max_attempts=attempts, group="scheduler")
        selected_hosts = self.driver._schedule(self.context, "compute",
                spec_obj, [spec_obj.instance_uuid], return_alternates=False)
        self.assertEqual(1, len(selected_hosts))
        for host_list in selected_hosts:
            self.assertEqual(expected, len(host_list))

    @mock.patch("nova.scheduler.chance.ChanceScheduler.hosts_up")
    def test_schedule_success_multiple_instances(self, mock_hosts_up):
        hosts = _generate_fake_hosts(20)
        mock_hosts_up.return_value = hosts
        num_instances = 4
        spec_obj = objects.RequestSpec(num_instances=num_instances,
                                       ignore_hosts=None)
        instance_uuids = [getattr(uuids, "inst%s" % i)
                          for i in range(num_instances)]
        spec_obj.instance_uuid = instance_uuids[0]
        # Set the max_attempts to 2
        attempts = 2
        self.flags(max_attempts=attempts, group="scheduler")
        selected_hosts = self.driver._schedule(self.context, "compute",
                spec_obj, instance_uuids, return_alternates=True)
        self.assertEqual(num_instances, len(selected_hosts))
        for host_list in selected_hosts:
            self.assertEqual(attempts, len(host_list))

        # Verify that none of the selected hosts appear as alternates
        # Set the max_attempts to 5 to get 4 alternates per instance
        attempts = 4
        self.flags(max_attempts=attempts, group="scheduler")
        result = self.driver._schedule(self.context, "compute", spec_obj,
                                       instance_uuids)
        selected = [host_list[0] for host_list in result]
        for host_list in result:
            for sel in selected:
                self.assertNotIn(sel, host_list[1:])


@@ -23,7 +23,6 @@ import oslo_messaging as messaging
from nova import context
from nova import objects
from nova.scheduler import caching_scheduler
from nova.scheduler import chance
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import manager
@@ -46,15 +45,6 @@ class SchedulerManagerInitTestCase(test.NoDBTestCase):
        driver = self.manager_cls().driver
        self.assertIsInstance(driver, filter_scheduler.FilterScheduler)

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_using_chance_schedulerdriver(self,
                                               mock_init_agg,
                                               mock_init_inst):
        self.flags(driver='chance_scheduler', group='scheduler')
        driver = self.manager_cls().driver
        self.assertIsInstance(driver, chance.ChanceScheduler)

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def test_init_using_caching_schedulerdriver(self,


@@ -0,0 +1,10 @@
---
upgrade:
  - |
    The ``chance_scheduler`` scheduler driver was deprecated in Pike
    and has now been removed. You should enable the ``filter_scheduler``
    driver instead. If ``chance_scheduler`` behavior is desired
    (i.e. speed is valued over correctness), then configuring the
    ``filter_scheduler`` with only the ``AllHostsFilter`` enabled and
    adjusting ``[filter_scheduler]/host_subset_size`` will provide similar
    performance.
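A minimal sketch of that replacement configuration (the option names exist
as described in the note; the subset size value is illustrative only)::

    [scheduler]
    driver = filter_scheduler

    [filter_scheduler]
    # AllHostsFilter is a no-op filter, so every enabled host passes.
    enabled_filters = AllHostsFilter
    # Choosing randomly among the N best weighed hosts approximates
    # the old random placement.
    host_subset_size = 10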


@@ -86,7 +86,6 @@ nova.ipv6_backend =
nova.scheduler.driver =
    filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler
    caching_scheduler = nova.scheduler.caching_scheduler:CachingScheduler
    chance_scheduler = nova.scheduler.chance:ChanceScheduler
    fake_scheduler = nova.tests.unit.scheduler.fakes:FakeScheduler
[egg_info]