Drop Chance/SimpleScheduler Implementation
This patch removes the implementations of ChanceScheduler and SimpleScheduler; previous changes have ensured they are internally replaced by FilterScheduler. The "max_gigabytes" config option is deprecated and will be left in place for one more release before it can be removed.

DocImpact: "ChanceScheduler and SimpleScheduler have been deprecated and their implementations have been removed from Cinder."

Implements bp: deprecate-chance-and-simple-schedulers

Change-Id: Ifb1cb25e3bb4cdf26fa3283336b83fce5c97141e
parent da2caad4b9
commit cea7fe21ce
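As context for reviewers: the substitution the commit message refers to lives in the scheduler manager — when cinder.conf still names one of the deprecated drivers, FilterScheduler is loaded in its place. A minimal sketch of that idea follows; the helper name is hypothetical, and the real logic in cinder/scheduler/manager.py may differ in detail (it can also adjust the default filters/weighers to match the old behavior):

    # Hypothetical sketch of the transparent driver substitution; not the
    # actual manager code, which is not part of this diff.
    DEPRECATED_DRIVERS = {
        'cinder.scheduler.chance.ChanceScheduler':
            'cinder.scheduler.filter_scheduler.FilterScheduler',
        'cinder.scheduler.simple.SimpleScheduler':
            'cinder.scheduler.filter_scheduler.FilterScheduler',
    }

    def resolve_scheduler_driver(configured_driver):
        # Map a deprecated driver path to FilterScheduler; pass others through.
        return DEPRECATED_DRIVERS.get(configured_driver, configured_driver)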
@@ -1,148 +0,0 @@
-# Copyright (c) 2010 OpenStack Foundation
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Chance (Random) Scheduler implementation
-"""
-
-import random
-
-from oslo.config import cfg
-
-from cinder import exception
-from cinder.scheduler import driver
-
-
-CONF = cfg.CONF
-
-
-class ChanceScheduler(driver.Scheduler):
-    """Implements Scheduler as a random node selector."""
-
-    def _filter_hosts(self, request_spec, hosts, **kwargs):
-        """Filter a list of hosts based on request_spec."""
-
-        filter_properties = kwargs.get('filter_properties', {})
-        if not filter_properties:
-            filter_properties = {}
-        ignore_hosts = filter_properties.get('ignore_hosts', [])
-        hosts = [host for host in hosts if host not in ignore_hosts]
-        return hosts
-
-    def _get_weighted_candidates(self, context, topic, request_spec, **kwargs):
-        """Returns a list of the available hosts."""
-
-        elevated = context.elevated()
-        hosts = self.hosts_up(elevated, topic)
-        if not hosts:
-            msg = _("Is the appropriate service running?")
-            raise exception.NoValidHost(reason=msg)
-
-        return self._filter_hosts(request_spec, hosts, **kwargs)
-
-    def _choose_host_from_list(self, hosts):
-        return hosts[int(random.random() * len(hosts))]
-
-    def _schedule(self, context, topic, request_spec, **kwargs):
-        """Picks a host that is up at random."""
-        hosts = self._get_weighted_candidates(context, topic,
-                                              request_spec, **kwargs)
-        if not hosts:
-            msg = _("Could not find another host")
-            raise exception.NoValidHost(reason=msg)
-        return self._choose_host_from_list(hosts)
-
-    def schedule_create_volume(self, context, request_spec, filter_properties):
-        """Picks a host that is up at random."""
-        topic = CONF.volume_topic
-        host = self._schedule(context, topic, request_spec,
-                              filter_properties=filter_properties)
-        volume_id = request_spec['volume_id']
-        snapshot_id = request_spec['snapshot_id']
-        image_id = request_spec['image_id']
-
-        updated_volume = driver.volume_update_db(context, volume_id, host)
-        self.volume_rpcapi.create_volume(context, updated_volume, host,
-                                         request_spec, filter_properties,
-                                         snapshot_id=snapshot_id,
-                                         image_id=image_id)
-
-    def host_passes_filters(self, context, host, request_spec,
-                            filter_properties):
-        """Check if the specified host passes the filters."""
-        weighed_hosts = self._get_weighted_candidates(
-            context,
-            CONF.volume_topic,
-            request_spec,
-            filter_properties=filter_properties)
-
-        for weighed_host in weighed_hosts:
-            if weighed_host == host:
-                elevated = context.elevated()
-                host_states = self.host_manager.get_all_host_states(elevated)
-                for host_state in host_states:
-                    if host_state.host == host:
-                        return host_state
-
-        msg = (_('cannot place volume %(id)s on %(host)s')
-               % {'id': request_spec['volume_id'], 'host': host})
-        raise exception.NoValidHost(reason=msg)
-
-    def find_retype_host(self, context, request_spec, filter_properties,
-                         migration_policy='never'):
-        """Find a host that can accept the volume with its new type."""
-        current_host = request_spec['volume_properties']['host']
-
-        # The volume already exists on this host, and so we shouldn't check if
-        # it can accept the volume again.
-        filter_properties['vol_exists_on'] = current_host
-
-        weighed_hosts = self._get_weighted_candidates(
-            context,
-            CONF.volume_topic,
-            request_spec,
-            filter_properties=filter_properties)
-        if not weighed_hosts:
-            msg = (_('No valid hosts for volume %(id)s with type %(type)s')
-                   % {'id': request_spec['volume_id'],
-                      'type': request_spec['volume_type']})
-            raise exception.NoValidHost(reason=msg)
-
-        target_host = None
-        for weighed_host in weighed_hosts:
-            if weighed_host == current_host:
-                target_host = current_host
-
-        if migration_policy == 'never' and target_host is None:
-            msg = (_('Current host not valid for volume %(id)s with type '
                     '%(type)s, migration not allowed')
-                   % {'id': request_spec['volume_id'],
-                      'type': request_spec['volume_type']})
-            raise exception.NoValidHost(reason=msg)
-
-        if not target_host:
-            target_host = self._choose_host_from_list(weighed_hosts)
-
-        elevated = context.elevated()
-        host_states = self.host_manager.get_all_host_states(elevated)
-        for host_state in host_states:
-            if host_state.host == target_host:
-                return (host_state, migration_policy)
-
-        # NOTE(avishay): We should never get here, but raise just in case
-        msg = (_('No host_state for selected host %s') % target_host)
-        raise exception.NoValidHost(reason=msg)
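For reference, the random pick being deleted here, hosts[int(random.random() * len(hosts))], is just a long-winded random.choice(hosts); under FilterScheduler the same behavior comes from the ChanceWeigher configuration shown in the simple.py docstring below. A small self-contained illustration:

    import random

    hosts = ['host1', 'host2', 'host3']

    # What ChanceScheduler._choose_host_from_list did: a uniform random index.
    picked = hosts[int(random.random() * len(hosts))]

    # Equivalent, idiomatic form.
    picked = random.choice(hosts)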
@@ -24,7 +24,6 @@ from oslo.config import cfg
 from cinder import db
 from cinder.openstack.common import importutils
 from cinder.openstack.common import timeutils
-from cinder import utils
 from cinder.volume import rpcapi as volume_rpcapi
 
 
@@ -65,14 +64,6 @@ class Scheduler(object):
                                                       host,
                                                       capabilities)
 
-    def hosts_up(self, context, topic):
-        """Return the list of hosts that have a running service for topic."""
-
-        services = db.service_get_all_by_topic(context, topic)
-        return [service['host']
-                for service in services
-                if utils.service_is_up(service)]
-
     def host_passes_filters(self, context, volume_id, host, filter_properties):
         """Check if the specified host passes the filters."""
         raise NotImplementedError(_("Must implement host_passes_filters"))
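With hosts_up() gone from the base driver, host discovery goes through the host manager instead; the chance.py code above already shows the pattern via get_all_host_states(). A rough sketch of the equivalent lookup, assuming a scheduler object with a host_manager attribute as in the code above:

    def hosts_via_host_manager(scheduler, context):
        # Sketch: enumerate hosts known to the HostManager rather than
        # querying the services table directly, as the removed hosts_up() did.
        elevated = context.elevated()
        host_states = scheduler.host_manager.get_all_host_states(elevated)
        return [host_state.host for host_state in host_states]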
@@ -16,68 +16,51 @@
 # under the License.
 
 """
-Simple Scheduler
+Chance and Simple Scheduler are DEPRECATED.
+
+The Chance and Simple scheduler implementations have been deprecated, as
+their functionality can be reproduced with the FilterScheduler. Here's how:
+
+If one would like the scheduler to randomly pick an available back-end
+(like ChanceScheduler did), use FilterScheduler with the following
+combination of filters and weighers.
+
+scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler
+scheduler_default_filters = ['AvailabilityZoneFilter', 'CapacityFilter',
+                             'CapabilitiesFilter']
+scheduler_default_weighers = 'ChanceWeigher'
+
+If one prefers the scheduler to pick the back-end with the most available
+space that the scheduler can see (like SimpleScheduler did), use the
+following combination of filters and weighers with FilterScheduler.
+
+scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler
+scheduler_default_filters = ['AvailabilityZoneFilter', 'CapacityFilter',
+                             'CapabilitiesFilter']
+scheduler_default_weighers = 'AllocatedCapacityWeigher'
+allocated_capacity_weight_multiplier = -1.0
+
+Setting or leaving the config option
+'scheduler_driver=cinder.scheduler.chance.ChanceScheduler' or
+'scheduler_driver=cinder.scheduler.simple.SimpleScheduler' in cinder.conf
+works exactly as described above, since the scheduler manager has been
+updated to perform the substitution internally/transparently for users.
+
+With that, FilterScheduler behaves mostly the same as Chance/SimpleScheduler,
+with the additional benefits of supporting volume types, encryption and QoS.
 """
 
 from oslo.config import cfg
 
-from cinder import db
-from cinder import exception
-from cinder.scheduler import chance
-from cinder import utils
-
 
 simple_scheduler_opts = [
     cfg.IntOpt("max_gigabytes",
                default=10000,
-               help="maximum number of volume gigabytes to allow per host"), ]
+               help="This config option has been deprecated along with "
+                    "the SimpleScheduler. The new scheduler is able to "
+                    "gather capacity information for each host, so "
+                    "setting the maximum number of volume gigabytes per "
+                    "host is no longer needed. It's safe to remove this "
+                    "option from cinder.conf."), ]
 
 CONF = cfg.CONF
 CONF.register_opts(simple_scheduler_opts)
-
-
-class SimpleScheduler(chance.ChanceScheduler):
-    """Implements Naive Scheduler that tries to find least loaded host."""
-
-    def _get_weighted_candidates(self, context, topic, request_spec, **kwargs):
-        """Picks a host that is up and has the fewest volumes."""
-        elevated = context.elevated()
-
-        volume_id = request_spec.get('volume_id')
-        snapshot_id = request_spec.get('snapshot_id')
-        image_id = request_spec.get('image_id')
-        volume_properties = request_spec.get('volume_properties')
-        volume_size = volume_properties.get('size')
-        availability_zone = volume_properties.get('availability_zone')
-        filter_properties = kwargs.get('filter_properties', {})
-
-        zone, host = None, None
-        if availability_zone:
-            zone, _x, host = availability_zone.partition(':')
-        if host and context.is_admin:
-            service = db.service_get_by_args(elevated, host, topic)
-            if not utils.service_is_up(service):
-                raise exception.WillNotSchedule(host=host)
-            return [host]
-
-        candidates = []
-        results = db.service_get_all_volume_sorted(elevated)
-        if zone:
-            results = [(s, gigs) for (s, gigs) in results
-                       if s['availability_zone'] == zone]
-        for result in results:
-            (service, volume_gigabytes) = result
-            no_skip = service['host'] != filter_properties.get('vol_exists_on')
-            if no_skip and volume_gigabytes + volume_size > CONF.max_gigabytes:
-                continue
-            if utils.service_is_up(service) and not service['disabled']:
-                candidates.append(service['host'])
-
-        if candidates:
-            return candidates
-        else:
-            msg = _("No service with adequate space or no service running")
-            raise exception.NoValidHost(reason=msg)
-
-    def _choose_host_from_list(self, hosts):
-        return hosts[0]
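Pulling the docstring's guidance into one place, a cinder.conf fragment reproducing the old behaviors with FilterScheduler might look like the following. The comma-separated list syntax for the filter/weigher options is assumed here; operators should double-check option formats for their release:

    [DEFAULT]
    # ChanceScheduler-equivalent: filter hosts, then pick one at random.
    scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler
    scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter
    scheduler_default_weighers = ChanceWeigher

    # SimpleScheduler-equivalent: swap the weigher; the negative multiplier
    # makes the back-end with the least allocated capacity win.
    #scheduler_default_weighers = AllocatedCapacityWeigher
    #allocated_capacity_weight_multiplier = -1.0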
@@ -212,23 +212,6 @@ class SchedulerTestCase(test.TestCase):
         _mock_update_cap.assert_called_once_with(service_name, host,
                                                  capabilities)
 
-    @mock.patch('cinder.db.service_get_all_by_topic')
-    @mock.patch('cinder.utils.service_is_up')
-    def test_hosts_up(self, _mock_serv_is_up, _mock_serv_get_all_by_topic):
-        service1 = {'host': 'host1', 'disabled': False}
-        service2 = {'host': 'host2', 'disabled': False}
-        services = [service1, service2]
-
-        def fake_serv_is_up(service):
-            return service['host'] is 'host2'
-
-        _mock_serv_get_all_by_topic.return_value = services
-        _mock_serv_is_up.side_effect = fake_serv_is_up
-        result = self.driver.hosts_up(self.context, self.topic)
-        self.assertEqual(result, ['host2'])
-        _mock_serv_get_all_by_topic.assert_called_once_with(self.context,
-                                                            self.topic)
-
 
 class SchedulerDriverBaseTestCase(SchedulerTestCase):
     """Test cases for base scheduler driver class methods
@@ -38,10 +38,10 @@ The :mod:`cinder.scheduler.driver` Module
     :show-inheritance:
 
 
-The :mod:`cinder.scheduler.simple` Driver
+The :mod:`cinder.scheduler.filter_scheduler` Driver
 -----------------------------------------
 
-.. automodule:: cinder.scheduler.simple
+.. automodule:: cinder.scheduler.filter_scheduler
     :noindex:
    :members:
    :undoc-members:
@@ -983,8 +983,11 @@
 # Options defined in cinder.scheduler.simple
 #
 
-# maximum number of volume gigabytes to allow per host
-# (integer value)
+# This config option has been deprecated along with the
+# SimpleScheduler. The new scheduler is able to gather capacity
+# information for each host, so setting the maximum number of
+# volume gigabytes per host is no longer needed. It's safe to
+# remove this option from cinder.conf. (integer value)
 #max_gigabytes=10000
 
 