25dadb94db
The CachingScheduler has been deprecated since Pike [1]. It does not use the placement service and as more of nova relies on placement for managing resource allocations, maintaining compatibility for the CachingScheduler is exorbitant. The release note in this change goes into much more detail about why the FilterScheduler + Placement should be a sufficient replacement for the original justification for the CachingScheduler along with details on how to migrate from the CachingScheduler to the FilterScheduler. Since the [scheduler]/driver configuration option does allow loading out-of-tree drivers and the scheduler driver interface does have the USES_ALLOCATION_CANDIDATES variable, it is possible that there are drivers being used which are also not using the placement service. The release note also explains this but warns against it. However, as a result some existing functional tests, which were using the CachingScheduler, are updated to still test scheduling without allocations being created in the placement service. Over time we will likely remove the USES_ALLOCATION_CANDIDATES variable in the scheduler driver interface along with the compatibility code associated with it, but that is left for a later change. [1] Ia7ff98ff28b7265058845e46b277317a2bfc96d2 Change-Id: I1832da2190be5ef2b04953938860a56a43e8cddf
103 lines
3.9 KiB
Python
103 lines
3.9 KiB
Python
# Copyright (c) 2011 OpenStack Foundation
|
|
# Copyright (c) 2012 Cloudscaling
|
|
# All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
|
|
from oslo_log import log as logging
|
|
|
|
from nova.scheduler import filters
|
|
from nova.scheduler.filters import utils
|
|
|
|
LOG = logging.getLogger(__name__)
|
|
|
|
|
|
class BaseRamFilter(filters.BaseHostFilter):
    """Common RAM-capacity check shared by the concrete RAM filters.

    Subclasses supply the overcommit ratio via
    :meth:`_get_ram_allocation_ratio`; this class applies it to decide
    whether a host has enough usable RAM for the request.
    """

    # RAM was already validated on the original build; skip on rebuild.
    RUN_ON_REBUILD = False

    def _get_ram_allocation_ratio(self, host_state, spec_obj):
        """Return the RAM overcommit ratio to apply for *host_state*.

        Must be implemented by subclasses.
        """
        raise NotImplementedError

    def host_passes(self, host_state, spec_obj):
        """Only return hosts with sufficient available RAM."""
        requested_ram = spec_obj.memory_mb
        free_ram_mb = host_state.free_ram_mb
        total_usable_ram_mb = host_state.total_usable_ram_mb

        # Do not allow an instance to overcommit against itself, only against
        # other instances: the physical total must cover the request even
        # before the overcommit ratio is applied.
        if total_usable_ram_mb < requested_ram:
            LOG.debug("%(host_state)s does not have %(requested_ram)s MB "
                      "usable ram before overcommit, it only has "
                      "%(usable_ram)s MB.",
                      {'host_state': host_state,
                       'requested_ram': requested_ram,
                       'usable_ram': total_usable_ram_mb})
            return False

        ratio = self._get_ram_allocation_ratio(host_state, spec_obj)

        # Overcommitted capacity minus what is already consumed.
        memory_mb_limit = total_usable_ram_mb * ratio
        consumed_ram_mb = total_usable_ram_mb - free_ram_mb
        usable_ram = memory_mb_limit - consumed_ram_mb

        if usable_ram < requested_ram:
            LOG.debug("%(host_state)s does not have %(requested_ram)s MB "
                      "usable ram, it only has %(usable_ram)s MB usable ram.",
                      {'host_state': host_state,
                       'requested_ram': requested_ram,
                       'usable_ram': usable_ram})
            return False

        # save oversubscription limit for compute node to test against:
        host_state.limits['memory_mb'] = memory_mb_limit
        return True
|
|
|
|
|
|
class RamFilter(BaseRamFilter):
    """RAM filter using the host-wide overcommit (over-subscription) ratio.

    Deprecated: a warning is emitted on construction because Placement
    performs MEMORY_MB filtering natively.
    """

    def __init__(self):
        super(RamFilter, self).__init__()
        deprecation_msg = ('The RamFilter is deprecated since the 19.0.0 '
                           'Stein '
                           'release. MEMORY_MB filtering is performed '
                           'natively '
                           'using the '
                           'Placement service when using the '
                           'filter_scheduler driver. Furthermore, enabling '
                           'RamFilter '
                           'may incorrectly filter out baremetal nodes '
                           'which must be '
                           'scheduled using custom resource classes.')
        LOG.warning(deprecation_msg)

    def _get_ram_allocation_ratio(self, host_state, spec_obj):
        # Always the host-level ratio; no per-aggregate override here.
        return host_state.ram_allocation_ratio
|
|
|
|
|
|
class AggregateRamFilter(BaseRamFilter):
    """RAM filter honoring a per-aggregate ram_allocation_ratio override.

    When the host's aggregates carry no usable 'ram_allocation_ratio'
    metadata, the host-level ram_allocation_ratio is used instead.
    """

    def _get_ram_allocation_ratio(self, host_state, spec_obj):
        # Collect any 'ram_allocation_ratio' values set on the host's
        # aggregates; may be empty.
        aggregate_vals = utils.aggregate_values_from_key(
            host_state,
            'ram_allocation_ratio')

        try:
            # Validate/convert the aggregate values, falling back to the
            # host-level ratio when none are present.
            return utils.validate_num_values(
                aggregate_vals, host_state.ram_allocation_ratio,
                cast_to=float)
        except ValueError as e:
            # Bad aggregate metadata: warn and use the host-level ratio.
            LOG.warning("Could not decode ram_allocation_ratio: '%s'", e)
            return host_state.ram_allocation_ratio
|