nova/nova/scheduler/filters/disk_filter.py
Matt Riedemann 25dadb94db Remove the CachingScheduler
The CachingScheduler has been deprecated since Pike [1].
It does not use the placement service and as more of nova
relies on placement for managing resource allocations,
maintaining compatibility for the CachingScheduler is
exorbitant.

The release note in this change goes into much more detail
about why the FilterScheduler + Placement should be a
sufficient replacement for the use cases that originally
justified the CachingScheduler, along with details on how to
migrate from the CachingScheduler to the FilterScheduler.

Since the [scheduler]/driver configuration option does allow
loading out-of-tree drivers and the scheduler driver interface
does have the USES_ALLOCATION_CANDIDATES variable, it is
possible that there are drivers being used which are also not
using the placement service. The release note also explains this
but warns against it. As a result, some existing functional
tests which were using the CachingScheduler are updated to
still test scheduling without allocations being created in
the placement service.

Over time we will likely remove the USES_ALLOCATION_CANDIDATES
variable in the scheduler driver interface along with the
compatibility code associated with it, but that is left for
a later change.
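
As a rough, hypothetical sketch of that out-of-tree scenario
(the class name, module path and the abbreviated
select_destinations() signature below are made up; only the
nova.scheduler.driver.Scheduler base class and the
USES_ALLOCATION_CANDIDATES attribute come from nova), such a
driver would declare that it does not consume allocation
candidates and would then be loaded via the [scheduler]/driver
option:

    # Hypothetical out-of-tree driver, shown only for illustration.
    from nova.scheduler import driver

    class MyOutOfTreeScheduler(driver.Scheduler):
        # False tells the scheduler manager not to request allocation
        # candidates from placement before calling the driver.
        USES_ALLOCATION_CANDIDATES = False

        def select_destinations(self, *args, **kwargs):
            # Host selection would happen by some external means here;
            # the real signature is abbreviated with *args/**kwargs.
            raise NotImplementedError()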

[1] Ia7ff98ff28b7265058845e46b277317a2bfc96d2

Change-Id: I1832da2190be5ef2b04953938860a56a43e8cddf
2018-10-18 17:55:36 -04:00


# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from nova.scheduler import filters
from nova.scheduler.filters import utils

LOG = logging.getLogger(__name__)


class DiskFilter(filters.BaseHostFilter):
    """DEPRECATED: Disk Filter with over subscription flag."""

    RUN_ON_REBUILD = False

    DEPRECATED = True

    def __init__(self):
        super(DiskFilter, self).__init__()
        if self.DEPRECATED:
            LOG.warning('The DiskFilter is deprecated since the 19.0.0 Stein '
                        'release. DISK_GB filtering is performed natively '
                        'using the Placement service when using the '
                        'filter_scheduler driver. Furthermore, enabling '
                        'DiskFilter may incorrectly filter out baremetal '
                        'nodes which must be scheduled using custom resource '
                        'classes.')

    def _get_disk_allocation_ratio(self, host_state, spec_obj):
        return host_state.disk_allocation_ratio

    def host_passes(self, host_state, spec_obj):
        """Filter based on disk usage."""
        requested_disk = (1024 * (spec_obj.root_gb +
                                  spec_obj.ephemeral_gb) +
                          spec_obj.swap)

        free_disk_mb = host_state.free_disk_mb
        total_usable_disk_mb = host_state.total_usable_disk_gb * 1024

        # Do not allow an instance to overcommit against itself, only against
        # other instances. In other words, if there isn't room for even just
        # this one instance in total_usable_disk space, consider the host full.
        if total_usable_disk_mb < requested_disk:
            LOG.debug("%(host_state)s does not have %(requested_disk)s "
                      "MB usable disk space before overcommit, it only "
                      "has %(physical_disk_size)s MB.",
                      {'host_state': host_state,
                       'requested_disk': requested_disk,
                       'physical_disk_size': total_usable_disk_mb})
            return False

        disk_allocation_ratio = self._get_disk_allocation_ratio(
            host_state, spec_obj)

        disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio
        used_disk_mb = total_usable_disk_mb - free_disk_mb
        usable_disk_mb = disk_mb_limit - used_disk_mb

        if not usable_disk_mb >= requested_disk:
            LOG.debug("%(host_state)s does not have %(requested_disk)s MB "
                      "usable disk, it only has %(usable_disk_mb)s MB usable "
                      "disk.", {'host_state': host_state,
                                'requested_disk': requested_disk,
                                'usable_disk_mb': usable_disk_mb})
            return False

        disk_gb_limit = disk_mb_limit / 1024
        host_state.limits['disk_gb'] = disk_gb_limit
        return True


class AggregateDiskFilter(DiskFilter):
    """AggregateDiskFilter with per-aggregate disk allocation ratio flag.

    Fall back to global disk_allocation_ratio if no per-aggregate setting
    found.
    """

    RUN_ON_REBUILD = False

    DEPRECATED = False

    def _get_disk_allocation_ratio(self, host_state, spec_obj):
        aggregate_vals = utils.aggregate_values_from_key(
            host_state,
            'disk_allocation_ratio')
        try:
            ratio = utils.validate_num_values(
                aggregate_vals, host_state.disk_allocation_ratio,
                cast_to=float)
        except ValueError as e:
            LOG.warning("Could not decode disk_allocation_ratio: '%s'", e)
            ratio = host_state.disk_allocation_ratio

        return ratio
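
For illustration only (not part of the file above), the overcommit
arithmetic that host_passes() performs can be sketched standalone;
the numbers below are made up:

# Standalone sketch of the DiskFilter overcommit math; values are made up.
root_gb, ephemeral_gb, swap_mb = 20, 5, 512        # flavor disk request
total_usable_disk_gb = 100                         # host physical disk
free_disk_mb = 30 * 1024                           # host free disk
disk_allocation_ratio = 1.5

requested_disk = 1024 * (root_gb + ephemeral_gb) + swap_mb     # 26112 MB
total_usable_disk_mb = total_usable_disk_gb * 1024             # 102400 MB
disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio   # 153600 MB
used_disk_mb = total_usable_disk_mb - free_disk_mb             # 71680 MB
usable_disk_mb = disk_mb_limit - used_disk_mb                  # 81920 MB

# A host passes only if it could hold the instance even without overcommit
# and the remaining overcommitted capacity still covers the request.
passes = (total_usable_disk_mb >= requested_disk and
          usable_disk_mb >= requested_disk)
print(passes)  # True for these numbers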