filter: add per-aggregate filter to configure max_io_ops_per_host
Adds a filter AggregateIoOpsFilter which provides the ability to read "max_io_ops_per_host" from aggregate metadata. DocImpact Implements: blueprint per-aggregate-max-io-ops-per-host Change-Id: I086033e7904c16995298bee7196ad3e7b5dc7aaf
This commit is contained in:
parent
d479a159d2
commit
65b3a4f607
|
@ -104,6 +104,12 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
|
|||
``max_io_ops_per_host`` setting. Maximum number of I/O intensive instances allowed to
|
||||
run on this host; the host will be ignored by the scheduler if more than ``max_io_ops_per_host``
|
||||
instances such as build/resize/snapshot etc. are running on it.
|
||||
* |AggregateIoOpsFilter| - filters hosts by I/O operations with per-aggregate
|
||||
``max_io_ops_per_host`` setting. If no per-aggregate value is found, it will
|
||||
fall back to the global default ``max_io_ops_per_host``. If more than
|
||||
one value is found for a host (meaning the host is in two or more different
|
||||
aggregates with different max io operations settings), the minimum value
|
||||
will be used.
|
||||
* |PciPassthroughFilter| - Filter that schedules instances on a host if the host
|
||||
has devices to meet the device requests in the 'extra_specs' for the flavor.
|
||||
* |SimpleCIDRAffinityFilter| - allows to put a new instance on a host within
|
||||
|
@ -356,6 +362,7 @@ in :mod:``nova.tests.scheduler``.
|
|||
.. |DiskFilter| replace:: :class:`DiskFilter <nova.scheduler.filters.disk_filter.DiskFilter>`
|
||||
.. |NumInstancesFilter| replace:: :class:`NumInstancesFilter <nova.scheduler.filters.num_instances_filter.NumInstancesFilter>`
|
||||
.. |IoOpsFilter| replace:: :class:`IoOpsFilter <nova.scheduler.filters.io_ops_filter.IoOpsFilter>`
|
||||
.. |AggregateIoOpsFilter| replace:: :class:`AggregateIoOpsFilter <nova.scheduler.filters.io_ops_filter.AggregateIoOpsFilter>`
|
||||
.. |PciPassthroughFilter| replace:: :class:`PciPassthroughFilter <nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter>`
|
||||
.. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter <nova.scheduler.filters.affinity_filter.SimpleCIDRAffinityFilter>`
|
||||
.. |GroupAntiAffinityFilter| replace:: :class:`GroupAntiAffinityFilter <nova.scheduler.filters.affinity_filter.GroupAntiAffinityFilter>`
|
||||
|
|
|
@ -15,8 +15,10 @@
|
|||
|
||||
from oslo.config import cfg
|
||||
|
||||
from nova.i18n import _LW
|
||||
from nova.openstack.common import log as logging
|
||||
from nova.scheduler import filters
|
||||
from nova.scheduler.filters import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -34,12 +36,16 @@ CONF.register_opt(max_io_ops_per_host_opt)
|
|||
class IoOpsFilter(filters.BaseHostFilter):
|
||||
"""Filter out hosts with too many concurrent I/O operations."""
|
||||
|
||||
def _get_max_io_ops_per_host(self, host_state, filter_properties):
|
||||
return CONF.max_io_ops_per_host
|
||||
|
||||
def host_passes(self, host_state, filter_properties):
|
||||
"""Use information about current vm and task states collected from
|
||||
compute node statistics to decide whether to filter.
|
||||
"""
|
||||
num_io_ops = host_state.num_io_ops
|
||||
max_io_ops = CONF.max_io_ops_per_host
|
||||
max_io_ops = self._get_max_io_ops_per_host(
|
||||
host_state, filter_properties)
|
||||
passes = num_io_ops < max_io_ops
|
||||
if not passes:
|
||||
LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host "
|
||||
|
@ -47,3 +53,27 @@ class IoOpsFilter(filters.BaseHostFilter):
|
|||
{'host_state': host_state,
|
||||
'max_io_ops': max_io_ops})
|
||||
return passes
|
||||
|
||||
|
||||
class AggregateIoOpsFilter(IoOpsFilter):
    """Filter on I/O ops, honoring a per-aggregate ``max_io_ops_per_host``.

    When no aggregate the host belongs to defines the key, the global
    ``max_io_ops_per_host`` configuration option is used instead.
    """

    def _get_max_io_ops_per_host(self, host_state, filter_properties):
        """Return the effective I/O-ops limit for this host.

        Looks up ``max_io_ops_per_host`` in the metadata of the host's
        aggregates, falling back to ``CONF.max_io_ops_per_host`` when the
        key is absent or its value cannot be cast to int.
        """
        # TODO(uni): DB query in filter is a performance hit, especially for
        # system with lots of hosts. Will need a general solution here to fix
        # all filters with aggregate DB call things.
        context = filter_properties['context']
        metadata_values = utils.aggregate_values_from_db(
            context, host_state.host, 'max_io_ops_per_host')
        try:
            return utils.validate_num_values(
                metadata_values, CONF.max_io_ops_per_host, cast_to=int)
        except ValueError as e:
            # A non-numeric aggregate value must not break scheduling;
            # warn and use the global default.
            LOG.warn(_LW("Could not decode max_io_ops_per_host: '%s'"), e)
            return CONF.max_io_ops_per_host
|
||||
|
|
|
@ -1854,3 +1854,28 @@ class HostFiltersTestCase(test.NoDBTestCase):
|
|||
attribute_dict={'metrics': metrics})
|
||||
filt_cls = self.class_map['MetricsFilter']()
|
||||
self.assertFalse(filt_cls.host_passes(host, None))
|
||||
|
||||
def test_aggregate_filter_num_iops_value(self):
|
||||
self.flags(max_io_ops_per_host=7)
|
||||
filt_cls = self.class_map['AggregateIoOpsFilter']()
|
||||
host = fakes.FakeHostState('host1', 'node1',
|
||||
{'num_io_ops': 7})
|
||||
filter_properties = {'context': self.context}
|
||||
self.assertFalse(filt_cls.host_passes(host, filter_properties))
|
||||
self._create_aggregate_with_host(
|
||||
name='fake_aggregate',
|
||||
hosts=['host1'],
|
||||
metadata={'max_io_ops_per_host': 8})
|
||||
self.assertTrue(filt_cls.host_passes(host, filter_properties))
|
||||
|
||||
def test_aggregate_filter_num_iops_value_error(self):
|
||||
self.flags(max_io_ops_per_host=8)
|
||||
filt_cls = self.class_map['AggregateIoOpsFilter']()
|
||||
host = fakes.FakeHostState('host1', 'node1',
|
||||
{'num_io_ops': 7})
|
||||
self._create_aggregate_with_host(
|
||||
name='fake_aggregate',
|
||||
hosts=['host1'],
|
||||
metadata={'max_io_ops_per_host': 'XXX'})
|
||||
filter_properties = {'context': self.context}
|
||||
self.assertTrue(filt_cls.host_passes(host, filter_properties))
|
||||
|
|
Loading…
Reference in New Issue