Prepare filters for using RequestSpec object

Now that the FilterScheduler is using a RequestSpec object,
we need to change the filters so that the object is backported
into the legacy dicts before converting each of them.

Release Notes will be updated to mention that custom filters
need to be modified to either use a RequestSpec object or
primitive them into a dict.

Partially-Implements: blueprint request-spec-object-mitaka

UpgradeImpact - see the reno file attached.

Change-Id: I14f18c4507498b1d24a9318fafc97193861ca0b6
This commit is contained in:
Sylvain Bauza 2015-07-16 13:19:19 +02:00
parent 4c06d9073c
commit aeae7040c7
35 changed files with 341 additions and 131 deletions

View File

@ -27,13 +27,13 @@ LOG = logging.getLogger(__name__)
class BaseFilter(object):
"""Base class for all filter classes."""
def _filter_one(self, obj, filter_properties):
def _filter_one(self, obj, spec_obj):
"""Return True if it passes the filter, False otherwise.
Override this in a subclass.
"""
return True
def filter_all(self, filter_obj_list, filter_properties):
def filter_all(self, filter_obj_list, spec_obj):
"""Yield objects that pass the filter.
Can be overridden in a subclass, if you need to base filtering
@ -41,7 +41,7 @@ class BaseFilter(object):
_filter_one() to filter a single object.
"""
for obj in filter_obj_list:
if self._filter_one(obj, filter_properties):
if self._filter_one(obj, spec_obj):
yield obj
# Set to true in a subclass if a filter only needs to be run once
@ -65,7 +65,7 @@ class BaseFilterHandler(loadables.BaseLoader):
This class should be subclassed where one needs to use filters.
"""
def get_filtered_objects(self, filters, objs, filter_properties, index=0):
def get_filtered_objects(self, filters, objs, spec_obj, index=0):
list_objs = list(objs)
LOG.debug("Starting with %d host(s)", len(list_objs))
# Track the hosts as they are removed. The 'full_filter_results' list
@ -82,7 +82,7 @@ class BaseFilterHandler(loadables.BaseLoader):
if filter_.run_filter_for_index(index):
cls_name = filter_.__class__.__name__
start_count = len(list_objs)
objs = filter_.filter_all(list_objs, filter_properties)
objs = filter_.filter_all(list_objs, spec_obj)
if objs is None:
LOG.debug("Filter %s says to stop filtering", cls_name)
return
@ -104,9 +104,17 @@ class BaseFilterHandler(loadables.BaseLoader):
{'cls_name': cls_name, 'obj_len': len(list_objs)})
if not list_objs:
# Log the filtration history
rspec = filter_properties.get("request_spec", {})
inst_props = rspec.get("instance_properties", {})
msg_dict = {"inst_uuid": inst_props.get("uuid", ""),
# NOTE(sbauza): Since the Cells scheduler still provides a legacy
# dictionary for filter_props, and since we agreed on not modifying
# the Cells scheduler to support that because of Cells v2, we
# prefer to define a compatible way to address both types
if isinstance(spec_obj, dict):
rspec = spec_obj.get("request_spec", {})
inst_props = rspec.get("instance_properties", {})
inst_uuid = inst_props.get("uuid", "")
else:
inst_uuid = spec_obj.instance_uuid
msg_dict = {"inst_uuid": inst_uuid,
"str_results": str(full_filter_results),
}
full_msg = ("Filtering removed all hosts for the request with "

View File

@ -127,23 +127,12 @@ class FilterScheduler(driver.Scheduler):
selected_hosts = []
num_instances = spec_obj.num_instances
# TODO(sbauza): Modify the interfaces for HostManager and filters to
# accept the RequestSpec object directly (in a later patch hopefully)
filter_properties = spec_obj.to_legacy_filter_properties_dict()
# NOTE(sbauza): Temporarily adding some keys since filters are
# directly using them - until we provide the RequestSpec directly
filter_properties.update(
{'request_spec': spec_obj.to_legacy_request_spec_dict(),
'instance_type': spec_obj.flavor})
# TODO(sbauza): Adding two keys not used in-tree but which will be
# provided as non-fields for the RequestSpec once we provide it to the
# filters
filter_properties.update({'context': context,
'config_options': config_options})
# NOTE(sbauza): Adding one field for any out-of-tree need
spec_obj.config_options = config_options
for num in range(num_instances):
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties, index=num)
spec_obj, index=num)
if not hosts:
# Can't get any more locally.
break
@ -151,7 +140,7 @@ class FilterScheduler(driver.Scheduler):
LOG.debug("Filtered %(hosts)s", {'hosts': hosts})
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
spec_obj)
LOG.debug("Weighed %(hosts)s", {'hosts': weighed_hosts})
@ -169,8 +158,10 @@ class FilterScheduler(driver.Scheduler):
# Now consume the resources so the filter/weights
# will change for the next instance.
chosen_host.obj.consume_from_request(spec_obj)
if filter_properties.get('group_updated') is True:
filter_properties['group_hosts'].add(chosen_host.obj.host)
if spec_obj.instance_group is not None:
spec_obj.instance_group.hosts.append(chosen_host.obj.host)
# hosts has to be not part of the updates when saving
spec_obj.instance_group.obj_reset_changes(['hosts'])
return selected_hosts
def _get_all_host_states(self, context):

View File

@ -16,8 +16,10 @@
"""
Scheduler host filters
"""
import functools
from nova import filters
from nova import objects
class BaseHostFilter(filters.BaseFilter):
@ -45,3 +47,31 @@ def all_filters():
and should return a list of all filter classes available.
"""
return HostFilterHandler().get_all_classes()
# TODO(sbauza): Remove that decorator once all filters are using RequestSpec
# object directly.
def compat_legacy_props(function):
    """Decorator converting a RequestSpec into a legacy filter_properties dict.

    Filters that have not yet been converted to consume RequestSpec objects
    directly still expect the legacy filter_properties dictionary. When the
    wrapped host_passes() receives a RequestSpec, primitive it back into that
    dictionary before delegating; plain dicts are passed through untouched.
    """
    @functools.wraps(function)
    def decorated_host_passes(self, host_state, filter_properties):
        spec = filter_properties
        if isinstance(spec, objects.RequestSpec):
            props = spec.to_legacy_filter_properties_dict()
            props['request_spec'] = spec.to_legacy_request_spec_dict()
            props['instance_type'] = spec.flavor
            # TODO(sbauza): 'context' and 'config_options' are not real
            # RequestSpec fields; they are provided as extras until the
            # filters consume the object directly.
            props['context'] = spec._context
            props['config_options'] = spec.config_options
            filter_properties = props
        return function(self, host_state, filter_properties)
    return decorated_host_passes

View File

@ -30,6 +30,7 @@ class DifferentHostFilter(filters.BaseHostFilter):
# The hosts the instances are running on doesn't change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
scheduler_hints = filter_properties.get('scheduler_hints') or {}
@ -49,6 +50,7 @@ class SameHostFilter(filters.BaseHostFilter):
# The hosts the instances are running on doesn't change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
scheduler_hints = filter_properties.get('scheduler_hints') or {}
@ -67,6 +69,7 @@ class SimpleCIDRAffinityFilter(filters.BaseHostFilter):
# The address of a host doesn't change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
scheduler_hints = filter_properties.get('scheduler_hints') or {}
@ -87,6 +90,7 @@ class _GroupAntiAffinityFilter(filters.BaseHostFilter):
"""Schedule the instance on a different host from a set of group
hosts.
"""
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
# Only invoke the filter if 'anti-affinity' is configured
policies = filter_properties.get('group_policies', [])
@ -113,6 +117,7 @@ class ServerGroupAntiAffinityFilter(_GroupAntiAffinityFilter):
class _GroupAffinityFilter(filters.BaseHostFilter):
"""Schedule the instance on to host from a set of group hosts.
"""
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
# Only invoke the filter if 'affinity' is configured
policies = filter_properties.get('group_policies', [])

View File

@ -40,6 +40,7 @@ class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
# Aggregate data and instance type does not change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Checks a host in an aggregate that metadata key/value match
with image properties.

View File

@ -33,6 +33,7 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
# Aggregate data and instance type does not change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can create instance_type

View File

@ -28,6 +28,7 @@ class AggregateMultiTenancyIsolation(filters.BaseHostFilter):
# Aggregate data and tenant do not change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""If a host is in an aggregate that has the metadata key
"filter_tenant_id" it can only create instances from that tenant(s).

View File

@ -23,5 +23,6 @@ class AllHostsFilter(filters.BaseHostFilter):
# list of hosts doesn't change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
return True

View File

@ -36,6 +36,7 @@ class AvailabilityZoneFilter(filters.BaseHostFilter):
# Availability zones do not change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
spec = filter_properties.get('request_spec', {})
props = spec.get('instance_properties', {})

View File

@ -92,6 +92,7 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
return False
return True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can create instance_type."""
instance_type = filter_properties.get('instance_type')

View File

@ -34,6 +34,7 @@ class ComputeFilter(filters.BaseHostFilter):
# Host state does not change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Returns True for only active compute nodes."""
service = host_state.service

View File

@ -29,6 +29,7 @@ class BaseCoreFilter(filters.BaseHostFilter):
def _get_cpu_allocation_ratio(self, host_state, filter_properties):
raise NotImplementedError
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient CPU cores."""
instance_type = filter_properties.get('instance_type')

View File

@ -35,6 +35,7 @@ class DiskFilter(filters.BaseHostFilter):
def _get_disk_allocation_ratio(self, host_state, filter_properties):
return CONF.disk_allocation_ratio
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Filter based on disk usage."""
instance_type = filter_properties.get('instance_type')

View File

@ -25,6 +25,7 @@ LOG = logging.getLogger(__name__)
class ExactCoreFilter(filters.BaseHostFilter):
"""Exact Core Filter."""
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Return True if host has the exact number of CPU cores."""
instance_type = filter_properties.get('instance_type')

View File

@ -23,6 +23,7 @@ LOG = logging.getLogger(__name__)
class ExactDiskFilter(filters.BaseHostFilter):
"""Exact Disk Filter."""
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Return True if host has the exact amount of disk available."""
instance_type = filter_properties.get('instance_type')

View File

@ -23,6 +23,7 @@ LOG = logging.getLogger(__name__)
class ExactRamFilter(filters.BaseHostFilter):
"""Exact RAM Filter."""
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Return True if host has the exact amount of RAM available."""
instance_type = filter_properties.get('instance_type')

View File

@ -95,6 +95,7 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
'hypervisor_version': hypervisor_version})
return False
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Check if host passes specified image properties.

View File

@ -39,6 +39,7 @@ class IoOpsFilter(filters.BaseHostFilter):
def _get_max_io_ops_per_host(self, host_state, filter_properties):
return CONF.max_io_ops_per_host
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Use information about current vm and task states collected from
compute node statistics to decide whether to filter.

View File

@ -39,6 +39,7 @@ class IsolatedHostsFilter(filters.BaseHostFilter):
# The configuration values do not change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set
to True::

View File

@ -126,6 +126,7 @@ class JsonFilter(filters.BaseHostFilter):
result = method(self, cooked_args)
return result
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can fulfill the requirements
specified in the query.

View File

@ -43,6 +43,7 @@ class MetricsFilter(filters.BaseHostFilter):
name="metrics.weight_setting")
self.keys = set([x[0] for x in opts])
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
metrics_on_host = set(m.name for m in host_state.metrics)
if not self.keys.issubset(metrics_on_host):

View File

@ -36,6 +36,7 @@ class NumInstancesFilter(filters.BaseHostFilter):
def _get_max_instances_per_host(self, host_state, filter_properties):
return CONF.max_instances_per_host
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
num_instances = host_state.num_instances
max_instances = self._get_max_instances_per_host(

View File

@ -18,6 +18,7 @@ from nova.virt import hardware
class NUMATopologyFilter(filters.BaseHostFilter):
"""Filter on requested NUMA topology."""
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
ram_ratio = host_state.ram_allocation_ratio
cpu_ratio = host_state.cpu_allocation_ratio

View File

@ -40,6 +40,7 @@ class PciPassthroughFilter(filters.BaseHostFilter):
"""
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Return true if the host has the required PCI devices."""
request_spec = filter_properties.get('request_spec', {})

View File

@ -28,6 +28,7 @@ class BaseRamFilter(filters.BaseHostFilter):
def _get_ram_allocation_ratio(self, host_state, filter_properties):
raise NotImplementedError
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Only return hosts with sufficient available RAM."""
instance_type = filter_properties.get('instance_type')

View File

@ -25,6 +25,7 @@ class RetryFilter(filters.BaseHostFilter):
purposes
"""
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Skip nodes that have already been attempted."""
retry = filter_properties.get('retry', None)

View File

@ -265,6 +265,7 @@ class TrustedFilter(filters.BaseHostFilter):
# The hosts the instances are running on doesn't change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
instance_type = filter_properties.get('instance_type', {})
extra = instance_type.get('extra_specs', {})

View File

@ -25,6 +25,7 @@ class TypeAffinityFilter(filters.BaseHostFilter):
(spread) set to 1 (default).
"""
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
"""Dynamically limits hosts to one instance type
@ -48,6 +49,7 @@ class AggregateTypeAffinityFilter(filters.BaseHostFilter):
# Aggregate data does not change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
instance_type = filter_properties.get('instance_type')

View File

@ -449,7 +449,7 @@ class HostManager(object):
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
def get_filtered_hosts(self, hosts, filter_properties,
def get_filtered_hosts(self, hosts, spec_obj,
filter_class_names=None, index=0):
"""Filter hosts and return only ones passing all filters."""
@ -499,9 +499,9 @@ class HostManager(object):
filters = self.default_filters
else:
filters = self._choose_host_filters(filter_class_names)
ignore_hosts = filter_properties.get('ignore_hosts', [])
force_hosts = filter_properties.get('force_hosts', [])
force_nodes = filter_properties.get('force_nodes', [])
ignore_hosts = spec_obj.ignore_hosts or []
force_hosts = spec_obj.force_hosts or []
force_nodes = spec_obj.force_nodes or []
if ignore_hosts or force_hosts or force_nodes:
# NOTE(deva): we can't assume "host" is unique because
@ -523,12 +523,12 @@ class HostManager(object):
hosts = six.itervalues(name_to_cls_map)
return self.filter_handler.get_filtered_objects(filters,
hosts, filter_properties, index)
hosts, spec_obj, index)
def get_weighed_hosts(self, hosts, weight_properties):
def get_weighed_hosts(self, hosts, spec_obj):
"""Weigh the hosts."""
return self.weight_handler.get_weighed_objects(self.weighers,
hosts, weight_properties)
hosts, spec_obj)
def get_all_host_states(self, context):
"""Returns a list of HostStates that represents all the hosts

View File

@ -74,7 +74,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
os_type='Linux',
uuid='fake-uuid',
pci_requests=None,
numa_topology=None)
numa_topology=None,
instance_group=None)
self.mox.ReplayAll()
weighed_hosts = self.driver._schedule(self.context, spec_obj)
self.assertEqual(len(weighed_hosts), 10)
@ -143,7 +144,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
ephemeral_gb=0,
vcpus=1),
pci_requests=None,
numa_topology=None)
numa_topology=None,
instance_group=None)
self.mox.ReplayAll()
hosts = self.driver._schedule(self.context, spec_obj)
@ -178,7 +180,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
ephemeral_gb=0,
vcpus=1),
pci_requests=None,
numa_topology=None)
numa_topology=None,
instance_group=None)
self.mox.ReplayAll()
hosts = self.driver._schedule(self.context, spec_obj)
@ -222,7 +225,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
ephemeral_gb=0,
vcpus=1),
pci_requests=None,
numa_topology=None)
numa_topology=None,
instance_group=None)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)

View File

@ -23,6 +23,7 @@ from six.moves import range
from nova import filters
from nova import loadables
from nova import objects
from nova import test
@ -46,18 +47,18 @@ class FiltersTestCase(test.NoDBTestCase):
def test_filter_all(self):
filter_obj_list = ['obj1', 'obj2', 'obj3']
filter_properties = 'fake_filter_properties'
spec_obj = objects.RequestSpec()
base_filter = filters.BaseFilter()
self.mox.StubOutWithMock(base_filter, '_filter_one')
base_filter._filter_one('obj1', filter_properties).AndReturn(True)
base_filter._filter_one('obj2', filter_properties).AndReturn(False)
base_filter._filter_one('obj3', filter_properties).AndReturn(True)
base_filter._filter_one('obj1', spec_obj).AndReturn(True)
base_filter._filter_one('obj2', spec_obj).AndReturn(False)
base_filter._filter_one('obj3', spec_obj).AndReturn(True)
self.mox.ReplayAll()
result = base_filter.filter_all(filter_obj_list, filter_properties)
result = base_filter.filter_all(filter_obj_list, spec_obj)
self.assertTrue(inspect.isgenerator(result))
self.assertEqual(['obj1', 'obj3'], list(result))
@ -67,7 +68,7 @@ class FiltersTestCase(test.NoDBTestCase):
# call filter_all() with generators returned from previous calls
# to filter_all().
filter_obj_list = ['obj1', 'obj2', 'obj3']
filter_properties = 'fake_filter_properties'
spec_obj = objects.RequestSpec()
base_filter = filters.BaseFilter()
self.mox.StubOutWithMock(base_filter, '_filter_one')
@ -83,16 +84,16 @@ class FiltersTestCase(test.NoDBTestCase):
# After that, 'obj3' gets yielded 'total_iterations' number of
# times.
for x in range(total_iterations):
base_filter._filter_one('obj1', filter_properties).AndReturn(True)
base_filter._filter_one('obj2', filter_properties).AndReturn(False)
base_filter._filter_one('obj1', spec_obj).AndReturn(True)
base_filter._filter_one('obj2', spec_obj).AndReturn(False)
for x in range(total_iterations):
base_filter._filter_one('obj3', filter_properties).AndReturn(True)
base_filter._filter_one('obj3', spec_obj).AndReturn(True)
self.mox.ReplayAll()
objs = iter(filter_obj_list)
for x in range(total_iterations):
# Pass in generators returned from previous calls.
objs = base_filter.filter_all(objs, filter_properties)
objs = base_filter.filter_all(objs, spec_obj)
self.assertTrue(inspect.isgenerator(objs))
self.assertEqual(['obj1', 'obj3'], list(objs))
@ -100,7 +101,7 @@ class FiltersTestCase(test.NoDBTestCase):
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_objs_second = ['second', 'filter2', 'objects2']
filter_objs_last = ['last', 'filter3', 'objects3']
filter_properties = 'fake_filter_properties'
spec_obj = objects.RequestSpec()
def _fake_base_loader_init(*args, **kwargs):
pass
@ -122,10 +123,10 @@ class FiltersTestCase(test.NoDBTestCase):
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(filter_objs_second)
spec_obj).AndReturn(filter_objs_second)
filt2_mock.run_filter_for_index(0).AndReturn(True)
filt2_mock.filter_all(filter_objs_second,
filter_properties).AndReturn(filter_objs_last)
spec_obj).AndReturn(filter_objs_last)
self.mox.ReplayAll()
@ -133,7 +134,7 @@ class FiltersTestCase(test.NoDBTestCase):
filter_mocks = [filt1_mock, filt2_mock]
result = filter_handler.get_filtered_objects(filter_mocks,
filter_objs_initial,
filter_properties)
spec_obj)
self.assertEqual(filter_objs_last, result)
def test_get_filtered_objects_for_index(self):
@ -142,7 +143,7 @@ class FiltersTestCase(test.NoDBTestCase):
"""
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_objs_second = ['second', 'filter2', 'objects2']
filter_properties = 'fake_filter_properties'
spec_obj = objects.RequestSpec()
def _fake_base_loader_init(*args, **kwargs):
pass
@ -164,7 +165,7 @@ class FiltersTestCase(test.NoDBTestCase):
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(filter_objs_second)
spec_obj).AndReturn(filter_objs_second)
# return false so filter_all will not be called
filt2_mock.run_filter_for_index(0).AndReturn(False)
@ -174,11 +175,11 @@ class FiltersTestCase(test.NoDBTestCase):
filter_mocks = [filt1_mock, filt2_mock]
filter_handler.get_filtered_objects(filter_mocks,
filter_objs_initial,
filter_properties)
spec_obj)
def test_get_filtered_objects_none_response(self):
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_properties = 'fake_filter_properties'
spec_obj = objects.RequestSpec()
def _fake_base_loader_init(*args, **kwargs):
pass
@ -200,26 +201,86 @@ class FiltersTestCase(test.NoDBTestCase):
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(None)
spec_obj).AndReturn(None)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
result = filter_handler.get_filtered_objects(filter_mocks,
filter_objs_initial,
filter_properties)
spec_obj)
self.assertIsNone(result)
def test_get_filtered_objects_info_log_none_returned(self):
    # When every host is filtered out, an INFO message must be logged
    # containing the RequestSpec's instance uuid and the per-filter
    # (start, end) host counts from the filtration history.
    LOG = filters.LOG

    class FilterA(filters.BaseFilter):
        def filter_all(self, list_objs, spec_obj):
            # return all but the first object
            return list_objs[1:]

    class FilterB(filters.BaseFilter):
        def filter_all(self, list_objs, spec_obj):
            # return an empty list
            return []

    filter_a = FilterA()
    filter_b = FilterB()
    all_filters = [filter_a, filter_b]
    hosts = ["Host0", "Host1", "Host2"]
    fake_uuid = "uuid"
    # The handler now receives a RequestSpec object, not a legacy dict.
    spec_obj = objects.RequestSpec(instance_uuid=fake_uuid)
    with mock.patch.object(LOG, "info") as mock_log:
        result = self.filter_handler.get_filtered_objects(
            all_filters, hosts, spec_obj)
        self.assertFalse(result)
        # FilterA should leave Host1 and Host2; FilterB should leave None.
        exp_output = ("['FilterA: (start: 3, end: 2)', "
                      "'FilterB: (start: 2, end: 0)']")
        cargs = mock_log.call_args[0][0]
        self.assertIn("with instance ID '%s'" % fake_uuid, cargs)
        self.assertIn(exp_output, cargs)
def test_get_filtered_objects_debug_log_none_returned(self):
    # When every host is filtered out, the DEBUG message must include the
    # full per-filter results (remaining hosts per filter) and the
    # RequestSpec's instance uuid.
    LOG = filters.LOG

    class FilterA(filters.BaseFilter):
        def filter_all(self, list_objs, spec_obj):
            # return all but the first object
            return list_objs[1:]

    class FilterB(filters.BaseFilter):
        def filter_all(self, list_objs, spec_obj):
            # return an empty list
            return []

    filter_a = FilterA()
    filter_b = FilterB()
    all_filters = [filter_a, filter_b]
    hosts = ["Host0", "Host1", "Host2"]
    fake_uuid = "uuid"
    # The handler now receives a RequestSpec object, not a legacy dict.
    spec_obj = objects.RequestSpec(instance_uuid=fake_uuid)
    with mock.patch.object(LOG, "debug") as mock_log:
        result = self.filter_handler.get_filtered_objects(
            all_filters, hosts, spec_obj)
        self.assertFalse(result)
        # FilterA should leave Host1 and Host2; FilterB should leave None.
        exp_output = ("[('FilterA', [('Host1', ''), ('Host2', '')]), " +
                      "('FilterB', None)]")
        cargs = mock_log.call_args[0][0]
        self.assertIn("with instance ID '%s'" % fake_uuid, cargs)
        self.assertIn(exp_output, cargs)
def test_get_filtered_objects_compatible_with_filt_props_dicts(self):
LOG = filters.LOG
class FilterA(filters.BaseFilter):
def filter_all(self, list_objs, spec_obj):
# return all but the first object
return list_objs[1:]
class FilterB(filters.BaseFilter):
def filter_all(self, list_objs, spec_obj):
# return an empty list
return []
@ -240,34 +301,3 @@ class FiltersTestCase(test.NoDBTestCase):
cargs = mock_log.call_args[0][0]
self.assertIn("with instance ID '%s'" % fake_uuid, cargs)
self.assertIn(exp_output, cargs)
def test_get_filtered_objects_debug_log_none_returned(self):
    # Legacy variant: the handler is driven with a filter_properties
    # dict (instance uuid nested under request_spec/instance_properties)
    # instead of a RequestSpec object.
    LOG = filters.LOG

    class FilterA(filters.BaseFilter):
        def filter_all(self, list_objs, filter_properties):
            # return all but the first object
            return list_objs[1:]

    class FilterB(filters.BaseFilter):
        def filter_all(self, list_objs, filter_properties):
            # return an empty list
            return []

    filter_a = FilterA()
    filter_b = FilterB()
    all_filters = [filter_a, filter_b]
    hosts = ["Host0", "Host1", "Host2"]
    fake_uuid = "uuid"
    filt_props = {"request_spec": {"instance_properties": {
        "uuid": fake_uuid}}}
    with mock.patch.object(LOG, "debug") as mock_log:
        result = self.filter_handler.get_filtered_objects(
            all_filters, hosts, filt_props)
        self.assertFalse(result)
        # FilterA should leave Host1 and Host2; FilterB should leave None.
        exp_output = ("[('FilterA', [('Host1', ''), ('Host2', '')]), " +
                      "('FilterB', None)]")
        cargs = mock_log.call_args[0][0]
        self.assertIn("with instance ID '%s'" % fake_uuid, cargs)
        self.assertIn(exp_output, cargs)

View File

@ -14,7 +14,9 @@
"""
Tests For Scheduler Host Filters.
"""
import mock
from nova import objects
from nova.scheduler import filters
from nova.scheduler.filters import all_hosts_filter
from nova.scheduler.filters import compute_filter
@ -36,3 +38,27 @@ class HostFiltersTestCase(test.NoDBTestCase):
filt_cls = all_hosts_filter.AllHostsFilter()
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, {}))
@mock.patch.object(objects.RequestSpec, 'to_legacy_request_spec_dict')
@mock.patch.object(objects.RequestSpec, 'to_legacy_filter_properties_dict')
def test_compat_legacy_props(self, to_props, to_spec):
    # Verify the compat decorator primitives a RequestSpec into the
    # legacy filter_properties dict: the legacy props are augmented with
    # 'request_spec', 'instance_type', 'context' and 'config_options'
    # before being handed to the wrapped host_passes().
    fake_flavor = objects.Flavor()
    fake_context = mock.Mock()
    fake_spec = objects.RequestSpec(context=fake_context,
                                    flavor=fake_flavor)
    # config_options is a non-field extra set by the scheduler driver.
    fake_spec.config_options = None
    to_props.return_value = {'prop1': 'val1'}
    to_spec.return_value = {'spec1': 'val2'}

    @filters.compat_legacy_props
    def fake_host_passes(self, host_state, filter_properties):
        # NOTE(sbauza): Convenient way to verify the passed properties
        return filter_properties

    expected = {'prop1': 'val1',
                'request_spec': {'spec1': 'val2'},
                'instance_type': fake_flavor,
                'context': fake_context,
                'config_options': None}
    self.assertEqual(expected,
                     fake_host_passes('self', 'host_state', fake_spec))

View File

@ -207,7 +207,10 @@ class HostManagerTestCase(test.NoDBTestCase):
self.assertEqual(set(info['expected_objs']), set(result))
def test_get_filtered_hosts(self):
fake_properties = {'moo': 1, 'cow': 2}
fake_properties = objects.RequestSpec(ignore_hosts=[],
instance_uuid='fake-uuid1',
force_hosts=[],
force_nodes=[])
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
@ -220,7 +223,10 @@ class HostManagerTestCase(test.NoDBTestCase):
@mock.patch.object(FakeFilterClass2, '_filter_one', return_value=True)
def test_get_filtered_hosts_with_specified_filters(self, mock_filter_one):
fake_properties = {'moo': 1, 'cow': 2}
fake_properties = objects.RequestSpec(ignore_hosts=[],
instance_uuid='fake-uuid1',
force_hosts=[],
force_nodes=[])
specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
info = {'expected_objs': self.fake_hosts,
@ -232,8 +238,12 @@ class HostManagerTestCase(test.NoDBTestCase):
self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore(self):
fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost'],
force_hosts=[],
force_nodes=[])
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
@ -245,8 +255,11 @@ class HostManagerTestCase(test.NoDBTestCase):
self._verify_result(info, result)
def test_get_filtered_hosts_with_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
'fake_host5']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=[],
force_hosts=['fake_host1', 'fake_host3', 'fake_host5'],
force_nodes=[])
# [0] and [2] are host1 and host3
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
@ -258,7 +271,11 @@ class HostManagerTestCase(test.NoDBTestCase):
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_no_matching_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=[],
force_hosts=['fake_host5', 'fake_host6'],
force_nodes=[])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
@ -270,8 +287,11 @@ class HostManagerTestCase(test.NoDBTestCase):
def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=['fake_host1'],
force_hosts=['fake_host3', 'fake_host1'],
force_nodes=[])
# only fake_host3 should be left.
info = {'expected_objs': [self.fake_hosts[2]],
@ -284,7 +304,11 @@ class HostManagerTestCase(test.NoDBTestCase):
def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
# Ensure all nodes returned for a host with many nodes
fake_properties = {'force_hosts': ['fake_multihost']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=[],
force_hosts=['fake_multihost'],
force_nodes=[])
info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
self.fake_hosts[6], self.fake_hosts[7]],
@ -296,8 +320,11 @@ class HostManagerTestCase(test.NoDBTestCase):
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_nodes(self):
fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
'fake-node9']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=[],
force_hosts=[],
force_nodes=['fake-node2', 'fake-node4', 'fake-node9'])
# [5] is fake-node2, [7] is fake-node4
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
@ -310,8 +337,11 @@ class HostManagerTestCase(test.NoDBTestCase):
def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
# Ensure only overlapping results if both force host and node
fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
'force_nodes': ['fake-node2', 'fake-node9']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=[],
force_hosts=['fake-host1', 'fake_multihost'],
force_nodes=['fake-node2', 'fake-node9'])
# [5] is fake-node2
info = {'expected_objs': [self.fake_hosts[5]],
@ -324,8 +354,11 @@ class HostManagerTestCase(test.NoDBTestCase):
def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
# Ensure non-overlapping force_node and force_host yield no result
fake_properties = {'force_hosts': ['fake_multihost'],
'force_nodes': ['fake-node']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=[],
force_hosts=['fake_multihost'],
force_nodes=['fake-node'])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
@ -337,8 +370,11 @@ class HostManagerTestCase(test.NoDBTestCase):
def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
# Ensure ignore_hosts can coexist with force_nodes
fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
'ignore_hosts': ['fake_host1', 'fake_host2']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=['fake_host1', 'fake_host2'],
force_hosts=[],
force_nodes=['fake-node4', 'fake-node2'])
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
@ -350,8 +386,11 @@ class HostManagerTestCase(test.NoDBTestCase):
def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
# Ensure ignore_hosts is processed before force_nodes
fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
'ignore_hosts': ['fake_multihost']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid1',
ignore_hosts=['fake_multihost'],
force_hosts=[],
force_nodes=['fake_node4', 'fake_node2'])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
@ -885,9 +924,9 @@ class HostStateTestCase(test.NoDBTestCase):
numa_fit_mock.return_value = fake_numa_topology
instance_init_mock.return_value = fake_instance
spec_obj = objects.RequestSpec(
instance_uuid='fake-uuid',
flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
vcpus=0),
uuid='fake-uuid',
numa_topology=fake_numa_topology,
pci_requests=objects.InstancePCIRequests(requests=[]))
host = host_manager.HostState("fakehost", "fakenode")
@ -905,9 +944,9 @@ class HostStateTestCase(test.NoDBTestCase):
second_numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell()])
spec_obj = objects.RequestSpec(
instance_uuid='fake-uuid',
flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
vcpus=0),
uuid='fake-uuid',
numa_topology=second_numa_topology,
pci_requests=objects.InstancePCIRequests(requests=[]))
second_host_numa_topology = mock.Mock()
@ -936,6 +975,7 @@ class HostStateTestCase(test.NoDBTestCase):
for r in fake_requests],
instance_uuid='fake-uuid')
req_spec = objects.RequestSpec(
instance_uuid='fake-uuid',
project_id='12345',
numa_topology=inst_topology,
pci_requests=fake_requests_obj,
@ -968,6 +1008,7 @@ class HostStateTestCase(test.NoDBTestCase):
for r in fake_requests],
instance_uuid='fake-uuid')
req_spec = objects.RequestSpec(
instance_uuid='fake-uuid',
project_id='12345',
numa_topology=None,
pci_requests=fake_requests_obj,

View File

@ -329,7 +329,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
self.assertEqual(set(info['expected_objs']), set(result))
def test_get_filtered_hosts(self):
fake_properties = {'moo': 1, 'cow': 2}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=[],
force_hosts=[],
force_nodes=[])
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
@ -342,7 +346,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
@mock.patch.object(FakeFilterClass2, '_filter_one', return_value=True)
def test_get_filtered_hosts_with_specified_filters(self, mock_filter_one):
fake_properties = {'moo': 1, 'cow': 2}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=[],
force_hosts=[],
force_nodes=[])
specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
info = {'expected_objs': self.fake_hosts,
@ -354,8 +362,12 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore(self):
fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost'],
force_hosts=[],
force_nodes=[])
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
@ -367,8 +379,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
self._verify_result(info, result)
def test_get_filtered_hosts_with_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
'fake_host5']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=[],
force_hosts=['fake_host1', 'fake_host3', 'fake_host5'],
force_nodes=[])
# [0] and [2] are host1 and host3
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
@ -380,7 +395,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_no_matching_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=[],
force_hosts=['fake_host5', 'fake_host6'],
force_nodes=[])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
@ -392,8 +411,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=['fake_host1'],
force_hosts=['fake_host3', 'fake_host1'],
force_nodes=[])
# only fake_host3 should be left.
info = {'expected_objs': [self.fake_hosts[2]],
@ -406,7 +428,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
# Ensure all nodes returned for a host with many nodes
fake_properties = {'force_hosts': ['fake_multihost']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=[],
force_hosts=['fake_multihost'],
force_nodes=[])
info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
self.fake_hosts[6], self.fake_hosts[7]],
@ -418,8 +444,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_nodes(self):
fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
'fake-node9']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=[],
force_hosts=[],
force_nodes=['fake-node2', 'fake-node4', 'fake-node9'])
# [5] is fake-node2, [7] is fake-node4
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
@ -432,8 +461,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
# Ensure only overlapping results if both force host and node
fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
'force_nodes': ['fake-node2', 'fake-node9']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=[],
force_hosts=['fake_host1', 'fake_multihost'],
force_nodes=['fake-node2', 'fake-node9'])
# [5] is fake-node2
info = {'expected_objs': [self.fake_hosts[5]],
@ -446,8 +478,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
# Ensure non-overlapping force_node and force_host yield no result
fake_properties = {'force_hosts': ['fake_multihost'],
'force_nodes': ['fake-node']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=[],
force_hosts=['fake_multihost'],
force_nodes=['fake-node'])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
@ -459,8 +494,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
# Ensure ignore_hosts can coexist with force_nodes
fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
'ignore_hosts': ['fake_host1', 'fake_host2']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=['fake_host1', 'fake_host2'],
force_hosts=[],
force_nodes=['fake-node4', 'fake-node2'])
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
@ -472,8 +510,11 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
# Ensure ignore_hosts is processed before force_nodes
fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
'ignore_hosts': ['fake_multihost']}
fake_properties = objects.RequestSpec(
instance_uuid='fake-uuid',
ignore_hosts=['fake_multihost'],
force_hosts=[],
force_nodes=['fake_node4', 'fake_node2'])
info = {'expected_objs': [],
'expected_fprops': fake_properties}

View File

@ -0,0 +1,9 @@
---
upgrade:
- |
    The internal filter interface has changed: filters now receive a
    RequestSpec NovaObject instead of the old filter_properties dictionary.
    If you run out-of-tree filters, you need to modify the host_passes()
    method to accept the new RequestSpec object and update the filter
    internals to use that object. You can look at the in-tree filters for
    examples, or ask for help in the #openstack-nova IRC channel.