Refined fix for validating image on rebuild
This aims to fix the issue described in bug 1664931 where a rebuild fails to
validate the existing host with the scheduler when a new image is provided.
The previous attempt to do this could cause rebuilds to fail unnecessarily
because we ran _all_ of the filters during a rebuild, which could cause
usage/resource filters to prevent an otherwise valid rebuild from succeeding.

This aims to classify filters as useful for rebuild or not, and only apply
the former during a rebuild scheduler check. We do this by using an internal
scheduler hint, indicating our intent. This should (a) filter out all hosts
other than the one we're running on and (b) be detectable by the filtering
infrastructure as an internally-generated scheduling request in order to
trigger the correct filtering behavior.

Closes-Bug: #1664931
Change-Id: I1a46ef1503be2febcd20f4594f44344d05525446
parent 2c7302e33e
commit f7c688b8ef

Files changed:
- nova/compute/api.py
- nova/scheduler/filters/: __init__.py, affinity_filter.py,
  aggregate_image_properties_isolation.py, aggregate_instance_extra_specs.py,
  aggregate_multitenancy_isolation.py, all_hosts_filter.py,
  availability_zone_filter.py, compute_capabilities_filter.py,
  compute_filter.py, core_filter.py, disk_filter.py, exact_core_filter.py,
  exact_disk_filter.py, exact_ram_filter.py, image_props_filter.py,
  io_ops_filter.py, isolated_hosts_filter.py, json_filter.py,
  metrics_filter.py, num_instances_filter.py, numa_topology_filter.py,
  pci_passthrough_filter.py, ram_filter.py, retry_filter.py,
  trusted_filter.py, type_filter.py
- nova/scheduler/host_manager.py
- nova/scheduler/utils.py
- nova/tests/…
- releasenotes/notes/…
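
As described in the commit message, when a rebuild supplies a new image the compute API now tags the RequestSpec with an internal hint and pins it to the instance's current host/node before asking conductor to run the scheduler. A minimal sketch of that tagging step, assuming a loaded objects.RequestSpec and Instance (the helper name is illustrative, not part of the change; the real code is in the nova/compute/api.py hunk below):

def _tag_rebuild_check(request_spec, instance):
    # Sketch of the tagging done in the compute API rebuild path (see the
    # api.py hunk below); helper name is illustrative only.
    if 'scheduler_hints' not in request_spec:
        request_spec.scheduler_hints = {}
    # Drop the id so this hint-carrying copy can't accidentally be saved.
    del request_spec.id
    # Internal hint: run only rebuild-relevant filters, and only against
    # the host/node the instance is already on.
    request_spec.scheduler_hints['_nova_check_type'] = ['rebuild']
    request_spec.force_hosts = [instance.host]
    request_spec.force_nodes = [instance.node]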

nova/compute/api.py:

@@ -2941,16 +2941,27 @@ class API(base.Base):
             # through the scheduler again, but we want the instance to be
             # rebuilt on the same host it's already on.
             if orig_image_ref != image_href:
-                request_spec.requested_destination = objects.Destination(
-                    host=instance.host,
-                    node=instance.node)
                 # We have to modify the request spec that goes to the scheduler
                 # to contain the new image. We persist this since we've already
                 # changed the instance.image_ref above so we're being
                 # consistent.
                 request_spec.image = objects.ImageMeta.from_dict(image)
                 request_spec.save()
-                host = None  # This tells conductor to call the scheduler.
+                if 'scheduler_hints' not in request_spec:
+                    request_spec.scheduler_hints = {}
+                # Nuke the id on this so we can't accidentally save
+                # this hint hack later
+                del request_spec.id
+
+                # NOTE(danms): Passing host=None tells conductor to
+                # call the scheduler. The _nova_check_type hint
+                # requires that the scheduler returns only the same
+                # host that we are currently on and only checks
+                # rebuild-related filters.
+                request_spec.scheduler_hints['_nova_check_type'] = ['rebuild']
+                request_spec.force_hosts = [instance.host]
+                request_spec.force_nodes = [instance.node]
+                host = None
         except exception.RequestSpecNotFound:
             # Some old instances can still have no RequestSpec object attached
             # to them, we need to support the old way

nova/scheduler/filters/__init__.py:

@@ -21,9 +21,27 @@ from nova import filters
 
 class BaseHostFilter(filters.BaseFilter):
     """Base class for host filters."""
-    def _filter_one(self, obj, filter_properties):
+
+    # This is set to True if this filter should be run for rebuild.
+    # For example, with rebuild, we need to ask the scheduler if the
+    # existing host is still legit for a rebuild with the new image and
+    # other parameters. We care about running policy filters (i.e.
+    # ImagePropertiesFilter) but not things that check usage on the
+    # existing compute node, etc.
+    RUN_ON_REBUILD = False
+
+    def _filter_one(self, obj, spec):
         """Return True if the object passes the filter, otherwise False."""
-        return self.host_passes(obj, filter_properties)
+        # Do this here so we don't get scheduler.filters.utils
+        from nova.scheduler import utils
+        if not self.RUN_ON_REBUILD and utils.request_is_rebuild(spec):
+            # If we don't filter, default to passing the host.
+            return True
+        else:
+            # We are either a rebuild filter, in which case we always run,
+            # or this request is not rebuild in which case all filters
+            # should run.
+            return self.host_passes(obj, spec)
 
     def host_passes(self, host_state, filter_properties):
         """Return True if the HostState passes the filter, otherwise False.
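
The practical effect of the dispatch above: a filter that keeps the default RUN_ON_REBUILD = False is never consulted for an internally generated rebuild check, it simply passes the host. A rough sketch under that assumption (the toy filter class and the fake host-state string are illustrative only):

from nova import objects
from nova.scheduler import filters

objects.register_all()  # make sure RequestSpec is registered for this sketch


class UsageStyleFilter(filters.BaseHostFilter):
    RUN_ON_REBUILD = False  # capacity-style check, skipped for rebuild

    def host_passes(self, host_state, spec_obj):
        return False  # would reject an already-full host on a normal boot


rebuild_spec = objects.RequestSpec(
    scheduler_hints={'_nova_check_type': ['rebuild']})
# _filter_one() passes without calling host_passes() because the request
# carries the rebuild hint and this filter is not marked RUN_ON_REBUILD.
assert UsageStyleFilter()._filter_one('fake-host-state', rebuild_spec)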

nova/scheduler/filters/affinity_filter.py:

@@ -29,6 +29,8 @@ class DifferentHostFilter(filters.BaseHostFilter):
     # The hosts the instances are running on doesn't change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         affinity_uuids = spec_obj.get_scheduler_hint('different_host')
         if affinity_uuids:
@@ -45,6 +47,8 @@ class SameHostFilter(filters.BaseHostFilter):
     # The hosts the instances are running on doesn't change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         affinity_uuids = spec_obj.get_scheduler_hint('same_host')
         if affinity_uuids:
@@ -59,6 +63,8 @@ class SimpleCIDRAffinityFilter(filters.BaseHostFilter):
     # The address of a host doesn't change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         affinity_cidr = spec_obj.get_scheduler_hint('cidr', '/24')
         affinity_host_addr = spec_obj.get_scheduler_hint('build_near_host_ip')
@@ -77,6 +83,9 @@ class _GroupAntiAffinityFilter(filters.BaseHostFilter):
     """Schedule the instance on a different host from a set of group
     hosts.
     """
+
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         # Only invoke the filter if 'anti-affinity' is configured
         policies = (spec_obj.instance_group.policies
@@ -110,6 +119,9 @@ class ServerGroupAntiAffinityFilter(_GroupAntiAffinityFilter):
 class _GroupAffinityFilter(filters.BaseHostFilter):
     """Schedule the instance on to host from a set of group hosts.
     """
+
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         # Only invoke the filter if 'affinity' is configured
         policies = (spec_obj.instance_group.policies

nova/scheduler/filters/aggregate_image_properties_isolation.py:

@@ -32,6 +32,8 @@ class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
     # Aggregate data and instance type does not change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = True
+
     def host_passes(self, host_state, spec_obj):
         """Checks a host in an aggregate that metadata key/value match
         with image properties.

nova/scheduler/filters/aggregate_instance_extra_specs.py:

@@ -33,6 +33,8 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
     # Aggregate data and instance type does not change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         """Return a list of hosts that can create instance_type

nova/scheduler/filters/aggregate_multitenancy_isolation.py:

@@ -28,6 +28,8 @@ class AggregateMultiTenancyIsolation(filters.BaseHostFilter):
     # Aggregate data and tenant do not change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         """If a host is in an aggregate that has the metadata key
         "filter_tenant_id" it can only create instances from that tenant(s).

nova/scheduler/filters/all_hosts_filter.py:

@@ -23,5 +23,7 @@ class AllHostsFilter(filters.BaseHostFilter):
     # list of hosts doesn't change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         return True

nova/scheduler/filters/availability_zone_filter.py:

@@ -35,6 +35,8 @@ class AvailabilityZoneFilter(filters.BaseHostFilter):
     # Availability zones do not change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         availability_zone = spec_obj.availability_zone

nova/scheduler/filters/compute_capabilities_filter.py:

@@ -30,6 +30,8 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
     # Instance type and host capabilities do not change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = False
+
     def _get_capabilities(self, host_state, scope):
         cap = host_state
         for index in range(0, len(scope)):

nova/scheduler/filters/compute_filter.py:

@@ -25,6 +25,8 @@ LOG = logging.getLogger(__name__)
 class ComputeFilter(filters.BaseHostFilter):
     """Filter on active Compute nodes."""
 
+    RUN_ON_REBUILD = False
+
     def __init__(self):
         self.servicegroup_api = servicegroup.API()

nova/scheduler/filters/core_filter.py:

@@ -26,6 +26,8 @@ LOG = logging.getLogger(__name__)
 
 class BaseCoreFilter(filters.BaseHostFilter):
 
+    RUN_ON_REBUILD = False
+
     def _get_cpu_allocation_ratio(self, host_state, spec_obj):
         raise NotImplementedError

nova/scheduler/filters/disk_filter.py:

@@ -28,6 +28,8 @@ CONF = nova.conf.CONF
 class DiskFilter(filters.BaseHostFilter):
     """Disk Filter with over subscription flag."""
 
+    RUN_ON_REBUILD = False
+
     def _get_disk_allocation_ratio(self, host_state, spec_obj):
         return host_state.disk_allocation_ratio
 
@@ -80,6 +82,8 @@ class AggregateDiskFilter(DiskFilter):
     found.
     """
 
+    RUN_ON_REBUILD = False
+
     def _get_disk_allocation_ratio(self, host_state, spec_obj):
         aggregate_vals = utils.aggregate_values_from_key(
             host_state,

nova/scheduler/filters/exact_core_filter.py:

@@ -25,6 +25,8 @@ LOG = logging.getLogger(__name__)
 class ExactCoreFilter(filters.BaseHostFilter):
     """Exact Core Filter."""
 
+    RUN_ON_REBUILD = False
+
     def __init__(self, *args, **kwargs):
         super(ExactCoreFilter, self).__init__(*args, **kwargs)
         LOG.warning('ExactCoreFilter is deprecated in Pike and will be '

nova/scheduler/filters/exact_disk_filter.py:

@@ -23,6 +23,8 @@ LOG = logging.getLogger(__name__)
 class ExactDiskFilter(filters.BaseHostFilter):
     """Exact Disk Filter."""
 
+    RUN_ON_REBUILD = False
+
     def __init__(self, *args, **kwargs):
         super(ExactDiskFilter, self).__init__(*args, **kwargs)
         LOG.warning('ExactDiskFilter is deprecated in Pike and will be '

nova/scheduler/filters/exact_ram_filter.py:

@@ -23,6 +23,8 @@ LOG = logging.getLogger(__name__)
 class ExactRamFilter(filters.BaseHostFilter):
     """Exact RAM Filter."""
 
+    RUN_ON_REBUILD = False
+
     def __init__(self, *args, **kwargs):
         super(ExactRamFilter, self).__init__(*args, **kwargs)
         LOG.warning('ExactRamFilter is deprecated in Pike and will be '

nova/scheduler/filters/image_props_filter.py:

@@ -35,6 +35,8 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
     contained in the image dictionary in the request_spec.
     """
 
+    RUN_ON_REBUILD = True
+
     # Image Properties and Compute Capabilities do not change within
     # a request
     run_filter_once_per_request = True

nova/scheduler/filters/io_ops_filter.py:

@@ -28,6 +28,8 @@ CONF = nova.conf.CONF
 class IoOpsFilter(filters.BaseHostFilter):
     """Filter out hosts with too many concurrent I/O operations."""
 
+    RUN_ON_REBUILD = False
+
     def _get_max_io_ops_per_host(self, host_state, spec_obj):
         return CONF.filter_scheduler.max_io_ops_per_host

nova/scheduler/filters/isolated_hosts_filter.py:

@@ -25,6 +25,8 @@ class IsolatedHostsFilter(filters.BaseHostFilter):
     # The configuration values do not change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = True
+
     def host_passes(self, host_state, spec_obj):
         """Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set
         to True::

nova/scheduler/filters/json_filter.py:

@@ -26,6 +26,9 @@ class JsonFilter(filters.BaseHostFilter):
     """Host Filter to allow simple JSON-based grammar for
     selecting hosts.
     """
+
+    RUN_ON_REBUILD = False
+
     def _op_compare(self, args, op):
         """Returns True if the specified operator can successfully
         compare the first item in the args with all the rest. Will

nova/scheduler/filters/metrics_filter.py:

@@ -32,6 +32,8 @@ class MetricsFilter(filters.BaseHostFilter):
     these hosts.
     """
 
+    RUN_ON_REBUILD = False
+
     def __init__(self):
         super(MetricsFilter, self).__init__()
         opts = utils.parse_options(CONF.metrics.weight_setting,

nova/scheduler/filters/num_instances_filter.py:

@@ -28,6 +28,8 @@ CONF = nova.conf.CONF
 class NumInstancesFilter(filters.BaseHostFilter):
     """Filter out hosts with too many instances."""
 
+    RUN_ON_REBUILD = False
+
     def _get_max_instances_per_host(self, host_state, spec_obj):
         return CONF.filter_scheduler.max_instances_per_host

nova/scheduler/filters/numa_topology_filter.py:

@@ -23,6 +23,8 @@ LOG = logging.getLogger(__name__)
 class NUMATopologyFilter(filters.BaseHostFilter):
     """Filter on requested NUMA topology."""
 
+    RUN_ON_REBUILD = True
+
     def _satisfies_cpu_policy(self, host_state, extra_specs, image_props):
         """Check that the host_state provided satisfies any available
         CPU policy requirements.

nova/scheduler/filters/pci_passthrough_filter.py:

@@ -40,6 +40,8 @@ class PciPassthroughFilter(filters.BaseHostFilter):
 
     """
 
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         """Return true if the host has the required PCI devices."""
         pci_requests = spec_obj.pci_requests

nova/scheduler/filters/ram_filter.py:

@@ -25,6 +25,8 @@ LOG = logging.getLogger(__name__)
 
 class BaseRamFilter(filters.BaseHostFilter):
 
+    RUN_ON_REBUILD = False
+
     def _get_ram_allocation_ratio(self, host_state, spec_obj):
         raise NotImplementedError

nova/scheduler/filters/retry_filter.py:

@@ -26,6 +26,10 @@ class RetryFilter(filters.BaseHostFilter):
     purposes
     """
 
+    # NOTE(danms): This does not affect _where_ an instance lands, so not
+    # related to rebuild.
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         """Skip nodes that have already been attempted."""
         retry = spec_obj.retry

nova/scheduler/filters/trusted_filter.py:

@@ -230,6 +230,8 @@ class ComputeAttestation(object):
 class TrustedFilter(filters.BaseHostFilter):
     """Trusted filter to support Trusted Compute Pools."""
 
+    RUN_ON_REBUILD = False
+
     def __init__(self):
         self.compute_attestation = ComputeAttestation()
         msg = _LW('The TrustedFilter is deprecated as it has been marked '

nova/scheduler/filters/type_filter.py:

@@ -30,6 +30,8 @@ class TypeAffinityFilter(filters.BaseHostFilter):
     (spread) set to 1 (default).
     """
 
+    RUN_ON_REBUILD = False
+
     def __init__(self):
         super(TypeAffinityFilter, self).__init__()
         LOG.warning('TypeAffinityFilter is deprecated for removal in the '
@@ -64,6 +66,8 @@ class AggregateTypeAffinityFilter(filters.BaseHostFilter):
     # Aggregate data does not change within a request
     run_filter_once_per_request = True
 
+    RUN_ON_REBUILD = False
+
     def host_passes(self, host_state, spec_obj):
         instance_type = spec_obj.flavor

nova/scheduler/host_manager.py:

@@ -577,8 +577,13 @@ class HostManager(object):
                 _match_forced_hosts(name_to_cls_map, force_hosts)
             if force_nodes:
                 _match_forced_nodes(name_to_cls_map, force_nodes)
-            if force_hosts or force_nodes:
-                # NOTE(deva): Skip filters when forcing host or node
+            check_type = ('scheduler_hints' in spec_obj and
+                          spec_obj.scheduler_hints.get('_nova_check_type'))
+            if not check_type and (force_hosts or force_nodes):
+                # NOTE(deva,dansmith): Skip filters when forcing host or node
+                # unless we've declared the internal check type flag, in which
+                # case we're asking for a specific host and for filtering to
+                # be done.
                 if name_to_cls_map:
                     return name_to_cls_map.values()
                 else:

nova/scheduler/utils.py:

@@ -736,3 +736,16 @@ def retry_on_timeout(retries=1):
     return outer
 
 retry_select_destinations = retry_on_timeout(CONF.scheduler.max_attempts - 1)
+
+
+def request_is_rebuild(spec_obj):
+    """Returns True if request is for a rebuild.
+
+    :param spec_obj: An objects.RequestSpec to examine (or None).
+    """
+    if not spec_obj:
+        return False
+    if 'scheduler_hints' not in spec_obj:
+        return False
+    check_type = spec_obj.scheduler_hints.get('_nova_check_type')
+    return check_type == ['rebuild']
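
For reference, how the helper above reacts to the hint set by the compute API; a small illustrative check (values are examples, not taken from the change):

from nova import objects
from nova.scheduler import utils

objects.register_all()  # register RequestSpec for this sketch

rebuild_spec = objects.RequestSpec(
    scheduler_hints={'_nova_check_type': ['rebuild']})
assert utils.request_is_rebuild(rebuild_spec)               # hint present
assert not utils.request_is_rebuild(None)                   # no spec at all
assert not utils.request_is_rebuild(objects.RequestSpec())  # no hints set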

nova/tests/… (functional, ServerRebuildTestCase):

@@ -1066,6 +1066,16 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
     # can use to update image metadata via our compute images proxy API.
     microversion = '2.38'
 
+    def _disable_compute_for(self, server):
+        # Refresh to get its host
+        server = self.api.get_server(server['id'])
+        host = server['OS-EXT-SRV-ATTR:host']
+
+        # Disable the service it is on
+        self.api_fixture.admin_api.put_service('disable',
+                                               {'host': host,
+                                                'binary': 'nova-compute'})
+
     def test_rebuild_with_image_novalidhost(self):
         """Creates a server with an image that is valid for the single compute
         that we have. Then rebuilds the server, passing in an image with
@@ -1073,6 +1083,12 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
         a NoValidHost error. The ImagePropertiesFilter filter is enabled by
         default so that should filter out the host based on the image meta.
         """
+
+        fake.set_nodes(['host2'])
+        self.addCleanup(fake.restore_nodes)
+        self.flags(host='host2')
+        self.compute2 = self.start_service('compute', host='host2')
+
         server_req_body = {
             'server': {
                 # We hard-code from a fake image since we can't get images
@@ -1087,6 +1103,11 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
         }
         server = self.api.post_server(server_req_body)
         self._wait_for_state_change(self.api, server, 'ACTIVE')
+
+        # Disable the host we're on so ComputeFilter would have ruled it out
+        # normally
+        self._disable_compute_for(server)
+
         # Now update the image metadata to be something that won't work with
         # the fake compute driver we're using since the fake driver has an
         # "x86_64" architecture.

nova/tests/… (unit, _ComputeAPIUnitTestMixIn):

@@ -3218,6 +3218,7 @@ class _ComputeAPIUnitTestMixIn(object):
             system_metadata=orig_system_metadata,
             expected_attrs=['system_metadata'],
             image_ref=orig_image_href,
+            node='node',
             vm_mode=fields_obj.VMMode.HVM)
         flavor = instance.get_flavor()
 
@@ -3229,7 +3230,7 @@ class _ComputeAPIUnitTestMixIn(object):
         _get_image.side_effect = get_image
         bdm_get_by_instance_uuid.return_value = bdms
 
-        fake_spec = objects.RequestSpec()
+        fake_spec = objects.RequestSpec(id=1)
         req_spec_get_by_inst_uuid.return_value = fake_spec
 
         with mock.patch.object(self.compute_api.compute_task_api,
@@ -3247,10 +3248,9 @@ class _ComputeAPIUnitTestMixIn(object):
             # assert the request spec was modified so the scheduler picks
             # the existing instance host/node
             req_spec_save.assert_called_once_with()
-            self.assertIn('requested_destination', fake_spec)
-            requested_destination = fake_spec.requested_destination
-            self.assertEqual(instance.host, requested_destination.host)
-            self.assertEqual(instance.node, requested_destination.node)
+            self.assertIn('_nova_check_type', fake_spec.scheduler_hints)
+            self.assertEqual('rebuild',
+                             fake_spec.scheduler_hints['_nova_check_type'][0])
 
             _check_auto_disk_config.assert_called_once_with(image=new_image)
             _checks_for_create_and_rebuild.assert_called_once_with(self.context,
|
@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
fixes:
|
||||||
|
- |
|
||||||
|
The fix for `OSSA-2017-005`_ (CVE-2017-16239) was too far-reaching in that
|
||||||
|
rebuilds can now fail based on scheduling filters that should not apply
|
||||||
|
to rebuild. For example, a rebuild of an instance on a disabled compute
|
||||||
|
host could fail whereas it would not before the fix for CVE-2017-16239.
|
||||||
|
Similarly, rebuilding an instance on a host that is at capacity for vcpu,
|
||||||
|
memory or disk could fail since the scheduler filters would treat it as a
|
||||||
|
new build request even though the rebuild is not claiming *new* resources.
|
||||||
|
|
||||||
|
Therefore this release contains a fix for those regressions in scheduling
|
||||||
|
behavior on rebuild while maintaining the original fix for CVE-2017-16239.
|
||||||
|
|
||||||
|
.. note:: The fix relies on a ``RUN_ON_REBUILD`` variable which is checked
|
||||||
|
for all scheduler filters during a rebuild. The reasoning behind
|
||||||
|
the value for that variable depends on each filter. If you have
|
||||||
|
out-of-tree scheduler filters, you will likely need to assess
|
||||||
|
whether or not they need to override the default value (False)
|
||||||
|
for the new variable.
|