Merge "Disable NUMATopologyFilter on rebuild" into stable/rocky

Zuul 2020-08-18 02:30:04 +00:00 committed by Gerrit Code Review
commit b2a7b4e81d
3 changed files with 37 additions and 8 deletions


@@ -23,7 +23,11 @@ LOG = logging.getLogger(__name__)
 class NUMATopologyFilter(filters.BaseHostFilter):
     """Filter on requested NUMA topology."""
 
-    RUN_ON_REBUILD = True
+    # NOTE(sean-k-mooney): In change I0322d872bdff68936033a6f5a54e8296a6fb343
+    # we validate that the NUMA topology does not change in the api. If the
+    # requested image would alter the NUMA constraints we reject the rebuild
+    # request and therefore do not need to run this filter on rebuild.
+    RUN_ON_REBUILD = False
 
     def _satisfies_cpu_policy(self, host_state, extra_specs, image_props):
         """Check that the host_state provided satisfies any available


@@ -17,6 +17,8 @@ import fixtures
 import mock
 import six
+from testtools import skip
+
 from oslo_config import cfg
 from oslo_log import log as logging
@@ -31,6 +33,7 @@ from nova.tests.unit import fake_notifier
 from nova.tests.unit.virt.libvirt import fake_imagebackend
 from nova.tests.unit.virt.libvirt import fake_libvirt_utils
 from nova.tests.unit.virt.libvirt import fakelibvirt
 
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -456,7 +459,17 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
         self.assertTrue(filter_mock.called)
         self.assertEqual('ACTIVE', status)
 
-    def test_rebuild_server_with_network_affinity(self):
+    # FIXME(sean-k-mooney): The logic of this test is incorrect.
+    # The test was written to assert that we failed to rebuild
+    # because the NUMA constraints were violated due to the attachment
+    # of an interface from a second host NUMA node to an instance with
+    # a NUMA topology of 1 node that is affined to a different NUMA node.
+    # Nova should reject the interface attachment if the NUMA constraints
+    # would be violated, and it should fail at that point, not when the
+    # instance is rebuilt. This is a latent bug which will be addressed
+    # in a separate patch.
+    @skip("bug 1855332")
+    def test_attach_interface_with_network_affinity_violation(self):
         extra_spec = {'hw:numa_nodes': '1'}
         flavor_id = self._create_flavor(extra_spec=extra_spec)
         networks = [
@@ -491,10 +504,15 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
                 'net_id': NUMAAffinityNeutronFixture.network_2['id'],
             }
         }
+        # FIXME(sean-k-mooney): This should raise an exception as this
+        # interface attachment would violate the NUMA constraints.
         self.api.attach_interface(server['id'], post)
         post = {'rebuild': {
             'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
         }}
+
+        # NOTE(sean-k-mooney): the rest of the test is incorrect but
+        # is left to show the currently broken behavior.
         # Now this should fail because we've violated the NUMA requirements
         # with the latest attachment
         ex = self.assertRaises(client.OpenStackApiException,
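
As the FIXME above notes, the attachment itself is what ought to be rejected. A hypothetical sketch of the assertion the test could make once bug 1855332 is addressed, reusing the client pattern already visible in this hunk (the 400 status code is an assumption):

        # Hypothetical: once bug 1855332 is fixed, the interface
        # attachment itself should be rejected, so the failure would be
        # asserted at attach time rather than at rebuild time.
        ex = self.assertRaises(
            client.OpenStackApiException,
            self.api.attach_interface, server['id'], post)
        self.assertEqual(400, ex.response.status_code)  # assumed code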
@@ -659,12 +677,8 @@ class NUMAServersRebuildTests(NUMAServersTestBase):
         server = self._create_active_server(
             server_args={"flavorRef": flavor_id})
 
-        # TODO(sean-k-mooney): this should pass but i currently expect it to
-        # fail because the NUMA topology filter does not support in place
-        # rebuild and we have used all the resources on the compute node.
-        self.assertRaises(
-            client.OpenStackApiException, self._rebuild_server,
-            server, self.image_ref_1)
+        # This should succeed as the NUMA constraints do not change.
+        self._rebuild_server(server, self.image_ref_1)
 
     def test_rebuild_server_with_different_numa_topology_fails(self):
         """Create a NUMA instance and ensure inplace rebuild fails.


@@ -14,3 +14,14 @@ fixes:
     and rejects the rebuild.
 
     .. _`bug #1763766`: https://bugs.launchpad.net/nova/+bug/1763766
+features:
+  - |
+    With the changes introduced to address `bug #1763766`_, Nova now guards
+    against NUMA constraint changes on rebuild. As a result the
+    ``NUMATopologyFilter`` is no longer required to run on rebuild since
+    we already know the topology will not change and therefore the existing
+    resource claim is still valid. As such it is now possible to do an
+    in-place rebuild of an instance with a NUMA topology even if the image
+    changes, provided the new image does not alter the topology.
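
In practice the release note means a rebuild whose new image leaves the NUMA topology untouched now succeeds in place, while one that would change it (for example by adding ``hw_numa_nodes`` image metadata) is rejected by the API before scheduling is involved. A hypothetical sketch in the style of the functional tests above (names such as image_with_two_numa_nodes are placeholders, and the 400 status code is an assumption):

        # Hypothetical sketch: a rebuild to an image that would change
        # the instance's NUMA topology is rejected by the API, so the
        # scheduler (and NUMATopologyFilter) never needs to re-run.
        rebuild = {'rebuild': {'imageRef': image_with_two_numa_nodes}}
        ex = self.assertRaises(
            client.OpenStackApiException,
            self.api.api_post,
            '/servers/%s/action' % server['id'], rebuild)
        self.assertEqual(400, ex.response.status_code)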