Call evaluate_hardware_support exactly once per hwm
Fixes an issue where we could call evaluate_hardware_support multiple times
each run. Now, instead, we cache the values and use the cache where needed.

Adds unit test coverage for get_managers and the new method. Fixes an issue
where we were caching hardware managers between unit tests.

Also includes fixes for codespell CI:
- skip build files in repo
- fix spelling issues introduced to repo

Closes-bug: 2066308
Change-Id: Iebc5b6d2440bfc9f23daa322493379bbe69e84d0
This commit is contained in:
parent 0acaa1e3be
commit c39517b044
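For orientation, here is a condensed, self-contained sketch of the caching pattern this change introduces: evaluate_hardware_support() runs exactly once per manager, the results are cached in a module-level list sorted by support value, and get_managers() becomes a thin shim over the detailed view. The FakeGenericManager and FakeMainlineManager classes and the print calls are illustrative stand-ins, not IPA code; the real implementation is in the ironic_python_agent/hardware.py hunk below.

import functools

_global_managers = None  # module-level cache, mirroring hardware._global_managers


class FakeGenericManager:
    def evaluate_hardware_support(self):
        print('evaluating generic')   # stand-in for an expensive hardware scan
        return 1


class FakeMainlineManager:
    def evaluate_hardware_support(self):
        print('evaluating mainline')  # also expensive; must only run once
        return 3


def _compare_managers(hwm1, hwm2):
    # Sort descending by the cached support value, never by re-evaluating.
    return hwm2['support'] - hwm1['support']


def get_managers_detail():
    """Return [{'name': ..., 'manager': ..., 'support': ...}, ...], cached."""
    global _global_managers
    if not _global_managers:
        found = []
        for hwm in (FakeGenericManager(), FakeMainlineManager()):
            support = hwm.evaluate_hardware_support()  # called once per manager
            if support > 0:
                found.append({'name': hwm.__class__.__name__,
                              'manager': hwm,
                              'support': support})
        _global_managers = sorted(found,
                                  key=functools.cmp_to_key(_compare_managers))
    return _global_managers


def get_managers():
    # Backwards-compatible shim: plain list of manager objects.
    return [hwm['manager'] for hwm in get_managers_detail()]


if __name__ == '__main__':
    get_managers()          # triggers evaluation, prints twice
    get_managers_detail()   # served from the cache, prints nothing

The design keeps get_managers() API-compatible for existing callers, while code such as deduplicate_steps() reads the cached support values from get_managers_detail() instead of re-dispatching evaluate_hardware_support.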
@@ -504,7 +504,7 @@ def list_all_block_devices(block_type='disk',
     :param all_serial_and_wwn: Don't collect serial and wwn numbers based
                                on a priority order, instead collect wwn
                                numbers from both udevadm and lsblk. When
-                               enabled this option will allso collect both
+                               enabled this option will also collect both
                                the short and the long serial from udevadm if
                                possible.
@@ -3268,46 +3268,65 @@ def _collect_udev(io_dict):
         io_dict[f'udev/{kname}'] = buf
 
 
-def _compare_extensions(ext1, ext2):
-    mgr1 = ext1.obj
-    mgr2 = ext2.obj
-    return mgr2.evaluate_hardware_support() - mgr1.evaluate_hardware_support()
+def _compare_managers(hwm1, hwm2):
+    return hwm2['support'] - hwm1['support']
+
+
+def _get_extensions():
+    return stevedore.ExtensionManager(
+        namespace='ironic_python_agent.hardware_managers',
+        invoke_on_load=True
+    )
 
 
 def get_managers():
     """Get a list of hardware managers in priority order.
 
     Use stevedore to find all eligible hardware managers, sort them based on
     self-reported (via evaluate_hardware_support()) priorities, and return them
     in a list. The resulting list is cached in _global_managers.
+    This exists as a backwards compatibility shim, returning a simple list
+    of managers where expected. New usages should use get_managers_detail.
 
     :returns: Priority-sorted list of hardware managers
     :raises HardwareManagerNotFound: if no valid hardware managers found
     """
+    return [hwm['manager'] for hwm in get_managers_detail()]
+
+
+def get_managers_detail():
+    """Get detailed information about hardware managers
+
+    Use stevedore to find all eligible hardware managers, sort them based on
+    self-reported (via evaluate_hardware_support()) priorities, and return a
+    dict containing the manager object, it's class name, and hardware support
+    value. The resulting list is cached in _global_managers.
+
+    :returns: list of dictionaries representing hardware managers and metadata
+    :raises HardwareManagerNotFound: if no valid hardware managers found
+    """
     global _global_managers
 
     if not _global_managers:
-        extension_manager = stevedore.ExtensionManager(
-            namespace='ironic_python_agent.hardware_managers',
-            invoke_on_load=True)
-
-        # There will always be at least one extension available (the
-        # GenericHardwareManager).
-        extensions = sorted(extension_manager,
-                            key=functools.cmp_to_key(_compare_extensions))
-
         preferred_managers = []
 
-        for extension in extensions:
-            if extension.obj.evaluate_hardware_support() > 0:
-                preferred_managers.append(extension.obj)
+        for extension in _get_extensions():
+            hwm = extension.obj
+            hardware_support = hwm.evaluate_hardware_support()
+            if hardware_support > 0:
+                preferred_managers.append({
+                    'name': hwm.__class__.__name__,
+                    'manager': hwm,
+                    'support': hardware_support
+                })
                 LOG.info('Hardware manager found: %s',
                          extension.entry_point_target)
 
         if not preferred_managers:
             raise errors.HardwareManagerNotFound
 
-        _global_managers = preferred_managers
+        hwms = sorted(preferred_managers,
+                      key=functools.cmp_to_key(_compare_managers))
+
+        _global_managers = hwms
 
     return _global_managers
@@ -3501,20 +3520,14 @@ def deduplicate_steps(candidate_steps):
                             all managers, key=manager, value=list of steps
     :returns: A deduplicated dictionary of {hardware_manager: [steps]}
     """
-    support = dispatch_to_all_managers(
-        'evaluate_hardware_support')
+    support = {hwm['name']: hwm['support']
+               for hwm in get_managers_detail()}
 
     steps = collections.defaultdict(list)
     deduped_steps = collections.defaultdict(list)
 
     for manager, manager_steps in candidate_steps.items():
         # We cannot deduplicate steps with unknown hardware support
         if manager not in support:
             LOG.warning('Unknown hardware support for %(manager)s, '
                         'dropping steps: %(steps)s',
                         {'manager': manager, 'steps': manager_steps})
             continue
 
         for step in manager_steps:
             # build a new dict of steps that's easier to filter
             step['hwm'] = {'name': manager,
@@ -63,6 +63,7 @@ class IronicAgentTest(test_base.BaseTestCase):
 
         ext_base._EXT_MANAGER = None
         hardware._CACHED_HW_INFO = None
+        hardware._global_managers = None
 
     def _set_config(self):
         self.cfg_fixture = self.useFixture(config_fixture.Config(CONF))
@@ -34,12 +34,14 @@ class TestCleanExtension(base.IronicAgentTest):
         }
         self.version = {'generic': '1', 'specific': '1'}
 
+    @mock.patch('ironic_python_agent.hardware.get_managers_detail',
+                autospec=True)
     @mock.patch('ironic_python_agent.hardware.get_current_versions',
                 autospec=True)
     @mock.patch('ironic_python_agent.hardware.dispatch_to_all_managers',
                 autospec=True)
     def test_get_clean_steps(self, mock_dispatch, mock_version,
-                             mock_cache_node):
+                             mock_managers, mock_cache_node):
         mock_version.return_value = self.version
 
         manager_steps = {
@@ -118,13 +120,17 @@ class TestCleanExtension(base.IronicAgentTest):
         }
 
-        hardware_support = {
-            'SpecificHardwareManager': 3,
-            'FirmwareHardwareManager': 4,
-            'DiskHardwareManager': 4
-        }
+        # NOTE(JayF): The real dict also has manager: hwm-object
+        # but we don't use it in the code under test
+        hwms = [
+            {'name': 'SpecificHardwareManager', 'support': 3},
+            {'name': 'FirmwareHardwareManager', 'support': 4},
+            {'name': 'DiskHardwareManager', 'support': 4},
+        ]
 
-        mock_dispatch.side_effect = [manager_steps, hardware_support]
+        mock_dispatch.side_effect = [manager_steps]
+        mock_managers.return_value = hwms
 
         expected_return = {
             'hardware_manager_version': self.version,
             'clean_steps': expected_steps
@@ -32,12 +32,14 @@ class TestDeployExtension(base.IronicAgentTest):
         }
         self.version = {'generic': '1', 'specific': '1'}
 
+    @mock.patch('ironic_python_agent.hardware.get_managers_detail',
+                autospec=True)
     @mock.patch('ironic_python_agent.hardware.get_current_versions',
                 autospec=True)
     @mock.patch('ironic_python_agent.hardware.dispatch_to_all_managers',
                 autospec=True)
     def test_get_deploy_steps(self, mock_dispatch, mock_version,
-                              mock_cache_node):
+                              mock_managers, mock_cache_node):
         mock_version.return_value = self.version
 
         manager_steps = {
@@ -116,13 +118,17 @@ class TestDeployExtension(base.IronicAgentTest):
         }
 
-        hardware_support = {
-            'SpecificHardwareManager': 3,
-            'FirmwareHardwareManager': 4,
-            'DiskHardwareManager': 4
-        }
+        # NOTE(JayF): The real dict also has manager: hwm-object
+        # but we don't use it in the code under test
+        hwms = [
+            {'name': 'SpecificHardwareManager', 'support': 3},
+            {'name': 'FirmwareHardwareManager', 'support': 4},
+            {'name': 'DiskHardwareManager', 'support': 4},
+        ]
 
-        mock_dispatch.side_effect = [manager_steps, hardware_support]
+        mock_dispatch.side_effect = [manager_steps]
+        mock_managers.return_value = hwms
 
         expected_return = {
             'hardware_manager_version': self.version,
             'deploy_steps': expected_steps
@@ -34,12 +34,14 @@ class TestServiceExtension(base.IronicAgentTest):
         }
         self.version = {'generic': '1', 'specific': '1'}
 
+    @mock.patch('ironic_python_agent.hardware.get_managers_detail',
+                autospec=True)
     @mock.patch('ironic_python_agent.hardware.get_current_versions',
                 autospec=True)
     @mock.patch('ironic_python_agent.hardware.dispatch_to_all_managers',
                 autospec=True)
     def test_get_service_steps(self, mock_dispatch, mock_version,
-                               mock_cache_node):
+                               mock_managers, mock_cache_node):
         mock_version.return_value = self.version
 
         manager_steps = {
@@ -118,13 +120,17 @@ class TestServiceExtension(base.IronicAgentTest):
         }
 
-        hardware_support = {
-            'SpecificHardwareManager': 3,
-            'FirmwareHardwareManager': 4,
-            'DiskHardwareManager': 4
-        }
+        # NOTE(JayF): The real dict also has manager: hwm-object
+        # but we don't use it in the code under test
+        hwms = [
+            {'name': 'SpecificHardwareManager', 'support': 3},
+            {'name': 'FirmwareHardwareManager', 'support': 4},
+            {'name': 'DiskHardwareManager', 'support': 4},
+        ]
 
-        mock_dispatch.side_effect = [manager_steps, hardware_support]
+        mock_dispatch.side_effect = [manager_steps]
+        mock_managers.return_value = hwms
 
         expected_return = {
             'hardware_manager_version': self.version,
             'service_steps': expected_steps
@@ -83,12 +83,22 @@ BLK_DEVICE_TEMPLATE_PARTUUID_DEVICE = [
 ]
 
 
-class FakeHardwareManager(hardware.GenericHardwareManager):
-    def __init__(self, hardware_support):
-        self._hardware_support = hardware_support
-
+class FakeHardwareManager(hardware.HardwareManager):
     def evaluate_hardware_support(self):
-        return self._hardware_support
+        return self.support
+
+
+def _create_mock_hwm(name, support):
+    def set_support(self, x):
+        self.support = x
+
+    # note(JayF): This code creates a subclass of FakeHardwareManager with
+    #             a unique name. Since we actually use the class name in IPA
+    #             code as an identifier, we need to have a new class for each
+    #             mock.
+    hwm = type(name, (FakeHardwareManager,), {'_set_support': set_support})()
+    hwm._set_support(support)
+    return hwm
 
 
 class TestHardwareManagerLoading(base.IronicAgentTest):
@@ -106,17 +116,58 @@ class TestHardwareManagerLoading(base.IronicAgentTest):
         fake_ep.attrs = ['fake attrs']
         ext1 = extension.Extension(
             'fake_generic0', fake_ep, None,
-            FakeHardwareManager(hardware.HardwareSupport.GENERIC))
+            _create_mock_hwm("fake_generic0",
+                             hardware.HardwareSupport.GENERIC))
         ext2 = extension.Extension(
             'fake_mainline0', fake_ep, None,
-            FakeHardwareManager(hardware.HardwareSupport.MAINLINE))
+            _create_mock_hwm("fake_mainline0",
+                             hardware.HardwareSupport.MAINLINE))
         ext3 = extension.Extension(
-            'fake_generic1', fake_ep, None,
-            FakeHardwareManager(hardware.HardwareSupport.GENERIC))
-        self.correct_hw_manager = ext2.obj
+            'fake_serviceprovider0', fake_ep, None,
+            _create_mock_hwm("fake_serviceprovider0",
+                             hardware.HardwareSupport.SERVICE_PROVIDER))
+        # Note(JayF): Ensure these are added in an order other than priority
+        #             order or else you may invalidate the entire test :)
         self.fake_ext_mgr = extension.ExtensionManager.make_test_instance([
             ext1, ext2, ext3
         ])
+        self.expected_detail_response = [
+            {'name': 'fake_serviceprovider0',
+             'support': hardware.HardwareSupport.SERVICE_PROVIDER,
+             'manager': ext3.obj},
+            {'name': 'fake_mainline0',
+             'support': hardware.HardwareSupport.MAINLINE,
+             'manager': ext2.obj},
+            {'name': 'fake_generic0',
+             'support': hardware.HardwareSupport.GENERIC,
+             'manager': ext1.obj},
+        ]
+        self.expected_get_managers_response = [ext3.obj, ext2.obj, ext1.obj]
+
+    @mock.patch.object(hardware, '_get_extensions', autospec=True)
+    def test_get_managers(self, mock_extensions):
+        """Test to ensure get_managers sorts and returns a list of HWMs.
+
+        The most meaningful part of this test is ensuring HWMs are in priority
+        order, with the highest hardware support value coming earlier in the
+        list of classes.
+        """
+        mock_extensions.return_value = self.fake_ext_mgr
+        expected_names = [x.__class__.__name__
+                          for x in self.expected_get_managers_response]
+        actual_names = [x.__class__.__name__
+                        for x in hardware.get_managers()]
+        self.assertEqual(actual_names, expected_names)
+
+    @mock.patch.object(hardware, '_get_extensions', autospec=True)
+    def test_get_managers_detail(self, mock_extensions):
+        """ensure get_manager_details returns a list of HWMs + metadata
+
+        These also need to be sorted in priority order
+        """
+        mock_extensions.return_value = self.fake_ext_mgr
+        self.assertEqual(hardware.get_managers_detail(),
+                         self.expected_detail_response)
 
 
 @mock.patch.object(disk_utils, 'udev_settle', lambda *_: None)
@@ -962,10 +962,10 @@ def _unmount_any_config_drives():
     and glean leverage a folder at /mnt/config to convey configuration
     to a booting OS.
 
-    The possibility exists that one of the utilties mounted one or multiple
+    The possibility exists that one of the utilities mounted one or multiple
    such folders, even if the configuration was not used, and this can
    result in locked devices which can prevent rebuild operations from
-    completing successfully as as long as the folder is mounted, it is
+    completing successfully as long as the folder is mounted, it is
    a "locked" device to the operating system.
    """
    while os.path.ismount('/mnt/config'):
@@ -6,5 +6,5 @@ features:
     The new ``mix and match`` approach allows IPA to collect and match
     ``disk serial`` and ``wwn`` root device hints against values coming
     from both ``lsblk`` and ``udev`` at the same time. The ``mix and match``
-    approach is necesarry to handle edge cases where the serial and/or wwn
+    approach is necessary to handle edge cases where the serial and/or wwn
     information is different in ``lsblk`` compared to ``udev``.
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixes bug 2066308, an issue where Ironic Python Agent would call
+    evaluate_hardware_support multiple times on hardware manager plugins.
+    Scanning for hardware and disks is time consuming, and caused timeouts
+    on badly-performing nodes.