2014-04-09 14:31:31 -07:00
|
|
|
# Copyright 2013 Rackspace, Inc.
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2014-01-07 16:36:59 -08:00
|
|
|
|
2016-11-23 17:14:15 +01:00
|
|
|
import binascii
|
2016-05-24 10:04:12 +02:00
|
|
|
import os
|
2020-11-11 11:22:31 +01:00
|
|
|
import shutil
|
2021-02-26 21:42:18 -05:00
|
|
|
import stat
|
2016-05-24 10:04:12 +02:00
|
|
|
import time
|
2020-04-06 12:28:30 +02:00
|
|
|
from unittest import mock
|
2016-05-24 10:04:12 +02:00
|
|
|
|
2016-07-18 17:18:14 +01:00
|
|
|
from ironic_lib import disk_utils
|
2021-02-25 14:20:59 +01:00
|
|
|
from ironic_lib import utils as il_utils
|
2015-08-06 13:03:27 +02:00
|
|
|
import netifaces
|
2015-04-24 15:08:41 +05:30
|
|
|
from oslo_concurrency import processutils
|
2016-06-02 13:34:10 -04:00
|
|
|
from oslo_config import cfg
|
2015-09-02 15:21:04 +00:00
|
|
|
from oslo_utils import units
|
2015-03-10 11:20:40 +00:00
|
|
|
import pyudev
|
2014-06-08 18:45:40 -07:00
|
|
|
from stevedore import extension
|
2014-01-07 16:36:59 -08:00
|
|
|
|
2014-06-04 10:44:25 -07:00
|
|
|
from ironic_python_agent import errors
|
2014-03-19 16:19:52 -07:00
|
|
|
from ironic_python_agent import hardware
|
2018-03-13 16:06:55 +02:00
|
|
|
from ironic_python_agent import netutils
|
2021-02-10 13:48:29 +01:00
|
|
|
from ironic_python_agent import raid_utils
|
2017-04-03 15:32:44 +10:00
|
|
|
from ironic_python_agent.tests.unit import base
|
2021-02-11 15:36:09 +01:00
|
|
|
from ironic_python_agent.tests.unit.samples import hardware_samples as hws
|
2014-04-10 14:12:16 -07:00
|
|
|
from ironic_python_agent import utils
|
2014-01-07 16:36:59 -08:00
|
|
|
|
2016-06-02 13:34:10 -04:00
|
|
|
# Global oslo.config registry shared by every test in this module.
CONF = cfg.CONF

# Make the agent's disk-wait options visible on CONF so individual tests
# can override and clear them.
CONF.import_opt('disk_wait_attempts', 'ironic_python_agent.config')
CONF.import_opt('disk_wait_delay', 'ironic_python_agent.config')
|
|
|
|
|
2021-02-11 15:36:09 +01:00
|
|
|
|
2015-08-28 11:14:52 -07:00
|
|
|
# Fixture: two small block devices — /dev/sda is ~2.9 GB and /dev/sdb is
# exactly one byte under 4 GiB ("AlmostBigEnough").
BLK_DEVICE_TEMPLATE_SMALL_DEVICES = [
    hardware.BlockDevice(name='/dev/sda', model='TinyUSB Drive',
                         size=3116853504, rotational=False,
                         vendor="FooTastic", uuid="F531-BDC3"),
    hardware.BlockDevice(name='/dev/sdb', model='AlmostBigEnough Drive',
                         size=4294967295, rotational=False,
                         vendor="FooTastic", uuid=""),
]
|
2015-09-02 15:21:04 +00:00
|
|
|
|
2018-08-16 10:57:59 -07:00
|
|
|
# Fixture: a software-RAID device layout — two rotational member disks
# (sda/sdb) plus the assembled md0/md1 arrays; md1 reports size 0.
RAID_BLK_DEVICE_TEMPLATE_DEVICES = [
    hardware.BlockDevice(name='/dev/sda', model='DRIVE 0',
                         size=1765517033472, rotational=True,
                         vendor="FooTastic", uuid=""),
    hardware.BlockDevice(name='/dev/sdb', model='DRIVE 1',
                         size=1765517033472, rotational=True,
                         vendor="FooTastic", uuid=""),
    hardware.BlockDevice(name='/dev/md0', model='RAID',
                         size=1765517033470, rotational=False,
                         vendor="FooTastic", uuid=""),
    hardware.BlockDevice(name='/dev/md1', model='RAID',
                         size=0, rotational=False,
                         vendor="FooTastic", uuid=""),
]
|
|
|
|
|
2020-11-11 11:22:31 +01:00
|
|
|
|
2014-06-08 18:45:40 -07:00
|
|
|
class FakeHardwareManager(hardware.GenericHardwareManager):
    """Test double that reports a fixed hardware-support level."""

    def __init__(self, hardware_support):
        # NOTE: deliberately does not call the parent __init__; only the
        # canned support level is stored.
        self._hardware_support = hardware_support

    def evaluate_hardware_support(self):
        """Return the support level supplied at construction time."""
        return self._hardware_support
|
|
|
|
|
|
|
|
|
2017-04-03 15:32:44 +10:00
|
|
|
class TestHardwareManagerLoading(base.IronicAgentTest):

    def setUp(self):
        """Prepare a real ExtensionManager test instance for the tests.

        make_test_instance() must be called here in setUp, before the test
        cases mock the ExtensionManager constructor; inside a test this
        instance is installed as the mocked constructor's return value, so
        the constructor call can be verified while still exercising a
        realistic ExtensionManager.
        """
        super(TestHardwareManagerLoading, self).setUp()
        endpoint = mock.Mock()
        endpoint.module_name = 'fake'
        endpoint.attrs = ['fake attrs']

        # (extension name, hardware support level) for each fake manager.
        specs = [
            ('fake_generic0', hardware.HardwareSupport.GENERIC),
            ('fake_mainline0', hardware.HardwareSupport.MAINLINE),
            ('fake_generic1', hardware.HardwareSupport.GENERIC),
        ]
        extensions = [
            extension.Extension(name, endpoint, None,
                                FakeHardwareManager(level))
            for name, level in specs
        ]

        # The MAINLINE manager is the one the loader should prefer.
        self.correct_hw_manager = extensions[1].obj
        self.fake_ext_mgr = extension.ExtensionManager.make_test_instance(
            extensions)
|
|
|
|
|
|
|
|
|
2016-02-25 16:32:47 +00:00
|
|
|
@mock.patch.object(hardware, '_udev_settle', lambda *_: None)
|
2017-04-03 15:32:44 +10:00
|
|
|
class TestGenericHardwareManager(base.IronicAgentTest):
|
2014-01-07 16:36:59 -08:00
|
|
|
def setUp(self):
|
2014-04-03 14:02:53 -07:00
|
|
|
super(TestGenericHardwareManager, self).setUp()
|
2014-01-22 11:09:02 -08:00
|
|
|
self.hardware = hardware.GenericHardwareManager()
|
2015-06-15 21:36:27 +05:30
|
|
|
self.node = {'uuid': 'dda135fb-732d-4742-8e72-df8f3199d244',
|
|
|
|
'driver_internal_info': {}}
|
2016-06-02 13:34:10 -04:00
|
|
|
CONF.clear_override('disk_wait_attempts')
|
|
|
|
CONF.clear_override('disk_wait_delay')
|
2014-01-07 16:36:59 -08:00
|
|
|
|
2016-07-18 17:18:14 +01:00
|
|
|
def test_get_clean_steps(self):
|
|
|
|
expected_clean_steps = [
|
|
|
|
{
|
|
|
|
'step': 'erase_devices',
|
|
|
|
'priority': 10,
|
|
|
|
'interface': 'deploy',
|
|
|
|
'reboot_requested': False,
|
|
|
|
'abortable': True
|
|
|
|
},
|
|
|
|
{
|
|
|
|
'step': 'erase_devices_metadata',
|
|
|
|
'priority': 99,
|
|
|
|
'interface': 'deploy',
|
|
|
|
'reboot_requested': False,
|
|
|
|
'abortable': True
|
2019-02-04 13:17:23 +01:00
|
|
|
},
|
2020-11-11 11:22:31 +01:00
|
|
|
{
|
|
|
|
'step': 'erase_pstore',
|
|
|
|
'priority': 0,
|
|
|
|
'interface': 'deploy',
|
|
|
|
'reboot_requested': False,
|
|
|
|
'abortable': True
|
|
|
|
},
|
2019-02-04 13:17:23 +01:00
|
|
|
{
|
|
|
|
'step': 'delete_configuration',
|
|
|
|
'priority': 0,
|
|
|
|
'interface': 'raid',
|
|
|
|
'reboot_requested': False,
|
|
|
|
'abortable': True
|
|
|
|
},
|
|
|
|
{
|
|
|
|
'step': 'create_configuration',
|
|
|
|
'priority': 0,
|
|
|
|
'interface': 'raid',
|
|
|
|
'reboot_requested': False,
|
|
|
|
'abortable': True
|
2016-07-18 17:18:14 +01:00
|
|
|
}
|
|
|
|
]
|
|
|
|
clean_steps = self.hardware.get_clean_steps(self.node, [])
|
|
|
|
self.assertEqual(expected_clean_steps, clean_steps)
|
|
|
|
|
2020-07-27 17:54:50 +02:00
|
|
|
def test_clean_steps_exist(self):
|
|
|
|
for step in self.hardware.get_clean_steps(self.node, []):
|
|
|
|
getattr(self.hardware, step['step'])
|
|
|
|
|
|
|
|
def test_deploy_steps_exist(self):
|
|
|
|
for step in self.hardware.get_deploy_steps(self.node, []):
|
|
|
|
getattr(self.hardware, step['step'])
|
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
    @mock.patch('binascii.hexlify', autospec=True)
    @mock.patch('ironic_python_agent.netutils.get_lldp_info', autospec=True)
    def test_collect_lldp_data(self, mock_lldp_info, mock_hexlify):
        """Raw LLDP TLV bytes are hex-encoded into strings per interface."""
        if_names = ['eth0', 'lo']
        # Only eth0 reports TLVs; 'lo' is absent from the result.
        mock_lldp_info.return_value = {if_names[0]: [
            (0, b''),
            (1, b'foo\x01'),
            (2, b'\x02bar')],
        }
        # One hexlify() result per TLV value, consumed in order.
        mock_hexlify.side_effect = [
            b'',
            b'666f6f01',
            b'02626172'
        ]
        expected_lldp_data = {
            'eth0': [
                (0, ''),
                (1, '666f6f01'),
                (2, '02626172')],
        }
        result = self.hardware.collect_lldp_data(if_names)
        self.assertIn(if_names[0], result)
        self.assertEqual(expected_lldp_data, result)
|
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch('ironic_python_agent.netutils.get_lldp_info', autospec=True)
|
2016-11-23 17:14:15 +01:00
|
|
|
def test_collect_lldp_data_netutils_exception(self, mock_lldp_info):
|
|
|
|
if_names = ['eth0', 'lo']
|
|
|
|
mock_lldp_info.side_effect = Exception('fake error')
|
|
|
|
result = self.hardware.collect_lldp_data(if_names)
|
|
|
|
expected_lldp_data = {}
|
|
|
|
self.assertEqual(expected_lldp_data, result)
|
|
|
|
|
|
|
|
    @mock.patch.object(hardware, 'LOG', autospec=True)
    @mock.patch('binascii.hexlify', autospec=True)
    @mock.patch('ironic_python_agent.netutils.get_lldp_info', autospec=True)
    def test_collect_lldp_data_decode_exception(self, mock_lldp_info,
                                                mock_hexlify, mock_log):
        """A TLV that fails hex-encoding is dropped with a warning."""
        if_names = ['eth0', 'lo']
        mock_lldp_info.return_value = {if_names[0]: [
            (0, b''),
            (1, b'foo\x01'),
            (2, b'\x02bar')],
        }
        # The third TLV raises on hexlify and must be skipped.
        mock_hexlify.side_effect = [
            b'',
            b'666f6f01',
            binascii.Error('fake_error')
        ]
        # Only the two successfully encoded TLVs survive.
        expected_lldp_data = {
            'eth0': [
                (0, ''),
                (1, '666f6f01')],
        }
        result = self.hardware.collect_lldp_data(if_names)
        mock_log.warning.assert_called_once()
        self.assertIn(if_names[0], result)
        self.assertEqual(expected_lldp_data, result)
|
|
|
|
|
2020-01-21 15:23:18 +01:00
|
|
|
@mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch('netifaces.ifaddresses', autospec=True)
|
|
|
|
@mock.patch('os.listdir', autospec=True)
|
|
|
|
@mock.patch('os.path.exists', autospec=True)
|
2019-11-28 17:10:40 +01:00
|
|
|
@mock.patch('builtins.open', autospec=True)
|
2016-11-10 21:29:40 +00:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2018-03-13 16:06:55 +02:00
|
|
|
@mock.patch.object(netutils, 'get_mac_addr', autospec=True)
|
|
|
|
@mock.patch.object(netutils, 'interface_has_carrier', autospec=True)
|
2014-01-27 18:11:44 -08:00
|
|
|
def test_list_network_interfaces(self,
|
2018-03-13 16:06:55 +02:00
|
|
|
mock_has_carrier,
|
|
|
|
mock_get_mac,
|
2016-11-10 21:29:40 +00:00
|
|
|
mocked_execute,
|
2014-01-27 18:11:44 -08:00
|
|
|
mocked_open,
|
|
|
|
mocked_exists,
|
2015-08-06 13:03:27 +02:00
|
|
|
mocked_listdir,
|
2016-11-17 02:08:13 +02:00
|
|
|
mocked_ifaddresses,
|
2020-01-21 15:23:18 +01:00
|
|
|
mockedget_managers):
|
|
|
|
mockedget_managers.return_value = [hardware.GenericHardwareManager()]
|
2014-01-27 17:51:06 -08:00
|
|
|
mocked_listdir.return_value = ['lo', 'eth0']
|
2014-01-27 18:11:44 -08:00
|
|
|
mocked_exists.side_effect = [False, True]
|
2014-04-09 17:26:54 -07:00
|
|
|
mocked_open.return_value.__enter__ = lambda s: s
|
|
|
|
mocked_open.return_value.__exit__ = mock.Mock()
|
|
|
|
read_mock = mocked_open.return_value.read
|
2018-03-13 16:06:55 +02:00
|
|
|
read_mock.side_effect = ['1']
|
2015-08-06 13:03:27 +02:00
|
|
|
mocked_ifaddresses.return_value = {
|
2018-01-21 22:44:33 +08:00
|
|
|
netifaces.AF_INET: [{'addr': '192.168.1.2'}],
|
|
|
|
netifaces.AF_INET6: [{'addr': 'fd00::101'}]
|
2015-08-06 13:03:27 +02:00
|
|
|
}
|
2016-11-10 21:29:40 +00:00
|
|
|
mocked_execute.return_value = ('em0\n', '')
|
2018-03-13 16:06:55 +02:00
|
|
|
mock_get_mac.mock_has_carrier = True
|
|
|
|
mock_get_mac.return_value = '00:0c:29:8c:11:b1'
|
2014-01-27 17:51:06 -08:00
|
|
|
interfaces = self.hardware.list_network_interfaces()
|
2016-01-12 09:03:19 +00:00
|
|
|
self.assertEqual(1, len(interfaces))
|
|
|
|
self.assertEqual('eth0', interfaces[0].name)
|
|
|
|
self.assertEqual('00:0c:29:8c:11:b1', interfaces[0].mac_address)
|
|
|
|
self.assertEqual('192.168.1.2', interfaces[0].ipv4_address)
|
2018-01-21 22:44:33 +08:00
|
|
|
self.assertEqual('fd00::101', interfaces[0].ipv6_address)
|
2016-08-29 10:07:14 +07:00
|
|
|
self.assertIsNone(interfaces[0].lldp)
|
2016-05-24 17:46:49 +01:00
|
|
|
self.assertTrue(interfaces[0].has_carrier)
|
2016-11-10 21:29:40 +00:00
|
|
|
self.assertEqual('em0', interfaces[0].biosdevname)
|
|
|
|
|
2020-01-21 15:23:18 +01:00
|
|
|
    @mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
    @mock.patch('netifaces.ifaddresses', autospec=True)
    @mock.patch('os.listdir', autospec=True)
    @mock.patch('os.path.exists', autospec=True)
    @mock.patch('builtins.open', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(netutils, 'get_mac_addr', autospec=True)
    @mock.patch.object(netutils, 'interface_has_carrier', autospec=True)
    def test_list_network_interfaces_with_biosdevname(self,
                                                      mock_has_carrier,
                                                      mock_get_mac,
                                                      mocked_execute,
                                                      mocked_open,
                                                      mocked_exists,
                                                      mocked_listdir,
                                                      mocked_ifaddresses,
                                                      mockedget_managers):
        """The biosdevname reported by the OS tool is attached to the NIC."""
        mockedget_managers.return_value = [hardware.GenericHardwareManager()]
        mocked_listdir.return_value = ['lo', 'eth0']
        # First exists() check (lo) fails, second (eth0) succeeds.
        mocked_exists.side_effect = [False, True]
        mocked_open.return_value.__enter__ = lambda s: s
        mocked_open.return_value.__exit__ = mock.Mock()
        read_mock = mocked_open.return_value.read
        read_mock.side_effect = ['1']
        mocked_ifaddresses.return_value = {
            netifaces.AF_INET: [{'addr': '192.168.1.2'}],
            netifaces.AF_INET6: [{'addr': 'fd00::101'}]
        }
        # biosdevname stdout; trailing newline must be stripped by the code.
        mocked_execute.return_value = ('em0\n', '')
        mock_get_mac.return_value = '00:0c:29:8c:11:b1'
        mock_has_carrier.return_value = True
        interfaces = self.hardware.list_network_interfaces()
        self.assertEqual(1, len(interfaces))
        self.assertEqual('eth0', interfaces[0].name)
        self.assertEqual('00:0c:29:8c:11:b1', interfaces[0].mac_address)
        self.assertEqual('192.168.1.2', interfaces[0].ipv4_address)
        self.assertEqual('fd00::101', interfaces[0].ipv6_address)
        self.assertIsNone(interfaces[0].lldp)
        self.assertTrue(interfaces[0].has_carrier)
        self.assertEqual('em0', interfaces[0].biosdevname)
|
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_bios_given_nic_name_ok(self, mock_execute):
|
|
|
|
interface_name = 'eth0'
|
|
|
|
mock_execute.return_value = ('em0\n', '')
|
|
|
|
result = self.hardware.get_bios_given_nic_name(interface_name)
|
|
|
|
self.assertEqual('em0', result)
|
|
|
|
mock_execute.assert_called_once_with('biosdevname', '-i',
|
|
|
|
interface_name)
|
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_bios_given_nic_name_oserror(self, mock_execute):
|
|
|
|
interface_name = 'eth0'
|
|
|
|
mock_execute.side_effect = OSError()
|
|
|
|
result = self.hardware.get_bios_given_nic_name(interface_name)
|
|
|
|
self.assertIsNone(result)
|
|
|
|
mock_execute.assert_called_once_with('biosdevname', '-i',
|
|
|
|
interface_name)
|
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'LOG', autospec=True)
|
|
|
|
def test_get_bios_given_nic_name_process_exec_err4(self, mock_log,
|
|
|
|
mock_execute):
|
|
|
|
interface_name = 'eth0'
|
|
|
|
mock_execute.side_effect = [
|
|
|
|
processutils.ProcessExecutionError(exit_code=4)]
|
|
|
|
|
|
|
|
result = self.hardware.get_bios_given_nic_name(interface_name)
|
|
|
|
|
|
|
|
mock_log.info.assert_called_once_with(
|
|
|
|
'The system is a virtual machine, so biosdevname utility does '
|
|
|
|
'not provide names for virtual NICs.')
|
|
|
|
self.assertIsNone(result)
|
|
|
|
mock_execute.assert_called_once_with('biosdevname', '-i',
|
|
|
|
interface_name)
|
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'LOG', autospec=True)
|
|
|
|
def test_get_bios_given_nic_name_process_exec_err3(self, mock_log,
|
|
|
|
mock_execute):
|
|
|
|
interface_name = 'eth0'
|
|
|
|
mock_execute.side_effect = [
|
|
|
|
processutils.ProcessExecutionError(exit_code=3)]
|
|
|
|
|
|
|
|
result = self.hardware.get_bios_given_nic_name(interface_name)
|
|
|
|
|
|
|
|
mock_log.warning.assert_called_once_with(
|
|
|
|
'Biosdevname returned exit code %s', 3)
|
|
|
|
self.assertIsNone(result)
|
|
|
|
mock_execute.assert_called_once_with('biosdevname', '-i',
|
|
|
|
interface_name)
|
2016-05-24 17:46:49 +01:00
|
|
|
|
2020-01-21 15:23:18 +01:00
|
|
|
    @mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
    @mock.patch('ironic_python_agent.netutils.get_lldp_info', autospec=True)
    @mock.patch('netifaces.ifaddresses', autospec=True)
    @mock.patch('os.listdir', autospec=True)
    @mock.patch('os.path.exists', autospec=True)
    @mock.patch('builtins.open', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(netutils, 'get_mac_addr', autospec=True)
    @mock.patch.object(netutils, 'interface_has_carrier', autospec=True)
    def test_list_network_interfaces_with_lldp(self,
                                               mock_has_carrier,
                                               mock_get_mac,
                                               mocked_execute,
                                               mocked_open,
                                               mocked_exists,
                                               mocked_listdir,
                                               mocked_ifaddresses,
                                               mocked_lldp_info,
                                               mockedget_managers):
        """With collect_lldp enabled, TLVs are hex-encoded onto the NIC."""
        mockedget_managers.return_value = [hardware.GenericHardwareManager()]
        CONF.set_override('collect_lldp', True)
        mocked_listdir.return_value = ['lo', 'eth0']
        # First exists() check (lo) fails, second (eth0) succeeds.
        mocked_exists.side_effect = [False, True]
        mocked_open.return_value.__enter__ = lambda s: s
        mocked_open.return_value.__exit__ = mock.Mock()
        read_mock = mocked_open.return_value.read
        read_mock.side_effect = ['1']
        mocked_ifaddresses.return_value = {
            netifaces.AF_INET: [{'addr': '192.168.1.2'}],
            netifaces.AF_INET6: [{'addr': 'fd00::101'}]
        }
        # Raw LLDP TLVs: chassis-ID, port-ID, TTL.
        mocked_lldp_info.return_value = {'eth0': [
            (0, b''),
            (1, b'\x04\x88Z\x92\xecTY'),
            (2, b'\x05Ethernet1/18'),
            (3, b'\x00x')]
        }
        mock_has_carrier.return_value = True
        mock_get_mac.return_value = '00:0c:29:8c:11:b1'
        mocked_execute.return_value = ('em0\n', '')
        interfaces = self.hardware.list_network_interfaces()
        self.assertEqual(1, len(interfaces))
        self.assertEqual('eth0', interfaces[0].name)
        self.assertEqual('00:0c:29:8c:11:b1', interfaces[0].mac_address)
        self.assertEqual('192.168.1.2', interfaces[0].ipv4_address)
        self.assertEqual('fd00::101', interfaces[0].ipv6_address)
        # Hex encodings of the raw TLV values above, in the same order.
        expected_lldp_info = [
            (0, ''),
            (1, '04885a92ec5459'),
            (2, '0545746865726e6574312f3138'),
            (3, '0078'),
        ]
        self.assertEqual(expected_lldp_info, interfaces[0].lldp)
        self.assertTrue(interfaces[0].has_carrier)
        self.assertEqual('em0', interfaces[0].biosdevname)
|
2016-05-24 17:46:49 +01:00
|
|
|
|
2018-03-13 16:06:55 +02:00
|
|
|
    # NOTE: the decorator stack here differs from the sibling tests — the
    # netutils patches come first, so their mocks arrive LAST in the
    # parameter list.
    @mock.patch.object(netutils, 'interface_has_carrier', autospec=True)
    @mock.patch.object(netutils, 'get_mac_addr', autospec=True)
    @mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
    @mock.patch('ironic_python_agent.netutils.get_lldp_info', autospec=True)
    @mock.patch('netifaces.ifaddresses', autospec=True)
    @mock.patch('os.listdir', autospec=True)
    @mock.patch('os.path.exists', autospec=True)
    @mock.patch('builtins.open', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_list_network_interfaces_with_lldp_error(
            self, mocked_execute, mocked_open, mocked_exists, mocked_listdir,
            mocked_ifaddresses, mocked_lldp_info, mockedget_managers,
            mock_get_mac, mock_has_carrier):
        """An LLDP collection failure leaves lldp unset on the interface."""
        mockedget_managers.return_value = [hardware.GenericHardwareManager()]
        CONF.set_override('collect_lldp', True)
        mocked_listdir.return_value = ['lo', 'eth0']
        # First exists() check (lo) fails, second (eth0) succeeds.
        mocked_exists.side_effect = [False, True]
        mocked_open.return_value.__enter__ = lambda s: s
        mocked_open.return_value.__exit__ = mock.Mock()
        read_mock = mocked_open.return_value.read
        read_mock.side_effect = ['1']
        mocked_ifaddresses.return_value = {
            netifaces.AF_INET: [{'addr': '192.168.1.2'}],
            netifaces.AF_INET6: [{'addr': 'fd00::101'}]
        }
        # LLDP retrieval blows up; listing must still succeed.
        mocked_lldp_info.side_effect = Exception('Boom!')
        mocked_execute.return_value = ('em0\n', '')
        mock_has_carrier.return_value = True
        mock_get_mac.return_value = '00:0c:29:8c:11:b1'
        interfaces = self.hardware.list_network_interfaces()
        self.assertEqual(1, len(interfaces))
        self.assertEqual('eth0', interfaces[0].name)
        self.assertEqual('00:0c:29:8c:11:b1', interfaces[0].mac_address)
        self.assertEqual('192.168.1.2', interfaces[0].ipv4_address)
        self.assertEqual('fd00::101', interfaces[0].ipv6_address)
        self.assertIsNone(interfaces[0].lldp)
        self.assertTrue(interfaces[0].has_carrier)
        self.assertEqual('em0', interfaces[0].biosdevname)
|
2016-04-01 16:36:01 +02:00
|
|
|
|
2020-01-21 15:23:18 +01:00
|
|
|
    @mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
    @mock.patch('netifaces.ifaddresses', autospec=True)
    @mock.patch('os.listdir', autospec=True)
    @mock.patch('os.path.exists', autospec=True)
    @mock.patch('builtins.open', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(netutils, 'get_mac_addr', autospec=True)
    @mock.patch.object(netutils, 'interface_has_carrier', autospec=True)
    def test_list_network_interfaces_no_carrier(self,
                                                mock_has_carrier,
                                                mock_get_mac,
                                                mocked_execute,
                                                mocked_open,
                                                mocked_exists,
                                                mocked_listdir,
                                                mocked_ifaddresses,
                                                mockedget_managers):
        """A NIC without link is still listed, with has_carrier False."""

        mockedget_managers.return_value = [hardware.GenericHardwareManager()]
        mocked_listdir.return_value = ['lo', 'eth0']
        # First exists() check (lo) fails, second (eth0) succeeds.
        mocked_exists.side_effect = [False, True]
        mocked_open.return_value.__enter__ = lambda s: s
        mocked_open.return_value.__exit__ = mock.Mock()
        read_mock = mocked_open.return_value.read
        # Reading sysfs attributes fails; the vendor stays None.
        read_mock.side_effect = [OSError('boom')]
        mocked_ifaddresses.return_value = {
            netifaces.AF_INET: [{'addr': '192.168.1.2'}],
            netifaces.AF_INET6: [{'addr': 'fd00::101'}]
        }
        mocked_execute.return_value = ('em0\n', '')
        mock_has_carrier.return_value = False
        mock_get_mac.return_value = '00:0c:29:8c:11:b1'
        interfaces = self.hardware.list_network_interfaces()
        self.assertEqual(1, len(interfaces))
        self.assertEqual('eth0', interfaces[0].name)
        self.assertEqual('00:0c:29:8c:11:b1', interfaces[0].mac_address)
        self.assertEqual('192.168.1.2', interfaces[0].ipv4_address)
        self.assertEqual('fd00::101', interfaces[0].ipv6_address)
        self.assertFalse(interfaces[0].has_carrier)
        self.assertIsNone(interfaces[0].vendor)
        self.assertEqual('em0', interfaces[0].biosdevname)
|
2016-02-11 14:00:01 +02:00
|
|
|
|
2020-01-21 15:23:18 +01:00
|
|
|
    @mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
    @mock.patch('netifaces.ifaddresses', autospec=True)
    @mock.patch('os.listdir', autospec=True)
    @mock.patch('os.path.exists', autospec=True)
    @mock.patch('builtins.open', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(netutils, 'get_mac_addr', autospec=True)
    @mock.patch.object(netutils, 'interface_has_carrier', autospec=True)
    def test_list_network_interfaces_with_vendor_info(self,
                                                      mock_has_carrier,
                                                      mock_get_mac,
                                                      mocked_execute,
                                                      mocked_open,
                                                      mocked_exists,
                                                      mocked_listdir,
                                                      mocked_ifaddresses,
                                                      mockedget_managers):
        """Vendor and product IDs read from sysfs land on the interface."""
        mockedget_managers.return_value = [hardware.GenericHardwareManager()]
        mocked_listdir.return_value = ['lo', 'eth0']
        # First exists() check (lo) fails, second (eth0) succeeds.
        mocked_exists.side_effect = [False, True]
        mocked_open.return_value.__enter__ = lambda s: s
        mocked_open.return_value.__exit__ = mock.Mock()
        read_mock = mocked_open.return_value.read
        mac = '00:0c:29:8c:11:b1'
        # Two sysfs reads in order: vendor ID then device (product) ID.
        read_mock.side_effect = ['0x15b3\n', '0x1014\n']
        mocked_ifaddresses.return_value = {
            netifaces.AF_INET: [{'addr': '192.168.1.2'}],
            netifaces.AF_INET6: [{'addr': 'fd00::101'}]
        }
        mocked_execute.return_value = ('em0\n', '')
        mock_has_carrier.return_value = True
        mock_get_mac.return_value = mac
        interfaces = self.hardware.list_network_interfaces()
        self.assertEqual(1, len(interfaces))
        self.assertEqual('eth0', interfaces[0].name)
        self.assertEqual(mac, interfaces[0].mac_address)
        self.assertEqual('192.168.1.2', interfaces[0].ipv4_address)
        self.assertEqual('fd00::101', interfaces[0].ipv6_address)
        self.assertTrue(interfaces[0].has_carrier)
        self.assertEqual('0x15b3', interfaces[0].vendor)
        self.assertEqual('0x1014', interfaces[0].product)
        self.assertEqual('em0', interfaces[0].biosdevname)
|
2014-01-27 17:51:06 -08:00
|
|
|
|
2020-10-30 10:32:45 -04:00
|
|
|
@mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
|
|
|
|
@mock.patch('netifaces.ifaddresses', autospec=True)
|
|
|
|
@mock.patch('os.listdir', autospec=True)
|
|
|
|
@mock.patch('os.path.exists', autospec=True)
|
|
|
|
@mock.patch('builtins.open', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
@mock.patch.object(netutils, 'get_mac_addr', autospec=True)
|
|
|
|
@mock.patch.object(netutils, 'interface_has_carrier', autospec=True)
|
|
|
|
def test_list_network_vlan_interfaces(self,
|
|
|
|
mock_has_carrier,
|
|
|
|
mock_get_mac,
|
|
|
|
mocked_execute,
|
|
|
|
mocked_open,
|
|
|
|
mocked_exists,
|
|
|
|
mocked_listdir,
|
|
|
|
mocked_ifaddresses,
|
|
|
|
mockedget_managers):
|
|
|
|
mockedget_managers.return_value = [hardware.GenericHardwareManager()]
|
|
|
|
CONF.set_override('enable_vlan_interfaces', 'eth0.100')
|
|
|
|
mocked_listdir.return_value = ['lo', 'eth0']
|
|
|
|
mocked_exists.side_effect = [False, True, False]
|
|
|
|
mocked_open.return_value.__enter__ = lambda s: s
|
|
|
|
mocked_open.return_value.__exit__ = mock.Mock()
|
|
|
|
read_mock = mocked_open.return_value.read
|
|
|
|
read_mock.side_effect = ['1']
|
|
|
|
mocked_ifaddresses.return_value = {
|
|
|
|
netifaces.AF_INET: [{'addr': '192.168.1.2'}],
|
|
|
|
netifaces.AF_INET6: [{'addr': 'fd00::101'}]
|
|
|
|
}
|
|
|
|
mocked_execute.return_value = ('em0\n', '')
|
|
|
|
mock_get_mac.mock_has_carrier = True
|
|
|
|
mock_get_mac.return_value = '00:0c:29:8c:11:b1'
|
|
|
|
interfaces = self.hardware.list_network_interfaces()
|
|
|
|
self.assertEqual(2, len(interfaces))
|
|
|
|
self.assertEqual('eth0', interfaces[0].name)
|
|
|
|
self.assertEqual('00:0c:29:8c:11:b1', interfaces[0].mac_address)
|
|
|
|
self.assertEqual('192.168.1.2', interfaces[0].ipv4_address)
|
|
|
|
self.assertEqual('fd00::101', interfaces[0].ipv6_address)
|
|
|
|
self.assertIsNone(interfaces[0].lldp)
|
|
|
|
self.assertEqual('eth0.100', interfaces[1].name)
|
|
|
|
self.assertEqual('00:0c:29:8c:11:b1', interfaces[1].mac_address)
|
|
|
|
self.assertIsNone(interfaces[1].lldp)
|
|
|
|
|
|
|
|
    @mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
    @mock.patch('ironic_python_agent.netutils.get_lldp_info', autospec=True)
    @mock.patch('netifaces.ifaddresses', autospec=True)
    @mock.patch('os.listdir', autospec=True)
    @mock.patch('os.path.exists', autospec=True)
    @mock.patch('builtins.open', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(netutils, 'get_mac_addr', autospec=True)
    @mock.patch.object(netutils, 'interface_has_carrier', autospec=True)
    def test_list_network_vlan_interfaces_using_lldp(self,
                                                     mock_has_carrier,
                                                     mock_get_mac,
                                                     mocked_execute,
                                                     mocked_open,
                                                     mocked_exists,
                                                     mocked_listdir,
                                                     mocked_ifaddresses,
                                                     mocked_lldp_info,
                                                     mockedget_managers):
        """VLAN IDs advertised via LLDP TLVs spawn eth0.<vid> interfaces."""
        mockedget_managers.return_value = [hardware.GenericHardwareManager()]
        CONF.set_override('collect_lldp', True)
        # 'eth0' (no explicit VLAN list): VLANs come from the LLDP data.
        CONF.set_override('enable_vlan_interfaces', 'eth0')
        mocked_listdir.return_value = ['lo', 'eth0']
        mocked_execute.return_value = ('em0\n', '')
        mocked_exists.side_effect = [False, True, False]
        mocked_open.return_value.__enter__ = lambda s: s
        mocked_open.return_value.__exit__ = mock.Mock()
        read_mock = mocked_open.return_value.read
        read_mock.side_effect = ['1']
        # Org-specific TLVs (type 127) advertising VLANs 100 (0x64) and
        # 101 (0x65).
        mocked_lldp_info.return_value = {'eth0': [
            (0, b''),
            (127, b'\x00\x80\xc2\x03\x00d\x08vlan-100'),
            (127, b'\x00\x80\xc2\x03\x00e\x08vlan-101')]
        }
        mock_has_carrier.return_value = True
        mock_get_mac.return_value = '00:0c:29:8c:11:b1'
        interfaces = self.hardware.list_network_interfaces()
        self.assertEqual(3, len(interfaces))
        self.assertEqual('eth0', interfaces[0].name)
        self.assertEqual('00:0c:29:8c:11:b1', interfaces[0].mac_address)
        expected_lldp_info = [
            (0, ''),
            (127, "0080c203006408766c616e2d313030"),
            (127, "0080c203006508766c616e2d313031")
        ]
        self.assertEqual(expected_lldp_info, interfaces[0].lldp)
        # One VLAN interface per advertised VLAN, sharing the parent MAC.
        self.assertEqual('eth0.100', interfaces[1].name)
        self.assertEqual('00:0c:29:8c:11:b1', interfaces[1].mac_address)
        self.assertIsNone(interfaces[1].lldp)
        self.assertEqual('eth0.101', interfaces[2].name)
        self.assertEqual('00:0c:29:8c:11:b1', interfaces[2].mac_address)
        self.assertIsNone(interfaces[2].lldp)
|
|
|
|
|
|
|
|
@mock.patch.object(netutils, 'LOG', autospec=True)
|
|
|
|
@mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
|
|
|
|
@mock.patch('netifaces.ifaddresses', autospec=True)
|
|
|
|
@mock.patch('os.listdir', autospec=True)
|
|
|
|
@mock.patch('os.path.exists', autospec=True)
|
|
|
|
@mock.patch('builtins.open', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
@mock.patch.object(netutils, 'get_mac_addr', autospec=True)
|
|
|
|
@mock.patch.object(netutils, 'interface_has_carrier', autospec=True)
|
|
|
|
def test_list_network_vlan_invalid_int(self,
|
|
|
|
mock_has_carrier,
|
|
|
|
mock_get_mac,
|
|
|
|
mocked_execute,
|
|
|
|
mocked_open,
|
|
|
|
mocked_exists,
|
|
|
|
mocked_listdir,
|
|
|
|
mocked_ifaddresses,
|
|
|
|
mockedget_managers,
|
|
|
|
mocked_log):
|
|
|
|
mockedget_managers.return_value = [hardware.GenericHardwareManager()]
|
|
|
|
CONF.set_override('collect_lldp', True)
|
|
|
|
CONF.set_override('enable_vlan_interfaces', 'enp0s1')
|
|
|
|
mocked_listdir.return_value = ['lo', 'eth0']
|
|
|
|
mocked_exists.side_effect = [False, True, False]
|
|
|
|
mocked_open.return_value.__enter__ = lambda s: s
|
|
|
|
mocked_open.return_value.__exit__ = mock.Mock()
|
|
|
|
read_mock = mocked_open.return_value.read
|
|
|
|
read_mock.side_effect = ['1']
|
|
|
|
mocked_ifaddresses.return_value = {
|
|
|
|
netifaces.AF_INET: [{'addr': '192.168.1.2'}],
|
|
|
|
netifaces.AF_INET6: [{'addr': 'fd00::101'}]
|
|
|
|
}
|
|
|
|
mocked_execute.return_value = ('em0\n', '')
|
|
|
|
mock_get_mac.mock_has_carrier = True
|
|
|
|
mock_get_mac.return_value = '00:0c:29:8c:11:b1'
|
|
|
|
|
|
|
|
self.hardware.list_network_interfaces()
|
|
|
|
mocked_log.warning.assert_called_once_with(
|
|
|
|
'Provided interface name %s was not found', 'enp0s1')
|
|
|
|
|
|
|
|
@mock.patch('ironic_python_agent.hardware.get_managers', autospec=True)
|
|
|
|
@mock.patch('ironic_python_agent.netutils.get_lldp_info', autospec=True)
|
|
|
|
@mock.patch('os.listdir', autospec=True)
|
|
|
|
@mock.patch('os.path.exists', autospec=True)
|
|
|
|
@mock.patch('builtins.open', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
@mock.patch.object(netutils, 'get_mac_addr', autospec=True)
|
|
|
|
def test_list_network_vlan_interfaces_using_lldp_all(self,
|
|
|
|
mock_get_mac,
|
|
|
|
mocked_execute,
|
|
|
|
mocked_open,
|
|
|
|
mocked_exists,
|
|
|
|
mocked_listdir,
|
|
|
|
mocked_lldp_info,
|
|
|
|
mockedget_managers):
|
|
|
|
mockedget_managers.return_value = [hardware.GenericHardwareManager()]
|
|
|
|
CONF.set_override('collect_lldp', True)
|
|
|
|
CONF.set_override('enable_vlan_interfaces', 'all')
|
|
|
|
mocked_listdir.return_value = ['lo', 'eth0', 'eth1']
|
|
|
|
mocked_execute.return_value = ('em0\n', '')
|
|
|
|
mocked_exists.side_effect = [False, True, True]
|
|
|
|
mocked_open.return_value.__enter__ = lambda s: s
|
|
|
|
mocked_open.return_value.__exit__ = mock.Mock()
|
|
|
|
read_mock = mocked_open.return_value.read
|
|
|
|
read_mock.side_effect = ['1']
|
|
|
|
mocked_lldp_info.return_value = {'eth0': [
|
|
|
|
(0, b''),
|
|
|
|
(127, b'\x00\x80\xc2\x03\x00d\x08vlan-100'),
|
|
|
|
(127, b'\x00\x80\xc2\x03\x00e\x08vlan-101')],
|
|
|
|
'eth1': [
|
|
|
|
(0, b''),
|
|
|
|
(127, b'\x00\x80\xc2\x03\x00f\x08vlan-102'),
|
|
|
|
(127, b'\x00\x80\xc2\x03\x00g\x08vlan-103')]
|
|
|
|
}
|
|
|
|
|
|
|
|
interfaces = self.hardware.list_network_interfaces()
|
|
|
|
self.assertEqual(6, len(interfaces))
|
|
|
|
self.assertEqual('eth0', interfaces[0].name)
|
|
|
|
self.assertEqual('eth1', interfaces[1].name)
|
|
|
|
self.assertEqual('eth0.100', interfaces[2].name)
|
|
|
|
self.assertEqual('eth0.101', interfaces[3].name)
|
|
|
|
self.assertEqual('eth1.102', interfaces[4].name)
|
|
|
|
self.assertEqual('eth1.103', interfaces[5].name)
|
|
|
|
|
2017-08-28 16:59:39 +02:00
|
|
|
@mock.patch.object(os, 'readlink', autospec=True)
|
|
|
|
@mock.patch.object(os, 'listdir', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(hardware, 'get_cached_node', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2017-08-28 16:59:39 +02:00
|
|
|
def test_get_os_install_device(self, mocked_execute, mock_cached_node,
|
|
|
|
mocked_listdir, mocked_readlink):
|
2017-09-27 15:42:13 +02:00
|
|
|
mocked_readlink.return_value = '../../sda'
|
2017-08-28 16:59:39 +02:00
|
|
|
mocked_listdir.return_value = ['1:0:0:0']
|
2016-05-19 10:48:02 +01:00
|
|
|
mock_cached_node.return_value = None
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = (hws.BLK_DEVICE_TEMPLATE, '')
|
2016-01-12 09:03:19 +00:00
|
|
|
self.assertEqual('/dev/sdb', self.hardware.get_os_install_device())
|
2014-07-18 08:13:12 -07:00
|
|
|
mocked_execute.assert_called_once_with(
|
2021-03-02 16:19:32 +01:00
|
|
|
'lsblk', '-Pbia', '-oKNAME,MODEL,SIZE,ROTA,TYPE,UUID,PARTUUID')
|
2018-08-16 10:57:59 -07:00
|
|
|
mock_cached_node.assert_called_once_with()
|
|
|
|
|
|
|
|
@mock.patch.object(os, 'readlink', autospec=True)
|
|
|
|
@mock.patch.object(os, 'listdir', autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'get_cached_node', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_os_install_device_raid(self, mocked_execute,
|
|
|
|
mock_cached_node, mocked_listdir,
|
|
|
|
mocked_readlink):
|
|
|
|
# NOTE(TheJulia): The readlink and listdir mocks are just to satisfy
|
|
|
|
# what is functionally an available path check and that information
|
|
|
|
# is stored in the returned result for use by root device hints.
|
|
|
|
mocked_readlink.side_effect = '../../sda'
|
|
|
|
mocked_listdir.return_value = ['1:0:0:0']
|
|
|
|
mock_cached_node.return_value = None
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = (hws.RAID_BLK_DEVICE_TEMPLATE, '')
|
2018-08-16 10:57:59 -07:00
|
|
|
# This should ideally select the smallest device and in theory raid
|
|
|
|
# should always be smaller
|
|
|
|
self.assertEqual('/dev/md0', self.hardware.get_os_install_device())
|
|
|
|
mocked_execute.assert_called_once_with(
|
2021-03-02 16:19:32 +01:00
|
|
|
'lsblk', '-Pbia', '-oKNAME,MODEL,SIZE,ROTA,TYPE,UUID,PARTUUID')
|
2016-05-19 10:48:02 +01:00
|
|
|
mock_cached_node.assert_called_once_with()
|
2014-01-28 11:25:00 -08:00
|
|
|
|
2017-08-28 16:59:39 +02:00
|
|
|
@mock.patch.object(os, 'readlink', autospec=True)
|
|
|
|
@mock.patch.object(os, 'listdir', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(hardware, 'get_cached_node', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2016-05-19 10:48:02 +01:00
|
|
|
def test_get_os_install_device_fails(self, mocked_execute,
|
2017-08-28 16:59:39 +02:00
|
|
|
mock_cached_node,
|
|
|
|
mocked_listdir, mocked_readlink):
|
2015-09-02 15:21:04 +00:00
|
|
|
"""Fail to find device >=4GB w/o root device hints"""
|
2017-09-27 15:42:13 +02:00
|
|
|
mocked_readlink.return_value = '../../sda'
|
2017-08-28 16:59:39 +02:00
|
|
|
mocked_listdir.return_value = ['1:0:0:0']
|
2016-05-19 10:48:02 +01:00
|
|
|
mock_cached_node.return_value = None
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = (hws.BLK_DEVICE_TEMPLATE_SMALL, '')
|
2015-09-02 15:21:04 +00:00
|
|
|
ex = self.assertRaises(errors.DeviceNotFound,
|
|
|
|
self.hardware.get_os_install_device)
|
|
|
|
mocked_execute.assert_called_once_with(
|
2021-03-02 16:19:32 +01:00
|
|
|
'lsblk', '-Pbia', '-oKNAME,MODEL,SIZE,ROTA,TYPE,UUID,PARTUUID')
|
2015-09-02 15:21:04 +00:00
|
|
|
self.assertIn(str(4 * units.Gi), ex.details)
|
2016-05-19 10:48:02 +01:00
|
|
|
mock_cached_node.assert_called_once_with()
|
2015-09-02 15:21:04 +00:00
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'get_cached_node', autospec=True)
|
2016-01-19 13:36:26 +00:00
|
|
|
def _get_os_install_device_root_device_hints(self, hints, expected_device,
|
2016-05-19 10:48:02 +01:00
|
|
|
mock_cached_node, mock_dev):
|
2017-03-09 14:02:06 +00:00
|
|
|
mock_cached_node.return_value = {'properties': {'root_device': hints},
|
2020-01-03 17:29:05 +01:00
|
|
|
'uuid': 'node1',
|
|
|
|
'instance_info': {}}
|
2015-03-10 11:20:40 +00:00
|
|
|
model = 'fastable sd131 7'
|
2015-09-18 12:52:33 +02:00
|
|
|
mock_dev.return_value = [
|
|
|
|
hardware.BlockDevice(name='/dev/sda',
|
|
|
|
model='TinyUSB Drive',
|
|
|
|
size=3116853504,
|
|
|
|
rotational=False,
|
|
|
|
vendor='Super Vendor',
|
|
|
|
wwn='wwn0',
|
2015-11-18 09:53:54 +00:00
|
|
|
wwn_with_extension='wwn0ven0',
|
|
|
|
wwn_vendor_extension='ven0',
|
2015-09-18 12:52:33 +02:00
|
|
|
serial='serial0'),
|
|
|
|
hardware.BlockDevice(name='/dev/sdb',
|
|
|
|
model=model,
|
|
|
|
size=10737418240,
|
2016-07-06 14:16:16 +01:00
|
|
|
rotational=True,
|
2015-09-18 12:52:33 +02:00
|
|
|
vendor='fake-vendor',
|
|
|
|
wwn='fake-wwn',
|
2015-11-18 09:53:54 +00:00
|
|
|
wwn_with_extension='fake-wwnven0',
|
|
|
|
wwn_vendor_extension='ven0',
|
2017-08-28 16:59:39 +02:00
|
|
|
serial='fake-serial',
|
|
|
|
by_path='/dev/disk/by-path/1:0:0:0'),
|
2015-09-18 12:52:33 +02:00
|
|
|
]
|
2015-03-10 11:20:40 +00:00
|
|
|
|
2016-01-19 13:36:26 +00:00
|
|
|
self.assertEqual(expected_device,
|
|
|
|
self.hardware.get_os_install_device())
|
2016-05-19 10:48:02 +01:00
|
|
|
mock_cached_node.assert_called_once_with()
|
2015-09-18 12:52:33 +02:00
|
|
|
mock_dev.assert_called_once_with()
|
2015-03-10 11:20:40 +00:00
|
|
|
|
2016-01-19 13:36:26 +00:00
|
|
|
def test_get_os_install_device_root_device_hints_model(self):
|
|
|
|
self._get_os_install_device_root_device_hints(
|
|
|
|
{'model': 'fastable sd131 7'}, '/dev/sdb')
|
|
|
|
|
|
|
|
def test_get_os_install_device_root_device_hints_wwn(self):
|
|
|
|
self._get_os_install_device_root_device_hints(
|
|
|
|
{'wwn': 'wwn0'}, '/dev/sda')
|
|
|
|
|
|
|
|
def test_get_os_install_device_root_device_hints_serial(self):
|
|
|
|
self._get_os_install_device_root_device_hints(
|
|
|
|
{'serial': 'serial0'}, '/dev/sda')
|
|
|
|
|
|
|
|
def test_get_os_install_device_root_device_hints_size(self):
|
|
|
|
self._get_os_install_device_root_device_hints(
|
|
|
|
{'size': 10}, '/dev/sdb')
|
|
|
|
|
2016-07-07 09:38:17 +01:00
|
|
|
def test_get_os_install_device_root_device_hints_size_str(self):
|
|
|
|
self._get_os_install_device_root_device_hints(
|
|
|
|
{'size': '10'}, '/dev/sdb')
|
|
|
|
|
2016-09-07 14:10:11 +01:00
|
|
|
def test_get_os_install_device_root_device_hints_size_not_int(self):
|
2016-07-07 09:38:17 +01:00
|
|
|
self.assertRaises(errors.DeviceNotFound,
|
|
|
|
self._get_os_install_device_root_device_hints,
|
|
|
|
{'size': 'not-int'}, '/dev/sdb')
|
|
|
|
|
2016-01-19 13:36:26 +00:00
|
|
|
def test_get_os_install_device_root_device_hints_vendor(self):
|
|
|
|
self._get_os_install_device_root_device_hints(
|
|
|
|
{'vendor': 'fake-vendor'}, '/dev/sdb')
|
|
|
|
|
|
|
|
def test_get_os_install_device_root_device_hints_name(self):
|
|
|
|
self._get_os_install_device_root_device_hints(
|
|
|
|
{'name': '/dev/sdb'}, '/dev/sdb')
|
|
|
|
|
2016-07-06 14:16:16 +01:00
|
|
|
def test_get_os_install_device_root_device_hints_rotational(self):
|
|
|
|
for value in (True, 'true', 'on', 'y', 'yes'):
|
|
|
|
self._get_os_install_device_root_device_hints(
|
|
|
|
{'rotational': value}, '/dev/sdb')
|
|
|
|
|
2017-09-27 15:42:13 +02:00
|
|
|
def test_get_os_install_device_root_device_hints_by_path(self):
|
2017-08-28 16:59:39 +02:00
|
|
|
self._get_os_install_device_root_device_hints(
|
|
|
|
{'by_path': '/dev/disk/by-path/1:0:0:0'}, '/dev/sdb')
|
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'get_cached_node', autospec=True)
|
2015-09-18 12:52:33 +02:00
|
|
|
def test_get_os_install_device_root_device_hints_no_device_found(
|
2016-05-19 10:48:02 +01:00
|
|
|
self, mock_cached_node, mock_dev):
|
2015-09-18 12:52:33 +02:00
|
|
|
model = 'fastable sd131 7'
|
2016-05-19 10:48:02 +01:00
|
|
|
mock_cached_node.return_value = {
|
2020-01-03 17:29:05 +01:00
|
|
|
'instance_info': {},
|
2016-05-19 10:48:02 +01:00
|
|
|
'properties': {
|
|
|
|
'root_device': {
|
|
|
|
'model': model,
|
|
|
|
'wwn': 'fake-wwn',
|
|
|
|
'serial': 'fake-serial',
|
|
|
|
'vendor': 'fake-vendor',
|
|
|
|
'size': 10}}}
|
2015-09-18 12:52:33 +02:00
|
|
|
# Model is different here
|
|
|
|
mock_dev.return_value = [
|
|
|
|
hardware.BlockDevice(name='/dev/sda',
|
|
|
|
model='TinyUSB Drive',
|
|
|
|
size=3116853504,
|
|
|
|
rotational=False,
|
|
|
|
vendor='Super Vendor',
|
|
|
|
wwn='wwn0',
|
|
|
|
serial='serial0'),
|
|
|
|
hardware.BlockDevice(name='/dev/sdb',
|
|
|
|
model='Another Model',
|
|
|
|
size=10737418240,
|
|
|
|
rotational=False,
|
|
|
|
vendor='fake-vendor',
|
|
|
|
wwn='fake-wwn',
|
|
|
|
serial='fake-serial'),
|
|
|
|
]
|
2015-03-10 11:20:40 +00:00
|
|
|
self.assertRaises(errors.DeviceNotFound,
|
|
|
|
self.hardware.get_os_install_device)
|
2016-05-19 10:48:02 +01:00
|
|
|
mock_cached_node.assert_called_once_with()
|
2015-09-18 12:52:33 +02:00
|
|
|
mock_dev.assert_called_once_with()
|
2015-03-10 11:20:40 +00:00
|
|
|
|
2020-01-03 17:29:05 +01:00
|
|
|
@mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'get_cached_node', autospec=True)
|
|
|
|
def test_get_os_install_device_root_device_hints_iinfo(self,
|
|
|
|
mock_cached_node,
|
|
|
|
mock_dev):
|
|
|
|
model = 'fastable sd131 7'
|
|
|
|
mock_cached_node.return_value = {
|
|
|
|
'instance_info': {'root_device': {'model': model}},
|
|
|
|
'uuid': 'node1'
|
|
|
|
}
|
|
|
|
mock_dev.return_value = [
|
|
|
|
hardware.BlockDevice(name='/dev/sda',
|
|
|
|
model='TinyUSB Drive',
|
|
|
|
size=3116853504,
|
|
|
|
rotational=False,
|
|
|
|
vendor='Super Vendor',
|
|
|
|
wwn='wwn0',
|
|
|
|
wwn_with_extension='wwn0ven0',
|
|
|
|
wwn_vendor_extension='ven0',
|
|
|
|
serial='serial0'),
|
|
|
|
hardware.BlockDevice(name='/dev/sdb',
|
|
|
|
model=model,
|
|
|
|
size=10737418240,
|
|
|
|
rotational=True,
|
|
|
|
vendor='fake-vendor',
|
|
|
|
wwn='fake-wwn',
|
|
|
|
wwn_with_extension='fake-wwnven0',
|
|
|
|
wwn_vendor_extension='ven0',
|
|
|
|
serial='fake-serial',
|
|
|
|
by_path='/dev/disk/by-path/1:0:0:0'),
|
|
|
|
]
|
|
|
|
|
|
|
|
self.assertEqual('/dev/sdb', self.hardware.get_os_install_device())
|
|
|
|
mock_cached_node.assert_called_once_with()
|
|
|
|
mock_dev.assert_called_once_with()
|
|
|
|
|
2020-08-19 18:44:39 -07:00
|
|
|
@mock.patch.object(hardware, 'update_cached_node', autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'get_cached_node', autospec=True)
|
|
|
|
def test_get_os_install_device_no_root_device(self, mock_cached_node,
|
|
|
|
mock_dev,
|
|
|
|
mock_update):
|
|
|
|
mock_cached_node.return_value = {'properties': {},
|
|
|
|
'uuid': 'node1',
|
|
|
|
'instance_info': {}}
|
|
|
|
mock_dev.return_value = [
|
|
|
|
hardware.BlockDevice(name='/dev/sda',
|
|
|
|
model='TinyUSB Drive',
|
|
|
|
size=3116853504,
|
|
|
|
rotational=False,
|
|
|
|
vendor='Super Vendor',
|
|
|
|
wwn='wwn0',
|
|
|
|
wwn_with_extension='wwn0ven0',
|
|
|
|
wwn_vendor_extension='ven0',
|
|
|
|
serial='serial0'),
|
|
|
|
hardware.BlockDevice(name='/dev/sdb',
|
|
|
|
model='magical disk',
|
|
|
|
size=10737418240,
|
|
|
|
rotational=True,
|
|
|
|
vendor='fake-vendor',
|
|
|
|
wwn='fake-wwn',
|
|
|
|
wwn_with_extension='fake-wwnven0',
|
|
|
|
wwn_vendor_extension='ven0',
|
|
|
|
serial='fake-serial',
|
|
|
|
by_path='/dev/disk/by-path/1:0:0:0'),
|
|
|
|
]
|
|
|
|
mock_update.return_value = {'properties': {'root_device':
|
|
|
|
{'name': '/dev/sda'}},
|
|
|
|
'uuid': 'node1',
|
|
|
|
'instance_info': {'magic': 'value'}}
|
|
|
|
self.assertEqual('/dev/sda',
|
|
|
|
self.hardware.get_os_install_device(
|
|
|
|
permit_refresh=True))
|
|
|
|
self.assertEqual(1, mock_cached_node.call_count)
|
|
|
|
mock_dev.assert_called_once_with()
|
|
|
|
|
2016-02-11 14:00:01 +02:00
|
|
|
def test__get_device_info(self):
|
2015-03-10 11:20:40 +00:00
|
|
|
fileobj = mock.mock_open(read_data='fake-vendor')
|
2016-02-17 14:08:42 +02:00
|
|
|
with mock.patch(
|
2019-11-28 17:10:40 +01:00
|
|
|
'builtins.open', fileobj, create=True) as mock_open:
|
2016-02-11 14:00:01 +02:00
|
|
|
vendor = hardware._get_device_info(
|
|
|
|
'/dev/sdfake', 'block', 'vendor')
|
2015-03-10 11:20:40 +00:00
|
|
|
mock_open.assert_called_once_with(
|
|
|
|
'/sys/class/block/sdfake/device/vendor', 'r')
|
|
|
|
self.assertEqual('fake-vendor', vendor)
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'execute', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_cpus(self, mocked_execute, mte):
|
|
|
|
mocked_execute.return_value = (hws.LSCPU_OUTPUT, '')
|
|
|
|
mte.return_value = (hws.CPUINFO_FLAGS_OUTPUT, '')
|
2014-04-09 17:09:51 -07:00
|
|
|
|
2015-08-06 13:03:27 +02:00
|
|
|
cpus = self.hardware.get_cpus()
|
2016-01-12 09:03:19 +00:00
|
|
|
self.assertEqual('Intel(R) Xeon(R) CPU E5-2609 0 @ 2.40GHz',
|
|
|
|
cpus.model_name)
|
|
|
|
self.assertEqual('2400.0000', cpus.frequency)
|
|
|
|
self.assertEqual(4, cpus.count)
|
|
|
|
self.assertEqual('x86_64', cpus.architecture)
|
2016-04-20 13:58:14 +02:00
|
|
|
self.assertEqual(['fpu', 'vme', 'de', 'pse'], cpus.flags)
|
2015-08-06 13:03:27 +02:00
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'execute', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_cpus2(self, mocked_execute, mte):
|
|
|
|
mocked_execute.return_value = (hws.LSCPU_OUTPUT_NO_MAX_MHZ, '')
|
|
|
|
mte.return_value = (hws.CPUINFO_FLAGS_OUTPUT, '')
|
2014-04-09 17:09:51 -07:00
|
|
|
|
|
|
|
cpus = self.hardware.get_cpus()
|
2016-01-12 09:03:19 +00:00
|
|
|
self.assertEqual('Intel(R) Xeon(R) CPU E5-1650 v3 @ 3.50GHz',
|
|
|
|
cpus.model_name)
|
|
|
|
self.assertEqual('1794.433', cpus.frequency)
|
|
|
|
self.assertEqual(12, cpus.count)
|
|
|
|
self.assertEqual('x86_64', cpus.architecture)
|
2016-04-20 13:58:14 +02:00
|
|
|
self.assertEqual(['fpu', 'vme', 'de', 'pse'], cpus.flags)
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'execute', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_cpus_no_flags(self, mocked_execute, mte):
|
|
|
|
mocked_execute.return_value = (hws.LSCPU_OUTPUT, '')
|
|
|
|
mte.side_effect = processutils.ProcessExecutionError()
|
2016-04-20 13:58:14 +02:00
|
|
|
|
|
|
|
cpus = self.hardware.get_cpus()
|
|
|
|
self.assertEqual('Intel(R) Xeon(R) CPU E5-2609 0 @ 2.40GHz',
|
|
|
|
cpus.model_name)
|
|
|
|
self.assertEqual('2400.0000', cpus.frequency)
|
|
|
|
self.assertEqual(4, cpus.count)
|
|
|
|
self.assertEqual('x86_64', cpus.architecture)
|
|
|
|
self.assertEqual([], cpus.flags)
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'execute', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_cpus_illegal_flags(self, mocked_execute, mte):
|
|
|
|
mocked_execute.return_value = (hws.LSCPU_OUTPUT, '')
|
|
|
|
mte.return_value = ('I am not a flag', '')
|
2016-04-20 13:58:14 +02:00
|
|
|
|
|
|
|
cpus = self.hardware.get_cpus()
|
|
|
|
self.assertEqual('Intel(R) Xeon(R) CPU E5-2609 0 @ 2.40GHz',
|
|
|
|
cpus.model_name)
|
|
|
|
self.assertEqual('2400.0000', cpus.frequency)
|
|
|
|
self.assertEqual(4, cpus.count)
|
|
|
|
self.assertEqual('x86_64', cpus.architecture)
|
|
|
|
self.assertEqual([], cpus.flags)
|
2015-08-06 13:03:27 +02:00
|
|
|
|
2017-01-24 22:59:46 +00:00
|
|
|
@mock.patch('psutil.virtual_memory', autospec=True)
|
2017-01-25 08:52:46 -08:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-03-27 22:38:59 +11:00
|
|
|
def test_get_memory_psutil_v1(self, mocked_execute, mocked_psutil):
|
2017-01-25 08:52:46 -08:00
|
|
|
mocked_psutil.return_value.total = 3952 * 1024 * 1024
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = hws.LSHW_JSON_OUTPUT_V1
|
2015-08-06 13:03:27 +02:00
|
|
|
mem = self.hardware.get_memory()
|
|
|
|
|
2016-01-12 09:03:19 +00:00
|
|
|
self.assertEqual(3952 * 1024 * 1024, mem.total)
|
|
|
|
self.assertEqual(4096, mem.physical_mb)
|
2014-04-09 17:09:51 -07:00
|
|
|
|
2017-01-25 08:52:46 -08:00
|
|
|
@mock.patch('psutil.virtual_memory', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-03-27 22:38:59 +11:00
|
|
|
def test_get_memory_psutil_v2(self, mocked_execute, mocked_psutil):
|
|
|
|
mocked_psutil.return_value.total = 3952 * 1024 * 1024
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = hws.LSHW_JSON_OUTPUT_V2
|
2019-03-27 22:38:59 +11:00
|
|
|
mem = self.hardware.get_memory()
|
|
|
|
|
|
|
|
self.assertEqual(3952 * 1024 * 1024, mem.total)
|
|
|
|
self.assertEqual(65536, mem.physical_mb)
|
|
|
|
|
|
|
|
@mock.patch('psutil.virtual_memory', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_memory_psutil_exception_v1(self, mocked_execute,
|
|
|
|
mocked_psutil):
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = hws.LSHW_JSON_OUTPUT_V1
|
2017-01-25 08:52:46 -08:00
|
|
|
mocked_psutil.side_effect = AttributeError()
|
|
|
|
mem = self.hardware.get_memory()
|
|
|
|
|
|
|
|
self.assertIsNone(mem.total)
|
|
|
|
self.assertEqual(4096, mem.physical_mb)
|
|
|
|
|
2019-03-27 22:38:59 +11:00
|
|
|
@mock.patch('psutil.virtual_memory', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_memory_psutil_exception_v2(self, mocked_execute,
|
|
|
|
mocked_psutil):
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = hws.LSHW_JSON_OUTPUT_V2
|
2019-03-27 22:38:59 +11:00
|
|
|
mocked_psutil.side_effect = AttributeError()
|
|
|
|
mem = self.hardware.get_memory()
|
|
|
|
|
|
|
|
self.assertIsNone(mem.total)
|
|
|
|
self.assertEqual(65536, mem.physical_mb)
|
|
|
|
|
2017-11-30 16:30:42 +00:00
|
|
|
@mock.patch('psutil.virtual_memory', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_memory_lshw_exception(self, mocked_execute, mocked_psutil):
|
|
|
|
mocked_execute.side_effect = OSError()
|
|
|
|
mocked_psutil.return_value.total = 3952 * 1024 * 1024
|
|
|
|
mem = self.hardware.get_memory()
|
|
|
|
|
|
|
|
self.assertEqual(3952 * 1024 * 1024, mem.total)
|
|
|
|
self.assertIsNone(mem.physical_mb)
|
|
|
|
|
2019-07-24 16:03:33 +08:00
|
|
|
@mock.patch('psutil.virtual_memory', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_memory_arm64_lshw(self, mocked_execute, mocked_psutil):
|
|
|
|
mocked_psutil.return_value.total = 3952 * 1024 * 1024
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = hws.LSHW_JSON_OUTPUT_ARM64
|
2019-07-24 16:03:33 +08:00
|
|
|
mem = self.hardware.get_memory()
|
|
|
|
|
|
|
|
self.assertEqual(3952 * 1024 * 1024, mem.total)
|
|
|
|
self.assertEqual(3952, mem.physical_mb)
|
|
|
|
|
2020-11-06 19:03:56 +01:00
|
|
|
@mock.patch('psutil.virtual_memory', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_memory_lshw_list(self, mocked_execute, mocked_psutil):
|
|
|
|
mocked_psutil.return_value.total = 3952 * 1024 * 1024
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = (f"[{hws.LSHW_JSON_OUTPUT_V2[0]}]", "")
|
2020-11-06 19:03:56 +01:00
|
|
|
mem = self.hardware.get_memory()
|
|
|
|
|
|
|
|
self.assertEqual(3952 * 1024 * 1024, mem.total)
|
|
|
|
self.assertEqual(65536, mem.physical_mb)
|
|
|
|
|
2019-06-07 12:11:08 -04:00
|
|
|
@mock.patch('ironic_python_agent.netutils.get_hostname', autospec=True)
|
|
|
|
def test_list_hardware_info(self, mocked_get_hostname):
|
2014-01-28 11:25:00 -08:00
|
|
|
self.hardware.list_network_interfaces = mock.Mock()
|
|
|
|
self.hardware.list_network_interfaces.return_value = [
|
|
|
|
hardware.NetworkInterface('eth0', '00:0c:29:8c:11:b1'),
|
|
|
|
hardware.NetworkInterface('eth1', '00:0c:29:8c:11:b2'),
|
|
|
|
]
|
|
|
|
|
2014-04-09 17:09:51 -07:00
|
|
|
self.hardware.get_cpus = mock.Mock()
|
|
|
|
self.hardware.get_cpus.return_value = hardware.CPU(
|
|
|
|
'Awesome CPU x14 9001',
|
|
|
|
9001,
|
2015-08-06 13:03:27 +02:00
|
|
|
14,
|
|
|
|
'x86_64')
|
2014-04-09 17:09:51 -07:00
|
|
|
|
|
|
|
self.hardware.get_memory = mock.Mock()
|
|
|
|
self.hardware.get_memory.return_value = hardware.Memory(1017012)
|
|
|
|
|
|
|
|
self.hardware.list_block_devices = mock.Mock()
|
|
|
|
self.hardware.list_block_devices.return_value = [
|
2014-07-18 08:13:12 -07:00
|
|
|
hardware.BlockDevice('/dev/sdj', 'big', 1073741824, True),
|
|
|
|
hardware.BlockDevice('/dev/hdaa', 'small', 65535, False),
|
2014-04-09 17:09:51 -07:00
|
|
|
]
|
|
|
|
|
2016-04-20 13:58:14 +02:00
|
|
|
self.hardware.get_boot_info = mock.Mock()
|
|
|
|
self.hardware.get_boot_info.return_value = hardware.BootInfo(
|
|
|
|
current_boot_mode='bios', pxe_interface='boot:if')
|
|
|
|
|
2017-04-03 15:32:44 +10:00
|
|
|
self.hardware.get_bmc_address = mock.Mock()
|
2019-03-04 15:31:14 +03:00
|
|
|
self.hardware.get_bmc_v6address = mock.Mock()
|
2017-04-03 15:32:44 +10:00
|
|
|
self.hardware.get_system_vendor_info = mock.Mock()
|
|
|
|
|
2019-06-07 12:11:08 -04:00
|
|
|
mocked_get_hostname.return_value = 'mock_hostname'
|
|
|
|
|
2014-01-28 11:25:00 -08:00
|
|
|
hardware_info = self.hardware.list_hardware_info()
|
2016-01-12 09:03:19 +00:00
|
|
|
self.assertEqual(self.hardware.get_memory(), hardware_info['memory'])
|
|
|
|
self.assertEqual(self.hardware.get_cpus(), hardware_info['cpu'])
|
|
|
|
self.assertEqual(self.hardware.list_block_devices(),
|
|
|
|
hardware_info['disks'])
|
|
|
|
self.assertEqual(self.hardware.list_network_interfaces(),
|
|
|
|
hardware_info['interfaces'])
|
2016-04-20 13:58:14 +02:00
|
|
|
self.assertEqual(self.hardware.get_boot_info(),
|
|
|
|
hardware_info['boot'])
|
2019-06-07 12:11:08 -04:00
|
|
|
self.assertEqual('mock_hostname', hardware_info['hostname'])
|
2014-06-04 10:44:25 -07:00
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
|
2015-08-07 15:41:30 -07:00
|
|
|
def test_list_block_devices(self, list_mock):
|
|
|
|
device = hardware.BlockDevice('/dev/hdaa', 'small', 65535, False)
|
|
|
|
list_mock.return_value = [device]
|
|
|
|
devices = self.hardware.list_block_devices()
|
|
|
|
|
|
|
|
self.assertEqual([device], devices)
|
|
|
|
|
|
|
|
list_mock.assert_called_once_with()
|
|
|
|
|
2018-08-29 22:57:02 -05:00
|
|
|
@mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
|
|
|
|
def test_list_block_devices_including_partitions(self, list_mock):
|
|
|
|
device = hardware.BlockDevice('/dev/hdaa', 'small', 65535, False)
|
|
|
|
partition = hardware.BlockDevice('/dev/hdaa1', '', 32767, False)
|
|
|
|
list_mock.side_effect = [[device], [partition]]
|
|
|
|
devices = self.hardware.list_block_devices(include_partitions=True)
|
|
|
|
|
|
|
|
self.assertEqual([device, partition], devices)
|
|
|
|
|
|
|
|
self.assertEqual([mock.call(), mock.call(block_type='part',
|
|
|
|
ignore_raid=True)],
|
|
|
|
list_mock.call_args_list)
|
|
|
|
|
2017-08-28 16:59:39 +02:00
|
|
|
@mock.patch.object(os, 'readlink', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(os, 'listdir', autospec=True)
|
|
|
|
@mock.patch.object(hardware, '_get_device_info', autospec=True)
|
2019-07-08 18:42:16 +02:00
|
|
|
@mock.patch.object(pyudev.Devices, 'from_device_file', autospec=False)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2015-09-18 12:52:33 +02:00
|
|
|
def test_list_all_block_device(self, mocked_execute, mocked_udev,
|
2017-08-28 16:59:39 +02:00
|
|
|
mocked_dev_vendor, mock_listdir,
|
|
|
|
mock_readlink):
|
|
|
|
by_path_map = {
|
2017-09-27 15:42:13 +02:00
|
|
|
'/dev/disk/by-path/1:0:0:0': '../../dev/sda',
|
|
|
|
'/dev/disk/by-path/1:0:0:1': '../../dev/sdb',
|
|
|
|
'/dev/disk/by-path/1:0:0:2': '../../dev/sdc',
|
|
|
|
# pretend that the by-path link to ../../dev/sdd is missing
|
2017-08-28 16:59:39 +02:00
|
|
|
}
|
|
|
|
mock_readlink.side_effect = lambda x, m=by_path_map: m[x]
|
|
|
|
mock_listdir.return_value = [os.path.basename(x)
|
|
|
|
for x in sorted(by_path_map)]
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = (hws.BLK_DEVICE_TEMPLATE, '')
|
2020-02-10 14:28:02 -08:00
|
|
|
mocked_udev.side_effect = [pyudev.DeviceNotFoundByFileError(),
|
|
|
|
pyudev.DeviceNotFoundByNumberError('block',
|
|
|
|
1234),
|
|
|
|
pyudev.DeviceNotFoundByFileError(),
|
|
|
|
pyudev.DeviceNotFoundByFileError()]
|
2015-09-18 12:52:33 +02:00
|
|
|
mocked_dev_vendor.return_value = 'Super Vendor'
|
|
|
|
devices = hardware.list_all_block_devices()
|
|
|
|
expected_devices = [
|
|
|
|
hardware.BlockDevice(name='/dev/sda',
|
|
|
|
model='TinyUSB Drive',
|
|
|
|
size=3116853504,
|
|
|
|
rotational=False,
|
2016-12-07 10:50:45 +00:00
|
|
|
vendor='Super Vendor',
|
2017-08-28 16:59:39 +02:00
|
|
|
hctl='1:0:0:0',
|
|
|
|
by_path='/dev/disk/by-path/1:0:0:0'),
|
2015-09-18 12:52:33 +02:00
|
|
|
hardware.BlockDevice(name='/dev/sdb',
|
|
|
|
model='Fastable SD131 7',
|
|
|
|
size=10737418240,
|
|
|
|
rotational=False,
|
2016-12-07 10:50:45 +00:00
|
|
|
vendor='Super Vendor',
|
2017-08-28 16:59:39 +02:00
|
|
|
hctl='1:0:0:0',
|
|
|
|
by_path='/dev/disk/by-path/1:0:0:1'),
|
2015-09-18 12:52:33 +02:00
|
|
|
hardware.BlockDevice(name='/dev/sdc',
|
|
|
|
model='NWD-BLP4-1600',
|
|
|
|
size=1765517033472,
|
|
|
|
rotational=False,
|
2016-12-07 10:50:45 +00:00
|
|
|
vendor='Super Vendor',
|
2017-08-28 16:59:39 +02:00
|
|
|
hctl='1:0:0:0',
|
|
|
|
by_path='/dev/disk/by-path/1:0:0:2'),
|
2015-09-18 12:52:33 +02:00
|
|
|
hardware.BlockDevice(name='/dev/sdd',
|
|
|
|
model='NWD-BLP4-1600',
|
|
|
|
size=1765517033472,
|
|
|
|
rotational=False,
|
2016-12-07 10:50:45 +00:00
|
|
|
vendor='Super Vendor',
|
2017-09-27 15:42:13 +02:00
|
|
|
hctl='1:0:0:0'),
|
2015-09-18 12:52:33 +02:00
|
|
|
]
|
|
|
|
|
2015-12-04 12:33:07 +01:00
|
|
|
self.assertEqual(4, len(devices))
|
2015-09-18 12:52:33 +02:00
|
|
|
for expected, device in zip(expected_devices, devices):
|
|
|
|
# Compare all attrs of the objects
|
|
|
|
for attr in ['name', 'model', 'size', 'rotational',
|
2016-12-07 10:50:45 +00:00
|
|
|
'wwn', 'vendor', 'serial', 'hctl']:
|
2015-09-18 12:52:33 +02:00
|
|
|
self.assertEqual(getattr(expected, attr),
|
|
|
|
getattr(device, attr))
|
2016-12-07 10:50:45 +00:00
|
|
|
expected_calls = [mock.call('/sys/block/%s/device/scsi_device' % dev)
|
|
|
|
for dev in ('sda', 'sdb', 'sdc', 'sdd')]
|
|
|
|
mock_listdir.assert_has_calls(expected_calls)
|
2015-09-18 12:52:33 +02:00
|
|
|
|
2017-08-28 16:59:39 +02:00
|
|
|
expected_calls = [mock.call('/dev/disk/by-path/1:0:0:%d' % dev)
|
2017-09-27 15:42:13 +02:00
|
|
|
for dev in range(3)]
|
2017-08-28 16:59:39 +02:00
|
|
|
mock_readlink.assert_has_calls(expected_calls)
|
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(os, 'listdir', autospec=True)
|
|
|
|
@mock.patch.object(hardware, '_get_device_info', autospec=True)
|
2019-07-08 18:42:16 +02:00
|
|
|
@mock.patch.object(pyudev.Devices, 'from_device_file', autospec=False)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2016-12-07 10:50:45 +00:00
|
|
|
def test_list_all_block_device_hctl_fail(self, mocked_execute, mocked_udev,
|
|
|
|
mocked_dev_vendor,
|
|
|
|
mocked_listdir):
|
2017-08-28 16:59:39 +02:00
|
|
|
mocked_listdir.side_effect = (OSError, OSError, IndexError)
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = (hws.BLK_DEVICE_TEMPLATE_SMALL, '')
|
2016-12-07 10:50:45 +00:00
|
|
|
mocked_dev_vendor.return_value = 'Super Vendor'
|
|
|
|
devices = hardware.list_all_block_devices()
|
|
|
|
self.assertEqual(2, len(devices))
|
2017-08-28 16:59:39 +02:00
|
|
|
expected_calls = [
|
|
|
|
mock.call('/dev/disk/by-path'),
|
|
|
|
mock.call('/sys/block/sda/device/scsi_device'),
|
|
|
|
mock.call('/sys/block/sdb/device/scsi_device')
|
|
|
|
]
|
|
|
|
self.assertEqual(expected_calls, mocked_listdir.call_args_list)
|
2016-12-07 10:50:45 +00:00
|
|
|
|
2017-08-28 16:59:39 +02:00
|
|
|
@mock.patch.object(os, 'readlink', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(os, 'listdir', autospec=True)
|
|
|
|
@mock.patch.object(hardware, '_get_device_info', autospec=True)
|
2019-07-08 18:42:16 +02:00
|
|
|
@mock.patch.object(pyudev.Devices, 'from_device_file', autospec=False)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2015-09-18 12:52:33 +02:00
|
|
|
def test_list_all_block_device_with_udev(self, mocked_execute, mocked_udev,
|
2017-08-28 16:59:39 +02:00
|
|
|
mocked_dev_vendor, mocked_listdir,
|
|
|
|
mocked_readlink):
|
2017-09-27 15:42:13 +02:00
|
|
|
mocked_readlink.return_value = '../../sda'
|
2017-08-28 16:59:39 +02:00
|
|
|
mocked_listdir.return_value = ['1:0:0:0']
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = (hws.BLK_DEVICE_TEMPLATE, '')
|
2015-09-18 12:52:33 +02:00
|
|
|
mocked_udev.side_effect = iter([
|
2015-11-16 14:50:55 +00:00
|
|
|
{'ID_WWN': 'wwn%d' % i, 'ID_SERIAL_SHORT': 'serial%d' % i,
|
|
|
|
'ID_WWN_WITH_EXTENSION': 'wwn-ext%d' % i,
|
|
|
|
'ID_WWN_VENDOR_EXTENSION': 'wwn-vendor-ext%d' % i}
|
2015-09-18 12:52:33 +02:00
|
|
|
for i in range(4)
|
|
|
|
])
|
|
|
|
mocked_dev_vendor.return_value = 'Super Vendor'
|
2015-08-07 15:41:30 -07:00
|
|
|
devices = hardware.list_all_block_devices()
|
2014-07-18 08:13:12 -07:00
|
|
|
expected_devices = [
|
|
|
|
hardware.BlockDevice(name='/dev/sda',
|
|
|
|
model='TinyUSB Drive',
|
|
|
|
size=3116853504,
|
2015-09-18 12:52:33 +02:00
|
|
|
rotational=False,
|
|
|
|
vendor='Super Vendor',
|
|
|
|
wwn='wwn0',
|
2015-11-16 14:50:55 +00:00
|
|
|
wwn_with_extension='wwn-ext0',
|
|
|
|
wwn_vendor_extension='wwn-vendor-ext0',
|
2016-12-07 10:50:45 +00:00
|
|
|
serial='serial0',
|
|
|
|
hctl='1:0:0:0'),
|
2014-07-18 08:13:12 -07:00
|
|
|
hardware.BlockDevice(name='/dev/sdb',
|
|
|
|
model='Fastable SD131 7',
|
2015-03-10 11:20:40 +00:00
|
|
|
size=10737418240,
|
2015-09-18 12:52:33 +02:00
|
|
|
rotational=False,
|
|
|
|
vendor='Super Vendor',
|
|
|
|
wwn='wwn1',
|
2015-11-16 14:50:55 +00:00
|
|
|
wwn_with_extension='wwn-ext1',
|
|
|
|
wwn_vendor_extension='wwn-vendor-ext1',
|
2016-12-07 10:50:45 +00:00
|
|
|
serial='serial1',
|
|
|
|
hctl='1:0:0:0'),
|
2014-07-18 08:13:12 -07:00
|
|
|
hardware.BlockDevice(name='/dev/sdc',
|
|
|
|
model='NWD-BLP4-1600',
|
|
|
|
size=1765517033472,
|
2015-09-18 12:52:33 +02:00
|
|
|
rotational=False,
|
|
|
|
vendor='Super Vendor',
|
|
|
|
wwn='wwn2',
|
2015-11-16 14:50:55 +00:00
|
|
|
wwn_with_extension='wwn-ext2',
|
|
|
|
wwn_vendor_extension='wwn-vendor-ext2',
|
2016-12-07 10:50:45 +00:00
|
|
|
serial='serial2',
|
|
|
|
hctl='1:0:0:0'),
|
2014-07-18 08:13:12 -07:00
|
|
|
hardware.BlockDevice(name='/dev/sdd',
|
|
|
|
model='NWD-BLP4-1600',
|
|
|
|
size=1765517033472,
|
2015-09-18 12:52:33 +02:00
|
|
|
rotational=False,
|
|
|
|
vendor='Super Vendor',
|
|
|
|
wwn='wwn3',
|
2015-11-16 14:50:55 +00:00
|
|
|
wwn_with_extension='wwn-ext3',
|
|
|
|
wwn_vendor_extension='wwn-vendor-ext3',
|
2016-12-07 10:50:45 +00:00
|
|
|
serial='serial3',
|
|
|
|
hctl='1:0:0:0')
|
2014-07-18 08:13:12 -07:00
|
|
|
]
|
|
|
|
|
|
|
|
self.assertEqual(4, len(expected_devices))
|
|
|
|
for expected, device in zip(expected_devices, devices):
|
|
|
|
# Compare all attrs of the objects
|
2015-09-18 12:52:33 +02:00
|
|
|
for attr in ['name', 'model', 'size', 'rotational',
|
2015-11-16 14:50:55 +00:00
|
|
|
'wwn', 'vendor', 'serial', 'wwn_with_extension',
|
2016-12-07 10:50:45 +00:00
|
|
|
'wwn_vendor_extension', 'hctl']:
|
2014-07-18 08:13:12 -07:00
|
|
|
self.assertEqual(getattr(expected, attr),
|
|
|
|
getattr(device, attr))
|
2016-12-07 10:50:45 +00:00
|
|
|
expected_calls = [mock.call('/sys/block/%s/device/scsi_device' % dev)
|
|
|
|
for dev in ('sda', 'sdb', 'sdc', 'sdd')]
|
2017-08-28 16:59:39 +02:00
|
|
|
mocked_listdir.assert_has_calls(expected_calls)
|
2014-07-18 08:13:12 -07:00
|
|
|
|
2020-02-13 14:09:09 -08:00
|
|
|
@mock.patch.object(hardware, 'ThreadPool', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
|
2020-02-13 14:09:09 -08:00
|
|
|
def test_erase_devices_no_parallel_by_default(self, mocked_dispatch,
|
|
|
|
mock_threadpool):
|
|
|
|
|
|
|
|
# NOTE(TheJulia): This test was previously more elaborate and
|
|
|
|
# had a high failure rate on py37 and py38. So instead, lets just
|
|
|
|
# test that the threadpool is defaulted to 1 value to ensure
|
|
|
|
# that parallel erasures are not initiated. If the code is ever
|
|
|
|
# modified, differently, hopefully the person editing sees this
|
|
|
|
# message and understands the purpose is single process execution
|
|
|
|
# by default.
|
2015-08-17 09:51:21 -07:00
|
|
|
self.hardware.list_block_devices = mock.Mock()
|
2020-02-13 14:09:09 -08:00
|
|
|
|
2015-08-17 09:51:21 -07:00
|
|
|
self.hardware.list_block_devices.return_value = [
|
|
|
|
hardware.BlockDevice('/dev/sdj', 'big', 1073741824, True),
|
|
|
|
hardware.BlockDevice('/dev/hdaa', 'small', 65535, False),
|
|
|
|
]
|
|
|
|
|
2020-02-13 14:09:09 -08:00
|
|
|
calls = [mock.call(1)]
|
|
|
|
self.hardware.erase_devices({}, [])
|
|
|
|
mock_threadpool.assert_has_calls(calls)
|
2015-08-17 09:51:21 -07:00
|
|
|
|
2016-07-25 19:07:27 +08:00
|
|
|
@mock.patch('multiprocessing.pool.ThreadPool.apply_async', autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
|
|
|
|
def test_erase_devices_concurrency(self, mocked_dispatch, mocked_async):
|
|
|
|
internal_info = self.node['driver_internal_info']
|
|
|
|
internal_info['disk_erasure_concurrency'] = 10
|
|
|
|
mocked_dispatch.return_value = 'erased device'
|
|
|
|
|
2020-02-11 15:54:51 +01:00
|
|
|
apply_result = mock.Mock()
|
2016-07-25 19:07:27 +08:00
|
|
|
apply_result._success = True
|
|
|
|
apply_result._ready = True
|
2020-02-11 15:54:51 +01:00
|
|
|
apply_result.get.return_value = 'erased device'
|
2016-07-25 19:07:27 +08:00
|
|
|
mocked_async.return_value = apply_result
|
|
|
|
|
2018-10-24 10:48:27 +08:00
|
|
|
blkdev1 = hardware.BlockDevice('/dev/sdj', 'big', 1073741824, True)
|
|
|
|
blkdev2 = hardware.BlockDevice('/dev/hdaa', 'small', 65535, False)
|
2016-07-25 19:07:27 +08:00
|
|
|
self.hardware.list_block_devices = mock.Mock()
|
2018-10-24 10:48:27 +08:00
|
|
|
self.hardware.list_block_devices.return_value = [blkdev1, blkdev2]
|
2016-07-25 19:07:27 +08:00
|
|
|
|
|
|
|
expected = {'/dev/hdaa': 'erased device', '/dev/sdj': 'erased device'}
|
|
|
|
|
|
|
|
result = self.hardware.erase_devices(self.node, [])
|
|
|
|
|
2018-10-24 10:48:27 +08:00
|
|
|
calls = [mock.call(mock.ANY, mocked_dispatch, ('erase_block_device',),
|
|
|
|
{'node': self.node, 'block_device': dev})
|
|
|
|
for dev in (blkdev1, blkdev2)]
|
|
|
|
mocked_async.assert_has_calls(calls)
|
2016-07-25 19:07:27 +08:00
|
|
|
self.assertEqual(expected, result)
|
|
|
|
|
|
|
|
@mock.patch.object(hardware, 'ThreadPool', autospec=True)
|
|
|
|
def test_erase_devices_concurrency_pool_size(self, mocked_pool):
|
|
|
|
self.hardware.list_block_devices = mock.Mock()
|
|
|
|
self.hardware.list_block_devices.return_value = [
|
|
|
|
hardware.BlockDevice('/dev/sdj', 'big', 1073741824, True),
|
|
|
|
hardware.BlockDevice('/dev/hdaa', 'small', 65535, False),
|
|
|
|
]
|
|
|
|
|
|
|
|
# Test pool size 10 with 2 disks
|
|
|
|
internal_info = self.node['driver_internal_info']
|
|
|
|
internal_info['disk_erasure_concurrency'] = 10
|
|
|
|
|
|
|
|
self.hardware.erase_devices(self.node, [])
|
|
|
|
mocked_pool.assert_called_with(2)
|
|
|
|
|
|
|
|
# Test default pool size with 2 disks
|
|
|
|
internal_info = self.node['driver_internal_info']
|
|
|
|
del internal_info['disk_erasure_concurrency']
|
|
|
|
|
|
|
|
self.hardware.erase_devices(self.node, [])
|
|
|
|
mocked_pool.assert_called_with(1)
|
|
|
|
|
|
|
|
@mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
|
|
|
|
def test_erase_devices_without_disk(self, mocked_dispatch):
|
|
|
|
self.hardware.list_block_devices = mock.Mock()
|
|
|
|
self.hardware.list_block_devices.return_value = []
|
|
|
|
|
|
|
|
expected = {}
|
|
|
|
result = self.hardware.erase_devices({}, [])
|
|
|
|
self.assertEqual(expected, result)
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-06-11 09:33:59 +02:00
|
|
|
def test_erase_block_device_ata_success(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
2014-06-04 10:44:25 -07:00
|
|
|
mocked_execute.side_effect = [
|
2016-04-08 14:10:52 -07:00
|
|
|
(create_hdparm_info(
|
|
|
|
supported=True, enabled=False, frozen=False,
|
|
|
|
enhanced_erase=False), ''),
|
2021-02-11 15:36:09 +01:00
|
|
|
(hws.SMARTCTL_NORMAL_OUTPUT, ''),
|
2018-06-13 12:13:20 -07:00
|
|
|
('', ''),
|
|
|
|
('', ''),
|
|
|
|
(create_hdparm_info(
|
|
|
|
supported=True, enabled=False, frozen=False,
|
|
|
|
enhanced_erase=False), ''),
|
|
|
|
]
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_raid_member.return_value = False
|
2018-06-13 12:13:20 -07:00
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
|
|
|
mock.call('smartctl', '-d', 'ata', '/dev/sda', '-g', 'security',
|
|
|
|
check_exit_code=[0, 127]),
|
|
|
|
mock.call('hdparm', '--user-master', 'u', '--security-set-pass',
|
|
|
|
'NULL', '/dev/sda'),
|
|
|
|
mock.call('hdparm', '--user-master', 'u', '--security-erase',
|
|
|
|
'NULL', '/dev/sda'),
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
|
|
|
])
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
2018-06-13 12:13:20 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-06-11 09:33:59 +02:00
|
|
|
def test_erase_block_device_ata_success_no_smartctl(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
2018-06-13 12:13:20 -07:00
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(create_hdparm_info(
|
|
|
|
supported=True, enabled=False, frozen=False,
|
|
|
|
enhanced_erase=False), ''),
|
|
|
|
OSError('boom'),
|
2014-06-04 10:44:25 -07:00
|
|
|
('', ''),
|
|
|
|
('', ''),
|
2016-04-08 14:10:52 -07:00
|
|
|
(create_hdparm_info(
|
|
|
|
supported=True, enabled=False, frozen=False,
|
|
|
|
enhanced_erase=False), ''),
|
2014-06-04 10:44:25 -07:00
|
|
|
]
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_raid_member.return_value = False
|
2014-06-04 10:44:25 -07:00
|
|
|
|
2014-07-18 08:13:12 -07:00
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
2015-06-15 21:36:27 +05:30
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
2014-06-04 10:44:25 -07:00
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
2018-06-13 12:13:20 -07:00
|
|
|
mock.call('smartctl', '-d', 'ata', '/dev/sda', '-g', 'security',
|
|
|
|
check_exit_code=[0, 127]),
|
2014-06-04 10:44:25 -07:00
|
|
|
mock.call('hdparm', '--user-master', 'u', '--security-set-pass',
|
|
|
|
'NULL', '/dev/sda'),
|
|
|
|
mock.call('hdparm', '--user-master', 'u', '--security-erase',
|
|
|
|
'NULL', '/dev/sda'),
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
|
|
|
])
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-06-11 09:33:59 +02:00
|
|
|
def test_erase_block_device_nosecurity_shred(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
2021-02-11 15:36:09 +01:00
|
|
|
hdparm_output = hws.HDPARM_INFO_TEMPLATE.split('\nSecurity:')[0]
|
2014-06-04 10:44:25 -07:00
|
|
|
|
|
|
|
mocked_execute.side_effect = [
|
2015-03-17 16:26:06 -07:00
|
|
|
(hdparm_output, ''),
|
2021-02-11 15:36:09 +01:00
|
|
|
(hws.SMARTCTL_UNAVAILABLE_OUTPUT, ''),
|
|
|
|
(hws.SHRED_OUTPUT_1_ITERATION_ZERO_TRUE, '')
|
2014-06-04 10:44:25 -07:00
|
|
|
]
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_raid_member.return_value = False
|
2014-06-04 10:44:25 -07:00
|
|
|
|
2014-07-18 08:13:12 -07:00
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
2015-06-15 21:36:27 +05:30
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
2015-03-17 16:26:06 -07:00
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
2018-06-13 12:13:20 -07:00
|
|
|
mock.call('smartctl', '-d', 'ata', '/dev/sda', '-g', 'security',
|
|
|
|
check_exit_code=[0, 127]),
|
2015-03-17 16:26:06 -07:00
|
|
|
mock.call('shred', '--force', '--zero', '--verbose',
|
2016-04-12 08:37:18 -04:00
|
|
|
'--iterations', '1', '/dev/sda')
|
2015-03-17 16:26:06 -07:00
|
|
|
])
|
2014-06-04 10:44:25 -07:00
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-06-11 09:33:59 +02:00
|
|
|
def test_erase_block_device_notsupported_shred(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
2016-04-08 14:10:52 -07:00
|
|
|
hdparm_output = create_hdparm_info(
|
|
|
|
supported=False, enabled=False, frozen=False, enhanced_erase=False)
|
2014-06-04 10:44:25 -07:00
|
|
|
|
|
|
|
mocked_execute.side_effect = [
|
2015-03-17 16:26:06 -07:00
|
|
|
(hdparm_output, ''),
|
2021-02-11 15:36:09 +01:00
|
|
|
(hws.SMARTCTL_UNAVAILABLE_OUTPUT, ''),
|
|
|
|
(hws.SHRED_OUTPUT_1_ITERATION_ZERO_TRUE, '')
|
2018-06-13 12:13:20 -07:00
|
|
|
]
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_raid_member.return_value = False
|
2018-06-13 12:13:20 -07:00
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
|
|
|
mock.call('smartctl', '-d', 'ata', '/dev/sda', '-g', 'security',
|
|
|
|
check_exit_code=[0, 127]),
|
|
|
|
mock.call('shred', '--force', '--zero', '--verbose',
|
|
|
|
'--iterations', '1', '/dev/sda')
|
|
|
|
])
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
2018-06-13 12:13:20 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_erase_block_device_smartctl_unsupported_shred(self,
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_execute,
|
|
|
|
mocked_raid_member):
|
2018-06-13 12:13:20 -07:00
|
|
|
hdparm_output = create_hdparm_info(
|
|
|
|
supported=True, enabled=False, frozen=False, enhanced_erase=False)
|
|
|
|
|
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(hdparm_output, ''),
|
2021-02-11 15:36:09 +01:00
|
|
|
(hws.SMARTCTL_UNAVAILABLE_OUTPUT, ''),
|
|
|
|
(hws.SHRED_OUTPUT_1_ITERATION_ZERO_TRUE, '')
|
2018-06-13 12:13:20 -07:00
|
|
|
]
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_raid_member.return_value = False
|
2018-06-13 12:13:20 -07:00
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
|
|
|
mock.call('smartctl', '-d', 'ata', '/dev/sda', '-g', 'security',
|
|
|
|
check_exit_code=[0, 127]),
|
|
|
|
mock.call('shred', '--force', '--zero', '--verbose',
|
|
|
|
'--iterations', '1', '/dev/sda')
|
|
|
|
])
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
2018-06-13 12:13:20 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_erase_block_device_smartctl_fails_security_fallback_to_shred(
|
2019-06-11 09:33:59 +02:00
|
|
|
self, mocked_execute, mocked_raid_member):
|
2018-06-13 12:13:20 -07:00
|
|
|
hdparm_output = create_hdparm_info(
|
|
|
|
supported=True, enabled=False, frozen=False, enhanced_erase=False)
|
|
|
|
|
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(hdparm_output, ''),
|
|
|
|
processutils.ProcessExecutionError(),
|
2021-02-11 15:36:09 +01:00
|
|
|
(hws.SHRED_OUTPUT_1_ITERATION_ZERO_TRUE, '')
|
2014-06-04 10:44:25 -07:00
|
|
|
]
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_raid_member.return_value = False
|
2014-06-04 10:44:25 -07:00
|
|
|
|
2014-07-18 08:13:12 -07:00
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
2015-06-15 21:36:27 +05:30
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
2015-03-17 16:26:06 -07:00
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
2018-06-13 12:13:20 -07:00
|
|
|
mock.call('smartctl', '-d', 'ata', '/dev/sda', '-g', 'security',
|
|
|
|
check_exit_code=[0, 127]),
|
2015-03-17 16:26:06 -07:00
|
|
|
mock.call('shred', '--force', '--zero', '--verbose',
|
2015-07-01 15:08:18 +00:00
|
|
|
'--iterations', '1', '/dev/sda')
|
2015-03-17 16:26:06 -07:00
|
|
|
])
|
2014-06-04 10:44:25 -07:00
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-06-11 09:33:59 +02:00
|
|
|
def test_erase_block_device_shred_uses_internal_info(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
2016-04-12 08:37:18 -04:00
|
|
|
hdparm_output = create_hdparm_info(
|
|
|
|
supported=False, enabled=False, frozen=False, enhanced_erase=False)
|
|
|
|
|
2016-06-23 17:41:23 -04:00
|
|
|
info = self.node['driver_internal_info']
|
2016-04-12 08:37:18 -04:00
|
|
|
info['agent_erase_devices_iterations'] = 2
|
|
|
|
info['agent_erase_devices_zeroize'] = False
|
|
|
|
|
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(hdparm_output, ''),
|
2021-02-11 15:36:09 +01:00
|
|
|
(hws.SMARTCTL_NORMAL_OUTPUT, ''),
|
|
|
|
(hws.SHRED_OUTPUT_2_ITERATIONS_ZERO_FALSE, '')
|
2016-04-12 08:37:18 -04:00
|
|
|
]
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_raid_member.return_value = False
|
2016-04-12 08:37:18 -04:00
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
2018-06-13 12:13:20 -07:00
|
|
|
mock.call('smartctl', '-d', 'ata', '/dev/sda', '-g', 'security',
|
|
|
|
check_exit_code=[0, 127]),
|
2016-04-12 08:37:18 -04:00
|
|
|
mock.call('shred', '--force', '--verbose',
|
|
|
|
'--iterations', '2', '/dev/sda')
|
|
|
|
])
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-06-11 09:33:59 +02:00
|
|
|
def test_erase_block_device_shred_0_pass_no_zeroize(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
2016-04-12 08:37:18 -04:00
|
|
|
hdparm_output = create_hdparm_info(
|
|
|
|
supported=False, enabled=False, frozen=False, enhanced_erase=False)
|
|
|
|
|
2016-06-23 17:41:23 -04:00
|
|
|
info = self.node['driver_internal_info']
|
2016-04-12 08:37:18 -04:00
|
|
|
info['agent_erase_devices_iterations'] = 0
|
|
|
|
info['agent_erase_devices_zeroize'] = False
|
|
|
|
|
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(hdparm_output, ''),
|
2021-02-11 15:36:09 +01:00
|
|
|
(hws.SMARTCTL_UNAVAILABLE_OUTPUT, ''),
|
|
|
|
(hws.SHRED_OUTPUT_0_ITERATIONS_ZERO_FALSE, '')
|
2016-04-12 08:37:18 -04:00
|
|
|
]
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_raid_member.return_value = False
|
2016-04-12 08:37:18 -04:00
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('hdparm', '-I', '/dev/sda'),
|
2018-06-13 12:13:20 -07:00
|
|
|
mock.call('smartctl', '-d', 'ata', '/dev/sda', '-g', 'security',
|
|
|
|
check_exit_code=[0, 127]),
|
2016-04-12 08:37:18 -04:00
|
|
|
mock.call('shred', '--force', '--verbose',
|
|
|
|
'--iterations', '0', '/dev/sda')
|
|
|
|
])
|
|
|
|
|
2015-04-29 22:27:46 +05:30
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_virtual_media_device', autospec=True)
|
|
|
|
def test_erase_block_device_virtual_media(self, vm_mock):
|
|
|
|
vm_mock.return_value = True
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
2015-06-15 21:36:27 +05:30
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
2015-04-29 22:27:46 +05:30
|
|
|
vm_mock.assert_called_once_with(self.hardware, block_device)
|
|
|
|
|
|
|
|
@mock.patch.object(os, 'readlink', autospec=True)
|
|
|
|
@mock.patch.object(os.path, 'exists', autospec=True)
|
|
|
|
def test__is_virtual_media_device_exists(self, mocked_exists,
|
|
|
|
mocked_link):
|
|
|
|
mocked_exists.return_value = True
|
|
|
|
mocked_link.return_value = '../../sda'
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
|
|
|
res = self.hardware._is_virtual_media_device(block_device)
|
|
|
|
self.assertTrue(res)
|
|
|
|
mocked_exists.assert_called_once_with('/dev/disk/by-label/ir-vfd-dev')
|
|
|
|
mocked_link.assert_called_once_with('/dev/disk/by-label/ir-vfd-dev')
|
|
|
|
|
|
|
|
@mock.patch.object(os, 'readlink', autospec=True)
|
|
|
|
@mock.patch.object(os.path, 'exists', autospec=True)
|
|
|
|
def test__is_virtual_media_device_exists_no_match(self, mocked_exists,
|
|
|
|
mocked_link):
|
|
|
|
mocked_exists.return_value = True
|
|
|
|
mocked_link.return_value = '../../sdb'
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
|
|
|
res = self.hardware._is_virtual_media_device(block_device)
|
|
|
|
self.assertFalse(res)
|
|
|
|
mocked_exists.assert_called_once_with('/dev/disk/by-label/ir-vfd-dev')
|
|
|
|
mocked_link.assert_called_once_with('/dev/disk/by-label/ir-vfd-dev')
|
|
|
|
|
|
|
|
@mock.patch.object(os, 'readlink', autospec=True)
|
|
|
|
@mock.patch.object(os.path, 'exists', autospec=True)
|
|
|
|
def test__is_virtual_media_device_path_doesnt_exist(self, mocked_exists,
|
2015-10-02 10:01:00 -07:00
|
|
|
mocked_link):
|
2015-04-29 22:27:46 +05:30
|
|
|
mocked_exists.return_value = False
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
|
|
|
res = self.hardware._is_virtual_media_device(block_device)
|
|
|
|
self.assertFalse(res)
|
|
|
|
mocked_exists.assert_called_once_with('/dev/disk/by-label/ir-vfd-dev')
|
|
|
|
self.assertFalse(mocked_link.called)
|
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2015-04-24 15:08:41 +05:30
|
|
|
def test_erase_block_device_shred_fail_oserror(self, mocked_execute):
|
|
|
|
mocked_execute.side_effect = OSError
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
2015-06-15 21:36:27 +05:30
|
|
|
res = self.hardware._shred_block_device(self.node, block_device)
|
2015-04-24 15:08:41 +05:30
|
|
|
self.assertFalse(res)
|
2015-10-02 10:01:00 -07:00
|
|
|
mocked_execute.assert_called_once_with(
|
|
|
|
'shred', '--force', '--zero', '--verbose', '--iterations', '1',
|
|
|
|
'/dev/sda')
|
2015-04-24 15:08:41 +05:30
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2015-04-24 15:08:41 +05:30
|
|
|
def test_erase_block_device_shred_fail_processerror(self, mocked_execute):
|
|
|
|
mocked_execute.side_effect = processutils.ProcessExecutionError
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
2015-06-15 21:36:27 +05:30
|
|
|
res = self.hardware._shred_block_device(self.node, block_device)
|
2015-04-24 15:08:41 +05:30
|
|
|
self.assertFalse(res)
|
2015-10-02 10:01:00 -07:00
|
|
|
mocked_execute.assert_called_once_with(
|
|
|
|
'shred', '--force', '--zero', '--verbose', '--iterations', '1',
|
|
|
|
'/dev/sda')
|
2015-04-24 15:08:41 +05:30
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
rework ATA secure erase
hdparm versions prior to 9.51 interpret the value, NULL, as a
password with string value: "NULL".
Example output of hdparm with NULL password:
[root@localhost ~]# hdparm --user-master u --security-unlock NULL /dev/sda
security_password="NULL"
/dev/sda:
Issuing SECURITY_UNLOCK command, password="NULL", user=user
SECURITY_UNLOCK: Input/output error
Example output of hdparm with "" as password:
[root@localhost ~]# hdparm --user-master u --security-unlock "" /dev/sda
security_password=""
/dev/sda:
Issuing SECURITY_UNLOCK command, password="", user=user
Note the values of security_password in the output above. The output
was observed on a CentOS 7 system, which ships hdparm 9.43 in the
offical repositories.
This change attempts to unlock the drive with the empty string if an
unlock with NULL was unsucessful.
Issuing a security-unlock will cause a state transition from SEC4
(security enabled, locked, not frozen) to SEC5 (security enabled,
unlocked, not frozen). In order to check that a password unlock attempt
was successful it makes sense to check that the drive is in the unlocked
state (a necessary condition for SEC5). Only after all unlock attempts
fail, do we consider the drive out of our control.
The conditions to check the drive is in the right state have been
adjusted to ensure that the drive is in the SEC5 state prior to issuing
a secure erase. Previously, on the "recovery from previous fail" path,
the security state was asserted to be "not enabled" after an unlock -
this could never have been the case.
A good overview of the ATA security states can be found here:
http://www.admin-magazine.com/Archive/2014/19/Using-the-ATA-security-features-of-modern-hard-disks-and-SSDs
Change-Id: Ic24b706a04ff6c08d750b9e3d79eb79eab2952ad
Story: 2001762
Task: 12161
Story: 2001763
Task: 12162
2018-05-10 21:53:44 +01:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_erase_block_device_ata_security_unlock_fallback_pass(
|
2019-06-11 09:33:59 +02:00
|
|
|
self, mocked_execute, mocked_raid_member):
|
rework ATA secure erase
hdparm versions prior to 9.51 interpret the value, NULL, as a
password with string value: "NULL".
Example output of hdparm with NULL password:
[root@localhost ~]# hdparm --user-master u --security-unlock NULL /dev/sda
security_password="NULL"
/dev/sda:
Issuing SECURITY_UNLOCK command, password="NULL", user=user
SECURITY_UNLOCK: Input/output error
Example output of hdparm with "" as password:
[root@localhost ~]# hdparm --user-master u --security-unlock "" /dev/sda
security_password=""
/dev/sda:
Issuing SECURITY_UNLOCK command, password="", user=user
Note the values of security_password in the output above. The output
was observed on a CentOS 7 system, which ships hdparm 9.43 in the
offical repositories.
This change attempts to unlock the drive with the empty string if an
unlock with NULL was unsucessful.
Issuing a security-unlock will cause a state transition from SEC4
(security enabled, locked, not frozen) to SEC5 (security enabled,
unlocked, not frozen). In order to check that a password unlock attempt
was successful it makes sense to check that the drive is in the unlocked
state (a necessary condition for SEC5). Only after all unlock attempts
fail, do we consider the drive out of our control.
The conditions to check the drive is in the right state have been
adjusted to ensure that the drive is in the SEC5 state prior to issuing
a secure erase. Previously, on the "recovery from previous fail" path,
the security state was asserted to be "not enabled" after an unlock -
this could never have been the case.
A good overview of the ATA security states can be found here:
http://www.admin-magazine.com/Archive/2014/19/Using-the-ATA-security-features-of-modern-hard-disks-and-SSDs
Change-Id: Ic24b706a04ff6c08d750b9e3d79eb79eab2952ad
Story: 2001762
Task: 12161
Story: 2001763
Task: 12162
2018-05-10 21:53:44 +01:00
|
|
|
hdparm_output = create_hdparm_info(
|
|
|
|
supported=True, enabled=True, locked=True
|
|
|
|
)
|
|
|
|
hdparm_output_unlocked = create_hdparm_info(
|
|
|
|
supported=True, enabled=True, frozen=False, enhanced_erase=False)
|
|
|
|
hdparm_output_not_enabled = create_hdparm_info(
|
|
|
|
supported=True, enabled=False, frozen=False, enhanced_erase=False)
|
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(hdparm_output, ''),
|
2021-02-11 15:36:09 +01:00
|
|
|
(hws.SMARTCTL_NORMAL_OUTPUT, ''),
|
rework ATA secure erase
hdparm versions prior to 9.51 interpret the value, NULL, as a
password with string value: "NULL".
Example output of hdparm with NULL password:
[root@localhost ~]# hdparm --user-master u --security-unlock NULL /dev/sda
security_password="NULL"
/dev/sda:
Issuing SECURITY_UNLOCK command, password="NULL", user=user
SECURITY_UNLOCK: Input/output error
Example output of hdparm with "" as password:
[root@localhost ~]# hdparm --user-master u --security-unlock "" /dev/sda
security_password=""
/dev/sda:
Issuing SECURITY_UNLOCK command, password="", user=user
Note the values of security_password in the output above. The output
was observed on a CentOS 7 system, which ships hdparm 9.43 in the
offical repositories.
This change attempts to unlock the drive with the empty string if an
unlock with NULL was unsucessful.
Issuing a security-unlock will cause a state transition from SEC4
(security enabled, locked, not frozen) to SEC5 (security enabled,
unlocked, not frozen). In order to check that a password unlock attempt
was successful it makes sense to check that the drive is in the unlocked
state (a necessary condition for SEC5). Only after all unlock attempts
fail, do we consider the drive out of our control.
The conditions to check the drive is in the right state have been
adjusted to ensure that the drive is in the SEC5 state prior to issuing
a secure erase. Previously, on the "recovery from previous fail" path,
the security state was asserted to be "not enabled" after an unlock -
this could never have been the case.
A good overview of the ATA security states can be found here:
http://www.admin-magazine.com/Archive/2014/19/Using-the-ATA-security-features-of-modern-hard-disks-and-SSDs
Change-Id: Ic24b706a04ff6c08d750b9e3d79eb79eab2952ad
Story: 2001762
Task: 12161
Story: 2001763
Task: 12162
2018-05-10 21:53:44 +01:00
|
|
|
processutils.ProcessExecutionError(), # NULL fails to unlock
|
|
|
|
(hdparm_output, ''), # recheck security lines
|
|
|
|
None, # security unlock with ""
|
|
|
|
(hdparm_output_unlocked, ''),
|
|
|
|
'',
|
|
|
|
(hdparm_output_not_enabled, '')
|
|
|
|
]
|
2019-06-11 09:33:59 +02:00
|
|
|
mocked_raid_member.return_value = False
|
rework ATA secure erase
hdparm versions prior to 9.51 interpret the value, NULL, as a
password with string value: "NULL".
Example output of hdparm with NULL password:
[root@localhost ~]# hdparm --user-master u --security-unlock NULL /dev/sda
security_password="NULL"
/dev/sda:
Issuing SECURITY_UNLOCK command, password="NULL", user=user
SECURITY_UNLOCK: Input/output error
Example output of hdparm with "" as password:
[root@localhost ~]# hdparm --user-master u --security-unlock "" /dev/sda
security_password=""
/dev/sda:
Issuing SECURITY_UNLOCK command, password="", user=user
Note the values of security_password in the output above. The output
was observed on a CentOS 7 system, which ships hdparm 9.43 in the
offical repositories.
This change attempts to unlock the drive with the empty string if an
unlock with NULL was unsucessful.
Issuing a security-unlock will cause a state transition from SEC4
(security enabled, locked, not frozen) to SEC5 (security enabled,
unlocked, not frozen). In order to check that a password unlock attempt
was successful it makes sense to check that the drive is in the unlocked
state (a necessary condition for SEC5). Only after all unlock attempts
fail, do we consider the drive out of our control.
The conditions to check the drive is in the right state have been
adjusted to ensure that the drive is in the SEC5 state prior to issuing
a secure erase. Previously, on the "recovery from previous fail" path,
the security state was asserted to be "not enabled" after an unlock -
this could never have been the case.
A good overview of the ATA security states can be found here:
http://www.admin-magazine.com/Archive/2014/19/Using-the-ATA-security-features-of-modern-hard-disks-and-SSDs
Change-Id: Ic24b706a04ff6c08d750b9e3d79eb79eab2952ad
Story: 2001762
Task: 12161
Story: 2001763
Task: 12162
2018-05-10 21:53:44 +01:00
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
|
|
|
|
True)
|
|
|
|
|
|
|
|
self.hardware.erase_block_device(self.node, block_device)
|
|
|
|
|
|
|
|
mocked_execute.assert_any_call('hdparm', '--user-master', 'u',
|
|
|
|
'--security-unlock', '', '/dev/sda')
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
                   '_is_linux_raid_member', autospec=True)
@mock.patch.object(hardware.GenericHardwareManager, '_shred_block_device',
                   autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_erase_block_device_ata_security_enabled(
        self, mocked_execute, mock_shred, mocked_raid_member):
    # Tests that an exception is thrown if all of the recovery passwords
    # fail to unlock the device without throwing exception
    hdparm_output = create_hdparm_info(
        supported=True, enabled=True, locked=True)

    # Every hdparm status read keeps reporting enabled+locked, i.e. no
    # unlock attempt ever takes effect.  The None entries are the results
    # of the intervening commands (presumably the unlock invocations,
    # whose output is not inspected) — NOTE(review): confirm ordering
    # against _ata_erase if the call sequence changes.
    mocked_execute.side_effect = [
        (hdparm_output, ''),
        (hws.SMARTCTL_NORMAL_OUTPUT, ''),
        None,
        (hdparm_output, ''),
        None,
        (hdparm_output, ''),
        None,
        (hdparm_output, '')
    ]
    mocked_raid_member.return_value = False

    block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
                                        True)
    self.assertRaises(
        errors.IncompatibleHardwareMethodError,
        self.hardware.erase_block_device,
        self.node,
        block_device)
    # Both recovery passwords — the empty string and the literal string
    # "NULL" — must have been tried before the erase was abandoned.
    mocked_execute.assert_any_call('hdparm', '--user-master', 'u',
                                   '--security-unlock', '', '/dev/sda')
    mocked_execute.assert_any_call('hdparm', '--user-master', 'u',
                                   '--security-unlock', 'NULL', '/dev/sda')
    # No fallback to shred when the ATA path fails this way.
    self.assertFalse(mock_shred.called)
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
                   '_is_linux_raid_member', autospec=True)
@mock.patch.object(hardware.GenericHardwareManager, '_shred_block_device',
                   autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_erase_block_device_ata_security_enabled_unlock_attempt(
        self, mocked_execute, mock_shred, mocked_raid_member):
    # Drive initially reports security enabled and locked ...
    hdparm_output = create_hdparm_info(
        supported=True, enabled=True, locked=True)
    # ... but after the (mocked) unlock/erase commands succeed, status
    # reads report security no longer enabled.
    hdparm_output_not_enabled = create_hdparm_info(
        supported=True, enabled=False, frozen=False, enhanced_erase=False)

    # Sequence: initial status, smartctl check, then alternating command
    # results ('') and status reads showing the drive was recovered.
    mocked_execute.side_effect = [
        (hdparm_output, ''),
        (hws.SMARTCTL_NORMAL_OUTPUT, ''),
        '',
        (hdparm_output_not_enabled, ''),
        '',
        '',
        (hdparm_output_not_enabled, '')
    ]
    mocked_raid_member.return_value = False

    block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
                                        True)

    # The erase completes without raising, and the shred fallback is
    # never needed.
    self.hardware.erase_block_device(self.node, block_device)
    self.assertFalse(mock_shred.called)
|
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
def test__ata_erase_security_enabled_unlock_exception(
        self, mocked_execute):
    # test that an exception is thrown when security unlock fails with
    # ProcessExecutionError
    hdparm_output = create_hdparm_info(
        supported=True, enabled=True, locked=True)
    # Both unlock attempts blow up with ProcessExecutionError, and every
    # status read still shows the drive enabled+locked.
    mocked_execute.side_effect = [
        (hdparm_output, ''),
        (hws.SMARTCTL_NORMAL_OUTPUT, ''),
        processutils.ProcessExecutionError(),
        (hdparm_output, ''),
        processutils.ProcessExecutionError(),
        (hdparm_output, ''),
    ]

    block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
                                        True)
    self.assertRaises(errors.BlockDeviceEraseError,
                      self.hardware._ata_erase,
                      block_device)
    # Both recovery passwords (empty string and literal "NULL") must
    # have been attempted before the erase gave up.
    mocked_execute.assert_any_call('hdparm', '--user-master', 'u',
                                   '--security-unlock', '', '/dev/sda')
    mocked_execute.assert_any_call('hdparm', '--user-master', 'u',
                                   '--security-unlock', 'NULL', '/dev/sda')
|
2016-01-21 11:26:44 -05:00
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
def test__ata_erase_security_enabled_set_password_exception(
        self, mocked_execute):
    """A failing security-set-pass command must abort the ATA erase."""
    # Healthy, unlocked drive with security support but not yet enabled.
    security_info = create_hdparm_info(
        supported=True, enabled=False, frozen=False, enhanced_erase=False)

    # Status read and smartctl succeed; the very next command (setting
    # the security password) raises.
    mocked_execute.side_effect = [
        (security_info, ''),
        (hws.SMARTCTL_NORMAL_OUTPUT, ''),
        processutils.ProcessExecutionError()
    ]

    device = hardware.BlockDevice('/dev/sda', 'big', 1073741824, True)

    self.assertRaises(errors.BlockDeviceEraseError,
                      self.hardware._ata_erase, device)
|
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
def test__ata_erase_security_erase_exec_exception(
        self, mocked_execute):
    # Exception on security erase
    hdparm_output = create_hdparm_info(
        supported=True, enabled=False, frozen=False, enhanced_erase=False)
    # Status observed on the recovery path after the failed erase: the
    # drive is left locked.
    hdparm_unlocked_output = create_hdparm_info(
        supported=True, locked=True, frozen=False, enhanced_erase=False)
    # Note the mixed tuple arities: some entries carry a third '-1'
    # element alongside stdout/stderr.
    mocked_execute.side_effect = [
        (hdparm_output, '', '-1'),
        (hws.SMARTCTL_NORMAL_OUTPUT, ''),
        '',  # security-set-pass
        processutils.ProcessExecutionError(),  # security-erase
        (hdparm_unlocked_output, '', '-1'),
        '',  # attempt security unlock
        (hdparm_output, '', '-1')
    ]

    block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
                                        True)

    # Even though the recovery unlock succeeds, the failed erase is
    # still surfaced to the caller.
    self.assertRaises(errors.BlockDeviceEraseError,
                      self.hardware._ata_erase,
                      block_device)
|
2014-06-04 10:44:25 -07:00
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
                   '_is_linux_raid_member', autospec=True)
@mock.patch.object(hardware.GenericHardwareManager, '_shred_block_device',
                   autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_erase_block_device_ata_frozen(self, mocked_execute, mock_shred,
                                       mocked_raid_member):
    """A security-frozen drive must make the ATA erase method bail out."""
    frozen_info = create_hdparm_info(
        supported=True, enabled=False, frozen=True, enhanced_erase=False)

    mocked_raid_member.return_value = False
    # Only the initial status read and the smartctl check run; the
    # frozen state stops everything after that.
    mocked_execute.side_effect = [
        (frozen_info, ''),
        (hws.SMARTCTL_NORMAL_OUTPUT, '')
    ]

    dev = hardware.BlockDevice('/dev/sda', 'big', 1073741824, True)
    self.assertRaises(errors.IncompatibleHardwareMethodError,
                      self.hardware.erase_block_device,
                      self.node, dev)
    # The shred fallback is a separate clean step — not triggered here.
    self.assertFalse(mock_shred.called)
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
                   '_is_linux_raid_member', autospec=True)
@mock.patch.object(hardware.GenericHardwareManager, '_shred_block_device',
                   autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_erase_block_device_ata_failed(self, mocked_execute, mock_shred,
                                       mocked_raid_member):
    hdparm_output_before = create_hdparm_info(
        supported=True, enabled=False, frozen=False, enhanced_erase=False)

    # If security mode remains enabled after the erase, it is indicative
    # of a failed erase.
    hdparm_output_after = create_hdparm_info(
        supported=True, enabled=True, frozen=False, enhanced_erase=False)

    # Sequence: initial status, smartctl, set-pass, erase, then the
    # post-erase status read that still shows security enabled.
    mocked_execute.side_effect = [
        (hdparm_output_before, ''),
        (hws.SMARTCTL_NORMAL_OUTPUT, ''),
        ('', ''),
        ('', ''),
        (hdparm_output_after, ''),
    ]
    mocked_raid_member.return_value = False

    block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
                                        True)

    self.assertRaises(
        errors.IncompatibleHardwareMethodError,
        self.hardware.erase_block_device,
        self.node,
        block_device)
    # Without the continue-on-failure node flag, no shred fallback runs.
    self.assertFalse(mock_shred.called)
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
    @mock.patch.object(hardware.GenericHardwareManager,
                       '_is_linux_raid_member', autospec=True)
    @mock.patch.object(hardware.GenericHardwareManager, '_shred_block_device',
                       autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_erase_block_device_ata_failed_continued(
            self, mocked_execute, mock_shred, mocked_raid_member):
        """A failed ATA erase falls back to shred when the node allows it."""

        # Node flag that permits continuing with shred after an ATA failure.
        info = self.node['driver_internal_info']
        info['agent_continue_if_ata_erase_failed'] = True

        hdparm_output_before = create_hdparm_info(
            supported=True, enabled=False, frozen=False, enhanced_erase=False)

        # If security mode remains enabled after the erase, it is indicative
        # of a failed erase.
        hdparm_output_after = create_hdparm_info(
            supported=True, enabled=True, frozen=False, enhanced_erase=False)

        mocked_execute.side_effect = [
            (hdparm_output_before, ''),
            (hws.SMARTCTL_NORMAL_OUTPUT, ''),
            ('', ''),
            ('', ''),
            (hdparm_output_after, ''),
        ]
        mocked_raid_member.return_value = False

        block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
                                            True)

        # No exception expected: erase falls through to shred instead.
        self.hardware.erase_block_device(self.node, block_device)
        self.assertTrue(mock_shred.called)
|
2015-02-05 17:03:33 -06:00
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
    @mock.patch.object(hardware.GenericHardwareManager,
                       '_is_linux_raid_member', autospec=True)
    @mock.patch.object(hardware.GenericHardwareManager, '_shred_block_device',
                       autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_erase_block_device_ata_erase_disabled(
            self, mocked_execute, mock_shred, mocked_raid_member):
        """Disabling ATA secure erase goes straight to shred.

        With agent_enable_ata_secure_erase=False no external commands
        (hdparm/smartctl) should run at all.
        """

        info = self.node['driver_internal_info']
        info['agent_enable_ata_secure_erase'] = False
        mocked_raid_member.return_value = False

        block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
                                            True)

        self.hardware.erase_block_device(self.node, block_device)
        self.assertTrue(mock_shred.called)
        # No hdparm/smartctl invocations when ATA erase is disabled.
        self.assertFalse(mocked_execute.called)
|
|
|
|
|
2015-02-05 17:03:33 -06:00
|
|
|
    def test_normal_vs_enhanced_security_erase(self):
        """hdparm gets --security-erase-enhanced only when supported.

        Runs the same erase scenario twice via a nested helper: once with
        enhanced erase reported supported, once without, and checks the
        hdparm option actually passed.
        """
        @mock.patch.object(hardware.GenericHardwareManager,
                           '_is_linux_raid_member', autospec=True)
        @mock.patch.object(utils, 'execute', autospec=True)
        def test_security_erase_option(test_case,
                                       enhanced_erase,
                                       expected_option,
                                       mocked_execute,
                                       mocked_raid_member):
            # Security shows as disabled both before and after, i.e. the
            # erase itself succeeds; only the chosen option differs.
            mocked_execute.side_effect = [
                (create_hdparm_info(
                    supported=True, enabled=False, frozen=False,
                    enhanced_erase=enhanced_erase), ''),
                (hws.SMARTCTL_NORMAL_OUTPUT, ''),
                ('', ''),
                ('', ''),
                (create_hdparm_info(
                    supported=True, enabled=False, frozen=False,
                    enhanced_erase=enhanced_erase), ''),
            ]
            mocked_raid_member.return_value = False

            block_device = hardware.BlockDevice('/dev/sda', 'big', 1073741824,
                                                True)
            test_case.hardware.erase_block_device(self.node, block_device)
            mocked_execute.assert_any_call('hdparm', '--user-master', 'u',
                                           expected_option,
                                           'NULL', '/dev/sda')

        test_security_erase_option(
            self, True, '--security-erase-enhanced')
        test_security_erase_option(
            self, False, '--security-erase')
|
2015-08-06 13:03:27 +02:00
|
|
|
|
2020-11-11 11:22:31 +01:00
|
|
|
    def test__find_pstore_mount_point(self):
        """/proc/mounts containing a pstore entry yields its mount point."""
        with mock.patch('builtins.open',
                        mock.mock_open(),
                        create=True) as mocked_open:
            # mock_open does not support iteration natively; install an
            # __iter__ that yields the canned /proc/mounts lines.
            mocked_open.return_value.__iter__ = \
                lambda self: iter(hws.PROC_MOUNTS_OUTPUT.splitlines())

            self.assertEqual(self.hardware._find_pstore_mount_point(),
                             "/sys/fs/pstore")
            mocked_open.assert_called_once_with('/proc/mounts', 'r')
|
|
|
|
|
|
|
|
    def test__find_pstore_mount_point_no_pstore(self):
        """No pstore entry in /proc/mounts means no mount point (None)."""
        with mock.patch('builtins.open',
                        mock.mock_open(),
                        create=True) as mocked_open:
            # Make iteration over the fake file yield mounts without pstore.
            mocked_open.return_value.__iter__.return_value = \
                hws.PROC_MOUNTS_OUTPUT_NO_PSTORE.splitlines()
            self.assertIsNone(self.hardware._find_pstore_mount_point())
            mocked_open.assert_called_once_with('/proc/mounts', 'r')
|
|
|
|
|
|
|
|
@mock.patch('os.listdir', autospec=True)
|
|
|
|
@mock.patch.object(shutil, 'rmtree', autospec=True)
|
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_find_pstore_mount_point', autospec=True)
|
|
|
|
def test_erase_pstore(self, mocked_find_pstore, mocked_rmtree,
|
|
|
|
mocked_listdir):
|
|
|
|
mocked_find_pstore.return_value = '/sys/fs/pstore'
|
|
|
|
pstore_entries = ['dmesg-erst-663482778',
|
|
|
|
'dmesg-erst-663482779']
|
|
|
|
mocked_listdir.return_value = pstore_entries
|
|
|
|
self.hardware.erase_pstore(self.node, [])
|
|
|
|
mocked_listdir.assert_called_once()
|
|
|
|
self.assertEqual(mocked_rmtree.call_count,
|
|
|
|
len(pstore_entries))
|
|
|
|
mocked_rmtree.assert_has_calls([
|
|
|
|
mock.call('/sys/fs/pstore/' + arg) for arg in pstore_entries
|
|
|
|
])
|
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(hardware.GenericHardwareManager,
                       '_is_virtual_media_device', autospec=True)
    @mock.patch.object(hardware.GenericHardwareManager,
                       'list_block_devices', autospec=True)
    @mock.patch.object(disk_utils, 'destroy_disk_metadata', autospec=True)
    def test_erase_devices_metadata(
            self, mock_metadata, mock_list_devs, mock__is_vmedia,
            mock_execute):
        """Metadata erase skips virtual media and RAID members.

        Only /dev/sda1, /dev/sda and /dev/md0 should have their metadata
        destroyed; /dev/sr0 (vmedia) and the raid-member partitions are
        filtered out.
        """
        block_devices = [
            hardware.BlockDevice('/dev/sr0', 'vmedia', 12345, True),
            hardware.BlockDevice('/dev/sdb2', 'raid-member', 32767, False),
            hardware.BlockDevice('/dev/sda', 'small', 65535, False),
            hardware.BlockDevice('/dev/sda1', '', 32767, False),
            hardware.BlockDevice('/dev/sda2', 'raid-member', 32767, False),
            hardware.BlockDevice('/dev/md0', 'raid-device', 32767, False)
        ]
        # NOTE(coreywright): Don't return the list, but a copy of it, because
        # we depend on its elements' order when referencing it later during
        # verification, but the method under test sorts the list changing it.
        mock_list_devs.return_value = list(block_devices)
        mock__is_vmedia.side_effect = lambda _, dev: dev.name == '/dev/sr0'
        # lsblk-style outputs used to classify each device; the two
        # linux_raid_member lines mark sdb2/sda2 as RAID members.
        mock_execute.side_effect = [
            ('sdb2 linux_raid_member host:1 f9978968', ''),
            ('sda2 linux_raid_member host:1 f9978969', ''),
            ('sda1', ''), ('sda', ''), ('md0', '')]

        self.hardware.erase_devices_metadata(self.node, [])

        self.assertEqual([mock.call('/dev/sda1', self.node['uuid']),
                          mock.call('/dev/sda', self.node['uuid']),
                          mock.call('/dev/md0', self.node['uuid'])],
                         mock_metadata.call_args_list)
        mock_list_devs.assert_called_once_with(self.hardware,
                                               include_partitions=True)
        # vmedia check runs for every device, in the (sorted) scan order.
        self.assertEqual([mock.call(self.hardware, block_devices[0]),
                          mock.call(self.hardware, block_devices[1]),
                          mock.call(self.hardware, block_devices[4]),
                          mock.call(self.hardware, block_devices[3]),
                          mock.call(self.hardware, block_devices[2]),
                          mock.call(self.hardware, block_devices[5])],
                         mock__is_vmedia.call_args_list)
|
2016-07-18 17:18:14 +01:00
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
    @mock.patch.object(hardware.GenericHardwareManager,
                       '_is_linux_raid_member', autospec=True)
    @mock.patch.object(hardware.GenericHardwareManager,
                       '_is_virtual_media_device', autospec=True)
    @mock.patch.object(hardware.GenericHardwareManager,
                       'list_block_devices', autospec=True)
    @mock.patch.object(disk_utils, 'destroy_disk_metadata', autospec=True)
    def test_erase_devices_metadata_error(
            self, mock_metadata, mock_list_devs, mock__is_vmedia,
            mock__is_raid_member):
        """A failure on one device does not stop erasing the others.

        The aggregated BlockDeviceEraseError must mention the failing
        device and its error output.
        """
        block_devices = [
            hardware.BlockDevice('/dev/sda', 'small', 65535, False),
            hardware.BlockDevice('/dev/sdb', 'big', 10737418240, True),
        ]
        mock__is_vmedia.return_value = False
        mock__is_raid_member.return_value = False
        # NOTE(coreywright): Don't return the list, but a copy of it, because
        # we depend on its elements' order when referencing it later during
        # verification, but the method under test sorts the list changing it.
        mock_list_devs.return_value = list(block_devices)
        # Simulate first call to destroy_disk_metadata() failing, which is for
        # /dev/sdb due to erase_devices_metadata() reverse sorting block
        # devices by name, and second call succeeding, which is for /dev/sda
        error_output = 'Booo00000ooommmmm'
        error_regex = '(?s)/dev/sdb.*' + error_output
        mock_metadata.side_effect = (
            processutils.ProcessExecutionError(error_output),
            None,
        )

        self.assertRaisesRegex(errors.BlockDeviceEraseError, error_regex,
                               self.hardware.erase_devices_metadata,
                               self.node, [])
        # Assert all devices are erased independent if one of them
        # failed previously
        self.assertEqual([mock.call('/dev/sdb', self.node['uuid']),
                          mock.call('/dev/sda', self.node['uuid'])],
                         mock_metadata.call_args_list)
        mock_list_devs.assert_called_once_with(self.hardware,
                                               include_partitions=True)
        self.assertEqual([mock.call(self.hardware, block_devices[1]),
                          mock.call(self.hardware, block_devices[0])],
                         mock__is_vmedia.call_args_list)
|
2016-07-18 17:18:14 +01:00
|
|
|
|
2019-06-11 09:33:59 +02:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test__is_linux_raid_member(self, mocked_execute):
|
|
|
|
raid_member = hardware.BlockDevice('/dev/sda1', 'small', 65535, False)
|
|
|
|
mocked_execute.return_value = ('linux_raid_member host.domain:0 '
|
|
|
|
'85fa41e4-e0ae'), ''
|
|
|
|
self.assertTrue(self.hardware._is_linux_raid_member(raid_member))
|
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test__is_linux_raid_member_false(self, mocked_execute):
|
|
|
|
raid_member = hardware.BlockDevice('/dev/md0', 'small', 65535, False)
|
|
|
|
mocked_execute.return_value = 'md0', ''
|
|
|
|
self.assertFalse(self.hardware._is_linux_raid_member(raid_member))
|
|
|
|
|
2020-01-28 22:41:47 -06:00
|
|
|
def test__is_read_only_device(self):
|
|
|
|
fileobj = mock.mock_open(read_data='1\n')
|
|
|
|
device = hardware.BlockDevice('/dev/sdfake', 'fake', 1024, False)
|
|
|
|
with mock.patch(
|
|
|
|
'builtins.open', fileobj, create=True) as mock_open:
|
|
|
|
self.assertTrue(self.hardware._is_read_only_device(device))
|
|
|
|
mock_open.assert_called_once_with(
|
|
|
|
'/sys/block/sdfake/ro', 'r')
|
|
|
|
|
|
|
|
def test__is_read_only_device_false(self):
|
|
|
|
fileobj = mock.mock_open(read_data='0\n')
|
|
|
|
device = hardware.BlockDevice('/dev/sdfake', 'fake', 1024, False)
|
|
|
|
with mock.patch(
|
|
|
|
'builtins.open', fileobj, create=True) as mock_open:
|
|
|
|
self.assertFalse(self.hardware._is_read_only_device(device))
|
|
|
|
mock_open.assert_called_once_with(
|
|
|
|
'/sys/block/sdfake/ro', 'r')
|
|
|
|
|
|
|
|
def test__is_read_only_device_error(self):
|
|
|
|
device = hardware.BlockDevice('/dev/sdfake', 'fake', 1024, False)
|
|
|
|
with mock.patch(
|
|
|
|
'builtins.open', side_effect=IOError,
|
|
|
|
autospec=True) as mock_open:
|
|
|
|
self.assertFalse(self.hardware._is_read_only_device(device))
|
|
|
|
mock_open.assert_called_once_with(
|
|
|
|
'/sys/block/sdfake/ro', 'r')
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'try_execute', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_bmc_address(self, mocked_execute, mte):
|
2015-08-06 13:03:27 +02:00
|
|
|
mocked_execute.return_value = '192.1.2.3\n', ''
|
|
|
|
self.assertEqual('192.1.2.3', self.hardware.get_bmc_address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'try_execute', autospec=True)
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_bmc_address_virt(self, mocked_execute, mte):
|
2015-08-06 13:03:27 +02:00
|
|
|
mocked_execute.side_effect = processutils.ProcessExecutionError()
|
|
|
|
self.assertIsNone(self.hardware.get_bmc_address())
|
2015-08-28 11:14:52 -07:00
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'try_execute', autospec=True)
|
2017-07-05 18:20:43 +02:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_bmc_address_zeroed(self, mocked_execute, mte):
|
2017-07-05 18:20:43 +02:00
|
|
|
mocked_execute.return_value = '0.0.0.0\n', ''
|
|
|
|
self.assertEqual('0.0.0.0', self.hardware.get_bmc_address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'try_execute', autospec=True)
|
2017-07-05 18:20:43 +02:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_bmc_address_invalid(self, mocked_execute, mte):
|
2017-07-05 18:20:43 +02:00
|
|
|
# In case of invalid lan channel, stdout is empty and the error
|
|
|
|
# on stderr is "Invalid channel"
|
|
|
|
mocked_execute.return_value = '\n', 'Invalid channel: 55'
|
|
|
|
self.assertEqual('0.0.0.0', self.hardware.get_bmc_address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'try_execute', autospec=True)
|
2017-07-05 18:20:43 +02:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_bmc_address_random_error(self, mocked_execute, mte):
|
2017-07-05 18:20:43 +02:00
|
|
|
mocked_execute.return_value = '192.1.2.3\n', 'Random error message'
|
|
|
|
self.assertEqual('192.1.2.3', self.hardware.get_bmc_address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
    @mock.patch.object(il_utils, 'try_execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_get_bmc_address_iterate_channels(self, mocked_execute, mte):
        """Channels are probed in order until a usable address appears."""
        # For channel 1 we simulate unconfigured IP
        # and for any other we return a correct IP address
        def side_effect(*args, **kwargs):
            # Channel 1: invalid; channel 2: zeroed; channel 3: garbage;
            # anything after that: a valid address.
            if args[0].startswith("ipmitool lan print 1"):
                return '', 'Invalid channel 1\n'
            elif args[0].startswith("ipmitool lan print 2"):
                return '0.0.0.0\n', ''
            elif args[0].startswith("ipmitool lan print 3"):
                return 'meow', ''
            else:
                return '192.1.2.3\n', ''
        mocked_execute.side_effect = side_effect
        self.assertEqual('192.1.2.3', self.hardware.get_bmc_address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'try_execute', autospec=True)
|
2017-07-05 18:20:43 +02:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_bmc_address_not_available(self, mocked_execute, mte):
|
2017-07-05 18:20:43 +02:00
|
|
|
mocked_execute.return_value = '', ''
|
|
|
|
self.assertEqual('0.0.0.0', self.hardware.get_bmc_address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'try_execute', autospec=True)
|
2019-03-04 15:31:14 +03:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_bmc_v6address_not_enabled(self, mocked_execute, mte):
|
2019-05-20 11:08:31 +00:00
|
|
|
mocked_execute.side_effect = [('ipv4\n', '')] * 11
|
2019-03-04 15:31:14 +03:00
|
|
|
self.assertEqual('::/0', self.hardware.get_bmc_v6address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
    @mock.patch.object(il_utils, 'try_execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_get_bmc_v6address_dynamic_address(self, mocked_execute, mte):
        """An active dynamic IPv6 address is picked up from lan6 print."""
        mocked_execute.side_effect = [
            ('ipv6\n', ''),
            (hws.IPMITOOL_LAN6_PRINT_DYNAMIC_ADDR, '')
        ]
        self.assertEqual('2001:1234:1234:1234:1234:1234:1234:1234',
                         self.hardware.get_bmc_v6address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
    @mock.patch.object(il_utils, 'try_execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_get_bmc_v6address_static_address_both(self, mocked_execute, mte):
        """With dynamic addressing disabled, the static address wins."""
        # Same canned output as the dynamic case, but marked disabled.
        dynamic_disabled = \
            hws.IPMITOOL_LAN6_PRINT_DYNAMIC_ADDR.replace('active', 'disabled')
        mocked_execute.side_effect = [
            ('both\n', ''),
            (dynamic_disabled, ''),
            (hws.IPMITOOL_LAN6_PRINT_STATIC_ADDR, '')
        ]
        self.assertEqual('2001:5678:5678:5678:5678:5678:5678:5678',
                         self.hardware.get_bmc_v6address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'try_execute', autospec=True)
|
2019-03-04 15:31:14 +03:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2021-02-25 14:20:59 +01:00
|
|
|
def test_get_bmc_v6address_virt(self, mocked_execute, mte):
|
2019-03-04 15:31:14 +03:00
|
|
|
mocked_execute.side_effect = processutils.ProcessExecutionError()
|
|
|
|
self.assertIsNone(self.hardware.get_bmc_v6address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
    @mock.patch.object(il_utils, 'try_execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_get_bmc_v6address_invalid_enables(self, mocked_execute, mte):
        """Failure reading addressing enables falls back to ::/0."""
        def side_effect(*args, **kwargs):
            if args[0].startswith('ipmitool lan6 print'):
                return '', 'Failed to get IPv6/IPv4 Addressing Enables'

        mocked_execute.side_effect = side_effect
        self.assertEqual('::/0', self.hardware.get_bmc_v6address())
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
    @mock.patch.object(il_utils, 'try_execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_get_bmc_v6address_invalid_get_address(self, mocked_execute, mte):
        """IPv6 enabled but address queries failing falls back to ::/0."""
        def side_effect(*args, **kwargs):
            # enables query succeeds (ipv6), both address queries blow up
            if args[0].startswith('ipmitool lan6 print'):
                if args[0].endswith('dynamic_addr') \
                        or args[0].endswith('static_addr'):
                    raise processutils.ProcessExecutionError()
            return 'ipv6', ''

        mocked_execute.side_effect = side_effect
        self.assertEqual('::/0', self.hardware.get_bmc_v6address())
|
|
|
|
|
|
|
|
    @mock.patch.object(hardware, 'LOG', autospec=True)
    @mock.patch.object(il_utils, 'try_execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_get_bmc_v6address_ipmitool_invalid_stdout_format(
            self, mocked_execute, mte, mocked_log):
        """Unparseable lan6 print output is logged and yields ::/0."""
        def side_effect(*args, **kwargs):
            if args[0].startswith('ipmitool lan6 print'):
                if args[0].endswith('dynamic_addr') \
                        or args[0].endswith('static_addr'):
                    # Output that cannot be parsed as YAML-ish key/values.
                    return 'Invalid\n\tyaml', ''
            return 'ipv6', ''

        mocked_execute.side_effect = side_effect
        self.assertEqual('::/0', self.hardware.get_bmc_v6address())
        # One warning per failed parse, across all probed channels.
        one_call = mock.call('Cannot process output of "%(cmd)s" '
                             'command: %(e)s', mock.ANY)
        mocked_log.warning.assert_has_calls([one_call] * 14)
|
|
|
|
|
2021-02-25 14:20:59 +01:00
|
|
|
@mock.patch.object(il_utils, 'try_execute', autospec=True)
|
2019-03-04 15:31:14 +03:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_bmc_v6address_channel_7(self, mocked_execute, mte):
|
|
|
|
def side_effect(*args, **kwargs):
|
|
|
|
if not args[0].startswith('ipmitool lan6 print 7'):
|
|
|
|
# ipv6 is not enabled for channels 1-6
|
|
|
|
if 'enables |' in args[0]:
|
|
|
|
return '', ''
|
|
|
|
else:
|
|
|
|
if 'enables |' in args[0]:
|
|
|
|
return 'ipv6', ''
|
|
|
|
if args[0].endswith('dynamic_addr'):
|
|
|
|
raise processutils.ProcessExecutionError()
|
|
|
|
elif args[0].endswith('static_addr'):
|
2021-02-11 15:36:09 +01:00
|
|
|
return hws.IPMITOOL_LAN6_PRINT_STATIC_ADDR, ''
|
2019-03-04 15:31:14 +03:00
|
|
|
|
|
|
|
mocked_execute.side_effect = side_effect
|
|
|
|
self.assertEqual('2001:5678:5678:5678:5678:5678:5678:5678',
|
|
|
|
self.hardware.get_bmc_v6address())
|
|
|
|
|
2019-02-04 13:17:23 +01:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_validate_configuration_no_configuration(self, mocked_execute):
|
|
|
|
self.assertRaises(errors.SoftwareRAIDError,
|
|
|
|
self.hardware.validate_configuration,
|
|
|
|
self.node, [])
|
|
|
|
|
2020-07-29 10:10:18 +02:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_do_create_configuration', autospec=True)
|
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'delete_configuration', autospec=True)
|
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'validate_configuration', autospec=True)
|
|
|
|
def test_apply_configuration(self, mocked_validate, mocked_delete,
|
|
|
|
mocked_create):
|
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
|
|
|
"size_gb": "10",
|
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "0",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
]
|
|
|
|
}
|
|
|
|
|
|
|
|
result = self.hardware.apply_configuration(self.node, [], raid_config)
|
|
|
|
self.assertIs(result, mocked_create.return_value)
|
|
|
|
mocked_validate.assert_called_once_with(self.hardware, raid_config,
|
|
|
|
self.node)
|
|
|
|
mocked_delete.assert_called_once_with(self.hardware, self.node, [])
|
|
|
|
mocked_create.assert_called_once_with(self.hardware, self.node, [],
|
|
|
|
raid_config)
|
|
|
|
|
|
|
|
    @mock.patch.object(hardware.GenericHardwareManager,
                       '_do_create_configuration', autospec=True)
    @mock.patch.object(hardware.GenericHardwareManager,
                       'delete_configuration', autospec=True)
    @mock.patch.object(hardware.GenericHardwareManager,
                       'validate_configuration', autospec=True)
    def test_apply_configuration_no_delete(self, mocked_validate,
                                           mocked_delete, mocked_create):
        """delete_existing=False skips deleting the old configuration."""
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "10",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "0",
                    "controller": "software",
                },
            ]
        }

        result = self.hardware.apply_configuration(self.node, [], raid_config,
                                                   delete_existing=False)
        self.assertIs(result, mocked_create.return_value)
        mocked_validate.assert_called_once_with(self.hardware, raid_config,
                                                self.node)
        # No deletion when the caller explicitly opts out.
        self.assertFalse(mocked_delete.called)
        mocked_create.assert_called_once_with(self.hardware, self.node, [],
                                              raid_config)
|
|
|
|
|
2021-02-10 13:48:29 +01:00
|
|
|
@mock.patch.object(raid_utils, '_get_actual_component_devices',
|
2020-10-02 21:36:46 +02:00
|
|
|
autospec=True)
|
2020-04-01 12:16:59 +02:00
|
|
|
@mock.patch.object(disk_utils, 'list_partitions', autospec=True)
|
2019-02-04 13:17:23 +01:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-08-06 13:52:13 +02:00
|
|
|
@mock.patch.object(os.path, 'isdir', autospec=True, return_value=False)
|
|
|
|
def test_create_configuration(self, mocked_os_path_isdir, mocked_execute,
|
2020-10-02 21:36:46 +02:00
|
|
|
mock_list_parts, mocked_actual_comp):
|
Revert "Software raid: mbr/gpt partition table alternative"
This reverts commit 258d963e406c512bb90295c700ee22e4609abcd0.
Remove the mbr/gpt choice from softraid features for now, as it cannot be
directly used without additional commits, to be pushed soon. Furthermore, it
could even lead to instance spawn misconfiguration, if the disk label
specified in instance_info cannot fit with the boot mode + partitioning layout
(example: you build softraid over gpt, then you lose the mbr gap. Thus you need
an additional bios boot partition, in BIOS boot mode, or an esp, in UEFI boot
mode, outside the raid, for grub to install itself inthere, to be able to
assemble raid and find root device correctly).
Change-Id: I3a0a704ea99a40eb3fc8e879270dfbd356951488
2019-09-20 15:14:40 +00:00
|
|
|
node = self.node
|
2019-08-06 13:52:13 +02:00
|
|
|
|
2019-02-04 13:17:23 +01:00
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
2019-08-06 16:39:14 +02:00
|
|
|
"size_gb": "10",
|
2019-02-04 13:17:23 +01:00
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "0",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
]
|
|
|
|
}
|
2019-08-06 13:52:13 +02:00
|
|
|
node['target_raid_config'] = raid_config
|
2019-08-06 16:39:14 +02:00
|
|
|
device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
|
|
|
|
device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
|
2019-02-04 13:17:23 +01:00
|
|
|
self.hardware.list_block_devices = mock.Mock()
|
|
|
|
self.hardware.list_block_devices.return_value = [device1, device2]
|
2020-04-01 12:16:59 +02:00
|
|
|
mock_list_parts.side_effect = [
|
|
|
|
[],
|
|
|
|
processutils.ProcessExecutionError
|
|
|
|
]
|
2019-02-04 13:17:23 +01:00
|
|
|
|
2019-08-06 16:39:14 +02:00
|
|
|
mocked_execute.side_effect = [
|
|
|
|
None, # mklabel sda
|
|
|
|
('42', None), # sgdisk -F sda
|
|
|
|
None, # mklabel sda
|
|
|
|
('42', None), # sgdisk -F sdb
|
|
|
|
None, None, # parted + partx sda
|
|
|
|
None, None, # parted + partx sdb
|
|
|
|
None, None, # parted + partx sda
|
|
|
|
None, None, # parted + partx sdb
|
|
|
|
None, None # mdadms
|
|
|
|
]
|
|
|
|
|
2020-10-02 21:36:46 +02:00
|
|
|
mocked_actual_comp.side_effect = [
|
|
|
|
('/dev/sda1', '/dev/sdb1'),
|
|
|
|
('/dev/sda2', '/dev/sdb2'),
|
|
|
|
]
|
|
|
|
|
2019-08-06 13:52:13 +02:00
|
|
|
result = self.hardware.create_configuration(node, [])
|
2019-08-06 13:52:13 +02:00
|
|
|
mocked_os_path_isdir.assert_has_calls([
|
|
|
|
mock.call('/sys/firmware/efi')
|
|
|
|
])
|
2019-02-04 13:17:23 +01:00
|
|
|
mocked_execute.assert_has_calls([
|
2019-08-06 13:52:13 +02:00
|
|
|
mock.call('parted', '/dev/sda', '-s', '--', 'mklabel', 'msdos'),
|
2019-08-06 16:39:14 +02:00
|
|
|
mock.call('sgdisk', '-F', '/dev/sda'),
|
2019-08-06 13:52:13 +02:00
|
|
|
mock.call('parted', '/dev/sdb', '-s', '--', 'mklabel', 'msdos'),
|
2019-08-06 16:39:14 +02:00
|
|
|
mock.call('sgdisk', '-F', '/dev/sdb'),
|
2019-02-04 13:17:23 +01:00
|
|
|
mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
|
2019-08-06 16:39:14 +02:00
|
|
|
'mkpart', 'primary', '42s', '10GiB'),
|
2019-08-06 11:18:29 +02:00
|
|
|
mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
|
2019-02-04 13:17:23 +01:00
|
|
|
mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
|
2019-08-06 16:39:14 +02:00
|
|
|
'mkpart', 'primary', '42s', '10GiB'),
|
2019-08-06 11:18:29 +02:00
|
|
|
mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
|
2019-02-04 13:17:23 +01:00
|
|
|
mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
|
2019-08-06 16:39:14 +02:00
|
|
|
'mkpart', 'primary', '10GiB', '-1'),
|
2019-08-06 11:18:29 +02:00
|
|
|
mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
|
2019-02-04 13:17:23 +01:00
|
|
|
mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
|
2019-08-06 16:39:14 +02:00
|
|
|
'mkpart', 'primary', '10GiB', '-1'),
|
2019-08-06 11:18:29 +02:00
|
|
|
mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
|
2019-06-05 16:42:37 +02:00
|
|
|
mock.call('mdadm', '--create', '/dev/md0', '--force', '--run',
|
|
|
|
'--metadata=1', '--level', '1', '--raid-devices', 2,
|
|
|
|
'/dev/sda1', '/dev/sdb1'),
|
|
|
|
mock.call('mdadm', '--create', '/dev/md1', '--force', '--run',
|
|
|
|
'--metadata=1', '--level', '0', '--raid-devices', 2,
|
|
|
|
'/dev/sda2', '/dev/sdb2')])
|
2019-02-04 13:17:23 +01:00
|
|
|
self.assertEqual(raid_config, result)
|
|
|
|
|
2020-04-01 12:16:59 +02:00
|
|
|
self.assertEqual(2, mock_list_parts.call_count)
|
|
|
|
mock_list_parts.assert_has_calls([
|
|
|
|
mock.call(x) for x in ['/dev/sda', '/dev/sdb']
|
|
|
|
])
|
|
|
|
|
2021-02-10 13:48:29 +01:00
|
|
|
    @mock.patch.object(raid_utils, '_get_actual_component_devices',
                       autospec=True)
    @mock.patch.object(utils, 'get_node_boot_mode', lambda node: 'bios')
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_create_configuration_raid_5(self, mocked_execute,
                                         mock_list_parts, mocked_actual_comp):
        """Build a RAID-1 + RAID-5 setup across three disks in BIOS mode.

        Verifies the exact sequence of parted/sgdisk/partx/mdadm commands
        issued by create_configuration and that the input raid_config is
        returned unchanged.
        """
        node = self.node
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "10",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "5",
                    "controller": "software",
                },
            ]
        }
        node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        device3 = hardware.BlockDevice('/dev/sdc', 'sdc', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.return_value = [device1, device2,
                                                         device3]

        # Return values listed in the exact order create_configuration
        # invokes utils.execute; any reordering breaks the test.
        mocked_execute.side_effect = [
            None,  # mklabel sda
            ('42', None),  # sgdisk -F sda
            None,  # mklabel sdb
            ('42', None),  # sgdisk -F sdb
            None,  # mklabel sdc
            ('42', None),  # sgdisk -F sdc
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # parted + partx sdc
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # parted + partx sdc
            None, None  # mdadms
        ]

        # Component partitions reported for each md device in turn.
        mocked_actual_comp.side_effect = [
            ('/dev/sda1', '/dev/sdb1', '/dev/sdc1'),
            ('/dev/sda2', '/dev/sdb2', '/dev/sdc2'),
        ]

        result = self.hardware.create_configuration(node, [])

        mocked_execute.assert_has_calls([
            mock.call('parted', '/dev/sda', '-s', '--', 'mklabel',
                      'msdos'),
            mock.call('sgdisk', '-F', '/dev/sda'),
            mock.call('parted', '/dev/sdb', '-s', '--', 'mklabel',
                      'msdos'),
            mock.call('sgdisk', '-F', '/dev/sdb'),
            mock.call('parted', '/dev/sdc', '-s', '--', 'mklabel',
                      'msdos'),
            mock.call('sgdisk', '-F', '/dev/sdc'),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('parted', '/dev/sdc', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sdc', check_exit_code=False),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('parted', '/dev/sdc', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sdc', check_exit_code=False),
            mock.call('mdadm', '--create', '/dev/md0', '--force', '--run',
                      '--metadata=1', '--level', '1', '--raid-devices', 3,
                      '/dev/sda1', '/dev/sdb1', '/dev/sdc1'),
            mock.call('mdadm', '--create', '/dev/md1', '--force', '--run',
                      '--metadata=1', '--level', '5', '--raid-devices', 3,
                      '/dev/sda2', '/dev/sdb2', '/dev/sdc2')])
        self.assertEqual(raid_config, result)
|
|
|
|
|
2021-02-10 13:48:29 +01:00
|
|
|
    @mock.patch.object(raid_utils, '_get_actual_component_devices',
                       autospec=True)
    @mock.patch.object(utils, 'get_node_boot_mode', lambda node: 'bios')
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_create_configuration_raid_6(self, mocked_execute,
                                         mock_list_parts, mocked_actual_comp):
        """Build a RAID-1 + RAID-6 setup across four disks in BIOS mode.

        Same structure as the RAID-5 test, but RAID-6 needs at least four
        member devices.
        """
        node = self.node
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "10",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "6",
                    "controller": "software",
                },
            ]
        }
        node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        device3 = hardware.BlockDevice('/dev/sdc', 'sdc', 107374182400, True)
        device4 = hardware.BlockDevice('/dev/sdd', 'sdd', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.return_value = [device1, device2,
                                                         device3, device4]

        # Return values listed in the exact order create_configuration
        # invokes utils.execute.
        mocked_execute.side_effect = [
            None,  # mklabel sda
            ('42', None),  # sgdisk -F sda
            None,  # mklabel sdb
            ('42', None),  # sgdisk -F sdb
            None,  # mklabel sdc
            ('42', None),  # sgdisk -F sdc
            None,  # mklabel sdd
            ('42', None),  # sgdisk -F sdd
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # parted + partx sdc
            None, None,  # parted + partx sdd
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # parted + partx sdc
            None, None,  # parted + partx sdd
            None, None  # mdadms
        ]

        # Component partitions reported for each md device in turn.
        mocked_actual_comp.side_effect = [
            ('/dev/sda1', '/dev/sdb1', '/dev/sdc1', '/dev/sdd1'),
            ('/dev/sda2', '/dev/sdb2', '/dev/sdc2', '/dev/sdd2'),
        ]

        result = self.hardware.create_configuration(node, [])

        mocked_execute.assert_has_calls([
            mock.call('parted', '/dev/sda', '-s', '--', 'mklabel',
                      'msdos'),
            mock.call('sgdisk', '-F', '/dev/sda'),
            mock.call('parted', '/dev/sdb', '-s', '--', 'mklabel',
                      'msdos'),
            mock.call('sgdisk', '-F', '/dev/sdb'),
            mock.call('parted', '/dev/sdc', '-s', '--', 'mklabel',
                      'msdos'),
            mock.call('sgdisk', '-F', '/dev/sdc'),
            mock.call('parted', '/dev/sdd', '-s', '--', 'mklabel',
                      'msdos'),
            mock.call('sgdisk', '-F', '/dev/sdd'),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('parted', '/dev/sdc', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sdc', check_exit_code=False),
            mock.call('parted', '/dev/sdd', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sdd', check_exit_code=False),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('parted', '/dev/sdc', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sdc', check_exit_code=False),
            mock.call('parted', '/dev/sdd', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sdd', check_exit_code=False),
            mock.call('mdadm', '--create', '/dev/md0', '--force', '--run',
                      '--metadata=1', '--level', '1', '--raid-devices', 4,
                      '/dev/sda1', '/dev/sdb1', '/dev/sdc1', '/dev/sdd1'),
            mock.call('mdadm', '--create', '/dev/md1', '--force', '--run',
                      '--metadata=1', '--level', '6', '--raid-devices', 4,
                      '/dev/sda2', '/dev/sdb2', '/dev/sdc2', '/dev/sdd2')])
        self.assertEqual(raid_config, result)
|
|
|
|
|
2021-02-10 13:48:29 +01:00
|
|
|
    @mock.patch.object(raid_utils, '_get_actual_component_devices',
                       autospec=True)
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(os.path, 'isdir', autospec=True, return_value=True)
    def test_create_configuration_efi(self, mocked_os_path_isdir,
                                      mocked_execute, mock_list_parts,
                                      mocked_actual_comp):
        """RAID creation on an EFI system (/sys/firmware/efi present).

        EFI implies GPT labels and the first data partition starting at
        551MiB (space reserved for the ESP), with no sgdisk -F probing.
        """
        node = self.node

        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "10",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "0",
                    "controller": "software",
                },
            ]
        }
        node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.return_value = [device1, device2]

        # Return values listed in the exact order create_configuration
        # invokes utils.execute.
        mocked_execute.side_effect = [
            None,  # mklabel sda
            None,  # mklabel sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None  # mdadms
        ]

        # Component partitions reported for each md device in turn.
        mocked_actual_comp.side_effect = [
            ('/dev/sda1', '/dev/sdb1'),
            ('/dev/sda2', '/dev/sdb2'),
        ]

        result = self.hardware.create_configuration(node, [])
        mocked_os_path_isdir.assert_has_calls([
            mock.call('/sys/firmware/efi')
        ])
        mocked_execute.assert_has_calls([
            mock.call('parted', '/dev/sda', '-s', '--', 'mklabel', 'gpt'),
            mock.call('parted', '/dev/sdb', '-s', '--', 'mklabel', 'gpt'),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '551MiB', '10GiB'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '551MiB', '10GiB'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('mdadm', '--create', '/dev/md0', '--force', '--run',
                      '--metadata=1', '--level', '1', '--raid-devices', 2,
                      '/dev/sda1', '/dev/sdb1'),
            mock.call('mdadm', '--create', '/dev/md1', '--force', '--run',
                      '--metadata=1', '--level', '0', '--raid-devices', 2,
                      '/dev/sda2', '/dev/sdb2')])
        self.assertEqual(raid_config, result)
|
|
|
|
|
2021-02-10 13:48:29 +01:00
|
|
|
    @mock.patch.object(raid_utils, '_get_actual_component_devices',
                       autospec=True)
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(os.path, 'isdir', autospec=True, return_value=False)
    def test_create_configuration_force_gpt_with_disk_label(
            self, mocked_os_path_isdir, mocked_execute, mock_list_part,
            mocked_actual_comp):
        """BIOS system forced to GPT via the node's disk_label capability.

        With GPT on BIOS the first partition starts at 8MiB (room for the
        BIOS boot partition) instead of the EFI 551MiB offset.
        """
        node = self.node

        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "10",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "0",
                    "controller": "software",
                },
            ]
        }
        node['target_raid_config'] = raid_config
        # Capability forcing GPT even though /sys/firmware/efi is absent.
        node['properties'] = {
            'capabilities': {
                'disk_label': 'gpt'
            }
        }

        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.return_value = [device1, device2]

        # Return values listed in the exact order create_configuration
        # invokes utils.execute.
        mocked_execute.side_effect = [
            None,  # mklabel sda
            None,  # mklabel sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None  # mdadms
        ]

        # Component partitions reported for each md device in turn.
        mocked_actual_comp.side_effect = [
            ('/dev/sda1', '/dev/sdb1'),
            ('/dev/sda2', '/dev/sdb2'),
        ]

        result = self.hardware.create_configuration(node, [])
        mocked_os_path_isdir.assert_has_calls([
            mock.call('/sys/firmware/efi')
        ])
        mocked_execute.assert_has_calls([
            mock.call('parted', '/dev/sda', '-s', '--', 'mklabel', 'gpt'),
            mock.call('parted', '/dev/sdb', '-s', '--', 'mklabel', 'gpt'),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '8MiB', '10GiB'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '8MiB', '10GiB'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('mdadm', '--create', '/dev/md0', '--force', '--run',
                      '--metadata=1', '--level', '1', '--raid-devices', 2,
                      '/dev/sda1', '/dev/sdb1'),
            mock.call('mdadm', '--create', '/dev/md1', '--force', '--run',
                      '--metadata=1', '--level', '0', '--raid-devices', 2,
                      '/dev/sda2', '/dev/sdb2')])
        self.assertEqual(raid_config, result)
|
|
|
|
|
2021-02-10 13:48:29 +01:00
|
|
|
    @mock.patch.object(raid_utils, '_get_actual_component_devices',
                       autospec=True)
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(os.path, 'isdir', autospec=True, return_value=False)
    def test_create_configuration_no_max(self, _mocked_isdir, mocked_execute,
                                         mock_list_parts, mocked_actual_comp):
        """Both logical disks have explicit sizes (no MAX entry).

        The second partition must get an explicit end offset (30GiB =
        10GiB + 20GiB) rather than the '-1' rest-of-disk marker.
        """
        node = self.node
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "10",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "20",
                    "raid_level": "0",
                    "controller": "software",
                },
            ]
        }

        node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.return_value = [device1, device2]

        # Component partitions reported for each md device in turn.
        mocked_actual_comp.side_effect = [
            ('/dev/sda1', '/dev/sdb1'),
            ('/dev/sda2', '/dev/sdb2'),
        ]

        # Return values listed in the exact order create_configuration
        # invokes utils.execute.
        mocked_execute.side_effect = [
            None,  # mklabel sda
            ('42', None),  # sgdisk -F sda
            None,  # mklabel sdb
            ('42', None),  # sgdisk -F sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # mdadms
        ]

        result = self.hardware.create_configuration(node, [])

        mocked_execute.assert_has_calls([
            mock.call('parted', '/dev/sda', '-s', '--', 'mklabel', 'msdos'),
            mock.call('sgdisk', '-F', '/dev/sda'),
            mock.call('parted', '/dev/sdb', '-s', '--', 'mklabel', 'msdos'),
            mock.call('sgdisk', '-F', '/dev/sdb'),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '30GiB'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '30GiB'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('mdadm', '--create', '/dev/md0', '--force', '--run',
                      '--metadata=1', '--level', '1', '--raid-devices', 2,
                      '/dev/sda1', '/dev/sdb1'),
            mock.call('mdadm', '--create', '/dev/md1', '--force', '--run',
                      '--metadata=1', '--level', '0', '--raid-devices', 2,
                      '/dev/sda2', '/dev/sdb2')])
        self.assertEqual(raid_config, result)
|
|
|
|
|
2021-02-10 13:48:29 +01:00
|
|
|
    @mock.patch.object(raid_utils, '_get_actual_component_devices',
                       autospec=True)
    @mock.patch.object(utils, 'get_node_boot_mode', lambda node: 'bios')
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_create_configuration_max_is_first_logical(self, _mocked_isdir,
                                                       mocked_execute,
                                                       mock_list_parts,
                                                       mocked_actual_comp):
        """The MAX-sized logical disk is listed first in the config.

        The sized (20GiB) disk must be partitioned first (42s..20GiB) and
        the MAX disk gets the remainder (20GiB..-1); the md devices are
        created accordingly (md0 = RAID-0 of the sized disk, md1 = RAID-1
        of the remainder).
        """
        node = self.node
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "MAX",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "20",
                    "raid_level": "0",
                    "controller": "software",
                },
            ]
        }

        node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.return_value = [device1, device2]

        # Return values listed in the exact order create_configuration
        # invokes utils.execute.
        mocked_execute.side_effect = [
            None,  # mklabel sda
            ('42', None),  # sgdisk -F sda
            None,  # mklabel sdb
            ('42', None),  # sgdisk -F sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None  # mdadms
        ]

        # Component partitions reported for each md device in turn.
        mocked_actual_comp.side_effect = [
            ('/dev/sda1', '/dev/sdb1'),
            ('/dev/sda2', '/dev/sdb2'),
        ]

        result = self.hardware.create_configuration(node, [])

        mocked_execute.assert_has_calls([
            mock.call('parted', '/dev/sda', '-s', '--', 'mklabel', 'msdos'),
            mock.call('sgdisk', '-F', '/dev/sda'),
            mock.call('parted', '/dev/sdb', '-s', '--', 'mklabel', 'msdos'),
            mock.call('sgdisk', '-F', '/dev/sdb'),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '20GiB'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '20GiB'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '20GiB', '-1'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '20GiB', '-1'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('mdadm', '--create', '/dev/md0', '--force', '--run',
                      '--metadata=1', '--level', '0', '--raid-devices', 2,
                      '/dev/sda1', '/dev/sdb1'),
            mock.call('mdadm', '--create', '/dev/md1', '--force', '--run',
                      '--metadata=1', '--level', '1', '--raid-devices', 2,
                      '/dev/sda2', '/dev/sdb2')])
        self.assertEqual(raid_config, result)
|
|
|
|
|
2021-02-10 13:48:29 +01:00
|
|
|
    @mock.patch.object(raid_utils, '_get_actual_component_devices',
                       autospec=True)
    @mock.patch.object(utils, 'get_node_boot_mode', lambda node: 'bios')
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_create_configuration_with_hints(self, mocked_execute,
                                             mock_list_parts,
                                             mocked_actual_comp):
        """physical_disks hints select only the matching devices.

        Two small non-rotational disks (sdc, sdd) are present but filtered
        out by the size/rotational hints; only sda and sdb are used.
        """
        node = self.node
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "10",
                    "raid_level": "1",
                    "controller": "software",
                    "physical_disks": [
                        {'size': '>= 50'}
                    ] * 2,
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "0",
                    "controller": "software",
                    "physical_disks": [
                        {'rotational': True}
                    ] * 2,
                },
            ]
        }
        node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        # sdc/sdd are 20GB and non-rotational: excluded by both hint sets.
        self.hardware.list_block_devices.return_value = [
            device1,
            hardware.BlockDevice('/dev/sdc', 'sdc', 21474836480, False),
            device2,
            hardware.BlockDevice('/dev/sdd', 'sdd', 21474836480, False),
        ]

        # Return values listed in the exact order create_configuration
        # invokes utils.execute.
        mocked_execute.side_effect = [
            None,  # mklabel sda
            ('42', None),  # sgdisk -F sda
            None,  # mklabel sdb
            ('42', None),  # sgdisk -F sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None,  # parted + partx sda
            None, None,  # parted + partx sdb
            None, None  # mdadms
        ]

        # Component partitions reported for each md device in turn.
        mocked_actual_comp.side_effect = [
            ('/dev/sda1', '/dev/sdb1'),
            ('/dev/sda2', '/dev/sdb2'),
        ]

        result = self.hardware.create_configuration(node, [])

        mocked_execute.assert_has_calls([
            mock.call('parted', '/dev/sda', '-s', '--', 'mklabel',
                      'msdos'),
            mock.call('sgdisk', '-F', '/dev/sda'),
            mock.call('parted', '/dev/sdb', '-s', '--', 'mklabel',
                      'msdos'),
            mock.call('sgdisk', '-F', '/dev/sdb'),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '42s', '10GiB'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sda', check_exit_code=False),
            mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/sdb', check_exit_code=False),
            mock.call('mdadm', '--create', '/dev/md0', '--force', '--run',
                      '--metadata=1', '--level', '1', '--raid-devices', 2,
                      '/dev/sda1', '/dev/sdb1'),
            mock.call('mdadm', '--create', '/dev/md1', '--force', '--run',
                      '--metadata=1', '--level', '0', '--raid-devices', 2,
                      '/dev/sda2', '/dev/sdb2')])
        self.assertEqual(raid_config, result)

        # Only the two selected devices are checked for existing partitions.
        self.assertEqual(2, mock_list_parts.call_count)
        mock_list_parts.assert_has_calls([
            mock.call(x) for x in ['/dev/sda', '/dev/sdb']
        ])
|
|
|
|
|
2019-02-04 13:17:23 +01:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-08-06 13:52:13 +02:00
|
|
|
@mock.patch.object(os.path, 'isdir', autospec=True, return_value=False)
|
|
|
|
def test_create_configuration_invalid_raid_config(self,
|
|
|
|
mocked_os_path_is_dir,
|
|
|
|
mocked_execute):
|
2019-02-04 13:17:23 +01:00
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "0",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
]
|
|
|
|
}
|
|
|
|
self.node['target_raid_config'] = raid_config
|
|
|
|
self.assertRaises(errors.SoftwareRAIDError,
|
|
|
|
self.hardware.create_configuration,
|
|
|
|
self.node, [])
|
|
|
|
|
2020-03-16 13:16:05 +01:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_create_configuration_invalid_hints(self, mocked_execute):
|
|
|
|
for hints in [
|
|
|
|
[],
|
|
|
|
[{'size': '>= 50'}], # more than one disk required,
|
|
|
|
"size >= 50",
|
|
|
|
[{'size': '>= 50'}, "size >= 50"],
|
|
|
|
]:
|
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
"physical_disks": hints,
|
|
|
|
}
|
|
|
|
]
|
|
|
|
}
|
|
|
|
self.node['target_raid_config'] = raid_config
|
|
|
|
self.assertRaises(errors.SoftwareRAIDError,
|
|
|
|
self.hardware.create_configuration,
|
|
|
|
self.node, [])
|
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_create_configuration_mismatching_hints(self, mocked_execute):
|
|
|
|
device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
|
|
|
|
device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
|
|
|
|
self.hardware.list_block_devices = mock.Mock()
|
|
|
|
self.hardware.list_block_devices.return_value = [
|
|
|
|
device1,
|
|
|
|
hardware.BlockDevice('/dev/sdc', 'sdc', 21474836480, False),
|
|
|
|
device2,
|
|
|
|
hardware.BlockDevice('/dev/sdd', 'sdd', 21474836480, False),
|
|
|
|
]
|
|
|
|
for hints in [
|
|
|
|
[{'size': '>= 150'}] * 2,
|
|
|
|
[{'name': '/dev/sda'}] * 2,
|
|
|
|
]:
|
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
"physical_disks": hints,
|
|
|
|
}
|
|
|
|
]
|
|
|
|
}
|
|
|
|
self.node['target_raid_config'] = raid_config
|
|
|
|
self.assertRaisesRegex(errors.SoftwareRAIDError,
|
|
|
|
'No candidates',
|
|
|
|
self.hardware.create_configuration,
|
|
|
|
self.node, [])
|
|
|
|
|
2020-04-01 12:16:59 +02:00
|
|
|
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(os.path, 'isdir', autospec=True, return_value=False)
    def test_create_configuration_partitions_detected(self,
                                                      mocked_os_path_is_dir,
                                                      mocked_execute,
                                                      mock_list_parts):
        """Refuse to build software RAID on a disk with existing partitions."""

        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "100",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "0",
                    "controller": "software",
                },
            ]
        }
        # sda is clean, but sdb still carries a partition.
        mock_list_parts.side_effect = [
            [],
            [{'partition_name': '/dev/sdb1'}],
        ]
        self.node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.return_value = [
            device1, device2
        ]

        self.assertRaises(errors.SoftwareRAIDError,
                          self.hardware.create_configuration,
                          self.node, [])
|
|
|
|
|
2020-04-01 12:16:59 +02:00
|
|
|
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(os.path, 'isdir', autospec=True, return_value=False)
    def test_create_configuration_device_handling_failures(
            self, mocked_os_path_is_dir, mocked_execute, mock_list_parts):
        """Command failures at each stage surface as the right exception.

        Three scenarios are run back to back against the same mocks:
        partition-table creation fails, partition creation fails, and
        md-device creation fails.
        """
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "100",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "0",
                    "controller": "software",
                },
            ]
        }
        self.node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        # One device listing per internal lookup across the three runs.
        self.hardware.list_block_devices.side_effect = [
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2]]

        # partition table creation
        error_regex = "Failed to create partition table on /dev/sda"
        mocked_execute.side_effect = [
            processutils.ProcessExecutionError]
        self.assertRaisesRegex(errors.CommandExecutionError, error_regex,
                               self.hardware.create_configuration,
                               self.node, [])
        # partition creation
        error_regex = "Failed to create partitions on /dev/sda"
        mocked_execute.side_effect = [
            None,  # partition tables on sda
            ('42', None),  # sgdisk -F sda
            None,  # partition tables on sdb
            ('42', None),  # sgdisk -F sdb
            processutils.ProcessExecutionError]
        self.assertRaisesRegex(errors.SoftwareRAIDError, error_regex,
                               self.hardware.create_configuration,
                               self.node, [])
        # raid device creation
        error_regex = ("Failed to create md device /dev/md0 "
                       "on /dev/sda1 /dev/sdb1")
        mocked_execute.side_effect = [
            None,  # partition tables on sda
            ('42', None),  # sgdisk -F sda
            None,  # partition tables on sdb
            ('42', None),  # sgdisk -F sdb
            None, None, None, None,  # RAID-1 partitions on sd{a,b} + partx
            None, None, None, None,  # RAID-N partitions on sd{a,b} + partx
            processutils.ProcessExecutionError]
        self.assertRaisesRegex(errors.SoftwareRAIDError, error_regex,
                               self.hardware.create_configuration,
                               self.node, [])
|
|
|
|
|
2020-04-01 12:16:59 +02:00
|
|
|
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_create_configuration_device_handling_failures_raid5(
            self, mocked_execute, mock_list_parts):
        """RAID-5 with only two backing disks must fail validation early."""
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "100",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "5",
                    "controller": "software",
                },
            ]
        }
        self.node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.side_effect = [
            [device1, device2],
            [device1, device2]]

        # validation of the configuration explicitly fails before any
        # destructive action is taken on the devices
        error_regex = ("Software RAID configuration is not possible for "
                       "RAID level 5 with only 2 block devices found.")
        # list_block_devices is mocked out above, so no call to execute
        # can have happened at this point
        self.assertFalse(mocked_execute.called)
        self.assertRaisesRegex(errors.SoftwareRAIDError, error_regex,
                               self.hardware.create_configuration,
                               self.node, [])
|
|
|
|
|
|
|
|
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_create_configuration_device_handling_failures_raid6(
            self, mocked_execute):
        """RAID-6 with only three backing disks must fail validation early."""
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "100",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "6",
                    "controller": "software",
                },
            ]
        }
        self.node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/sda', 'sda', 107374182400, True)
        device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 107374182400, True)
        device3 = hardware.BlockDevice('/dev/sdc', 'sdc', 107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.side_effect = [
            [device1, device2, device3],
            [device1, device2, device3]]

        # pre-creation validation fails as an insufficient number of
        # devices is found for RAID level 6
        error_regex = ("Software RAID configuration is not possible for "
                       "RAID level 6 with only 3 block devices found.")

        # list_block_devices is mocked out above, so no call to execute
        # can have happened at this point
        self.assertFalse(mocked_execute.called)
        self.assertRaisesRegex(errors.SoftwareRAIDError, error_regex,
                               self.hardware.create_configuration,
                               self.node, [])
|
|
|
|
|
2019-07-25 17:13:08 +02:00
|
|
|
def test_create_configuration_empty_target_raid_config(self):
|
|
|
|
self.node['target_raid_config'] = {}
|
|
|
|
result = self.hardware.create_configuration(self.node, [])
|
|
|
|
self.assertEqual(result, {})
|
|
|
|
|
2021-02-10 13:48:29 +01:00
|
|
|
    @mock.patch.object(raid_utils, '_get_actual_component_devices',
                       autospec=True)
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(os.path, 'isdir', autospec=True, return_value=True)
    def test_create_configuration_with_nvme(self, mocked_os_path_isdir,
                                            mocked_execute, mock_list_parts,
                                            mocked_actual_comp):
        """Create a RAID-1 + RAID-0 layout across two NVMe drives."""
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "10",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "0",
                    "controller": "software",
                },
            ]
        }
        self.node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/nvme0n1', 'nvme0n1',
                                       107374182400, True)
        device2 = hardware.BlockDevice('/dev/nvme1n1', 'nvme1n1',
                                       107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        self.hardware.list_block_devices.return_value = [device1, device2]

        # the order below must match the assert_has_calls list further down
        mocked_execute.side_effect = [
            None,  # mklabel nvme0n1
            None,  # mklabel nvme1n1
            None, None,  # parted + partx nvme0n1
            None, None,  # parted + partx nvme1n1
            None, None,  # parted + partx nvme0n1
            None, None,  # parted + partx nvme1n1
            None, None  # mdadms
        ]

        # resolved component partitions for the two md devices, in order
        mocked_actual_comp.side_effect = [
            ('/dev/nvme0n1p1', '/dev/nvme1n1p1'),
            ('/dev/nvme0n1p2', '/dev/nvme1n1p2'),
        ]

        result = self.hardware.create_configuration(self.node, [])

        mocked_execute.assert_has_calls([
            mock.call('parted', '/dev/nvme0n1', '-s', '--', 'mklabel',
                      'gpt'),
            mock.call('parted', '/dev/nvme1n1', '-s', '--', 'mklabel',
                      'gpt'),
            mock.call('parted', '/dev/nvme0n1', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '551MiB', '10GiB'),
            mock.call('partx', '-u', '/dev/nvme0n1', check_exit_code=False),
            mock.call('parted', '/dev/nvme1n1', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '551MiB', '10GiB'),
            mock.call('partx', '-u', '/dev/nvme1n1', check_exit_code=False),
            mock.call('parted', '/dev/nvme0n1', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/nvme0n1', check_exit_code=False),
            mock.call('parted', '/dev/nvme1n1', '-s', '-a', 'optimal', '--',
                      'mkpart', 'primary', '10GiB', '-1'),
            mock.call('partx', '-u', '/dev/nvme1n1', check_exit_code=False),
            mock.call('mdadm', '--create', '/dev/md0', '--force', '--run',
                      '--metadata=1', '--level', '1', '--raid-devices', 2,
                      '/dev/nvme0n1p1', '/dev/nvme1n1p1'),
            mock.call('mdadm', '--create', '/dev/md1', '--force', '--run',
                      '--metadata=1', '--level', '0', '--raid-devices', 2,
                      '/dev/nvme0n1p2', '/dev/nvme1n1p2')])
        self.assertEqual(raid_config, result)
|
|
|
|
|
2020-04-01 12:16:59 +02:00
|
|
|
    @mock.patch.object(disk_utils, 'list_partitions', autospec=True,
                       return_value=[])
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(os.path, 'isdir', autospec=True, return_value=True)
    def test_create_configuration_failure_with_nvme(self,
                                                    mocked_os_path_isdir,
                                                    mocked_execute,
                                                    mock_list_parts):
        """Each stage of NVMe RAID creation surfaces the proper error."""
        raid_config = {
            "logical_disks": [
                {
                    "size_gb": "100",
                    "raid_level": "1",
                    "controller": "software",
                },
                {
                    "size_gb": "MAX",
                    "raid_level": "0",
                    "controller": "software",
                },
            ]
        }
        self.node['target_raid_config'] = raid_config
        device1 = hardware.BlockDevice('/dev/nvme0n1', 'nvme0n1',
                                       107374182400, True)
        device2 = hardware.BlockDevice('/dev/nvme1n1', 'nvme1n1',
                                       107374182400, True)
        self.hardware.list_block_devices = mock.Mock()
        # one entry per list_block_devices call across the three attempts
        self.hardware.list_block_devices.side_effect = [
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2],
            [device1, device2]]

        # partition table creation
        error_regex = "Failed to create partition table on /dev/nvme0n1"
        mocked_execute.side_effect = [
            processutils.ProcessExecutionError]
        self.assertRaisesRegex(errors.CommandExecutionError, error_regex,
                               self.hardware.create_configuration,
                               self.node, [])
        # partition creation
        error_regex = "Failed to create partitions on /dev/nvme0n1"
        mocked_execute.side_effect = [
            None,  # partition tables on nvme0n1
            None,  # partition tables on nvme1n1
            processutils.ProcessExecutionError]
        self.assertRaisesRegex(errors.SoftwareRAIDError, error_regex,
                               self.hardware.create_configuration,
                               self.node, [])
        # raid device creation
        error_regex = ("Failed to create md device /dev/md0 "
                       "on /dev/nvme0n1p1 /dev/nvme1n1p1")
        mocked_execute.side_effect = [
            None,  # partition tables on nvme0n1
            None,  # partition tables on nvme1n1
            None, None, None, None,  # RAID-1 partitions on nvme{0,1}n1 + partx
            None, None, None, None,  # RAID-N partitions on nvme{0,1}n1 + partx
            processutils.ProcessExecutionError]
        self.assertRaisesRegex(errors.SoftwareRAIDError, error_regex,
                               self.hardware.create_configuration,
                               self.node, [])
|
|
|
|
|
2020-10-02 21:36:46 +02:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2020-09-24 16:25:01 +02:00
|
|
|
def test__get_md_uuid(self, mocked_execute):
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.side_effect = [(hws.MDADM_DETAIL_OUTPUT, '')]
|
2020-09-24 16:25:01 +02:00
|
|
|
md_uuid = hardware._get_md_uuid('/dev/md0')
|
|
|
|
self.assertEqual('83143055:2781ddf5:2c8f44c7:9b45d92e', md_uuid)
|
2019-02-04 13:17:23 +01:00
|
|
|
|
2020-09-24 16:25:01 +02:00
|
|
|
    @mock.patch.object(hardware, '_get_md_uuid', autospec=True)
    @mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test__get_component_devices(self, mocked_execute,
                                    mocked_list_all_block_devices,
                                    mocked_md_uuid):
        """Only devices whose examine output matches the md UUID count."""
        raid_device1 = hardware.BlockDevice('/dev/md0', 'RAID-1',
                                            107374182400, True)
        sda = hardware.BlockDevice('/dev/sda', 'model12', 21, True)
        sdz = hardware.BlockDevice('/dev/sdz', 'model12', 21, True)
        sda1 = hardware.BlockDevice('/dev/sda1', 'model12', 21, True)
        sdz1 = hardware.BlockDevice('/dev/sdz1', 'model12', 21, True)

        mocked_md_uuid.return_value = '83143055:2781ddf5:2c8f44c7:9b45d92e'
        hardware.list_all_block_devices.side_effect = [
            [sda, sdz],  # list_all_block_devices
            [sda1, sdz1],  # list_all_block_devices partitions
        ]
        # one (stdout, stderr) pair per mdadm --examine call below;
        # only the third (sda1) carries the matching member UUID
        mocked_execute.side_effect = [
            ['mdadm --examine output for sda', '_'],
            ['mdadm --examine output for sdz', '_'],
            [hws.MDADM_EXAMINE_OUTPUT_MEMBER, '_'],
            [hws.MDADM_EXAMINE_OUTPUT_NON_MEMBER, '_'],
        ]

        component_devices = hardware._get_component_devices(raid_device1)
        self.assertEqual(['/dev/sda1'], component_devices)
        mocked_execute.assert_has_calls([
            mock.call('mdadm', '--examine', '/dev/sda',
                      use_standard_locale=True),
            mock.call('mdadm', '--examine', '/dev/sdz',
                      use_standard_locale=True),
            mock.call('mdadm', '--examine', '/dev/sda1',
                      use_standard_locale=True),
            mock.call('mdadm', '--examine', '/dev/sdz1',
                      use_standard_locale=True)])
|
2019-07-12 14:50:14 +02:00
|
|
|
|
2019-02-04 13:17:23 +01:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2019-02-26 17:56:20 +01:00
|
|
|
def test_get_holder_disks(self, mocked_execute):
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.side_effect = [(hws.MDADM_DETAIL_OUTPUT, '')]
|
2019-08-09 14:59:53 +02:00
|
|
|
holder_disks = hardware.get_holder_disks('/dev/md0')
|
2019-02-04 13:17:23 +01:00
|
|
|
self.assertEqual(['/dev/vde', '/dev/vdf'], holder_disks)
|
|
|
|
|
2021-02-26 21:42:18 -05:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
@mock.patch.object(os.path, 'exists', autospec=True)
|
|
|
|
@mock.patch.object(os, 'stat', autospec=True)
|
|
|
|
def test_get_holder_disks_with_whole_device(self, mocked_stat,
|
|
|
|
mocked_exists,
|
|
|
|
mocked_execute):
|
|
|
|
mocked_execute.side_effect = [(hws.MDADM_DETAIL_OUTPUT_WHOLE_DEVICE,
|
|
|
|
'')]
|
|
|
|
mocked_exists.return_value = True
|
|
|
|
mocked_stat.return_value.st_mode = stat.S_IFBLK
|
|
|
|
holder_disks = hardware.get_holder_disks('/dev/md0')
|
|
|
|
self.assertEqual(['/dev/vde', '/dev/vdf'], holder_disks)
|
|
|
|
|
2019-08-09 14:59:53 +02:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_holder_disks_with_nvme(self, mocked_execute):
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.side_effect = [(hws.MDADM_DETAIL_OUTPUT_NVME, '')]
|
2019-08-09 14:59:53 +02:00
|
|
|
holder_disks = hardware.get_holder_disks('/dev/md0')
|
|
|
|
self.assertEqual(['/dev/nvme0n1', '/dev/nvme1n1'], holder_disks)
|
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_holder_disks_unexpected_devices(self, mocked_execute):
|
2021-02-11 15:36:09 +01:00
|
|
|
side_effect = hws.MDADM_DETAIL_OUTPUT_NVME.replace('nvme1n1p1',
|
|
|
|
'notmatching1a')
|
2019-08-09 14:59:53 +02:00
|
|
|
mocked_execute.side_effect = [(side_effect, '')]
|
|
|
|
self.assertRaisesRegex(
|
|
|
|
errors.SoftwareRAIDError,
|
|
|
|
r'^Software RAID caused unknown error: Could not get holder disks '
|
|
|
|
r'of /dev/md0: unexpected pattern for partition '
|
|
|
|
r'/dev/notmatching1a$',
|
|
|
|
hardware.get_holder_disks, '/dev/md0')
|
|
|
|
|
2019-07-12 14:50:14 +02:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_holder_disks_broken_raid0(self, mocked_execute):
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.side_effect = [(hws.MDADM_DETAIL_OUTPUT_BROKEN_RAID0,
|
|
|
|
'')]
|
2019-08-09 14:59:53 +02:00
|
|
|
holder_disks = hardware.get_holder_disks('/dev/md126')
|
2019-07-12 14:50:14 +02:00
|
|
|
self.assertEqual(['/dev/sda'], holder_disks)
|
|
|
|
|
2019-06-07 14:22:38 +02:00
|
|
|
    @mock.patch.object(hardware, 'get_holder_disks', autospec=True)
    @mock.patch.object(hardware, '_get_component_devices', autospec=True)
    @mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_delete_configuration(self, mocked_execute, mocked_list,
                                  mocked_get_component, mocked_get_holder):
        """Tear down two md devices, wiping components and holder disks."""
        raid_device1 = hardware.BlockDevice('/dev/md0', 'RAID-1',
                                            107374182400, True)
        raid_device2 = hardware.BlockDevice('/dev/md1', 'RAID-0',
                                            2147483648, True)
        sda = hardware.BlockDevice('/dev/sda', 'model12', 21, True)
        sdb = hardware.BlockDevice('/dev/sdb', 'model12', 21, True)
        sdc = hardware.BlockDevice('/dev/sdc', 'model12', 21, True)

        hardware.list_all_block_devices.side_effect = [
            [raid_device1, raid_device2],  # list_all_block_devices raid
            [],  # list_all_block_devices raid (md)
            [sda, sdb, sdc],  # list_all_block_devices disks
            [],  # list_all_block_devices parts
            [],  # list_all_block_devices raid
            [],  # list_all_block_devices raid (md)
        ]
        mocked_get_component.side_effect = [
            ["/dev/sda1", "/dev/sdb1"],
            ["/dev/sda2", "/dev/sdb2"]]
        mocked_get_holder.side_effect = [
            ["/dev/sda", "/dev/sdb"],
            ["/dev/sda", "/dev/sdb"]]
        # the order below must match the assert_has_calls list further down
        mocked_execute.side_effect = [
            None,  # mdadm --assemble --scan
            None,  # wipefs md0
            None,  # mdadm --stop md0
            ['_', 'mdadm --examine output for sda1'],
            None,  # mdadm zero-superblock sda1
            ['_', 'mdadm --examine output for sdb1'],
            None,  # mdadm zero-superblock sdb1
            None,  # wipefs sda
            None,  # wipefs sdb
            None,  # wipefs md1
            None,  # mdadm --stop md1
            ['_', 'mdadm --examine output for sda2'],
            None,  # mdadm zero-superblock sda2
            ['_', 'mdadm --examine output for sdb2'],
            None,  # mdadm zero-superblock sdb2
            None,  # wipefs sda
            None,  # wipefs sdb
            ['_', 'mdadm --examine output for sdc'],
            None,  # mdadm zero-superblock sdc
            # examine sdb
            processutils.ProcessExecutionError('No md superblock detected'),
            # examine sda
            processutils.ProcessExecutionError('No md superblock detected'),
            None,  # mdadm --assemble --scan
        ]

        self.hardware.delete_configuration(self.node, [])

        mocked_execute.assert_has_calls([
            mock.call('mdadm', '--assemble', '--scan', check_exit_code=False),
            mock.call('wipefs', '-af', '/dev/md0'),
            mock.call('mdadm', '--stop', '/dev/md0'),
            mock.call('mdadm', '--examine', '/dev/sda1',
                      use_standard_locale=True),
            mock.call('mdadm', '--zero-superblock', '/dev/sda1'),
            mock.call('mdadm', '--examine', '/dev/sdb1',
                      use_standard_locale=True),
            mock.call('mdadm', '--zero-superblock', '/dev/sdb1'),
            mock.call('wipefs', '-af', '/dev/md1'),
            mock.call('mdadm', '--stop', '/dev/md1'),
            mock.call('mdadm', '--examine', '/dev/sda2',
                      use_standard_locale=True),
            mock.call('mdadm', '--zero-superblock', '/dev/sda2'),
            mock.call('mdadm', '--examine', '/dev/sdb2',
                      use_standard_locale=True),
            mock.call('mdadm', '--zero-superblock', '/dev/sdb2'),
            mock.call('mdadm', '--examine', '/dev/sdc',
                      use_standard_locale=True),
            mock.call('mdadm', '--zero-superblock', '/dev/sdc'),
            mock.call('mdadm', '--examine', '/dev/sdb',
                      use_standard_locale=True),
            mock.call('mdadm', '--zero-superblock', '/dev/sdb'),
            mock.call('mdadm', '--examine', '/dev/sda',
                      use_standard_locale=True),
            mock.call('mdadm', '--zero-superblock', '/dev/sda'),
            mock.call('wipefs', '-af', '/dev/sda'),
            mock.call('wipefs', '-af', '/dev/sdb'),
            mock.call('mdadm', '--assemble', '--scan', check_exit_code=False),
        ])
|
2019-02-04 13:17:23 +01:00
|
|
|
|
2019-07-26 14:47:15 +02:00
|
|
|
    @mock.patch.object(hardware, '_get_component_devices', autospec=True)
    @mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_delete_configuration_partition(self, mocked_execute, mocked_list,
                                            mocked_get_component):
        # This test checks that if no components are returned for a given
        # raid device, then it must be a nested partition and so it gets
        # skipped
        raid_device1_part1 = hardware.BlockDevice('/dev/md0p1', 'RAID-1',
                                                  1073741824, True)
        hardware.list_all_block_devices.side_effect = [
            [],  # list_all_block_devices raid
            [raid_device1_part1],  # list_all_block_devices raid (md)
            [],  # list_all_block_devices disks
            [],  # list_all_block_devices parts
            [],  # list_all_block_devices raid
            [],  # list_all_block_devices raid (md)
        ]
        mocked_get_component.return_value = []
        self.assertIsNone(self.hardware.delete_configuration(self.node, []))
        # only the two assemble scans ran; the nested partition itself
        # was never wiped or stopped
        mocked_execute.assert_has_calls([
            mock.call('mdadm', '--assemble', '--scan', check_exit_code=False),
            mock.call('mdadm', '--assemble', '--scan', check_exit_code=False),
        ])
|
|
|
|
|
|
|
|
    @mock.patch.object(hardware, '_get_component_devices', autospec=True)
    @mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_delete_configuration_failure_blocks_remaining(
            self, mocked_execute, mocked_list, mocked_get_component):

        # This test checks that, if after two raid clean passes there still
        # remain softraid hints on drives, then the delete_configuration call
        # raises an error
        raid_device1 = hardware.BlockDevice('/dev/md0', 'RAID-1',
                                            107374182400, True)

        # /dev/md0 keeps reappearing on every listing pass
        hardware.list_all_block_devices.side_effect = [
            [raid_device1],  # list_all_block_devices raid
            [],  # list_all_block_devices raid (type md)
            [],  # list_all_block_devices disks
            [],  # list_all_block_devices parts
            [raid_device1],  # list_all_block_devices raid
            [],  # list_all_block_devices raid (type md)
            [],  # list_all_block_devices disks
            [],  # list_all_block_devices parts
            [raid_device1],  # list_all_block_devices raid
            [],  # list_all_block_devices raid (type md)
        ]
        mocked_get_component.return_value = []

        self.assertRaisesRegex(
            errors.SoftwareRAIDError,
            r"^Software RAID caused unknown error: Unable to clean all "
            r"softraid correctly. Remaining \['/dev/md0'\]$",
            self.hardware.delete_configuration, self.node, [])

        # one assemble scan per pass: two clean attempts + final check
        mocked_execute.assert_has_calls([
            mock.call('mdadm', '--assemble', '--scan', check_exit_code=False),
            mock.call('mdadm', '--assemble', '--scan', check_exit_code=False),
            mock.call('mdadm', '--assemble', '--scan', check_exit_code=False),
        ])
|
2019-07-26 14:47:15 +02:00
|
|
|
|
2019-02-04 13:17:23 +01:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_validate_configuration_valid_raid1(self, mocked_execute):
|
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
]
|
|
|
|
}
|
2019-06-06 17:31:59 +02:00
|
|
|
self.assertIsNone(self.hardware.validate_configuration(raid_config,
|
|
|
|
self.node))
|
2019-02-04 13:17:23 +01:00
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_validate_configuration_valid_raid1_raidN(self, mocked_execute):
|
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
|
|
|
"size_gb": "100",
|
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "0",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
]
|
|
|
|
}
|
2019-06-06 17:31:59 +02:00
|
|
|
self.assertIsNone(self.hardware.validate_configuration(raid_config,
|
|
|
|
self.node))
|
2019-02-04 13:17:23 +01:00
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_validate_configuration_invalid_MAX_MAX(self, mocked_execute):
|
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "0",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
]
|
|
|
|
}
|
|
|
|
self.assertRaises(errors.SoftwareRAIDError,
|
|
|
|
self.hardware.validate_configuration,
|
|
|
|
raid_config, self.node)
|
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_validate_configuration_invalid_raid_level(self, mocked_execute):
|
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "42",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
]
|
|
|
|
}
|
|
|
|
self.assertRaises(errors.SoftwareRAIDError,
|
|
|
|
self.hardware.validate_configuration,
|
|
|
|
raid_config, self.node)
|
|
|
|
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_validate_configuration_invalid_no_of_raids(self, mocked_execute):
|
|
|
|
raid_config = {
|
|
|
|
"logical_disks": [
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "1",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "0",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"size_gb": "MAX",
|
|
|
|
"raid_level": "1+0",
|
|
|
|
"controller": "software",
|
|
|
|
},
|
|
|
|
]
|
|
|
|
}
|
|
|
|
self.assertRaises(errors.SoftwareRAIDError,
|
|
|
|
self.hardware.validate_configuration,
|
|
|
|
raid_config, self.node)
|
|
|
|
|
2017-03-19 08:30:25 -07:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
2016-02-01 17:18:30 +09:00
|
|
|
def test_get_system_vendor_info(self, mocked_execute):
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = hws.LSHW_JSON_OUTPUT_V1
|
2017-11-30 16:30:42 +00:00
|
|
|
vendor_info = self.hardware.get_system_vendor_info()
|
|
|
|
self.assertEqual('ABC123 (GENERIC_SERVER)', vendor_info.product_name)
|
|
|
|
self.assertEqual('1234567', vendor_info.serial_number)
|
|
|
|
self.assertEqual('GENERIC', vendor_info.manufacturer)
|
|
|
|
|
2020-11-16 14:51:53 +01:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_system_vendor_info_lshw_list(self, mocked_execute):
|
2021-02-11 15:36:09 +01:00
|
|
|
mocked_execute.return_value = (f"[{hws.LSHW_JSON_OUTPUT_V2[0]}]", "")
|
2020-11-16 14:51:53 +01:00
|
|
|
vendor_info = self.hardware.get_system_vendor_info()
|
|
|
|
self.assertEqual('ABCD', vendor_info.product_name)
|
|
|
|
self.assertEqual('1234', vendor_info.serial_number)
|
|
|
|
self.assertEqual('ABCD', vendor_info.manufacturer)
|
|
|
|
|
2017-11-30 16:30:42 +00:00
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_get_system_vendor_info_failure(self, mocked_execute):
|
|
|
|
mocked_execute.side_effect = processutils.ProcessExecutionError()
|
|
|
|
vendor_info = self.hardware.get_system_vendor_info()
|
|
|
|
self.assertEqual('', vendor_info.product_name)
|
|
|
|
self.assertEqual('', vendor_info.serial_number)
|
|
|
|
self.assertEqual('', vendor_info.manufacturer)
|
2016-02-01 17:18:30 +09:00
|
|
|
|
2019-08-09 10:17:54 +02:00
|
|
|
@mock.patch.object(utils, 'get_agent_params',
|
|
|
|
lambda: {'BOOTIF': 'boot:if'})
|
|
|
|
@mock.patch.object(os.path, 'isdir', autospec=True)
|
|
|
|
def test_get_boot_info_pxe_interface(self, mocked_isdir):
|
|
|
|
mocked_isdir.return_value = False
|
|
|
|
result = self.hardware.get_boot_info()
|
|
|
|
self.assertEqual(hardware.BootInfo(current_boot_mode='bios',
|
|
|
|
pxe_interface='boot:if'),
|
|
|
|
result)
|
|
|
|
|
|
|
|
@mock.patch.object(os.path, 'isdir', autospec=True)
|
|
|
|
def test_get_boot_info_bios(self, mocked_isdir):
|
|
|
|
mocked_isdir.return_value = False
|
|
|
|
result = self.hardware.get_boot_info()
|
|
|
|
self.assertEqual(hardware.BootInfo(current_boot_mode='bios'), result)
|
|
|
|
mocked_isdir.assert_called_once_with('/sys/firmware/efi')
|
|
|
|
|
|
|
|
@mock.patch.object(os.path, 'isdir', autospec=True)
|
|
|
|
def test_get_boot_info_uefi(self, mocked_isdir):
|
|
|
|
mocked_isdir.return_value = True
|
|
|
|
result = self.hardware.get_boot_info()
|
|
|
|
self.assertEqual(hardware.BootInfo(current_boot_mode='uefi'), result)
|
|
|
|
mocked_isdir.assert_called_once_with('/sys/firmware/efi')
|
|
|
|
|
2021-01-22 10:20:15 +10:00
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_erase_block_device_nvme_crypto_success(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
|
|
|
info = self.node['driver_internal_info']
|
|
|
|
info['agent_enable_nvme_erase'] = True
|
|
|
|
info['agent_continue_if_secure_erase_failed'] = True
|
|
|
|
mocked_raid_member.return_value = False
|
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(hws.NVME_CLI_INFO_TEMPLATE_CRYPTO_SUPPORTED, ''),
|
|
|
|
('', ''),
|
|
|
|
]
|
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/nvme0n1', "testdisk",
|
|
|
|
1073741824, False)
|
|
|
|
retval = self.hardware._nvme_erase(block_device)
|
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('nvme', 'id-ctrl', '/dev/nvme0n1', '-o', 'json'),
|
2021-03-02 15:37:35 +10:00
|
|
|
mock.call('nvme', 'format', '/dev/nvme0n1', '-s', 2, '-f'),
|
2021-01-22 10:20:15 +10:00
|
|
|
])
|
|
|
|
|
|
|
|
self.assertTrue(retval)
|
|
|
|
|
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_erase_block_device_nvme_userdata_success(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
|
|
|
info = self.node['driver_internal_info']
|
|
|
|
info['agent_enable_nvme_erase'] = True
|
|
|
|
info['agent_continue_if_secure_erase_failed'] = True
|
|
|
|
mocked_raid_member.return_value = False
|
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(hws.NVME_CLI_INFO_TEMPLATE_USERDATA_SUPPORTED, ''),
|
|
|
|
('', ''),
|
|
|
|
]
|
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/nvme0n1', "testdisk",
|
|
|
|
1073741824, False)
|
|
|
|
retval = self.hardware._nvme_erase(block_device)
|
|
|
|
mocked_execute.assert_has_calls([
|
|
|
|
mock.call('nvme', 'id-ctrl', '/dev/nvme0n1', '-o', 'json'),
|
2021-03-02 15:37:35 +10:00
|
|
|
mock.call('nvme', 'format', '/dev/nvme0n1', '-s', 1, '-f'),
|
2021-01-22 10:20:15 +10:00
|
|
|
])
|
|
|
|
|
|
|
|
self.assertTrue(retval)
|
|
|
|
|
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_erase_block_device_nvme_failed(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
|
|
|
info = self.node['driver_internal_info']
|
|
|
|
info['agent_enable_nvme_erase'] = True
|
|
|
|
mocked_raid_member.return_value = False
|
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(hws.NVME_CLI_INFO_TEMPLATE_CRYPTO_SUPPORTED, ''),
|
|
|
|
(processutils.ProcessExecutionError()),
|
|
|
|
]
|
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/nvme0n1', "testdisk",
|
|
|
|
1073741824, False)
|
|
|
|
self.assertRaises(errors.BlockDeviceEraseError,
|
|
|
|
self.hardware._nvme_erase, block_device)
|
|
|
|
|
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
|
|
|
|
'_is_linux_raid_member', autospec=True)
|
|
|
|
@mock.patch.object(utils, 'execute', autospec=True)
|
|
|
|
def test_erase_block_device_nvme_format_unsupported(self, mocked_execute,
|
|
|
|
mocked_raid_member):
|
|
|
|
info = self.node['driver_internal_info']
|
|
|
|
info['agent_enable_nvme_erase'] = True
|
|
|
|
mocked_raid_member.return_value = False
|
|
|
|
mocked_execute.side_effect = [
|
|
|
|
(hws.NVME_CLI_INFO_TEMPLATE_FORMAT_UNSUPPORTED, ''),
|
|
|
|
]
|
|
|
|
|
|
|
|
block_device = hardware.BlockDevice('/dev/nvme0n1', "testdisk",
|
|
|
|
1073741824, False)
|
|
|
|
self.assertRaises(errors.BlockDeviceEraseError,
|
|
|
|
self.hardware._nvme_erase, block_device)
|
|
|
|
|
2019-08-09 10:17:54 +02:00
|
|
|
|
|
|
|
@mock.patch.object(hardware.GenericHardwareManager,
                   'get_os_install_device', autospec=True)
@mock.patch.object(hardware, '_md_scan_and_assemble', autospec=True)
@mock.patch.object(hardware, '_check_for_iscsi', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
class TestEvaluateHardwareSupport(base.IronicAgentTest):
    """Tests for GenericHardwareManager.evaluate_hardware_support().

    The class-level patches are injected into every test method in
    bottom-up order: sleep, _check_for_iscsi, _md_scan_and_assemble,
    get_os_install_device.

    Note: ``disk_wait_attempts`` and ``disk_wait_delay`` are integer
    options, so ``CONF.set_override`` is called with int values —
    oslo.config enforces the option type on override and rejects strings.
    """

    def setUp(self):
        super(TestEvaluateHardwareSupport, self).setUp()
        self.hardware = hardware.GenericHardwareManager()

    def test_evaluate_hw_waits_for_disks(
            self, mocked_sleep, mocked_check_for_iscsi,
            mocked_md_assemble, mocked_get_inst_dev):
        # Root device appears on the second lookup attempt.
        mocked_get_inst_dev.side_effect = [
            errors.DeviceNotFound('boom'),
            None
        ]

        result = self.hardware.evaluate_hardware_support()

        self.assertTrue(mocked_check_for_iscsi.called)
        self.assertTrue(mocked_md_assemble.called)
        self.assertEqual(hardware.HardwareSupport.GENERIC, result)
        mocked_get_inst_dev.assert_called_with(mock.ANY)
        self.assertEqual(2, mocked_get_inst_dev.call_count)
        # Exactly one sleep between the two attempts.
        mocked_sleep.assert_called_once_with(CONF.disk_wait_delay)

    @mock.patch.object(hardware, 'LOG', autospec=True)
    def test_evaluate_hw_no_wait_for_disks(
            self, mocked_log, mocked_sleep, mocked_check_for_iscsi,
            mocked_md_assemble, mocked_get_inst_dev):
        # Zero attempts disables the disk-wait loop entirely.
        CONF.set_override('disk_wait_attempts', 0)

        result = self.hardware.evaluate_hardware_support()

        self.assertTrue(mocked_check_for_iscsi.called)
        self.assertEqual(hardware.HardwareSupport.GENERIC, result)
        self.assertFalse(mocked_get_inst_dev.called)
        self.assertFalse(mocked_sleep.called)
        self.assertFalse(mocked_log.called)

    @mock.patch.object(hardware, 'LOG', autospec=True)
    def test_evaluate_hw_waits_for_disks_nonconfigured(
            self, mocked_log, mocked_sleep, mocked_check_for_iscsi,
            mocked_md_assemble, mocked_get_inst_dev):
        # More failures than the default attempt budget (10): the loop
        # gives up after 10 lookups / 9 sleeps and logs a warning.
        mocked_get_inst_dev.side_effect = (
            [errors.DeviceNotFound('boom')] * 11 + [None])

        self.hardware.evaluate_hardware_support()

        mocked_get_inst_dev.assert_called_with(mock.ANY)
        self.assertEqual(10, mocked_get_inst_dev.call_count)
        expected_calls = [mock.call(CONF.disk_wait_delay)] * 9
        mocked_sleep.assert_has_calls(expected_calls)
        mocked_log.warning.assert_called_once_with(
            'The root device was not detected in %d seconds',
            CONF.disk_wait_delay * 9)

    @mock.patch.object(hardware, 'LOG', autospec=True)
    def test_evaluate_hw_waits_for_disks_configured(self, mocked_log,
                                                    mocked_sleep,
                                                    mocked_check_for_iscsi,
                                                    mocked_md_assemble,
                                                    mocked_get_inst_dev):
        # A single attempt: no sleeping, and the short warning message.
        CONF.set_override('disk_wait_attempts', 1)

        mocked_get_inst_dev.side_effect = [
            errors.DeviceNotFound('boom'),
            errors.DeviceNotFound('boom'),
            None
        ]

        self.hardware.evaluate_hardware_support()

        mocked_get_inst_dev.assert_called_with(mock.ANY)
        self.assertEqual(1, mocked_get_inst_dev.call_count)
        self.assertFalse(mocked_sleep.called)
        mocked_log.warning.assert_called_once_with(
            'The root device was not detected')

    def test_evaluate_hw_disks_timeout_unconfigured(self, mocked_sleep,
                                                    mocked_check_for_iscsi,
                                                    mocked_md_assemble,
                                                    mocked_get_inst_dev):
        mocked_get_inst_dev.side_effect = errors.DeviceNotFound('boom')

        self.hardware.evaluate_hardware_support()
        # Default disk_wait_delay is 3 seconds.
        mocked_sleep.assert_called_with(3)

    def test_evaluate_hw_disks_timeout_configured(self, mocked_sleep,
                                                  mocked_check_for_iscsi,
                                                  mocked_md_assemble,
                                                  mocked_root_dev):
        CONF.set_override('disk_wait_delay', 5)
        mocked_root_dev.side_effect = errors.DeviceNotFound('boom')

        self.hardware.evaluate_hardware_support()
        mocked_sleep.assert_called_with(5)

    def test_evaluate_hw_disks_timeout(
            self, mocked_sleep, mocked_check_for_iscsi,
            mocked_md_assemble, mocked_get_inst_dev):
        # Device never shows up: still GENERIC support, all attempts used.
        mocked_get_inst_dev.side_effect = errors.DeviceNotFound('boom')

        result = self.hardware.evaluate_hardware_support()
        self.assertEqual(hardware.HardwareSupport.GENERIC, result)
        mocked_get_inst_dev.assert_called_with(mock.ANY)
        self.assertEqual(CONF.disk_wait_attempts,
                         mocked_get_inst_dev.call_count)
        mocked_sleep.assert_called_with(CONF.disk_wait_delay)
|
2016-05-24 10:04:12 +02:00
|
|
|
|
2015-08-28 11:14:52 -07:00
|
|
|
|
2016-12-07 10:50:45 +00:00
|
|
|
@mock.patch.object(os, 'listdir', lambda *_: [])
@mock.patch.object(utils, 'execute', autospec=True)
class TestModuleFunctions(base.IronicAgentTest):
    """Tests for module-level helpers in ironic_python_agent.hardware.

    Every test receives ``mocked_execute`` (last positional mock) from
    the class-level patch of utils.execute.
    """

    @mock.patch.object(os, 'readlink', autospec=True)
    @mock.patch.object(hardware, '_get_device_info',
                       lambda x, y, z: 'FooTastic')
    @mock.patch.object(hardware, '_udev_settle', autospec=True)
    @mock.patch.object(hardware.pyudev.Devices, "from_device_file",
                       autospec=False)
    def test_list_all_block_devices_success(self, mocked_fromdevfile,
                                            mocked_udev, mocked_readlink,
                                            mocked_execute):
        mocked_readlink.return_value = '../../sda'
        mocked_fromdevfile.return_value = {}
        mocked_execute.return_value = (hws.BLK_DEVICE_TEMPLATE_SMALL, '')

        result = hardware.list_all_block_devices()

        mocked_execute.assert_called_once_with(
            'lsblk', '-Pbia', '-oKNAME,MODEL,SIZE,ROTA,TYPE,UUID,PARTUUID')
        self.assertEqual(BLK_DEVICE_TEMPLATE_SMALL_DEVICES, result)
        mocked_udev.assert_called_once_with()

    @mock.patch.object(os, 'readlink', autospec=True)
    @mock.patch.object(hardware, '_get_device_info',
                       lambda x, y, z: 'FooTastic')
    @mock.patch.object(hardware, '_udev_settle', autospec=True)
    @mock.patch.object(hardware.pyudev.Devices, "from_device_file",
                       autospec=False)
    def test_list_all_block_devices_success_raid(self, mocked_fromdevfile,
                                                 mocked_udev, mocked_readlink,
                                                 mocked_execute):
        mocked_readlink.return_value = '../../sda'
        mocked_fromdevfile.return_value = {}
        mocked_execute.return_value = (hws.RAID_BLK_DEVICE_TEMPLATE, '')

        # ignore_empty=False keeps zero-sized members of the RAID set.
        result = hardware.list_all_block_devices(ignore_empty=False)

        mocked_execute.assert_called_once_with(
            'lsblk', '-Pbia', '-oKNAME,MODEL,SIZE,ROTA,TYPE,UUID,PARTUUID')
        self.assertEqual(RAID_BLK_DEVICE_TEMPLATE_DEVICES, result)
        mocked_udev.assert_called_once_with()

    # NOTE: use the same three-argument lambda as the other
    # _get_device_info patches in this class for consistency with the
    # helper's real signature (the stub is never invoked here because
    # the device TYPE is not a recognized block type).
    @mock.patch.object(hardware, '_get_device_info',
                       lambda x, y, z: "FooTastic")
    @mock.patch.object(hardware, '_udev_settle', autospec=True)
    def test_list_all_block_devices_wrong_block_type(self, mocked_udev,
                                                     mocked_execute):
        mocked_execute.return_value = ('TYPE="foo" MODEL="model"', '')

        result = hardware.list_all_block_devices()

        mocked_execute.assert_called_once_with(
            'lsblk', '-Pbia', '-oKNAME,MODEL,SIZE,ROTA,TYPE,UUID,PARTUUID')
        self.assertEqual([], result)
        mocked_udev.assert_called_once_with()

    @mock.patch.object(hardware, '_udev_settle', autospec=True)
    def test_list_all_block_devices_missing(self, mocked_udev,
                                            mocked_execute):
        """Test for missing values returned from lsblk"""
        mocked_execute.return_value = ('TYPE="disk" MODEL="model"', '')
        self.assertRaisesRegex(
            errors.BlockDeviceError,
            r'^Block device caused unknown error: KNAME, PARTUUID, ROTA, '
            r'SIZE, UUID must be returned by lsblk.$',
            hardware.list_all_block_devices)
        mocked_udev.assert_called_once_with()

    def test__udev_settle(self, mocked_execute):
        hardware._udev_settle()
        mocked_execute.assert_called_once_with('udevadm', 'settle')

    def test__check_for_iscsi(self, mocked_execute):
        hardware._check_for_iscsi()
        mocked_execute.assert_has_calls([
            mock.call('iscsistart', '-f'),
            mock.call('iscsistart', '-b')])

    def test__check_for_iscsi_no_iscsi(self, mocked_execute):
        # When iscsistart is unavailable the helper swallows the error.
        mocked_execute.side_effect = processutils.ProcessExecutionError()
        hardware._check_for_iscsi()
        mocked_execute.assert_has_calls([
            mock.call('iscsistart', '-f')])
|
|
|
|
|
2016-04-08 14:10:52 -07:00
|
|
|
|
rework ATA secure erase
hdparm versions prior to 9.51 interpret the value, NULL, as a
password with string value: "NULL".
Example output of hdparm with NULL password:
[root@localhost ~]# hdparm --user-master u --security-unlock NULL /dev/sda
security_password="NULL"
/dev/sda:
Issuing SECURITY_UNLOCK command, password="NULL", user=user
SECURITY_UNLOCK: Input/output error
Example output of hdparm with "" as password:
[root@localhost ~]# hdparm --user-master u --security-unlock "" /dev/sda
security_password=""
/dev/sda:
Issuing SECURITY_UNLOCK command, password="", user=user
Note the values of security_password in the output above. The output
was observed on a CentOS 7 system, which ships hdparm 9.43 in the
offical repositories.
This change attempts to unlock the drive with the empty string if an
unlock with NULL was unsucessful.
Issuing a security-unlock will cause a state transition from SEC4
(security enabled, locked, not frozen) to SEC5 (security enabled,
unlocked, not frozen). In order to check that a password unlock attempt
was successful it makes sense to check that the drive is in the unlocked
state (a necessary condition for SEC5). Only after all unlock attempts
fail, do we consider the drive out of our control.
The conditions to check the drive is in the right state have been
adjusted to ensure that the drive is in the SEC5 state prior to issuing
a secure erase. Previously, on the "recovery from previous fail" path,
the security state was asserted to be "not enabled" after an unlock -
this could never have been the case.
A good overview of the ATA security states can be found here:
http://www.admin-magazine.com/Archive/2014/19/Using-the-ATA-security-features-of-modern-hard-disks-and-SSDs
Change-Id: Ic24b706a04ff6c08d750b9e3d79eb79eab2952ad
Story: 2001762
Task: 12161
Story: 2001763
Task: 12162
2018-05-10 21:53:44 +01:00
|
|
|
def create_hdparm_info(supported=False, enabled=False, locked=False,
                       frozen=False, enhanced_erase=False):
    """Render hdparm security-section output with the given flag states.

    Each keyword corresponds to one line of hdparm's security report;
    a False flag prefixes the line's value with 'not', mirroring
    hdparm's "not<TAB>supported" style of output.

    :returns: hws.HDPARM_INFO_TEMPLATE interpolated with the flag values.
    """
    values = {
        'supported': '\tsupported',
        'enabled': '\tenabled',
        'locked': '\tlocked',
        'frozen': '\tfrozen',
        'enhanced_erase': '\tsupported: enhanced erase',
    }
    states = {
        'supported': supported,
        'enabled': enabled,
        'locked': locked,
        'frozen': frozen,
        'enhanced_erase': enhanced_erase,
    }
    for key, state in states.items():
        if not state:
            values[key] = 'not' + values[key]

    return hws.HDPARM_INFO_TEMPLATE % values
|
2019-12-12 17:13:08 +00:00
|
|
|
|
|
|
|
|
|
|
|
@mock.patch('ironic_python_agent.hardware.dispatch_to_all_managers',
            autospec=True)
class TestVersions(base.IronicAgentTest):
    """Tests for hardware-manager version collection and checking."""

    version = {'generic': '1', 'specific': '1'}

    def test_get_current_versions(self, mock_dispatch):
        mock_dispatch.return_value = {
            'SpecificHardwareManager': {'name': 'specific', 'version': '1'},
            'GenericHardwareManager': {'name': 'generic', 'version': '1'},
        }
        self.assertEqual(self.version, hardware.get_current_versions())

    def test_check_versions(self, mock_dispatch):
        # A stored version keyed on an unknown manager must not match.
        mock_dispatch.return_value = {
            'SpecificHardwareManager': {'name': 'specific', 'version': '1'},
        }
        self.assertRaises(errors.VersionMismatch,
                          hardware.check_versions,
                          {'not_specific': '1'})
|
2020-07-02 10:14:02 -07:00
|
|
|
|
|
|
|
|
|
|
|
@mock.patch('ironic_python_agent.hardware.dispatch_to_managers',
            autospec=True)
class TestListHardwareInfo(base.IronicAgentTest):
    """Tests for the module-level hardware-info cache."""

    def test_caching(self, mock_dispatch):
        fake_info = {'I am': 'hardware'}
        mock_dispatch.return_value = fake_info

        # Two cached reads dispatch to managers only once.
        self.assertEqual(fake_info, hardware.list_hardware_info())
        self.assertEqual(fake_info, hardware.list_hardware_info())
        mock_dispatch.assert_called_once_with('list_hardware_info')

        # use_cache=False forces a refresh; the next read reuses it.
        self.assertEqual(fake_info,
                         hardware.list_hardware_info(use_cache=False))
        self.assertEqual(fake_info, hardware.list_hardware_info())
        mock_dispatch.assert_called_with('list_hardware_info')
        self.assertEqual(2, mock_dispatch.call_count)
|
2020-08-19 18:44:39 -07:00
|
|
|
|
|
|
|
|
|
|
|
class TestAPIClientSaveAndUse(base.IronicAgentTest):
|
|
|
|
|
|
|
|
def test_save_api_client(self):
|
|
|
|
hardware.API_CLIENT = None
|
|
|
|
mock_api_client = mock.Mock()
|
|
|
|
hardware.save_api_client(mock_api_client, 1, 2)
|
|
|
|
self.assertEqual(mock_api_client, hardware.API_CLIENT)
|
|
|
|
|
|
|
|
@mock.patch('ironic_python_agent.hardware.dispatch_to_managers',
|
|
|
|
autospec=True)
|
|
|
|
@mock.patch.object(hardware, 'get_cached_node', autospec=True)
|
|
|
|
def test_update_node_cache(self, mock_cached_node, mock_dispatch):
|
|
|
|
mock_cached_node.return_value = {'uuid': 'node1'}
|
|
|
|
updated_node = {'uuid': 'node1', 'other': 'key'}
|
|
|
|
hardware.API_CLIENT = None
|
|
|
|
mock_api_client = mock.Mock()
|
|
|
|
hardware.save_api_client(mock_api_client, 1, 2)
|
|
|
|
mock_api_client.lookup_node.return_value = {'node': updated_node}
|
|
|
|
self.assertEqual(updated_node, hardware.update_cached_node())
|
|
|
|
mock_api_client.lookup_node.assert_called_with(
|
|
|
|
hardware_info=mock.ANY,
|
|
|
|
timeout=1,
|
|
|
|
starting_interval=2,
|
2020-09-09 15:14:46 -07:00
|
|
|
node_uuid='node1')
|
2020-08-19 18:44:39 -07:00
|
|
|
self.assertEqual(updated_node, hardware.NODE)
|
|
|
|
calls = [mock.call('list_hardware_info'),
|
|
|
|
mock.call('wait_for_disks')]
|
|
|
|
mock_dispatch.assert_has_calls(calls)
|