Test software RAID at deploy time on Victoria and newer

Adds a new feature flag to enable building the RAID at deploy time instead of at clean time.

Make the standalone-redfish job voting since it covers some
of the tests that are not covered by the ipmi one.

Story: #2006963
Task: #40462
Change-Id: Id2e376e38771700d295f632e84ab3e32710d8180
This commit is contained in:
Dmitry Tantsur 2020-05-07 12:01:56 +02:00
parent 875c77df68
commit 0cc7aa92cc
4 changed files with 55 additions and 28 deletions

View File

@ -170,6 +170,10 @@ BaremetalFeaturesGroup = [
help="Defines if software RAID is enabled (available " help="Defines if software RAID is enabled (available "
"starting with Train). Requires at least two disks " "starting with Train). Requires at least two disks "
"on testing nodes."), "on testing nodes."),
cfg.BoolOpt('deploy_time_raid',
default=False,
help="Defines if in-band RAID can be built in deploy time "
"(possible starting with Victoria)."),
] ]
BaremetalIntrospectionGroup = [ BaremetalIntrospectionGroup = [

View File

@ -571,32 +571,53 @@ class BaremetalStandaloneScenarioTest(BaremetalStandaloneManager):
self.assertTrue(self.ping_ip_address(self.node_ip, self.assertTrue(self.ping_ip_address(self.node_ip,
should_succeed=should_succeed)) should_succeed=should_succeed))
def build_raid_and_verify_node(self, config=None, clean_steps=None): def build_raid_and_verify_node(self, config=None, deploy_time=False):
config = config or self.raid_config config = config or self.raid_config
clean_steps = clean_steps or [ if deploy_time:
{ steps = [
"interface": "raid", {
"step": "delete_configuration" "interface": "deploy",
}, "step": "erase_devices_metadata",
# NOTE(dtantsur): software RAID building fails if any "priority": 98,
# partitions exist on holder devices. "args": {},
{ },
"interface": "deploy", {
"step": "erase_devices_metadata" "interface": "raid",
}, "step": "apply_configuration",
{ "priority": 97,
"interface": "raid", "args": {"raid_config": config},
"step": "create_configuration" }
} ]
] self.baremetal_client.create_deploy_template(
'CUSTOM_RAID', steps=steps)
self.baremetal_client.set_node_raid_config(self.node['uuid'], config) self.baremetal_client.add_node_trait(self.node['uuid'],
self.manual_cleaning(self.node, clean_steps=clean_steps) 'CUSTOM_RAID')
else:
steps = [
{
"interface": "raid",
"step": "delete_configuration"
},
{
"interface": "deploy",
"step": "erase_devices_metadata",
},
{
"interface": "raid",
"step": "create_configuration",
}
]
self.baremetal_client.set_node_raid_config(self.node['uuid'],
config)
self.manual_cleaning(self.node, clean_steps=steps)
# NOTE(dtantsur): this is not required, but it allows us to check that # NOTE(dtantsur): this is not required, but it allows us to check that
# the RAID device was in fact created and is used for deployment. # the RAID device was in fact created and is used for deployment.
patch = [{'path': '/properties/root_device', patch = [{'path': '/properties/root_device',
'op': 'add', 'value': {'name': '/dev/md0'}}] 'op': 'add', 'value': {'name': '/dev/md0'}}]
if deploy_time:
patch.append({'path': '/instance_info/traits',
'op': 'add', 'value': ['CUSTOM_RAID']})
self.update_node(self.node['uuid'], patch=patch) self.update_node(self.node['uuid'], patch=patch)
# NOTE(dtantsur): apparently cirros cannot boot from md devices :( # NOTE(dtantsur): apparently cirros cannot boot from md devices :(
# So we only move the node to active (verifying deployment). # So we only move the node to active (verifying deployment).

View File

@ -87,7 +87,7 @@ class SoftwareRaidIscsi(bsm.BaremetalStandaloneScenarioTest):
wholedisk_image = True wholedisk_image = True
deploy_interface = 'iscsi' deploy_interface = 'iscsi'
raid_interface = 'agent' raid_interface = 'agent'
api_microversion = '1.31' api_microversion = '1.55'
# Software RAID is always local boot # Software RAID is always local boot
boot_option = 'local' boot_option = 'local'
@ -112,7 +112,8 @@ class SoftwareRaidIscsi(bsm.BaremetalStandaloneScenarioTest):
@decorators.idempotent_id('7ecba4f7-98b8-4ea1-b95e-3ec399f46798') @decorators.idempotent_id('7ecba4f7-98b8-4ea1-b95e-3ec399f46798')
@utils.services('image', 'network') @utils.services('image', 'network')
def test_software_raid(self): def test_software_raid(self):
self.build_raid_and_verify_node() self.build_raid_and_verify_node(
deploy_time=CONF.baremetal_feature_enabled.deploy_time_raid)
# NOTE(TheJulia): tearing down/terminating the instance does not # NOTE(TheJulia): tearing down/terminating the instance does not
# remove the root device hint, so it is best for us to go ahead # remove the root device hint, so it is best for us to go ahead
# and remove it before exiting the test. # and remove it before exiting the test.
@ -134,7 +135,7 @@ class SoftwareRaidDirect(bsm.BaremetalStandaloneScenarioTest):
wholedisk_image = True wholedisk_image = True
deploy_interface = 'direct' deploy_interface = 'direct'
raid_interface = 'agent' raid_interface = 'agent'
api_microversion = '1.31' api_microversion = '1.55'
# Software RAID is always local boot # Software RAID is always local boot
boot_option = 'local' boot_option = 'local'
@ -160,7 +161,8 @@ class SoftwareRaidDirect(bsm.BaremetalStandaloneScenarioTest):
@decorators.idempotent_id('125361ac-0eb3-4d79-8be2-a91936aa3f46') @decorators.idempotent_id('125361ac-0eb3-4d79-8be2-a91936aa3f46')
@utils.services('image', 'network') @utils.services('image', 'network')
def test_software_raid(self): def test_software_raid(self):
self.build_raid_and_verify_node() self.build_raid_and_verify_node(
deploy_time=CONF.baremetal_feature_enabled.deploy_time_raid)
# NOTE(TheJulia): tearing down/terminating the instance does not # NOTE(TheJulia): tearing down/terminating the instance does not
# remove the root device hint, so it is best for us to go ahead # remove the root device hint, so it is best for us to go ahead
# and remove it before exiting the test. # and remove it before exiting the test.

View File

@ -21,16 +21,15 @@
- ironic-inspector-tempest-train - ironic-inspector-tempest-train
- ironic-inspector-tempest-stein: - ironic-inspector-tempest-stein:
voting: false voting: false
# NOTE(dtantsur): these jobs cover rarely changed tests and are quite - ironic-standalone-redfish
# unstable, so keep them non-voting.
- ironic-standalone-redfish:
voting: false
- ironic-standalone-redfish-ussuri: - ironic-standalone-redfish-ussuri:
voting: false voting: false
- ironic-standalone-redfish-train: - ironic-standalone-redfish-train:
voting: false voting: false
- ironic-standalone-redfish-stein: - ironic-standalone-redfish-stein:
voting: false voting: false
# NOTE(dtantsur): these jobs cover rarely changed tests and are quite
# unstable, so keep them non-voting.
- ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode: - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode:
voting: false voting: false
- ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode-ussuri: - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode-ussuri:
@ -58,4 +57,5 @@
- ironic-inspector-tempest - ironic-inspector-tempest
- ironic-inspector-tempest-ussuri - ironic-inspector-tempest-ussuri
- ironic-inspector-tempest-train - ironic-inspector-tempest-train
- ironic-standalone-redfish
- ironic-inspector-tempest-discovery - ironic-inspector-tempest-discovery