Add Ironic standalone tests

This patch adds the following standalone tests:

  * agent_ipmitool + wholedisk image + bios
  * agent_ipmitool + partitioned image + bios
  * pxe_ipmitool + wholedisk image + bios
  * pxe_ipmitool + partitioned image + bios

Partial-Bug: #1660606

Change-Id: Ic04b0f134e20d9937a610a14d7c4128f45738eeb
Vasyl Saienko 2017-01-20 16:26:04 +00:00
parent 632f082b2c
commit e48375538c
11 changed files with 520 additions and 20 deletions
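For orientation, the four scenarios listed in the commit message map onto the new test classes added at the end of this change. A purely illustrative summary structure (the class and driver names are taken from the patch; the dict itself is not part of it):

    # Driver / image-type matrix covered by the new standalone scenarios.
    STANDALONE_TEST_MATRIX = {
        'BaremetalAgentIpmitoolWholedisk': ('agent_ipmitool', 'whole-disk'),
        'BaremetalAgentIpmitoolPartitioned': ('agent_ipmitool', 'partition'),
        'BaremetalPxeIpmitoolWholedisk': ('pxe_ipmitool', 'whole-disk'),
        'BaremetalPxeIpmitoolPartitioned': ('pxe_ipmitool', 'partition'),
    }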

View File

@@ -17,6 +17,7 @@ iptables
 ipxe
 gnupg
 libguestfs0
+libguestfs-tools
 libvirt-bin
 open-iscsi
 openssh-client

View File

@@ -4,6 +4,7 @@ iptables
 ipxe-bootimgs
 gnupg
 libguestfs
+libguestfs-tools
 libvirt
 libvirt-python
 net-tools

View File

@@ -567,10 +567,14 @@ else
     add_image_link http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img
 fi
+IRONIC_WHOLEDISK_IMAGE_NAME=${IRONIC_WHOLEDISK_IMAGE_NAME:-${IRONIC_IMAGE_NAME/-uec/-disk}}
+IRONIC_PARTITIONED_IMAGE_NAME=${IRONIC_PARTITIONED_IMAGE_NAME:-${IRONIC_IMAGE_NAME/-disk/-uec}}
 if [[ "$IRONIC_TEMPEST_WHOLE_DISK_IMAGE" == "True" ]]; then
-    IRONIC_IMAGE_NAME=${IRONIC_IMAGE_NAME/-uec/-disk}
+    IRONIC_IMAGE_NAME=$IRONIC_WHOLEDISK_IMAGE_NAME
 else
-    IRONIC_IMAGE_NAME=${IRONIC_IMAGE_NAME/-disk/-uec}
+    IRONIC_IMAGE_NAME=$IRONIC_PARTITIONED_IMAGE_NAME
 fi
 # NOTE(vsaienko) set DEFAULT_IMAGE_NAME here, as it is still used by grenade
@@ -656,7 +660,7 @@ function install_ironic {
     if [[ "$HOST_TOPOLOGY_ROLE" != "subnode" ]]; then
         # make sure all needed service were enabled
         local req_services="key"
-        if [[ "$VIRT_DRIVER" == "ironic" ]]; then
+        if is_service_enabled nova && [[ "$VIRT_DRIVER" == "ironic" ]]; then
             req_services+=" nova glance neutron"
         fi
         for srv in $req_services; do
@@ -1996,6 +2000,21 @@ function ironic_configure_tempest {
     iniset $TEMPEST_CONFIG compute image_ref $image_uuid
     iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid
+    image_uuid=$(openstack image show $IRONIC_WHOLEDISK_IMAGE_NAME -f value -c id)
+    iniset $TEMPEST_CONFIG baremetal whole_disk_image_ref $image_uuid
+    image_uuid=$(openstack image show $IRONIC_PARTITIONED_IMAGE_NAME -f value -c id)
+    iniset $TEMPEST_CONFIG baremetal partition_image_ref $image_uuid
+    iniset $TEMPEST_CONFIG baremetal enabled_drivers $IRONIC_ENABLED_DRIVERS
+    iniset $TEMPEST_CONFIG baremetal enabled_hardware_types $IRONIC_ENABLED_HARDWARE_TYPES
+    local adjusted_root_disk_size_gb
+    if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then
+        adjusted_root_disk_size_gb=$(( ${IRONIC_VM_SPECS_DISK} - ${IRONIC_VM_EPHEMERAL_DISK} ))
+    else
+        adjusted_root_disk_size_gb=$(( ${IRONIC_HW_NODE_DISK} - ${IRONIC_HW_EPHEMERAL_DISK} ))
+    fi
+    iniset $TEMPEST_CONFIG baremetal adjusted_root_disk_size_gb $adjusted_root_disk_size_gb
 if [[ -n "${IRONIC_TEMPEST_BUILD_TIMEOUT}" ]]; then
     iniset $TEMPEST_CONFIG baremetal unprovision_timeout $IRONIC_TEMPEST_BUILD_TIMEOUT
     iniset $TEMPEST_CONFIG baremetal active_timeout $IRONIC_TEMPEST_BUILD_TIMEOUT
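The devstack hunk above derives the root partition size that the scenario tests later pass as instance_info/root_gb: node disk minus ephemeral disk, in GiB. A minimal Python sketch of that arithmetic, with illustrative numbers rather than devstack defaults:

    def adjusted_root_disk_size_gb(node_disk_gb, ephemeral_disk_gb):
        # Mirrors the shell arithmetic above: leave room for the ephemeral
        # partition and hand the remainder to instance_info/root_gb.
        return node_disk_gb - ephemeral_disk_gb

    # e.g. a 10 GiB test VM with a 1 GiB ephemeral partition
    assert adjusted_root_disk_size_gb(10, 1) == 9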

View File

@@ -83,5 +83,19 @@ BaremetalGroup = [
                     "require a microversion."),
     cfg.BoolOpt('use_provision_network',
                 default=False,
-                help="Whether the Ironic/Neutron tenant isolation is enabled")
+                help="Whether the Ironic/Neutron tenant isolation is enabled"),
+    cfg.StrOpt('whole_disk_image_ref',
+               help="UUID of the wholedisk image to use in the tests."),
+    cfg.StrOpt('partition_image_ref',
+               help="UUID of the partitioned image to use in the tests."),
+    cfg.ListOpt('enabled_drivers',
+                default=['fake', 'pxe_ipmitool', 'agent_ipmitool'],
+                help="List of Ironic enabled drivers."),
+    cfg.ListOpt('enabled_hardware_types',
+                default=['ipmi'],
+                help="List of Ironic enabled hardware types."),
+    cfg.IntOpt('adjusted_root_disk_size_gb',
+               min=0,
+               help="Ironic adjusted disk size to use in the standalone tests "
+                    "as instance_info/root_gb value."),
 ]
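These options are what the new scenario classes read at runtime. A hedged sketch of how they are consumed (the option names come from this patch; the standalone snippet itself is only illustrative):

    from tempest import config

    CONF = config.CONF

    # Image to deploy, depending on the scenario flavour.
    image_ref = CONF.baremetal.whole_disk_image_ref  # or partition_image_ref

    # A test's driver must appear in one of these lists or the test is skipped.
    allowed = (CONF.baremetal.enabled_drivers +
               CONF.baremetal.enabled_hardware_types)

    # Passed through to the node as instance_info/root_gb.
    root_gb = CONF.baremetal.adjusted_root_disk_size_gb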

View File

@@ -18,9 +18,21 @@ from six.moves.urllib import parse as urllib
 from tempest.lib.common import api_version_utils
 from tempest.lib.common import rest_client
 
+# NOTE(vsaienko): concurrent tests work because they are launched in
+# separate processes so global variables are not shared among them.
 BAREMETAL_MICROVERSION = None
 
 
+def set_baremetal_api_microversion(baremetal_microversion):
+    global BAREMETAL_MICROVERSION
+    BAREMETAL_MICROVERSION = baremetal_microversion
+
+
+def reset_baremetal_api_microversion():
+    global BAREMETAL_MICROVERSION
+    BAREMETAL_MICROVERSION = None
+
+
 def handle_errors(f):
     """A decorator that allows to ignore certain types of errors."""

View File

@@ -215,10 +215,14 @@ class BaremetalClient(base.BaremetalClient):
         return self._delete_request('ports', uuid)
 
     @base.handle_errors
-    def update_node(self, uuid, **kwargs):
+    def update_node(self, uuid, patch=None, **kwargs):
         """Update the specified node.
 
         :param uuid: The unique identifier of the node.
+        :param patch: A JSON path that sets values of the specified attributes
+                      to the new ones.
+        :param **kwargs: Attributes and new values for them, used only when
+                         patch param is not set.
         :return: A tuple with the server response and the updated node.
 
         """
@@ -228,8 +232,8 @@ class BaremetalClient(base.BaremetalClient):
                            'properties/memory_mb',
                            'driver',
                            'instance_uuid')
-
-        patch = self._make_patch(node_attributes, **kwargs)
+        if not patch:
+            patch = self._make_patch(node_attributes, **kwargs)
 
         return self._patch_request('nodes', uuid, patch)
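Call shapes for the updated method, for illustration only (client construction is omitted): keyword arguments still build a patch implicitly, while the new patch argument lets callers pass an explicit JSON-patch list, as the standalone manager later in this change does for instance_info. The image URL below is an example value.

    patch = [{'path': '/instance_info/image_source',
              'op': 'add',
              'value': 'http://example.com/my-user-image.qcow2'}]

    # client.update_node(node_uuid, driver='agent_ipmitool')  # kwargs form
    # client.update_node(node_uuid, patch=patch)              # explicit patch form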
@@ -271,7 +275,8 @@ class BaremetalClient(base.BaremetalClient):
                                   target)
 
     @base.handle_errors
-    def set_node_provision_state(self, node_uuid, state, configdrive=None):
+    def set_node_provision_state(self, node_uuid, state, configdrive=None,
+                                 clean_steps=None):
         """Set provision state of the specified node.
 
         :param node_uuid: The unique identifier of the node.
@@ -279,8 +284,15 @@ class BaremetalClient(base.BaremetalClient):
             (active/rebuild/deleted/inspect/manage/provide).
         :param configdrive: A gzipped, base64-encoded
             configuration drive string.
+        :param clean_steps: A list with clean steps to execute.
 
         """
-        data = {'target': state, 'configdrive': configdrive}
+        data = {'target': state}
+        # NOTE (vsaienk0): Add both here if specified, do not check anything.
+        # API will return an error in case of invalid parameters.
+        if configdrive is not None:
+            data['configdrive'] = configdrive
+        if clean_steps is not None:
+            data['clean_steps'] = clean_steps
 
         return self._put_request('nodes/%s/states/provision' % node_uuid,
                                  data)
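For illustration, the kind of request body the updated method now builds; the clean step shown is a common Ironic deploy-interface step used here only as an example, not something this patch defines:

    # Deploy with a config drive: only keys that were actually passed are sent.
    data_deploy = {'target': 'active', 'configdrive': '<gzipped, base64 blob>'}

    # Manual cleaning with explicit steps; invalid combinations are left for
    # the Ironic API to reject, as the NOTE above says.
    data_clean = {'target': 'clean',
                  'clean_steps': [{'interface': 'deploy',
                                   'step': 'erase_devices_metadata'}]}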

View File

@@ -22,8 +22,5 @@ class APIMicroversionFixture(fixtures.Fixture):
 
     def _setUp(self):
         super(APIMicroversionFixture, self)._setUp()
-        base.BAREMETAL_MICROVERSION = self.baremetal_microversion
-        self.addCleanup(self._reset_compute_microversion)
-
-    def _reset_compute_microversion(self):
-        base.BAREMETAL_MICROVERSION = None
+        base.set_baremetal_api_microversion(self.baremetal_microversion)
+        self.addCleanup(base.reset_baremetal_api_microversion)
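Outside the fixture, the new module-level helpers can also be called directly, which is what the standalone scenario class later in this patch does in resource_setup/resource_cleanup. A minimal sketch, assuming the plugin is importable; 1.28 matches the VIF-attach microversion used by the standalone tests:

    from ironic_tempest_plugin.services.baremetal import base

    base.set_baremetal_api_microversion('1.28')
    try:
        pass  # issue baremetal client calls that need the pinned microversion
    finally:
        base.reset_baremetal_api_microversion()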

View File

@@ -14,8 +14,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import time
+
 from tempest.common import waiters
 from tempest import config
+from tempest.lib.common import api_version_utils
+from tempest.lib import exceptions as lib_exc
 from tempest.scenario import manager  # noqa
 
 from ironic_tempest_plugin import clients
@@ -25,6 +29,21 @@ from ironic_tempest_plugin.common import waiters as ironic_waiters
 CONF = config.CONF
 
 
+def retry_on_conflict(func):
+    def inner(*args, **kwargs):
+        # TODO(vsaienko): make number of retries and delay between
+        # them configurable in future.
+        e = None
+        for att in range(10):
+            try:
+                return func(*args, **kwargs)
+            except lib_exc.Conflict as e:
+                time.sleep(1)
+        raise lib_exc.Conflict(e)
+    return inner
+
+
 # power/provision states as of icehouse
 class BaremetalPowerStates(object):
     """Possible power states of an Ironic node."""
@@ -49,17 +68,26 @@ class BaremetalProvisionStates(object):
     DELETING = 'deleting'
     DELETED = 'deleted'
     ERROR = 'error'
+    MANAGEABLE = 'manageable'
 
 
 class BaremetalScenarioTest(manager.ScenarioTest):
 
     credentials = ['primary', 'admin']
+    min_microversion = None
+    max_microversion = api_version_utils.LATEST_MICROVERSION
 
     @classmethod
     def skip_checks(cls):
         super(BaremetalScenarioTest, cls).skip_checks()
         if not CONF.service_available.ironic:
             raise cls.skipException('Ironic is not enabled.')
+        cfg_min_version = CONF.baremetal.min_microversion
+        cfg_max_version = CONF.baremetal.max_microversion
+        api_version_utils.check_skip_with_microversion(cls.min_microversion,
+                                                       cls.max_microversion,
+                                                       cfg_min_version,
+                                                       cfg_max_version)
 
     @classmethod
     def setup_clients(cls):
@@ -73,14 +101,16 @@ class BaremetalScenarioTest(manager.ScenarioTest):
         # allow any issues obtaining the node list to raise early
         cls.baremetal_client.list_nodes()
 
-    def wait_provisioning_state(self, node_id, state, timeout=10, interval=1):
+    @classmethod
+    def wait_provisioning_state(cls, node_id, state, timeout=10, interval=1):
         ironic_waiters.wait_for_bm_node_status(
-            self.baremetal_client, node_id=node_id, attr='provision_state',
+            cls.baremetal_client, node_id=node_id, attr='provision_state',
             status=state, timeout=timeout, interval=interval)
 
-    def wait_power_state(self, node_id, state):
+    @classmethod
+    def wait_power_state(cls, node_id, state):
         ironic_waiters.wait_for_bm_node_status(
-            self.baremetal_client, node_id=node_id, attr='power_state',
+            cls.baremetal_client, node_id=node_id, attr='power_state',
             status=state, timeout=CONF.baremetal.power_timeout)
 
     def wait_node(self, instance_id):
@@ -88,8 +118,9 @@ class BaremetalScenarioTest(manager.ScenarioTest):
         ironic_waiters.wait_node_instance_association(self.baremetal_client,
                                                       instance_id)
 
-    def get_node(self, node_id=None, instance_id=None):
-        return utils.get_node(self.baremetal_client, node_id, instance_id)
+    @classmethod
+    def get_node(cls, node_id=None, instance_id=None):
+        return utils.get_node(cls.baremetal_client, node_id, instance_id)
 
     def get_ports(self, node_uuid):
         ports = []
@@ -107,6 +138,25 @@ class BaremetalScenarioTest(manager.ScenarioTest):
     def add_keypair(self):
         self.keypair = self.create_keypair()
 
+    @classmethod
+    @retry_on_conflict
+    def update_node_driver(cls, node_id, driver):
+        _, body = cls.baremetal_client.update_node(
+            node_id, driver=driver)
+        return body
+
+    @classmethod
+    @retry_on_conflict
+    def update_node(cls, node_id, patch):
+        cls.baremetal_client.update_node(node_id, patch=patch)
+
+    @classmethod
+    @retry_on_conflict
+    def set_node_provision_state(cls, node_id, state, configdrive=None,
+                                 clean_steps=None):
+        cls.baremetal_client.set_node_provision_state(
+            node_id, state, configdrive=configdrive, clean_steps=clean_steps)
+
     def verify_connectivity(self, ip=None):
         if ip:
             dest = self.get_remote_client(ip)
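A brief usage sketch of the retry_on_conflict decorator introduced earlier in this file and applied to the classmethods above (it assumes that decorator is in scope; the decorated function here is a stand-in for the baremetal client calls the tests wrap):

    @retry_on_conflict
    def reserve_example(node_id):
        # In the real tests this wraps client calls that can return HTTP 409
        # while another process holds the node lock; each Conflict is retried
        # after a one second sleep, up to ten attempts.
        return node_id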

View File

@@ -0,0 +1,324 @@
#
# Copyright 2017 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random

from oslo_utils import uuidutils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
from tempest.scenario import manager

from ironic_tempest_plugin.services.baremetal import base
from ironic_tempest_plugin.tests.scenario import baremetal_manager as bm

CONF = config.CONF


class BaremetalStandaloneManager(bm.BaremetalScenarioTest,
                                 manager.NetworkScenarioTest):

    credentials = ['primary', 'admin']
    # NOTE(vsaienko): Standalone tests are using v1/node/<node_ident>/vifs to
    # attach VIF to a node.
    min_microversion = '1.28'

    @classmethod
    def skip_checks(cls):
        """Defines conditions to skip these tests."""
        super(BaremetalStandaloneManager, cls).skip_checks()
        if CONF.service_available.nova:
            raise cls.skipException('Nova is enabled. Stand-alone tests will '
                                    'be skipped.')

    @classmethod
    def create_networks(cls):
        """Create a network with a subnet connected to a router.

        Return existed network specified in compute/fixed_network_name
        config option.
        TODO(vsaienko): Add network/subnet/router when we setup
        ironic-standalone with multitenancy.

        :returns: network, subnet, router
        """
        network = None
        subnet = None
        router = None
        if CONF.network.shared_physical_network:
            if not CONF.compute.fixed_network_name:
                m = ('Configuration option "[compute]/fixed_network_name" '
                     'must be set.')
                raise lib_exc.InvalidConfiguration(m)
            network = cls.admin_manager.networks_client.list_networks(
                name=CONF.compute.fixed_network_name)['networks'][0]
        return network, subnet, router

    @classmethod
    def get_available_nodes(cls):
        """Get all ironic nodes that can be deployed.

        We can deploy on nodes when the following conditions are met:
            * provision_state is 'available'
            * maintenance is False
            * No instance_uuid is associated to node.

        :returns: a list of Ironic nodes.
        """
        fields = ['uuid', 'driver', 'instance_uuid', 'provision_state',
                  'name', 'maintenance']
        _, body = cls.baremetal_client.list_nodes(provision_state='available',
                                                  associated=False,
                                                  maintenance=False,
                                                  fields=','.join(fields))
        return body['nodes']

    @classmethod
    def get_random_available_node(cls):
        """Randomly pick an available node for deployment."""
        nodes = cls.get_available_nodes()
        if nodes:
            return random.choice(nodes)

    @classmethod
    def create_neutron_port(cls, *args, **kwargs):
        """Creates a neutron port.

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/networking/v2/index.html#create-port

        :returns: server response body.
        """
        port = cls.ports_client.create_port(*args, **kwargs)['port']
        return port

    @classmethod
    def _associate_instance_with_node(cls, node_id, instance_uuid):
        """Update instance_uuid for a given node.

        :param node_id: Name or UUID of the node.
        :param instance_uuid: UUID of the instance to associate.
        :returns: server response body.
        """
        _, body = cls.baremetal_client.update_node(
            node_id, instance_uuid=instance_uuid)
        return body

    @classmethod
    def get_node_vifs(cls, node_id):
        """Return a list of VIFs for a given node.

        :param node_id: Name or UUID of the node.
        :returns: A list of VIFs associated with the node.
        """
        _, body = cls.baremetal_client.vif_list(node_id)
        vifs = [v['id'] for v in body['vifs']]
        return vifs

    @classmethod
    def add_floatingip_to_node(cls, node_id):
        """Add floating IP to node.

        Create and associate floating IP with node VIF.

        :param node_id: Name or UUID of the node.
        :returns: IP address of associated floating IP.
        """
        vif = cls.get_node_vifs(node_id)[0]
        body = cls.floating_ips_client.create_floatingip(
            floating_network_id=CONF.network.public_network_id)
        floating_ip = body['floatingip']
        cls.floating_ips_client.update_floatingip(floating_ip['id'],
                                                  port_id=vif)
        return floating_ip['floating_ip_address']

    @classmethod
    def cleanup_floating_ip(cls, ip_address):
        """Removes floating IP."""
        body = cls.admin_manager.floating_ips_client.list_floatingips()
        floating_ip_id = [f['id'] for f in body['floatingips'] if
                          f['floating_ip_address'] == ip_address][0]
        cls.admin_manager.floating_ips_client.delete_floatingip(floating_ip_id)

    @classmethod
    @bm.retry_on_conflict
    def detach_all_vifs_from_node(cls, node_id):
        """Detach all VIFs from a given node.

        :param node_id: Name or UUID of the node.
        """
        vifs = cls.get_node_vifs(node_id)
        for vif in vifs:
            cls.baremetal_client.vif_detach(node_id, vif)

    @classmethod
    @bm.retry_on_conflict
    def vif_attach(cls, node_id, vif_id):
        """Attach VIF to a give node.

        :param node_id: Name or UUID of the node.
        :param vif_id: Identifier of the VIF to attach.
        """
        cls.baremetal_client.vif_attach(node_id, vif_id)

    @classmethod
    def get_and_reserve_node(cls, node=None):
        """Pick an available node for deployment and reserve it.

        Only one instance_uuid may be associated, use this behaviour as
        reservation node when tests are launched concurrently. If node is
        not passed directly pick random available for deployment node.

        :param node: Ironic node to associate instance_uuid with.
        :returns: Ironic node.
        """
        instance_uuid = uuidutils.generate_uuid()
        nodes = []

        def _try_to_associate_instance():
            n = node or cls.get_random_available_node()
            try:
                cls._associate_instance_with_node(n['uuid'], instance_uuid)
                nodes.append(n)
            except lib_exc.Conflict:
                return False
            return True

        if (not test_utils.call_until_true(_try_to_associate_instance,
                duration=CONF.baremetal.association_timeout, sleep_for=1)):
            msg = ('Timed out waiting to associate instance to ironic node '
                   'uuid %s' % instance_uuid)
            raise lib_exc.TimeoutException(msg)

        return nodes[0]

    @classmethod
    def boot_node(cls, driver, image_ref):
        """Boot ironic node.

        The following actions are executed:
            * Randomly pick an available node for deployment and reserve it.
            * Update node driver.
            * Create/Pick networks to boot node in.
            * Create Neutron port and attach it to node.
            * Update node image_source/root_gb.
            * Deploy node.
            * Wait until node is deployed.

        :param driver: Node driver to use.
        :param image_ref: Reference to user image to boot node with.
        :returns: Ironic node.
        """
        node = cls.get_and_reserve_node()
        cls.update_node_driver(node['uuid'], driver)
        network, subnet, router = cls.create_networks()
        n_port = cls.create_neutron_port(network_id=network['id'])
        cls.vif_attach(node_id=node['uuid'], vif_id=n_port['id'])
        patch = [{'path': '/instance_info/image_source',
                  'op': 'add',
                  'value': image_ref}]
        patch.append({'path': '/instance_info/root_gb',
                      'op': 'add',
                      'value': CONF.baremetal.adjusted_root_disk_size_gb})
        # TODO(vsaienko) add testing for custom configdrive
        cls.update_node(node['uuid'], patch=patch)
        cls.set_node_provision_state(node['uuid'], 'active')
        cls.wait_power_state(node['uuid'], bm.BaremetalPowerStates.POWER_ON)
        cls.wait_provisioning_state(node['uuid'],
                                    bm.BaremetalProvisionStates.ACTIVE,
                                    timeout=CONF.baremetal.active_timeout,
                                    interval=30)
        return node

    @classmethod
    def terminate_node(cls, node_id):
        """Terminate active ironic node.

        The following actions are executed:
            * Detach all VIFs from the given node.
            * Unprovision node.
            * Wait until node become available.

        :param node_id: Name or UUID for the node.
        """
        cls.detach_all_vifs_from_node(node_id)
        cls.set_node_provision_state(node_id, 'deleted')
        # NOTE(vsaienko) We expect here fast switching from deleted to
        # available as automated cleaning is disabled so poll status each 1s.
        cls.wait_provisioning_state(
            node_id,
            [bm.BaremetalProvisionStates.NOSTATE,
             bm.BaremetalProvisionStates.AVAILABLE],
            timeout=CONF.baremetal.unprovision_timeout,
            interval=1)


class BaremetalStandaloneScenarioTest(BaremetalStandaloneManager):

    # API microversion to use among all calls
    api_microversion = '1.28'

    # The node driver to use in the test
    driver = None

    # User image ref to boot node with.
    image_ref = None

    # Boolean value specify if image is wholedisk or not.
    wholedisk_image = None

    mandatory_attr = ['driver', 'image_ref', 'wholedisk_image']

    node = None
    node_ip = None

    @classmethod
    def skip_checks(cls):
        super(BaremetalStandaloneScenarioTest, cls).skip_checks()
        if (cls.driver not in CONF.baremetal.enabled_drivers +
                CONF.baremetal.enabled_hardware_types):
            raise cls.skipException(
                'The driver: %(driver)s used in test is not in the list of '
                'enabled_drivers %(enabled_drivers)s or '
                'enabled_hardware_types %(enabled_hw_types)s '
                'in the tempest config.' % {
                    'driver': cls.driver,
                    'enabled_drivers': CONF.baremetal.enabled_drivers,
                    'enabled_hw_types': CONF.baremetal.enabled_hardware_types})
        if not cls.wholedisk_image and CONF.baremetal.use_provision_network:
            raise cls.skipException(
                'Partitioned images are not supported with multitenancy.')

    @classmethod
    def resource_setup(cls):
        super(BaremetalStandaloneScenarioTest, cls).resource_setup()
        base.set_baremetal_api_microversion(cls.api_microversion)
        for v in cls.mandatory_attr:
            if getattr(cls, v) is None:
                raise lib_exc.InvalidConfiguration(
                    "Mandatory attribute %s not set." % v)
        cls.node = cls.boot_node(cls.driver, cls.image_ref)
        cls.node_ip = cls.add_floatingip_to_node(cls.node['uuid'])

    @classmethod
    def resource_cleanup(cls):
        cls.cleanup_floating_ip(cls.node_ip)
        vifs = cls.get_node_vifs(cls.node['uuid'])
        for vif in vifs:
            cls.ports_client.delete_port(vif)
        cls.terminate_node(cls.node['uuid'])
        base.reset_baremetal_api_microversion()
        super(BaremetalStandaloneManager, cls).resource_cleanup()

View File

@@ -0,0 +1,70 @@
#
# Copyright 2017 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest import config
from tempest import test

from ironic_tempest_plugin.tests.scenario import \
    baremetal_standalone_manager as bsm

CONF = config.CONF


class BaremetalAgentIpmitoolWholedisk(bsm.BaremetalStandaloneScenarioTest):

    driver = 'agent_ipmitool'
    image_ref = CONF.baremetal.whole_disk_image_ref
    wholedisk_image = True

    @test.idempotent_id('defff515-a6ff-44f6-9d8d-2ded51196d98')
    @test.services('image', 'network', 'object_storage')
    def test_ip_access_to_server(self):
        self.ping_ip_address(self.node_ip, should_succeed=True)


class BaremetalAgentIpmitoolPartitioned(bsm.BaremetalStandaloneScenarioTest):

    driver = 'agent_ipmitool'
    image_ref = CONF.baremetal.partition_image_ref
    wholedisk_image = False

    @test.idempotent_id('27b86130-d8dc-419d-880a-fbbbe4ce3f8c')
    @test.services('image', 'network', 'object_storage')
    def test_ip_access_to_server(self):
        self.ping_ip_address(self.node_ip, should_succeed=True)


class BaremetalPxeIpmitoolWholedisk(bsm.BaremetalStandaloneScenarioTest):

    driver = 'pxe_ipmitool'
    image_ref = CONF.baremetal.whole_disk_image_ref
    wholedisk_image = True

    @test.idempotent_id('d8c5badd-45db-4d05-bbe8-35babbed6e86')
    @test.services('image', 'network')
    def test_ip_access_to_server(self):
        self.ping_ip_address(self.node_ip, should_succeed=True)


class BaremetalPxeIpmitoolPartitioned(bsm.BaremetalStandaloneScenarioTest):

    driver = 'pxe_ipmitool'
    image_ref = CONF.baremetal.partition_image_ref
    wholedisk_image = False

    @test.idempotent_id('ea85e19c-6869-4577-b9bb-2eb150f77c90')
    @test.services('image', 'network')
    def test_ip_access_to_server(self):
        self.ping_ip_address(self.node_ip, should_succeed=True)
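The same pattern extends naturally to other driver/image combinations. A purely hypothetical example, not part of this patch, targeting the 'ipmi' hardware type that the enabled_hardware_types option above allows by default; it reuses the imports from the file above, and a real test would also carry a @test.idempotent_id decorator:

    class BaremetalIpmiWholediskExample(bsm.BaremetalStandaloneScenarioTest):

        driver = 'ipmi'  # hardware type, allowed via enabled_hardware_types
        image_ref = CONF.baremetal.whole_disk_image_ref
        wholedisk_image = True

        @test.services('image', 'network')
        def test_ip_access_to_server(self):
            self.ping_ip_address(self.node_ip, should_succeed=True)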