Remove support for the old bash ramdisk

From now on, rely only on the IPA inventory and two additional fields:
boot_interface and root_disk.

Also updated the unit tests to use a single shared inventory example.
Also removed duplicate unit tests and checks in test_process.
Also removed devstack support for the old ramdisk.

Change-Id: Ib382328295fc2c1b9143171b1047304febadcaca
Dmitry Tantsur 2016-05-20 14:35:19 +02:00
parent f66592b4f0
commit 0b58e31e3e
12 changed files with 226 additions and 502 deletions
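
For context, the data that the inspector now relies on boils down to the IPA
hardware inventory plus the two extra fields mentioned above. A minimal sketch
of such a payload, assembled from the shared inventory example introduced in
the unit tests below (values are illustrative only)::

    from oslo_utils import units

    introspection_data = {
        'inventory': {
            'interfaces': [
                {'name': 'eth1', 'mac_address': '11:22:33:44:55:66',
                 'ipv4_address': '1.2.1.2'},
            ],
            'disks': [
                {'name': '/dev/sda', 'model': 'Big Data Disk',
                 'size': 1000 * units.Gi},
            ],
            'cpu': {'count': 4, 'architecture': 'x86_64'},
            'memory': {'physical_mb': 12288},
            'bmc_address': '1.2.3.4',
        },
        # interface the node PXE booted from, in the pxelinux "01-<mac>" form
        'boot_interface': '01-11-22-33-44-55-66',
        # disk picked by IPA as the deployment target
        'root_disk': {'name': '/dev/sda', 'size': 1000 * units.Gi},
    }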

View File

@ -86,9 +86,6 @@ components. There is a plugin for installing **ironic-inspector** on DevStack.
Example local.conf
------------------
Using IPA
~~~~~~~~~
::
[[local|localrc]]
@ -108,7 +105,6 @@ Using IPA
IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True
IRONIC_BUILD_DEPLOY_RAMDISK=False
IRONIC_INSPECTOR_RAMDISK_ELEMENT=ironic-agent
IRONIC_INSPECTOR_BUILD_RAMDISK=False
VIRT_DRIVER=ironic
@ -136,52 +132,6 @@ Notes
* This configuration disables Heat and Cinder, adjust it if you need these
services.
Using simple ramdisk
~~~~~~~~~~~~~~~~~~~~
.. note::
This ramdisk is deprecated and should not be used.
::
[[local|localrc]]
enable_service ironic ir-api ir-cond
disable_service n-net n-novnc
enable_service neutron q-svc q-agt q-dhcp q-l3 q-meta
enable_service s-proxy s-object s-container s-account
disable_service heat h-api h-api-cfn h-api-cw h-eng
disable_service cinder c-sch c-api c-vol
enable_plugin ironic https://github.com/openstack/ironic
enable_plugin ironic-inspector https://github.com/openstack/ironic-inspector
IRONIC_BAREMETAL_BASIC_OPS=True
IRONIC_VM_COUNT=2
IRONIC_VM_SPECS_RAM=1024
IRONIC_DEPLOY_FLAVOR="fedora deploy-ironic"
IRONIC_INSPECTOR_RAMDISK_FLAVOR="fedora ironic-discoverd-ramdisk"
VIRT_DRIVER=ironic
LOGDAYS=1
LOGFILE=~/logs/stack.sh.log
SCREEN_LOGDIR=~/logs/screen
DEFAULT_INSTANCE_TYPE=baremetal
TEMPEST_ALLOW_TENANT_ISOLATION=False
Notes
-----
* Replace "fedora" with whatever you have
* You need at least 1G of RAM for VMs, default value of 512 MB won't work
* Before restarting stack.sh::
rm -rf /opt/stack/ironic-inspector
Test
----

View File

@ -18,8 +18,6 @@ IRONIC_INSPECTOR_URI="http://$IRONIC_INSPECTOR_HOST:$IRONIC_INSPECTOR_PORT"
IRONIC_INSPECTOR_BUILD_RAMDISK=$(trueorfalse False IRONIC_INSPECTOR_BUILD_RAMDISK)
IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe.vmlinuz}
IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz}
IRONIC_INSPECTOR_RAMDISK_ELEMENT=${IRONIC_INSPECTOR_RAMDISK_ELEMENT:-ironic-discoverd-ramdisk}
IRONIC_INSPECTOR_RAMDISK_FLAVOR=${IRONIC_INSPECTOR_RAMDISK_FLAVOR:-fedora $IRONIC_INSPECTOR_RAMDISK_ELEMENT}
IRONIC_INSPECTOR_COLLECTORS=${IRONIC_INSPECTOR_COLLECTORS:-default,logs}
IRONIC_INSPECTOR_RAMDISK_LOGDIR=${IRONIC_INSPECTOR_RAMDISK_LOGDIR:-$IRONIC_INSPECTOR_DATA_DIR/ramdisk-logs}
IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS=${IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS:-True}
@ -91,11 +89,6 @@ function stop_inspector_dhcp {
screen -S $SCREEN_NAME -p ironic-inspector-dhcp -X kill
}
function inspector_uses_ipa {
[[ $IRONIC_INSPECTOR_RAMDISK_ELEMENT = "ironic-agent" ]] || [[ $IRONIC_INSPECTOR_RAMDISK_FLAVOR =~ (ironic-agent$|^ironic-agent) ]] && return 0
return 1
}
### Configuration
function prepare_tftp {
@ -104,7 +97,6 @@ function prepare_tftp {
IRONIC_INSPECTOR_INITRAMFS_PATH="$IRONIC_INSPECTOR_IMAGE_PATH.initramfs"
IRONIC_INSPECTOR_CALLBACK_URI="$IRONIC_INSPECTOR_INTERNAL_URI/v1/continue"
if inspector_uses_ipa; then
IRONIC_INSPECTOR_KERNEL_CMDLINE="ipa-inspection-callback-url=$IRONIC_INSPECTOR_CALLBACK_URI systemd.journald.forward_to_console=yes"
IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE vga=normal console=tty0 console=ttyS0"
IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE ipa-inspection-collectors=$IRONIC_INSPECTOR_COLLECTORS"
@ -125,16 +117,6 @@ function prepare_tftp {
fi
fi
fi
else
IRONIC_INSPECTOR_KERNEL_CMDLINE="discoverd_callback_url=$IRONIC_INSPECTOR_CALLBACK_URI inspector_callback_url=$IRONIC_INSPECTOR_CALLBACK_URI"
if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! -e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then
if [[ $(type -P ramdisk-image-create) == "" ]]; then
pip_install diskimage_builder
fi
ramdisk-image-create $IRONIC_INSPECTOR_RAMDISK_FLAVOR \
-o $IRONIC_INSPECTOR_IMAGE_PATH
fi
fi
if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
cp $IRONIC_INSPECTOR_KERNEL_PATH $IRONIC_HTTP_DIR/ironic-inspector.kernel

View File

@ -217,22 +217,6 @@ Optionally the following keys might be provided:
* ``logs`` base64-encoded logs from the ramdisk.
The following keys are supported for backward compatibility with the old
bash-based ramdisk, when ``inventory`` is not provided:
* ``cpus`` number of CPU
* ``cpu_arch`` architecture of the CPU
* ``memory_mb`` RAM in MiB
* ``local_gb`` hard drive size in GiB
* ``ipmi_address`` IP address of BMC, may be missing on VM
* ``block_devices`` block devices information for the ``raid_device`` plugin,
dictionary with one key: ``serials`` list of serial numbers of block devices.
.. note::
This list highly depends on enabled plugins, provided above are
expected keys for the default set of plugins. See :ref:`plugins`

View File

@ -132,9 +132,8 @@ As for PXE boot environment, you'll need:
simultaneously cause conflicts - the same IP address is suggested to
several nodes.
* You have to install and configure one of 2 available ramdisks: simple
bash-based (see `Using simple ramdisk`_) or more complex based on
ironic-python-agent_ (See `Using IPA`_).
* You have to install and configure the ramdisk to be run on target machines -
see `Configuring IPA`_.
Here is *inspector.conf* you may end up with::
@ -152,8 +151,8 @@ Here is *inspector.conf* you may end up with::
.. note::
Set ``debug = true`` if you want to see complete logs.
Using IPA
^^^^^^^^^
Configuring IPA
^^^^^^^^^^^^^^^
ironic-python-agent_ is a ramdisk developed for Ironic. During the Liberty
cycle support for **ironic-inspector** was added. This is the default ramdisk
@ -215,34 +214,6 @@ This ramdisk is pluggable: you can insert introspection plugins called
.. _diskimage-builder: https://github.com/openstack/diskimage-builder
.. _ironic-python-agent: https://github.com/openstack/ironic-python-agent
Using simple ramdisk
^^^^^^^^^^^^^^^^^^^^
This ramdisk is deprecated, its use is not recommended.
* Build and put into your TFTP the kernel and ramdisk created using the
diskimage-builder_ `ironic-discoverd-ramdisk element`_::
ramdisk-image-create -o discovery fedora ironic-discoverd-ramdisk
You need diskimage-builder_ 0.1.38 or newer to do it (using the latest one
is always advised).
* Configure your ``$TFTPROOT/pxelinux.cfg/default`` with something like::
default introspect
label introspect
kernel discovery.kernel
append initrd=discovery.initramfs discoverd_callback_url=http://{IP}:5050/v1/continue
ipappend 3
Replace ``{IP}`` with IP of the machine (do not use loopback interface, it
will be accessed by ramdisk on a booting machine).
.. _ironic-discoverd-ramdisk element: https://github.com/openstack/diskimage-builder/tree/master/elements/ironic-discoverd-ramdisk
Managing the **ironic-inspector** database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -46,13 +46,6 @@ class RootDiskSelectionHook(base.ProcessingHook):
node_info=node_info, data=introspection_data)
return
inventory = introspection_data.get('inventory')
if not inventory:
raise utils.Error(
_('Root device selection requires ironic-python-agent '
'as an inspection ramdisk'),
node_info=node_info, data=introspection_data)
if 'size' in hints:
# Special case to match IPA behaviour
try:
@ -62,12 +55,9 @@ class RootDiskSelectionHook(base.ProcessingHook):
'an integer, got %s') % hints['size'],
node_info=node_info, data=introspection_data)
disks = inventory.get('disks', [])
if not disks:
raise utils.Error(_('No disks found'),
node_info=node_info, data=introspection_data)
for disk in disks:
inventory = utils.get_inventory(introspection_data,
node_info=node_info)
for disk in inventory['disks']:
properties = disk.copy()
# Root device hints are in GiB, data from IPA is in bytes
properties['size'] //= units.Gi
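
Root device hints stored on the node are expressed in GiB while IPA reports
disk sizes in bytes, hence the floor division by ``units.Gi`` above. A quick
standalone illustration of the conversion (sketch using oslo.utils)::

    from oslo_utils import units

    disk = {'name': '/dev/sdc', 'size': 10 * units.Gi}  # IPA reports bytes
    size_gib = disk['size'] // units.Gi
    assert size_gib == 10  # now comparable to a {'size': 10} root device hint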
@ -100,7 +90,8 @@ class SchedulerHook(base.ProcessingHook):
def before_update(self, introspection_data, node_info, **kwargs):
"""Update node with scheduler properties."""
inventory = introspection_data.get('inventory')
inventory = utils.get_inventory(introspection_data,
node_info=node_info)
errors = []
root_disk = introspection_data.get('root_disk')
@ -108,11 +99,10 @@ class SchedulerHook(base.ProcessingHook):
introspection_data['local_gb'] = root_disk['size'] // units.Gi
if CONF.processing.disk_partitioning_spacing:
introspection_data['local_gb'] -= 1
elif inventory:
else:
errors.append(_('root disk is not supplied by the ramdisk and '
'root_disk_selection hook is not enabled'))
if inventory:
try:
introspection_data['cpus'] = int(inventory['cpu']['count'])
introspection_data['cpu_arch'] = six.text_type(
@ -127,21 +117,7 @@ class SchedulerHook(base.ProcessingHook):
except (KeyError, ValueError, TypeError):
errors.append(_('malformed or missing memory information: %s; '
'introspection requires physical memory size '
'from dmidecode') %
inventory.get('memory'))
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'),
node_info=node_info, data=introspection_data)
missing = [key for key in self.KEYS
if not introspection_data.get(key)]
if missing:
raise utils.Error(
_('The following required parameters are missing: %s') %
missing,
node_info=node_info, data=introspection_data)
'from dmidecode') % inventory.get('memory'))
if errors:
raise utils.Error(_('The following problems encountered: %s') %
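
With the shared inventory example from the tests (4 CPUs, x86_64, 12288 MiB of
RAM, a 1000 GiB root disk) and ``disk_partitioning_spacing`` left enabled, the
properties this hook derives look roughly as follows (illustrative sketch,
matching the expected patch values in the unit tests below)::

    from oslo_utils import units

    root_disk = {'name': '/dev/sda', 'size': 1000 * units.Gi}
    local_gb = root_disk['size'] // units.Gi - 1  # 999: one GiB kept as spacing
    properties = {
        'cpus': 4,
        'cpu_arch': 'x86_64',
        'memory_mb': 12288,
        'local_gb': local_gb,
    }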
@ -184,10 +160,9 @@ class ValidateInterfacesHook(base.ProcessingHook):
:return: dict interface name -> dict with keys 'mac' and 'ip'
"""
result = {}
inventory = data.get('inventory', {})
inventory = utils.get_inventory(data)
if inventory:
for iface in inventory.get('interfaces', ()):
for iface in inventory['interfaces']:
name = iface.get('name')
mac = iface.get('mac_address')
ip = iface.get('ipv4_address')
@ -215,11 +190,6 @@ class ValidateInterfacesHook(base.ProcessingHook):
'IP address "%(ip)s"',
{'name': name, 'mac': mac, 'ip': ip}, data=data)
result[name] = {'ip': ip, 'mac': mac}
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'), data=data)
result = data.get('interfaces')
return result
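
The transformation performed by ``before_processing`` above can be pictured
with the shared test inventory: each well-formed interface becomes a
``name -> {'mac', 'ip'}`` entry (simplified sketch that omits the name/MAC
validation and pxelinux boot_interface handling done by the real hook)::

    inventory_interfaces = [
        {'name': 'eth1', 'mac_address': '11:22:33:44:55:66',
         'ipv4_address': '1.2.1.2'},
        {'name': 'eth2', 'mac_address': '12:12:21:12:21:12'},
    ]
    result = {
        iface['name']: {'mac': iface.get('mac_address'),
                        'ip': iface.get('ipv4_address')}
        for iface in inventory_interfaces
    }
    # {'eth1': {'mac': '11:22:33:44:55:66', 'ip': '1.2.1.2'},
    #  'eth2': {'mac': '12:12:21:12:21:12', 'ip': None}}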

View File

@ -286,10 +286,10 @@ def _finish_set_ipmi_credentials(ironic, node, node_info, introspection_data,
'value': new_username},
{'op': 'add', 'path': '/driver_info/ipmi_password',
'value': new_password}]
if (not ir_utils.get_ipmi_address(node) and
introspection_data.get('ipmi_address')):
new_ipmi_address = utils.get_ipmi_address_from_data(introspection_data)
if not ir_utils.get_ipmi_address(node) and new_ipmi_address:
patch.append({'op': 'add', 'path': '/driver_info/ipmi_address',
'value': introspection_data['ipmi_address']})
'value': new_ipmi_address})
node_info.patch(patch)
for attempt in range(_CREDENTIALS_WAIT_RETRIES):

View File

@ -19,6 +19,7 @@ from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import log
from oslo_utils import units
from oslo_utils import uuidutils
from ironic_inspector.common import i18n
@ -78,12 +79,66 @@ class BaseTest(fixtures.TestWithFixtures):
self.assertPatchEqual(actual, expected)
class NodeTest(BaseTest):
class InventoryTest(BaseTest):
def setUp(self):
super(InventoryTest, self).setUp()
# Prepare some realistic inventory
# https://github.com/openstack/ironic-inspector/blob/master/HTTP-API.rst # noqa
self.bmc_address = '1.2.3.4'
self.macs = ['11:22:33:44:55:66', '66:55:44:33:22:11']
self.ips = ['1.2.1.2', '1.2.1.1']
self.inactive_mac = '12:12:21:12:21:12'
self.pxe_mac = self.macs[0]
self.all_macs = self.macs + [self.inactive_mac]
self.pxe_iface_name = 'eth1'
self.data = {
'boot_interface': '01-' + self.pxe_mac.replace(':', '-'),
'inventory': {
'interfaces': [
{'name': 'eth1', 'mac_address': self.macs[0],
'ipv4_address': self.ips[0]},
{'name': 'eth2', 'mac_address': self.inactive_mac},
{'name': 'eth3', 'mac_address': self.macs[1],
'ipv4_address': self.ips[1]},
],
'disks': [
{'name': '/dev/sda', 'model': 'Big Data Disk',
'size': 1000 * units.Gi},
{'name': '/dev/sdb', 'model': 'Small OS Disk',
'size': 20 * units.Gi},
],
'cpu': {
'count': 4,
'architecture': 'x86_64'
},
'memory': {
'physical_mb': 12288
},
'bmc_address': self.bmc_address
},
'root_disk': {'name': '/dev/sda', 'model': 'Big Data Disk',
'size': 1000 * units.Gi,
'wwn': None},
}
self.inventory = self.data['inventory']
self.all_interfaces = {
'eth1': {'mac': self.macs[0], 'ip': self.ips[0]},
'eth2': {'mac': self.inactive_mac, 'ip': None},
'eth3': {'mac': self.macs[1], 'ip': self.ips[1]}
}
self.active_interfaces = {
'eth1': {'mac': self.macs[0], 'ip': self.ips[0]},
'eth3': {'mac': self.macs[1], 'ip': self.ips[1]}
}
self.pxe_interfaces = {
self.pxe_iface_name: self.all_interfaces[self.pxe_iface_name]
}
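
The ``boot_interface`` value above follows the pxelinux convention: hardware
type ``01`` plus the PXE-booting MAC with colons replaced by dashes. A tiny
illustration of how the test data builds and unpacks it (format demonstration
only, not the hook's code)::

    pxe_mac = '11:22:33:44:55:66'
    boot_interface = '01-' + pxe_mac.replace(':', '-')
    # '01-11-22-33-44-55-66'
    assert boot_interface.split('-', 1)[1].replace('-', ':') == pxe_mac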
class NodeTest(InventoryTest):
def setUp(self):
super(NodeTest, self).setUp()
self.uuid = uuidutils.generate_uuid()
self.bmc_address = '1.2.3.4'
self.macs = ['11:22:33:44:55:66', '66:55:44:33:22:11']
fake_node = {
'driver': 'pxe_ipmitool',
'driver_info': {'ipmi_address': self.bmc_address},

View File

@ -24,7 +24,6 @@ import unittest
import mock
from oslo_config import cfg
from oslo_utils import units
import requests
from ironic_inspector.common import ironic as ir_utils
@ -70,62 +69,12 @@ class Base(base.NodeTest):
self.cli.node.update.return_value = self.node
self.cli.node.list.return_value = [self.node]
# https://github.com/openstack/ironic-inspector/blob/master/HTTP-API.rst # noqa
self.data = {
'boot_interface': '01-' + self.macs[0].replace(':', '-'),
'inventory': {
'interfaces': [
{'name': 'eth1', 'mac_address': self.macs[0],
'ipv4_address': '1.2.1.2'},
{'name': 'eth2', 'mac_address': '12:12:21:12:21:12'},
{'name': 'eth3', 'mac_address': self.macs[1],
'ipv4_address': '1.2.1.1'},
],
'disks': [
{'name': '/dev/sda', 'model': 'Big Data Disk',
'size': 1000 * units.Gi},
{'name': '/dev/sdb', 'model': 'Small OS Disk',
'size': 20 * units.Gi},
],
'cpu': {
'count': 4,
'architecture': 'x86_64'
},
'memory': {
'physical_mb': 12288
},
'bmc_address': self.bmc_address
},
'root_disk': {'name': '/dev/sda', 'model': 'Big Data Disk',
'size': 1000 * units.Gi,
'wwn': None},
}
self.data_old_ramdisk = {
'cpus': 4,
'cpu_arch': 'x86_64',
'memory_mb': 12288,
'local_gb': 464,
'interfaces': {
'eth1': {'mac': self.macs[0], 'ip': '1.2.1.2'},
'eth2': {'mac': '12:12:21:12:21:12'},
'eth3': {'mac': self.macs[1], 'ip': '1.2.1.1'},
},
'boot_interface': '01-' + self.macs[0].replace(':', '-'),
'ipmi_address': self.bmc_address,
}
self.patch = [
{'op': 'add', 'path': '/properties/cpus', 'value': '4'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
{'path': '/properties/local_gb', 'value': '999', 'op': 'add'}
]
self.patch_old_ramdisk = [
{'op': 'add', 'path': '/properties/cpus', 'value': '4'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
{'path': '/properties/local_gb', 'value': '464', 'op': 'add'}
]
self.patch_root_hints = [
{'op': 'add', 'path': '/properties/cpus', 'value': '4'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
@ -211,27 +160,6 @@ class Test(Base):
status = self.call_get_status(self.uuid)
self.assertEqual({'finished': True, 'error': None}, status)
def test_old_ramdisk(self):
self.call_introspect(self.uuid)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.cli.node.set_power_state.assert_called_once_with(self.uuid,
'reboot')
status = self.call_get_status(self.uuid)
self.assertEqual({'finished': False, 'error': None}, status)
res = self.call_continue(self.data_old_ramdisk)
self.assertEqual({'uuid': self.uuid}, res)
eventlet.greenthread.sleep(DEFAULT_SLEEP)
self.assertCalledWithPatch(self.patch_old_ramdisk,
self.cli.node.update)
self.cli.port.create.assert_called_once_with(
node_uuid=self.uuid, address='11:22:33:44:55:66')
status = self.call_get_status(self.uuid)
self.assertEqual({'finished': True, 'error': None}, status)
def test_setup_ipmi(self):
patch_credentials = [
{'op': 'add', 'path': '/driver_info/ipmi_username',

View File

@ -29,22 +29,6 @@ class TestSchedulerHook(test_base.NodeTest):
def setUp(self):
super(TestSchedulerHook, self).setUp()
self.hook = std_plugins.SchedulerHook()
self.data = {
'inventory': {
'cpu': {'count': 2, 'architecture': 'x86_64'},
'memory': {'physical_mb': 1024},
},
'root_disk': {
'name': '/dev/sda',
'size': 21 * units.Gi
}
}
self.old_data = {
'local_gb': 20,
'memory_mb': 1024,
'cpus': 2,
'cpu_arch': 'x86_64'
}
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0,
node=self.node)
@ -53,43 +37,24 @@ class TestSchedulerHook(test_base.NodeTest):
ext = base.processing_hooks_manager()['scheduler']
self.assertIsInstance(ext.obj, std_plugins.SchedulerHook)
def test_compat_missing(self):
for key in self.old_data:
new_data = self.old_data.copy()
del new_data[key]
self.assertRaisesRegexp(utils.Error, key,
self.hook.before_update, new_data,
self.node_info)
def test_no_root_disk(self):
self.assertRaisesRegexp(utils.Error, 'root disk is not supplied',
self.hook.before_update,
{'inventory': {'disks': []}}, self.node_info)
del self.inventory['disks']
self.assertRaisesRegexp(utils.Error, 'disks key is missing or empty',
self.hook.before_update, self.data,
self.node_info)
@mock.patch.object(node_cache.NodeInfo, 'patch')
def test_ok(self, mock_patch):
patch = [
{'path': '/properties/cpus', 'value': '2', 'op': 'add'},
{'path': '/properties/cpus', 'value': '4', 'op': 'add'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '20', 'op': 'add'}
{'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '999', 'op': 'add'}
]
self.hook.before_update(self.data, self.node_info)
self.assertCalledWithPatch(patch, mock_patch)
@mock.patch.object(node_cache.NodeInfo, 'patch')
def test_compat_ok(self, mock_patch):
patch = [
{'path': '/properties/cpus', 'value': '2', 'op': 'add'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '20', 'op': 'add'}
]
self.hook.before_update(self.old_data, self.node_info)
self.assertCalledWithPatch(patch, mock_patch)
@mock.patch.object(node_cache.NodeInfo, 'patch')
def test_no_overwrite(self, mock_patch):
CONF.set_override('overwrite_existing', False, 'processing')
@ -98,36 +63,21 @@ class TestSchedulerHook(test_base.NodeTest):
'cpu_arch': 'i686'
}
patch = [
{'path': '/properties/cpus', 'value': '2', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '20', 'op': 'add'}
{'path': '/properties/cpus', 'value': '4', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '999', 'op': 'add'}
]
self.hook.before_update(self.data, self.node_info)
self.assertCalledWithPatch(patch, mock_patch)
@mock.patch.object(node_cache.NodeInfo, 'patch')
def test_compat_root_disk(self, mock_patch):
self.old_data['root_disk'] = {'name': '/dev/sda',
'size': 42 * units.Gi}
patch = [
{'path': '/properties/cpus', 'value': '2', 'op': 'add'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '41', 'op': 'add'}
]
self.hook.before_update(self.old_data, self.node_info)
self.assertCalledWithPatch(patch, mock_patch)
@mock.patch.object(node_cache.NodeInfo, 'patch')
def test_root_disk_no_spacing(self, mock_patch):
CONF.set_override('disk_partitioning_spacing', False, 'processing')
self.data['root_disk'] = {'name': '/dev/sda', 'size': 42 * units.Gi}
patch = [
{'path': '/properties/cpus', 'value': '2', 'op': 'add'},
{'path': '/properties/cpus', 'value': '4', 'op': 'add'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '42', 'op': 'add'}
{'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '1000', 'op': 'add'}
]
self.hook.before_update(self.data, self.node_info)
@ -138,38 +88,9 @@ class TestValidateInterfacesHook(test_base.NodeTest):
def setUp(self):
super(TestValidateInterfacesHook, self).setUp()
self.hook = std_plugins.ValidateInterfacesHook()
self.data = {
'inventory': {
'interfaces': [
{'name': 'em1', 'mac_address': '11:11:11:11:11:11',
'ipv4_address': '1.1.1.1'},
{'name': 'em2', 'mac_address': '22:22:22:22:22:22',
'ipv4_address': '2.2.2.2'},
{'name': 'em3', 'mac_address': '33:33:33:33:33:33',
'ipv4_address': None},
],
},
'boot_interface': '01-22-22-22-22-22-22'
}
self.old_data = {
'interfaces': {
'em1': {'mac': '11:11:11:11:11:11', 'ip': '1.1.1.1'},
'em2': {'mac': '22:22:22:22:22:22', 'ip': '2.2.2.2'},
'em3': {'mac': '33:33:33:33:33:33'}
},
'boot_interface': '01-22-22-22-22-22-22',
}
self.orig_interfaces = self.old_data['interfaces'].copy()
self.orig_interfaces['em3']['ip'] = None
self.pxe_interface = self.old_data['interfaces']['em2']
self.active_interfaces = {
'em1': {'mac': '11:11:11:11:11:11', 'ip': '1.1.1.1'},
'em2': {'mac': '22:22:22:22:22:22', 'ip': '2.2.2.2'},
}
self.existing_ports = [mock.Mock(spec=['address', 'uuid'],
address=a)
for a in ('11:11:11:11:11:11',
for a in (self.macs[1],
'44:44:44:44:44:44')]
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0,
node=self.node,
@ -190,29 +111,31 @@ class TestValidateInterfacesHook(test_base.NodeTest):
self.assertRaises(SystemExit, std_plugins.ValidateInterfacesHook)
def test_no_interfaces(self):
self.assertRaisesRegexp(utils.Error, 'No interfaces',
self.assertRaisesRegexp(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_processing, {})
self.assertRaisesRegexp(utils.Error, 'No interfaces',
self.assertRaisesRegexp(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_processing, {'inventory': {}})
self.assertRaisesRegexp(utils.Error, 'No interfaces',
self.hook.before_processing, {'inventory': {
'interfaces': []
}})
del self.inventory['interfaces']
self.assertRaisesRegexp(utils.Error,
'interfaces key is missing or empty',
self.hook.before_processing, self.data)
def test_only_pxe(self):
self.hook.before_processing(self.data)
self.assertEqual({'em2': self.pxe_interface}, self.data['interfaces'])
self.assertEqual([self.pxe_interface['mac']], self.data['macs'])
self.assertEqual(self.orig_interfaces, self.data['all_interfaces'])
self.assertEqual(self.pxe_interfaces, self.data['interfaces'])
self.assertEqual([self.pxe_mac], self.data['macs'])
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
def test_only_pxe_mac_format(self):
self.data['boot_interface'] = '22:22:22:22:22:22'
self.data['boot_interface'] = self.pxe_mac
self.hook.before_processing(self.data)
self.assertEqual({'em2': self.pxe_interface}, self.data['interfaces'])
self.assertEqual([self.pxe_interface['mac']], self.data['macs'])
self.assertEqual(self.orig_interfaces, self.data['all_interfaces'])
self.assertEqual(self.pxe_interfaces, self.data['interfaces'])
self.assertEqual([self.pxe_mac], self.data['macs'])
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
def test_only_pxe_not_found(self):
self.data['boot_interface'] = 'aa:bb:cc:dd:ee:ff'
@ -227,7 +150,7 @@ class TestValidateInterfacesHook(test_base.NodeTest):
self.assertEqual(sorted(i['mac'] for i in
self.active_interfaces.values()),
sorted(self.data['macs']))
self.assertEqual(self.orig_interfaces, self.data['all_interfaces'])
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
def test_only_active(self):
CONF.set_override('add_ports', 'active', 'processing')
@ -237,38 +160,31 @@ class TestValidateInterfacesHook(test_base.NodeTest):
self.assertEqual(sorted(i['mac'] for i in
self.active_interfaces.values()),
sorted(self.data['macs']))
self.assertEqual(self.orig_interfaces, self.data['all_interfaces'])
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
def test_all(self):
CONF.set_override('add_ports', 'all', 'processing')
self.hook.before_processing(self.data)
self.assertEqual(self.orig_interfaces, self.data['interfaces'])
self.assertEqual(self.all_interfaces, self.data['interfaces'])
self.assertEqual(sorted(i['mac'] for i in
self.orig_interfaces.values()),
self.all_interfaces.values()),
sorted(self.data['macs']))
self.assertEqual(self.orig_interfaces, self.data['all_interfaces'])
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
def test_malformed_interfaces(self):
self.data = {
'inventory': {
'interfaces': [
self.inventory['interfaces'] = [
# no name
{'mac_address': '11:11:11:11:11:11',
'ipv4_address': '1.1.1.1'},
{'mac_address': '11:11:11:11:11:11', 'ipv4_address': '1.1.1.1'},
# empty
{},
],
},
}
]
self.assertRaisesRegexp(utils.Error, 'No interfaces supplied',
self.hook.before_processing, self.data)
def test_skipped_interfaces(self):
CONF.set_override('add_ports', 'all', 'processing')
self.data = {
'inventory': {
'interfaces': [
self.inventory['interfaces'] = [
# local interface (by name)
{'name': 'lo', 'mac_address': '11:11:11:11:11:11',
'ipv4_address': '1.1.1.1'},
@ -280,9 +196,7 @@ class TestValidateInterfacesHook(test_base.NodeTest):
# malformed MAC provided
{'name': 'em4', 'mac_address': 'foobar',
'ipv4_address': '2.2.2.2'},
],
},
}
]
self.assertRaisesRegexp(utils.Error, 'No suitable interfaces found',
self.hook.before_processing, self.data)
@ -294,7 +208,7 @@ class TestValidateInterfacesHook(test_base.NodeTest):
@mock.patch.object(node_cache.NodeInfo, 'delete_port')
def test_keep_present(self, mock_delete_port):
CONF.set_override('keep_ports', 'present', 'processing')
self.data['all_interfaces'] = self.orig_interfaces
self.data['all_interfaces'] = self.all_interfaces
self.hook.before_update(self.data, self.node_info)
mock_delete_port.assert_called_once_with(self.existing_ports[1])
@ -302,7 +216,7 @@ class TestValidateInterfacesHook(test_base.NodeTest):
@mock.patch.object(node_cache.NodeInfo, 'delete_port')
def test_keep_added(self, mock_delete_port):
CONF.set_override('keep_ports', 'added', 'processing')
self.data['macs'] = [self.pxe_interface['mac']]
self.data['macs'] = [self.pxe_mac]
self.hook.before_update(self.data, self.node_info)
mock_delete_port.assert_any_call(self.existing_ports[0])
@ -313,28 +227,21 @@ class TestRootDiskSelection(test_base.NodeTest):
def setUp(self):
super(TestRootDiskSelection, self).setUp()
self.hook = std_plugins.RootDiskSelectionHook()
self.data = {
'inventory': {
'disks': [
{'model': 'Model 1', 'size': 20 * units.Gi,
'name': '/dev/sdb'},
{'model': 'Model 2', 'size': 5 * units.Gi,
'name': '/dev/sda'},
{'model': 'Model 3', 'size': 10 * units.Gi,
'name': '/dev/sdc'},
{'model': 'Model 4', 'size': 4 * units.Gi,
'name': '/dev/sdd'},
{'model': 'Too Small', 'size': 1 * units.Gi,
'name': '/dev/sde'},
self.inventory['disks'] = [
{'model': 'Model 1', 'size': 20 * units.Gi, 'name': '/dev/sdb'},
{'model': 'Model 2', 'size': 5 * units.Gi, 'name': '/dev/sda'},
{'model': 'Model 3', 'size': 10 * units.Gi, 'name': '/dev/sdc'},
{'model': 'Model 4', 'size': 4 * units.Gi, 'name': '/dev/sdd'},
{'model': 'Too Small', 'size': 1 * units.Gi, 'name': '/dev/sde'},
]
}
}
self.matched = self.data['inventory']['disks'][2].copy()
self.matched = self.inventory['disks'][2].copy()
self.node_info = mock.Mock(spec=node_cache.NodeInfo,
uuid=self.uuid,
**{'node.return_value': self.node})
def test_no_hints(self):
del self.data['root_disk']
self.hook.before_update(self.data, self.node_info)
self.assertNotIn('local_gb', self.data)
@ -343,9 +250,10 @@ class TestRootDiskSelection(test_base.NodeTest):
def test_no_inventory(self):
self.node.properties['root_device'] = {'model': 'foo'}
del self.data['inventory']
del self.data['root_disk']
self.assertRaisesRegexp(utils.Error,
'requires ironic-python-agent',
'Hardware inventory is empty or missing',
self.hook.before_update,
self.data, self.node_info)
@ -354,10 +262,10 @@ class TestRootDiskSelection(test_base.NodeTest):
def test_no_disks(self):
self.node.properties['root_device'] = {'size': 10}
self.data['inventory']['disks'] = []
self.inventory['disks'] = []
self.assertRaisesRegexp(utils.Error,
'No disks found',
'disks key is missing or empty',
self.hook.before_update,
self.data, self.node_info)
@ -379,6 +287,7 @@ class TestRootDiskSelection(test_base.NodeTest):
def test_one_fails(self):
self.node.properties['root_device'] = {'size': 10,
'model': 'Model 42'}
del self.data['root_disk']
self.assertRaisesRegexp(utils.Error,
'No disks satisfied root device hints',
@ -402,15 +311,12 @@ class TestRootDiskSelection(test_base.NodeTest):
self.data, self.node_info)
class TestRamdiskError(test_base.BaseTest):
class TestRamdiskError(test_base.InventoryTest):
def setUp(self):
super(TestRamdiskError, self).setUp()
self.msg = 'BOOM'
self.bmc_address = '1.2.3.4'
self.data = {
'error': self.msg,
'ipmi_address': self.bmc_address,
}
self.data['error'] = self.msg
def test_no_logs(self):
self.assertRaisesRegexp(utils.Error,

View File

@ -43,24 +43,9 @@ class BaseTest(test_base.NodeTest):
def setUp(self):
super(BaseTest, self).setUp()
self.started_at = time.time()
self.pxe_mac = self.macs[1]
self.data = {
'ipmi_address': self.bmc_address,
'cpus': 2,
'cpu_arch': 'x86_64',
'memory_mb': 1024,
'local_gb': 20,
'interfaces': {
'em1': {'mac': self.macs[0], 'ip': '1.2.0.1'},
'em2': {'mac': self.macs[1], 'ip': '1.2.0.2'},
'em3': {'mac': 'DE:AD:BE:EF:DE:AD'},
},
'boot_interface': '01-' + self.pxe_mac.replace(':', '-'),
}
self.all_ports = [mock.Mock(uuid=uuidutils.generate_uuid(),
address=mac) for mac in self.macs]
self.ports = [self.all_ports[1]]
self.all_macs = self.macs + ['DE:AD:BE:EF:DE:AD']
self.fake_result_json = 'node json'
self.cli_fixture = self.useFixture(
@ -94,10 +79,6 @@ class TestProcess(BaseProcessTest):
self.assertEqual(self.fake_result_json, res)
# Only boot interface is added by default
self.assertEqual(['em2'], sorted(self.data['interfaces']))
self.assertEqual([self.pxe_mac], self.data['macs'])
self.find_mock.assert_called_once_with(bmc_address=self.bmc_address,
mac=mock.ANY)
actual_macs = self.find_mock.call_args[1]['mac']
@ -107,7 +88,7 @@ class TestProcess(BaseProcessTest):
self.node, self.data, self.node_info)
def test_no_ipmi(self):
del self.data['ipmi_address']
del self.inventory['bmc_address']
process.process(self.data)
self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY)
@ -350,15 +331,8 @@ class TestProcessNode(BaseTest):
'processing')
self.validate_attempts = 5
self.data['macs'] = self.macs # validate_interfaces hook
self.data['all_interfaces'] = self.data['interfaces']
self.ports = self.all_ports
self.patch_props = [
{'path': '/properties/cpus', 'value': '2', 'op': 'add'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'},
{'path': '/properties/local_gb', 'value': '20', 'op': 'add'}
] # scheduler hook
self.new_creds = ('user', 'password')
self.patch_credentials = [
{'op': 'add', 'path': '/driver_info/ipmi_username',
@ -406,24 +380,12 @@ class TestProcessNode(BaseTest):
address=self.macs[0])
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[1])
self.assertCalledWithPatch(self.patch_props, self.cli.node.update)
self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
self.assertFalse(self.cli.node.validate.called)
post_hook_mock.assert_called_once_with(self.data, self.node_info)
finished_mock.assert_called_once_with(mock.ANY)
def test_overwrite_disabled(self):
CONF.set_override('overwrite_existing', False, 'processing')
patch = [
{'op': 'add', 'path': '/properties/cpus', 'value': '2'},
{'op': 'add', 'path': '/properties/memory_mb', 'value': '1024'},
]
process._process_node(self.node, self.data, self.node_info)
self.assertCalledWithPatch(patch, self.cli.node.update)
def test_port_failed(self):
self.cli.port.create.side_effect = (
[exceptions.Conflict()] + self.ports[1:])
@ -434,7 +396,6 @@ class TestProcessNode(BaseTest):
address=self.macs[0])
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[1])
self.assertCalledWithPatch(self.patch_props, self.cli.node.update)
def test_set_ipmi_credentials(self):
self.node_info.set_option('new_ipmi_credentials', self.new_creds)
@ -486,7 +447,6 @@ class TestProcessNode(BaseTest):
process._process_node(self.node, self.data, self.node_info)
self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
self.assertCalledWithPatch(self.patch_props, self.cli.node.update)
finished_mock.assert_called_once_with(
mock.ANY,
error='Failed to power off node %s, check its power '
@ -517,22 +477,19 @@ class TestProcessNode(BaseTest):
swift_conn.create_object.assert_called_once_with(name, mock.ANY)
self.assertEqual(expected,
json.loads(swift_conn.create_object.call_args[0][1]))
self.assertCalledWithPatch(self.patch_props, self.cli.node.update)
@mock.patch.object(process.swift, 'SwiftAPI', autospec=True)
def test_store_data_no_logs(self, swift_mock):
CONF.set_override('store_data', 'swift', 'processing')
swift_conn = swift_mock.return_value
name = 'inspector_data-%s' % self.uuid
expected = self.data.copy()
self.data['logs'] = 'something'
process._process_node(self.node, self.data, self.node_info)
swift_conn.create_object.assert_called_once_with(name, mock.ANY)
self.assertEqual(expected,
self.assertNotIn('logs',
json.loads(swift_conn.create_object.call_args[0][1]))
self.assertCalledWithPatch(self.patch_props, self.cli.node.update)
@mock.patch.object(process.swift, 'SwiftAPI', autospec=True)
def test_store_data_location(self, swift_mock):
@ -541,11 +498,8 @@ class TestProcessNode(BaseTest):
'processing')
swift_conn = swift_mock.return_value
name = 'inspector_data-%s' % self.uuid
self.patch_props.append(
{'path': '/extra/inspector_data_object',
'value': name,
'op': 'add'}
)
patch = [{'path': '/extra/inspector_data_object',
'value': name, 'op': 'add'}]
expected = self.data
process._process_node(self.node, self.data, self.node_info)
@ -553,7 +507,7 @@ class TestProcessNode(BaseTest):
swift_conn.create_object.assert_called_once_with(name, mock.ANY)
self.assertEqual(expected,
json.loads(swift_conn.create_object.call_args[0][1]))
self.assertCalledWithPatch(self.patch_props, self.cli.node.update)
self.cli.node.update.assert_any_call(self.uuid, patch)
@mock.patch.object(process, '_reapply', autospec=True)
@ -614,7 +568,6 @@ class TestReapplyNode(BaseTest):
'processing')
CONF.set_override('store_data', 'swift', 'processing')
self.data['macs'] = self.macs
self.data['all_interfaces'] = self.data['interfaces']
self.ports = self.all_ports
self.node_info = node_cache.NodeInfo(uuid=self.uuid,
started_at=self.started_at,
@ -667,8 +620,7 @@ class TestReapplyNode(BaseTest):
finished_mock.assert_called_once_with(self.node_info)
# asserting validate_interfaces was called
self.assertEqual({'em2': self.data['interfaces']['em2']},
swifted_data['interfaces'])
self.assertEqual(self.pxe_interfaces, swifted_data['interfaces'])
self.assertEqual([self.pxe_mac], swifted_data['macs'])
# assert ports were created with whatever there was left

View File

@ -205,3 +205,22 @@ def get_valid_macs(data):
return [m['mac']
for m in data.get('all_interfaces', {}).values()
if m.get('mac')]
_INVENTORY_MANDATORY_KEYS = ('disks', 'memory', 'cpu', 'interfaces')
def get_inventory(data, node_info=None):
"""Get and validate the hardware inventory from introspection data."""
inventory = data.get('inventory')
# TODO(dtantsur): validate inventory using JSON schema
if not inventory:
raise Error(_('Hardware inventory is empty or missing'),
data=data, node_info=node_info)
for key in _INVENTORY_MANDATORY_KEYS:
if not inventory.get(key):
raise Error(_('Invalid hardware inventory: %s key is missing '
'or empty') % key, data=data, node_info=node_info)
return inventory
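
Processing hooks are expected to use this helper instead of reading
``inventory`` from the data directly, so that a missing or malformed inventory
is reported against the node. A minimal usage sketch (hypothetical hook method
mirroring the calls added in plugins/standard.py above)::

    def before_update(self, introspection_data, node_info, **kwargs):
        # raises utils.Error (reported against the node) when the inventory
        # is missing or any of disks/memory/cpu/interfaces is absent or empty
        inventory = utils.get_inventory(introspection_data,
                                        node_info=node_info)
        return [disk['name'] for disk in inventory['disks']]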

View File

@ -0,0 +1,7 @@
---
prelude: >
Starting with this release only ironic-python-agent (IPA) is supported
as an introspection ramdisk.
upgrade:
- Support for the old bash-based ramdisk was removed. Please switch to IPA
before upgrading.