Remove support for the old ramdisk (DIB deploy-ironic element)
To be specific, removed in this patch are:

* vendor passthru for the old ramdisk
* support for deploy_key
* various helper functions (like notify)
* devstack plugin support for the old ramdisk

Fixed:

* vendor passthru for drivers (add support for IPA to seamicro one)
* documentation for iSCSI-based deployment

Change-Id: I0fc25c64339bc4c1f03ccb35cbc4efad4a7ad966
parent 1678c9f0e8
commit 259a492243
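As a quick illustration of the resulting wiring: with the old-ramdisk passthrus gone, a PXE-style driver only exposes the IPA methods. A minimal sketch based on the ipmitool/seamicro hunks below (names taken from the diff; anything beyond them is illustrative, not part of the patch):

    from ironic.drivers import utils
    from ironic.drivers.modules import iscsi_deploy

    iscsi_vendor = iscsi_deploy.VendorPassthru()
    # 'pass_deploy_info' and 'pass_bootloader_install_info' are removed;
    # only the IPA 'heartbeat' node passthru and the 'lookup' driver
    # passthru remain.
    mapping = {'heartbeat': iscsi_vendor}
    driver_passthru_mapping = {'lookup': iscsi_vendor}
    vendor = utils.MixinVendorInterface(mapping, driver_passthru_mapping)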
@@ -127,17 +127,16 @@ IRONIC_VM_LOG_CONSOLE=$(trueorfalse True IRONIC_VM_LOG_CONSOLE)
IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/}
IRONIC_VM_LOG_ROTATE=$(trueorfalse True IRONIC_VM_LOG_ROTATE)

# Use DIB to create deploy ramdisk and kernel.
# Whether to build the ramdisk or download a prebuilt one.
IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse True IRONIC_BUILD_DEPLOY_RAMDISK)

# Ironic IPA ramdisk type, supported types are: coreos, tinyipa and dib.
IRONIC_RAMDISK_TYPE=${IRONIC_RAMDISK_TYPE:-tinyipa}

# If not use DIB, these files are used as deploy ramdisk/kernel.
# If present, these files are used as deploy ramdisk/kernel.
# (The value must be an absolute path)
IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-}
IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-}
IRONIC_DEPLOY_ELEMENT=${IRONIC_DEPLOY_ELEMENT:-deploy-ironic}

# NOTE(jroll) this needs to be updated when stable branches are cut
IPA_DOWNLOAD_BRANCH=${IPA_DOWNLOAD_BRANCH:-master}
@@ -172,9 +171,6 @@ IRONIC_DIB_RAMDISK_OPTIONS=${IRONIC_DIB_RAMDISK_OPTIONS:-'ubuntu'}
# ``pxe_iscsi_cimc``, ``pxe_agent_cimc``, ``pxe_ucs`` and ``pxe_cimc``.
IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-pxe_ssh}

# TODO(agordeev): replace 'ubuntu' with host distro name getting
IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT}

# Support entry points installation of console scripts
IRONIC_BIN_DIR=$(get_python_exec_prefix)

@@ -200,16 +196,6 @@ IRONIC_VBMC_PORT_RANGE_START=${IRONIC_VBMC_PORT_RANGE_START:-6230}
IRONIC_VBMC_CONFIG_FILE=${IRONIC_VBMC_CONFIG_FILE:-$HOME/.vbmc/virtualbmc.conf}
IRONIC_VBMC_LOGFILE=${IRONIC_VBMC_LOGFILE:-$IRONIC_VM_LOG_DIR/virtualbmc.log}

# NOTE(lucasagomes): This flag is used to differentiate the nodes that
# uses IPA as their deploy ramdisk from nodes that uses the agent_* drivers
# (which also uses IPA but depends on Swift Temp URLs to work). At present,
# all drivers that uses the iSCSI approach for their deployment supports
# using both, IPA or bash ramdisks for the deployment. In the future we
# want to remove the support for the bash ramdisk in favor of IPA, once
# we get there this flag can be removed, and all conditionals that uses
# it should just run by default.
IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=$(trueorfalse False IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA)

# The path to the libvirt hooks directory, used if IRONIC_VM_LOG_ROTATE is True
IRONIC_LIBVIRT_HOOKS_PATH=${IRONIC_LIBVIRT_HOOKS_PATH:-/etc/libvirt/hooks/}

@@ -274,11 +260,6 @@ function is_deployed_by_ucs {
    return 1
}

function is_deployed_with_ipa_ramdisk {
    is_deployed_by_agent || [[ "$IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA" == "True" ]] && return 0
    return 1
}

function setup_virtualbmc {
    # Install pyghmi from source, if requested, otherwise it will be
    # downloaded as part of the virtualbmc installation

@@ -508,9 +489,7 @@ function configure_ironic_conductor {
    fi

    local pxe_params="nofb nomodeset vga=normal console=${IRONIC_TTY_DEV}"
    if is_deployed_with_ipa_ramdisk; then
        pxe_params+=" systemd.journald.forward_to_console=yes ipa-debug=1"
    fi

    pxe_params+=" $IRONIC_EXTRA_PXE_PARAMS"
@@ -1104,27 +1083,14 @@ function upload_baremetal_ironic_deploy {
    if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
        # we can build them only if we're not offline
        if [ "$OFFLINE" != "True" ]; then
            if is_deployed_with_ipa_ramdisk; then
                build_ipa_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH
            else
                # install diskimage-builder
                if [[ $(type -P ramdisk-image-create) == "" ]]; then
                    install_diskimage_builder
                fi
                ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
                    -o $TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER
            fi
        else
            die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be built in OFFLINE mode"
        fi
    else
        if is_deployed_with_ipa_ramdisk; then
            # download the agent image tarball
            wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL_PATH
            wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK_PATH
        else
            die $LINENO "Deploy kernel+ramdisk files don't exist and their building was disabled explicitly by IRONIC_BUILD_DEPLOY_RAMDISK"
        fi
    fi
fi
@@ -207,8 +207,6 @@ ironic-python-agent ships with a minimal cleaning configuration, only erasing
disks. However, with this ramdisk, you can add your own cleaning steps and/or
override default cleaning steps with a custom Hardware Manager.

In-band cleaning is not supported by the deprecated bash ramdisk.

Out-of-band
-----------
Out-of-band are actions performed by your management controller, such as IPMI,
@@ -277,17 +277,21 @@ This process is used with pxe_* family of drivers.
Node -> Neutron [label = "DHCP request"];
Neutron -> Node [label = "next-server = Conductor"];
Node -> Conductor [label = "Attempts to tftpboot from Conductor"];
"TFTP/HTTPd" -> Node [label = "Send deploy kernel, ramdisk\nand config"];
Node -> Node [label = "Runs deploy\nramdisk"];
"TFTP/HTTPd" -> Node [label = "Send deploy kernel, ramdisk and config"];
Node -> Node [label = "Runs agent\nramdisk"];
Node -> API [label = "lookup()"];
API -> Conductor [label = "..."];
Conductor -> Node [label = "Pass UUID"];
Node -> API [label = "Heartbeat (UUID)"];
API -> Conductor [label = "Heartbeat"];
Conductor -> Node [label = "Continue deploy: Pass image, disk info"];
Node -> Node [label = "Exposes disks\nvia iSCSI"];
Node -> API [label = "POST /vendor_passthru?method=pass_deploy_info"];
API -> Conductor [label = "Continue deploy"];
Conductor -> Node [label = "iSCSI attach"];
Conductor -> Node [label = "Copies user image"];
Conductor -> Node [label = "iSCSI detach"];
Conductor -> Node [label = "Sends 'DONE' message"];
Conductor -> Conductor [label = "Mark node as\nACTIVE"];
Node -> Node [label = "Terminates iSCSI endpoint"];
Conductor -> Neutron [label = "Clear DHCPBOOT"];
Conductor -> Node [label = "Reboot"];
Node -> Node [label = "Reboots into\nuser instance"];
}
@@ -33,11 +33,6 @@ app = {
    # IPA ramdisk methods
    '/v1/drivers/[a-z0-9_]*/vendor_passthru/lookup',
    '/v1/nodes/[a-z0-9\-]+/vendor_passthru/heartbeat',
    # DIB ramdisk methods
    # NOTE(yuriyz): support URL without 'v1' for backward compatibility
    # with old DIB ramdisks.
    '(?:/v1)?/nodes/[a-z0-9\-]+/vendor_passthru/pass_(?:deploy|'
    'bootloader_install)_info',
    ],
}
@@ -413,9 +413,7 @@ class BootInterface(object):
        Different implementations might want to boot the ramdisk in
        different ways by passing parameters to them. For example,

        - When DIB ramdisk is booted to deploy a node, it takes the
          parameters iscsi_target_iqn, deployment_id, ironic_api_url, etc.
        - When Agent ramdisk is booted to deploy a node, it takes the
        When Agent ramdisk is booted to deploy a node, it takes the
        parameters ipa-driver-name, ipa-api-url, root_device, etc.

        Other implementations can make use of ramdisk_params to pass such
@@ -44,9 +44,7 @@ class PXEDracDriver(base.BaseDriver):
        self.management = management.DracManagement()
        self.iscsi_vendor = iscsi_deploy.VendorPassthru()
        self.drac_vendor = vendor_passthru.DracVendorPassthru()
        self.mapping = {'pass_deploy_info': self.iscsi_vendor,
                        'heartbeat': self.iscsi_vendor,
                        'pass_bootloader_install_info': self.iscsi_vendor,
        self.mapping = {'heartbeat': self.iscsi_vendor,
                        'get_bios_config': self.drac_vendor,
                        'set_bios_config': self.drac_vendor,
                        'commit_bios_config': self.drac_vendor,
@@ -16,21 +16,12 @@ AMT Vendor Methods

from ironic.common import boot_devices
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import iscsi_deploy


class AMTPXEVendorPassthru(iscsi_deploy.VendorPassthru):

    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def pass_deploy_info(self, task, **kwargs):
        if deploy_utils.get_boot_option(task.node) == "netboot":
            task.driver.management.ensure_next_boot_device(task.node,
                                                           boot_devices.PXE)
        super(AMTPXEVendorPassthru, self).pass_deploy_info(task, **kwargs)

    @task_manager.require_exclusive_lock
    def continue_deploy(self, task, **kwargs):
        if deploy_utils.get_boot_option(task.node) == "netboot":
@@ -17,7 +17,6 @@
import contextlib
import os
import re
import socket
import time

from ironic_lib import disk_utils
@@ -309,16 +308,6 @@ def switch_pxe_config(path, root_uuid_or_disk_id, boot_mode,
    _replace_boot_line(path, boot_mode, is_whole_disk_image, trusted_boot)


def notify(address, port):
    """Notify a node that it becomes ready to reboot."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((address, port))
        s.send('done')
    finally:
        s.close()


def get_dev(address, port, iqn, lun):
    """Returns a device path for given parameters."""
    dev = ("/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s"

@@ -435,22 +424,6 @@ def _iscsi_setup_and_handle_errors(address, port, iqn, lun):
        delete_iscsi(address, port, iqn)


def notify_ramdisk_to_proceed(address):
    """Notifies the ramdisk waiting for instructions from Ironic.

    DIB ramdisk (from init script) makes vendor passhthrus and listens
    on port 10000 for Ironic to notify back the completion of the task.
    This method connects to port 10000 of the bare metal running the
    ramdisk and then sends some data to notify the ramdisk to proceed
    with it's next task.

    :param address: The IP address of the node.
    """
    # Ensure the node started netcat on the port after POST the request.
    time.sleep(3)
    notify(address, 10000)


def check_for_missing_params(info_dict, error_msg, param_prefix=''):
    """Check for empty params in the provided dictionary.
@@ -1008,6 +981,8 @@ def build_agent_options(node):
        # NOTE: The below entry is a temporary workaround for bug/1433812
        'coreos.configdrive': 0,
    }
    # TODO(dtantsur): deprecate in favor of reading root hints directly from a
    # node record.
    root_device = parse_root_device_hints(node)
    if root_device:
        agent_config_opts['root_device'] = root_device

@@ -1044,15 +1019,6 @@ def prepare_inband_cleaning(task, manage_boot=True):

    if manage_boot:
        ramdisk_opts = build_agent_options(task.node)

        # TODO(rameshg87): Below code is to make sure that bash ramdisk
        # invokes pass_deploy_info vendor passthru when it is booted
        # for cleaning. Remove the below code once we stop supporting
        # bash ramdisk in Ironic. Do a late import to avoid circular
        # import.
        from ironic.drivers.modules import iscsi_deploy
        ramdisk_opts.update(
            iscsi_deploy.build_deploy_ramdisk_options(task.node))
        task.driver.boot.prepare_ramdisk(task, ramdisk_opts)

    manager_utils.node_power_action(task, states.REBOOT)

@@ -1147,8 +1113,6 @@ def parse_instance_info(node):
                 " in node's instance_info")
    check_for_missing_params(i_info, error_msg)

    # Internal use only
    i_info['deploy_key'] = info.get('deploy_key')
    i_info['swap_mb'] = int(info.get('swap_mb', 0))
    i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0)
    err_msg_invalid = _("Cannot validate parameter for driver deploy. "
@@ -74,23 +74,6 @@ class VendorPassthru(iscsi_deploy.VendorPassthru):
            return
        super(VendorPassthru, self).validate(task, method, **kwargs)

    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def pass_deploy_info(self, task, **kwargs):
        """Continues the deployment of baremetal node over iSCSI.

        This method continues the deployment of the baremetal node over iSCSI
        from where the deployment ramdisk has left off.
        This updates boot mode and secure boot settings, if required.

        :param task: a TaskManager instance containing the node to act on.
        :param **kwargs: kwargs for performing iscsi deployment.
        :raises: InvalidState
        """
        ilo_common.update_boot_mode(task)
        ilo_common.update_secure_boot_mode(task, True)
        super(VendorPassthru, self).pass_deploy_info(task, **kwargs)

    @task_manager.require_exclusive_lock
    def continue_deploy(self, task, **kwargs):
        """Method invoked when deployed with the IPA ramdisk.
@@ -25,9 +25,6 @@ from six.moves.urllib import parse
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import states
from ironic.common import utils
@@ -185,26 +182,26 @@ def destroy_images(node_uuid):
    InstanceImageCache().clean_up()


def get_deploy_info(node, **kwargs):
def get_deploy_info(node, address, iqn, port=None, lun='1'):
    """Returns the information required for doing iSCSI deploy in a dictionary.

    :param node: ironic node object
    :param kwargs: the keyword args passed from the conductor node.
    :param address: iSCSI address
    :param iqn: iSCSI iqn for the target disk
    :param port: iSCSI port, defaults to one specified in the configuration
    :param lun: iSCSI lun, defaults to '1'
    :raises: MissingParameterValue, if some required parameters were not
        passed.
    :raises: InvalidParameterValue, if any of the parameters have invalid
        value.
    """
    deploy_key = kwargs.get('key')
    i_info = deploy_utils.parse_instance_info(node)
    if i_info['deploy_key'] != deploy_key:
        raise exception.InvalidParameterValue(_("Deploy key does not match"))

    params = {
        'address': kwargs.get('address'),
        'port': kwargs.get('port', CONF.iscsi.portal_port),
        'iqn': kwargs.get('iqn'),
        'lun': kwargs.get('lun', '1'),
        'address': address,
        'port': port or CONF.iscsi.portal_port,
        'iqn': iqn,
        'lun': lun,
        'image_path': _get_image_file_path(node.uuid),
        'node_uuid': node.uuid}
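The old entry point took an opaque **kwargs blob from the ramdisk's vendor passthru and verified a deploy key; the new signature is explicit. A sketch of a call with the new signature (the address value is illustrative):

    info = get_deploy_info(node,
                           address='203.0.113.20',
                           iqn='iqn.2008-10.org.openstack:%s' % node.uuid,
                           port=None,   # falls back to CONF.iscsi.portal_port
                           lun='1')
    # info carries address/port/iqn/lun plus image_path and node_uuid,
    # ready to hand to the iSCSI deployment helpers.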
@@ -261,7 +258,6 @@ def continue_deploy(task, **kwargs):
    node = task.node

    params = get_deploy_info(node, **kwargs)
    ramdisk_error = kwargs.get('error')

    def _fail_deploy(task, msg):
        """Fail the deploy after logging and setting error states."""

@@ -270,10 +266,6 @@ def continue_deploy(task, **kwargs):
        destroy_images(task.node.uuid)
        raise exception.InstanceDeployFailure(msg)

    if ramdisk_error:
        msg = _('Error returned from deploy ramdisk: %s') % ramdisk_error
        _fail_deploy(task, msg)

    # NOTE(lucasagomes): Let's make sure we don't log the full content
    # of the config drive here because it can be up to 64MB in size,
    # so instead let's log "***" in case config drive is enabled.
@@ -338,12 +330,11 @@ def do_agent_iscsi_deploy(task, agent_client):
        during the deploy.
    """
    node = task.node
    iscsi_options = build_deploy_ramdisk_options(node)
    i_info = deploy_utils.parse_instance_info(node)
    wipe_disk_metadata = not i_info['preserve_ephemeral']

    iqn = iscsi_options['iscsi_target_iqn']
    portal_port = iscsi_options['iscsi_portal_port']
    iqn = 'iqn.2008-10.org.openstack:%s' % node.uuid
    portal_port = CONF.iscsi.portal_port
    result = agent_client.start_iscsi_target(
        node, iqn,
        portal_port,

@@ -358,17 +349,7 @@ def do_agent_iscsi_deploy(task, agent_client):
    address = parse.urlparse(node.driver_internal_info['agent_url'])
    address = address.hostname

    # TODO(lucasagomes): The 'error' and 'key' parameters in the
    # dictionary below are just being passed because it's needed for
    # the continue_deploy() method, we are fooling it
    # for now. The agent driver doesn't use/need those. So we need to
    # refactor this bits here later.
    iscsi_params = {'error': result['command_error'],
                    'iqn': iqn,
                    'key': iscsi_options['deployment_key'],
                    'address': address}

    uuid_dict_returned = continue_deploy(task, **iscsi_params)
    uuid_dict_returned = continue_deploy(task, iqn=iqn, address=address)
    root_uuid_or_disk_id = uuid_dict_returned.get(
        'root uuid', uuid_dict_returned.get('disk identifier'))
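With the old ramdisk options gone, do_agent_iscsi_deploy() derives the IQN and portal port directly and calls continue_deploy() without the 'error'/'key' shims. A condensed sketch of the new sequence (lines taken from the hunk above, error handling omitted):

    iqn = 'iqn.2008-10.org.openstack:%s' % node.uuid
    portal_port = CONF.iscsi.portal_port
    agent_client.start_iscsi_target(node, iqn, portal_port,
                                    wipe_disk_metadata=wipe_disk_metadata)
    uuid_dict_returned = continue_deploy(task, iqn=iqn, address=address)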
@@ -394,54 +375,6 @@ def _get_boot_mode(node):
    return "bios"


def build_deploy_ramdisk_options(node):
    """Build the ramdisk config options for a node

    This method builds the ramdisk options for a node,
    given all the required parameters for doing iscsi deploy.

    :param node: a single Node.
    :returns: A dictionary of options to be passed to ramdisk for performing
        the deploy.
    """
    # NOTE: we should strip '/' from the end because this is intended for
    # hardcoded ramdisk script
    ironic_api = (CONF.conductor.api_url or
                  keystone.get_service_url()).rstrip('/')

    deploy_key = utils.random_alnum(32)
    i_info = node.instance_info
    i_info['deploy_key'] = deploy_key
    node.instance_info = i_info
    node.save()

    # XXX(jroll) DIB relies on boot_option=local to decide whether or not to
    # lay down a bootloader. Hack this for now; fix it for real in Liberty.
    # See also bug #1441556.
    boot_option = deploy_utils.get_boot_option(node)
    if node.driver_internal_info.get('is_whole_disk_image'):
        boot_option = 'netboot'

    deploy_options = {
        'deployment_id': node['uuid'],
        'deployment_key': deploy_key,
        'iscsi_target_iqn': 'iqn.2008-10.org.openstack:%s' % node.uuid,
        'iscsi_portal_port': CONF.iscsi.portal_port,
        'ironic_api_url': ironic_api,
        'disk': CONF.pxe.disk_devices,
        'boot_option': boot_option,
        'boot_mode': _get_boot_mode(node),
        # NOTE: The below entry is a temporary workaround for bug/1433812
        'coreos.configdrive': 0,
    }

    root_device = deploy_utils.parse_root_device_hints(node)
    if root_device:
        deploy_options['root_device'] = root_device

    return deploy_options


def validate(task):
    """Validates the pre-requisites for iSCSI deploy.
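Since build_deploy_ramdisk_options() is removed, the only options passed to the deploy ramdisk now come from deploy_utils.build_agent_options(). A two-line sketch of the resulting prepare path (matching the ISCSIDeploy.prepare hunk further down):

    deploy_opts = deploy_utils.build_agent_options(node)
    task.driver.boot.prepare_ramdisk(task, deploy_opts)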
@@ -470,93 +403,6 @@ def validate(task):
    deploy_utils.parse_instance_info(task.node)


def validate_pass_bootloader_info_input(task, input_params):
    """Validates the input sent with bootloader install info passthru.

    This method validates the input sent with bootloader install info
    passthru.

    :param task: A TaskManager object.
    :param input_params: A dictionary of params sent as input to passthru.
    :raises: InvalidParameterValue, if deploy key passed doesn't match the
        one stored in instance_info.
    :raises: MissingParameterValue, if some input is missing.
    """
    params = {'address': input_params.get('address'),
              'key': input_params.get('key'),
              'status': input_params.get('status')}
    msg = _("Some mandatory input missing in 'pass_bootloader_info' "
            "vendor passthru from ramdisk.")
    deploy_utils.check_for_missing_params(params, msg)

    deploy_key = task.node.instance_info['deploy_key']
    if deploy_key != input_params.get('key'):
        raise exception.InvalidParameterValue(
            _("Deploy key %(key_sent)s does not match "
              "with %(expected_key)s") %
            {'key_sent': input_params.get('key'), 'expected_key': deploy_key})


def validate_bootloader_install_status(task, input_params):
    """Validate if bootloader was installed.

    This method first validates if deploy key sent in vendor passthru
    was correct one, and then validates whether bootloader installation
    was successful or not.

    :param task: A TaskManager object.
    :param input_params: A dictionary of params sent as input to passthru.
    :raises: InstanceDeployFailure, if bootloader installation was
        reported from ramdisk as failure.
    """
    node = task.node
    if input_params['status'] != 'SUCCEEDED':
        msg = (_('Failed to install bootloader on node %(node)s. '
                 'Error: %(error)s.') %
               {'node': node.uuid, 'error': input_params.get('error')})
        LOG.error(msg)
        deploy_utils.set_failed_state(task, msg)
        raise exception.InstanceDeployFailure(msg)

    LOG.info(_LI('Bootloader successfully installed on node %s'), node.uuid)


def finish_deploy(task, address):
    """Notifies the ramdisk to reboot the node and makes the instance active.

    This method notifies the ramdisk to proceed to reboot and then
    makes the instance active.

    :param task: a TaskManager object.
    :param address: The IP address of the bare metal node.
    :raises: InstanceDeployFailure, if notifying ramdisk failed.
    """
    node = task.node
    try:
        deploy_utils.notify_ramdisk_to_proceed(address)
    except Exception as e:
        LOG.error(_LE('Deploy failed for instance %(instance)s. '
                      'Error: %(error)s'),
                  {'instance': node.instance_uuid, 'error': e})
        msg = (_('Failed to notify ramdisk to reboot after bootloader '
                 'installation. Error: %s') % e)
        deploy_utils.set_failed_state(task, msg)
        raise exception.InstanceDeployFailure(msg)

    # TODO(lucasagomes): When deploying a node with the DIB ramdisk
    # Ironic will not power control the node at the end of the deployment,
    # it's the DIB ramdisk that reboots the node. But, for the SSH driver
    # some changes like setting the boot device only gets applied when the
    # machine is powered off and on again. So the code below is enforcing
    # it. For Liberty we need to change the DIB ramdisk so that Ironic
    # always controls the power state of the node for all drivers.
    if deploy_utils.get_boot_option(node) == "local" and 'ssh' in node.driver:
        manager_utils.node_power_action(task, states.REBOOT)

    LOG.info(_LI('Deployment to node %s done'), node.uuid)
    task.process_event('done')


class ISCSIDeploy(base.DeployInterface):
    """PXE Deploy Interface for deploy-related actions."""
@@ -585,12 +431,10 @@ class ISCSIDeploy(base.DeployInterface):
    def deploy(self, task):
        """Start deployment of the task's node.

        Fetches instance image, creates a temporary keystone token file,
        updates the DHCP port options for next boot, and issues a reboot
        request to the power driver.
        Fetches instance image, updates the DHCP port options for next boot,
        and issues a reboot request to the power driver.
        This causes the node to boot into the deployment ramdisk and triggers
        the next phase of PXE-based deployment via
        VendorPassthru.pass_deploy_info().
        the next phase of PXE-based deployment via agent heartbeats.

        :param task: a TaskManager instance containing the node to act on.
        :returns: deploy state DEPLOYWAIT.

@@ -629,22 +473,14 @@ class ISCSIDeploy(base.DeployInterface):
        if node.provision_state == states.ACTIVE:
            task.driver.boot.prepare_instance(task)
        else:
            deploy_opts = build_deploy_ramdisk_options(node)

            # NOTE(lucasagomes): We are going to extend the normal PXE config
            # to also contain the agent options so it could be used for
            # both the DIB ramdisk and the IPA ramdisk
            agent_opts = deploy_utils.build_agent_options(node)
            deploy_opts.update(agent_opts)

            deploy_opts = deploy_utils.build_agent_options(node)
            task.driver.boot.prepare_ramdisk(task, deploy_opts)

    def clean_up(self, task):
        """Clean up the deployment environment for the task's node.

        Unlinks TFTP and instance images and triggers image cache cleanup.
        Removes the TFTP configuration files for this node. As a precaution,
        this method also ensures the keystone auth token file was removed.
        Removes the TFTP configuration files for this node.

        :param task: a TaskManager instance containing the node to act on.
        """
@@ -664,15 +500,8 @@ class ISCSIDeploy(base.DeployInterface):
        :raises NodeCleaningFailure: if the clean steps are not yet
            available (cached), for example, when a node has just been
            enrolled and has not been cleaned yet.
        :returns: A list of clean step dictionaries. If bash ramdisk is
            used for this node, it returns an empty list.
        :returns: A list of clean step dictionaries.
        """
        # TODO(rameshg87): Remove the below code once we stop supporting
        # bash ramdisk in Ironic. No need to log warning because we have
        # already logged it in pass_deploy_info.
        if 'agent_url' not in task.node.driver_internal_info:
            return []

        steps = deploy_utils.agent_get_clean_steps(
            task, interface='deploy',
            override_priorities={
@@ -716,148 +545,9 @@ class ISCSIDeploy(base.DeployInterface):
class VendorPassthru(agent_base_vendor.BaseAgentVendor):
    """Interface to mix IPMI and PXE vendor-specific interfaces."""

    def validate(self, task, method, **kwargs):
        """Validates the inputs for a vendor passthru.

        If invalid, raises an exception; otherwise returns None.

        Valid methods:
        * pass_deploy_info
        * pass_bootloader_install_info

        :param task: a TaskManager instance containing the node to act on.
        :param method: method to be validated.
        :param kwargs: kwargs containins the method's parameters.
        :raises: InvalidParameterValue if any parameters is invalid.
        """
        if method == 'pass_deploy_info':
            # TODO(rameshg87): Don't validate deploy info if bash ramdisk
            # booted during cleaning. It will be handled in pass_deploy_info
            # method. Remove the below code once we stop supporting bash
            # ramdisk in Ironic.
            if task.node.provision_state != states.CLEANWAIT:
                deploy_utils.validate_capabilities(task.node)
                get_deploy_info(task.node, **kwargs)
        elif method == 'pass_bootloader_install_info':
            validate_pass_bootloader_info_input(task, kwargs)

    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def pass_bootloader_install_info(self, task, **kwargs):
        """Accepts the results of bootloader installation.

        This method acts as a vendor passthru and accepts the result of
        the bootloader installation. If bootloader installation was
        successful, then it notifies the bare metal to proceed to reboot
        and makes the instance active. If the bootloader installation failed,
        then it sets provisioning as failed and powers off the node.

        :param task: A TaskManager object.
        :param kwargs: The arguments sent with vendor passthru. The expected
            kwargs are::

                'key': The deploy key for authorization
                'status': 'SUCCEEDED' or 'FAILED'
                'error': The error message if status == 'FAILED'
                'address': The IP address of the ramdisk

        """
        LOG.warning(_LW("The node %s is using the bash deploy ramdisk for "
                        "its deployment. This deploy ramdisk has been "
                        "deprecated. Please use the ironic-python-agent "
                        "(IPA) ramdisk instead."), task.node.uuid)
        task.process_event('resume')
        LOG.debug('Continuing the deployment on node %s', task.node.uuid)
        validate_bootloader_install_status(task, kwargs)
        finish_deploy(task, kwargs['address'])

    def _initiate_cleaning(self, task):
        """Initiates the steps required to start cleaning for the node.

        This method polls each interface of the driver for getting the
        clean steps and notifies Ironic conductor to resume cleaning.
        On error, it sets the node to CLEANFAIL state and populates
        node.last_error with the error message.

        :param task: a TaskManager instance containing the node to act on.
        """
        LOG.warning(
            _LW("Bash deploy ramdisk doesn't support in-band cleaning. "
                "Please use the ironic-python-agent (IPA) ramdisk "
                "instead for node %s. "), task.node.uuid)
        try:
            manager_utils.set_node_cleaning_steps(task)
            self.notify_conductor_resume_clean(task)
        except Exception as e:
            last_error = (
                _('Encountered exception for node %(node)s '
                  'while initiating cleaning. Error: %(error)s') %
                {'node': task.node.uuid, 'error': e})
            return manager_utils.cleaning_error_handler(task, last_error)

    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def pass_deploy_info(self, task, **kwargs):
        """Continues the deployment of baremetal node over iSCSI.

        This method continues the deployment of the baremetal node over iSCSI
        from where the deployment ramdisk has left off.

        :param task: a TaskManager instance containing the node to act on.
        :param kwargs: kwargs for performing iscsi deployment.
        :raises: InvalidState
        """
        node = task.node
        LOG.warning(_LW("The node %s is using the bash deploy ramdisk for "
                        "its deployment. This deploy ramdisk has been "
                        "deprecated. Please use the ironic-python-agent "
                        "(IPA) ramdisk instead."), node.uuid)

        # TODO(rameshg87): Remove the below code once we stop supporting
        # bash ramdisk in Ironic.
        if node.provision_state == states.CLEANWAIT:
            return self._initiate_cleaning(task)

        task.process_event('resume')
        LOG.debug('Continuing the deployment on node %s', node.uuid)

        is_whole_disk_image = node.driver_internal_info['is_whole_disk_image']
        uuid_dict_returned = continue_deploy(task, **kwargs)
        root_uuid_or_disk_id = uuid_dict_returned.get(
            'root uuid', uuid_dict_returned.get('disk identifier'))

        # save the node's root disk UUID so that another conductor could
        # rebuild the PXE config file. Due to a shortcoming in Nova objects,
        # we have to assign to node.driver_internal_info so the node knows it
        # has changed.
        driver_internal_info = node.driver_internal_info
        driver_internal_info['root_uuid_or_disk_id'] = root_uuid_or_disk_id
        node.driver_internal_info = driver_internal_info
        node.save()

        try:
            task.driver.boot.prepare_instance(task)

            if deploy_utils.get_boot_option(node) == "local":
                if not is_whole_disk_image:
                    LOG.debug('Installing the bootloader on node %s',
                              node.uuid)
                    deploy_utils.notify_ramdisk_to_proceed(kwargs['address'])
                    task.process_event('wait')
                    return

        except Exception as e:
            LOG.error(_LE('Deploy failed for instance %(instance)s. '
                          'Error: %(error)s'),
                      {'instance': node.instance_uuid, 'error': e})
            msg = _('Failed to continue iSCSI deployment.')
            deploy_utils.set_failed_state(task, msg)
        else:
            finish_deploy(task, kwargs.get('address'))

    @task_manager.require_exclusive_lock
    def continue_deploy(self, task, **kwargs):
        """Method invoked when deployed with the IPA ramdisk.
        """Method invoked when deployed using iSCSI.

        This method is invoked during a heartbeat from an agent when
        the node is in wait-call-back state. This deploys the image on
@@ -77,9 +77,7 @@ class PXEAndIPMIToolDriver(base.BaseDriver):
        self.ipmi_vendor = ipmitool.VendorPassthru()
        self.mapping = {'send_raw': self.ipmi_vendor,
                        'bmc_reset': self.ipmi_vendor,
                        'heartbeat': self.iscsi_vendor,
                        'pass_deploy_info': self.iscsi_vendor,
                        'pass_bootloader_install_info': self.iscsi_vendor}
                        'heartbeat': self.iscsi_vendor}
        self.driver_passthru_mapping = {'lookup': self.iscsi_vendor}
        self.vendor = utils.MixinVendorInterface(
            self.mapping,

@@ -137,8 +135,6 @@ class PXEAndIPMINativeDriver(base.BaseDriver):
            'send_raw': self.ipminative_vendor,
            'bmc_reset': self.ipminative_vendor,
            'heartbeat': self.iscsi_vendor,
            'pass_bootloader_install_info': self.iscsi_vendor,
            'pass_deploy_info': self.iscsi_vendor,
        }
        self.driver_passthru_mapping = {'lookup': self.iscsi_vendor}
        self.vendor = utils.MixinVendorInterface(self.mapping,

@@ -168,11 +164,13 @@ class PXEAndSeaMicroDriver(base.BaseDriver):
        self.deploy = iscsi_deploy.ISCSIDeploy()
        self.management = seamicro.Management()
        self.seamicro_vendor = seamicro.VendorPassthru()
        self.pxe_vendor = iscsi_deploy.VendorPassthru()
        self.mapping = {'pass_deploy_info': self.pxe_vendor,
        self.iscsi_vendor = iscsi_deploy.VendorPassthru()
        self.mapping = {'heartbeat': self.iscsi_vendor,
                        'attach_volume': self.seamicro_vendor,
                        'set_node_vlan_id': self.seamicro_vendor}
        self.vendor = utils.MixinVendorInterface(self.mapping)
        self.driver_passthru_mapping = {'lookup': self.iscsi_vendor}
        self.vendor = utils.MixinVendorInterface(self.mapping,
                                                 self.driver_passthru_mapping)
        self.console = seamicro.ShellinaboxConsole()
@@ -37,8 +37,7 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase):
            self.context, driver='pxe_amt', driver_info=INFO_DICT)

    def test_vendor_routes(self):
        expected = ['heartbeat', 'pass_deploy_info',
                    'pass_bootloader_install_info']
        expected = ['heartbeat']
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            vendor_routes = task.driver.vendor.vendor_routes

@@ -53,45 +52,6 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase):
            self.assertIsInstance(driver_routes, dict)
            self.assertEqual(sorted(expected), sorted(list(driver_routes)))

    @mock.patch.object(amt_mgmt.AMTManagement, 'ensure_next_boot_device',
                       spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy.VendorPassthru, 'pass_deploy_info',
                       spec_set=True, autospec=True)
    def test_vendorpassthru_pass_deploy_info_netboot(self,
                                                     mock_pxe_vendorpassthru,
                                                     mock_ensure):
        kwargs = {'address': '123456'}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.provision_state = states.DEPLOYWAIT
            task.node.target_provision_state = states.ACTIVE
            task.node.instance_info['capabilities'] = {
                "boot_option": "netboot"
            }
            task.driver.vendor.pass_deploy_info(task, **kwargs)
            mock_ensure.assert_called_with(
                task.driver.management, task.node, boot_devices.PXE)
            mock_pxe_vendorpassthru.assert_called_once_with(
                task.driver.vendor, task, **kwargs)

    @mock.patch.object(amt_mgmt.AMTManagement, 'ensure_next_boot_device',
                       spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy.VendorPassthru, 'pass_deploy_info',
                       spec_set=True, autospec=True)
    def test_vendorpassthru_pass_deploy_info_localboot(self,
                                                       mock_pxe_vendorpassthru,
                                                       mock_ensure):
        kwargs = {'address': '123456'}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.provision_state = states.DEPLOYWAIT
            task.node.target_provision_state = states.ACTIVE
            task.node.instance_info['capabilities'] = {"boot_option": "local"}
            task.driver.vendor.pass_deploy_info(task, **kwargs)
            self.assertFalse(mock_ensure.called)
            mock_pxe_vendorpassthru.assert_called_once_with(
                task.driver.vendor, task, **kwargs)

    @mock.patch.object(amt_mgmt.AMTManagement, 'ensure_next_boot_device',
                       spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy.VendorPassthru, 'continue_deploy',
@@ -109,26 +109,6 @@ class VendorPassthruTestCase(db_base.DbTestCase):
            validate_image_prop_mock.assert_called_once_with(
                task.context, {'image_source': 'foo'}, [])

    @mock.patch.object(iscsi_deploy.VendorPassthru, 'pass_deploy_info',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
                       autospec=True)
    def test_pass_deploy_info(self, func_update_boot_mode,
                              func_update_secure_boot_mode,
                              vendorpassthru_mock):
        kwargs = {'address': '123456'}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.provision_state = states.DEPLOYWAIT
            task.node.target_provision_state = states.ACTIVE
            task.driver.vendor.pass_deploy_info(task, **kwargs)
            func_update_boot_mode.assert_called_once_with(task)
            func_update_secure_boot_mode.assert_called_once_with(task, True)
            vendorpassthru_mock.assert_called_once_with(
                mock.ANY, task, **kwargs)

    @mock.patch.object(iscsi_deploy.VendorPassthru, 'continue_deploy',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_common, 'update_secure_boot_mode', autospec=True)
@@ -38,7 +38,6 @@ from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils as utils
from ironic.drivers.modules import image_cache
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.tests import base as tests_base
from ironic.tests.unit.conductor import mgr_utils

@@ -357,7 +356,7 @@ class PhysicalWorkTestCase(tests_base.TestCase):
        root_uuid = '12345678-1234-1234-12345678-12345678abcdef'

        utils_name_list = ['get_dev', 'discovery', 'login_iscsi',
                           'logout_iscsi', 'delete_iscsi', 'notify']
                           'logout_iscsi', 'delete_iscsi']

        disk_utils_name_list = ['is_block_device', 'get_image_mb',
                                'make_partitions', 'populate_image', 'mkfs',

@@ -501,7 +500,7 @@ class PhysicalWorkTestCase(tests_base.TestCase):
        efi_system_part_uuid = '9036-482'

        utils_name_list = ['get_dev', 'discovery', 'login_iscsi',
                           'logout_iscsi', 'delete_iscsi', 'notify']
                           'logout_iscsi', 'delete_iscsi']

        disk_utils_name_list = ['get_image_mb', 'make_partitions',
                                'is_block_device', 'populate_image', 'mkfs',

@@ -592,7 +591,7 @@ class PhysicalWorkTestCase(tests_base.TestCase):
        root_uuid = '12345678-1234-1234-12345678-12345678abcdef'

        utils_name_list = ['get_dev', 'discovery', 'login_iscsi',
                           'notify', 'logout_iscsi', 'delete_iscsi']
                           'logout_iscsi', 'delete_iscsi']

        disk_utils_name_list = ['make_partitions', 'get_image_mb',
                                'is_block_device', 'populate_image',

@@ -661,7 +660,7 @@ class PhysicalWorkTestCase(tests_base.TestCase):
        root_uuid = '12345678-1234-1234-12345678-12345678abcdef'

        utils_name_list = ['get_dev', 'discovery', 'login_iscsi',
                           'logout_iscsi', 'delete_iscsi', 'notify']
                           'logout_iscsi', 'delete_iscsi']

        disk_utils_name_list = ['get_image_mb', 'make_partitions',
                                'is_block_device', 'populate_image', 'mkfs',

@@ -742,7 +741,7 @@ class PhysicalWorkTestCase(tests_base.TestCase):
        root_uuid = '12345678-1234-1234-12345678-12345678abcdef'

        utils_name_list = ['get_dev', 'discovery', 'login_iscsi',
                           'delete_iscsi', 'logout_iscsi', 'notify']
                           'delete_iscsi', 'logout_iscsi']
        disk_utils_name_list = ['make_partitions', 'get_image_mb',
                                'is_block_device', 'populate_image', 'mkfs',
                                'block_uuid', 'get_dev_block_size']

@@ -817,7 +816,7 @@ class PhysicalWorkTestCase(tests_base.TestCase):
        root_uuid = '12345678-1234-1234-12345678-12345678abcdef'

        utils_name_list = ['get_dev', 'discovery', 'login_iscsi',
                           'logout_iscsi', 'delete_iscsi', 'notify']
                           'logout_iscsi', 'delete_iscsi']
        disk_utils_name_list = ['is_block_device', 'populate_image',
                                'get_image_mb', 'destroy_disk_metadata', 'dd',
                                'block_uuid', 'make_partitions',

@@ -886,7 +885,7 @@ class PhysicalWorkTestCase(tests_base.TestCase):

        dev = '/dev/fake'
        utils_name_list = ['get_dev', 'discovery', 'login_iscsi',
                           'logout_iscsi', 'delete_iscsi', 'notify']
                           'logout_iscsi', 'delete_iscsi']
        disk_utils_name_list = ['is_block_device', 'populate_image']

        utils_mock = self._mock_calls(utils_name_list, utils)

@@ -1768,16 +1767,13 @@ class AgentMethodsTestCase(db_base.DbTestCase):

    @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
    @mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
    @mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
                       autospec=True)
    @mock.patch.object(utils, 'build_agent_options', autospec=True)
    @mock.patch.object(utils, 'prepare_cleaning_ports', autospec=True)
    def _test_prepare_inband_cleaning(
            self, prepare_cleaning_ports_mock, iscsi_build_options_mock,
            self, prepare_cleaning_ports_mock,
            build_options_mock, power_mock, prepare_ramdisk_mock,
            manage_boot=True):
        build_options_mock.return_value = {'a': 'b'}
        iscsi_build_options_mock.return_value = {'c': 'd'}
        with task_manager.acquire(
                self.context, self.node.uuid, shared=False) as task:
            self.assertEqual(

@@ -1791,7 +1787,7 @@ class AgentMethodsTestCase(db_base.DbTestCase):
                'agent_erase_devices_zeroize'))
            if manage_boot:
                prepare_ramdisk_mock.assert_called_once_with(
                    mock.ANY, mock.ANY, {'a': 'b', 'c': 'd'})
                    mock.ANY, mock.ANY, {'a': 'b'})
                build_options_mock.assert_called_once_with(task.node)
            else:
                self.assertFalse(prepare_ramdisk_mock.called)
@@ -23,7 +23,6 @@ from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import uuidutils

from ironic.common import dhcp_factory
from ironic.common import driver_factory

@@ -37,7 +36,6 @@ from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.tests.unit.conductor import mgr_utils

@@ -160,111 +158,13 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
        mock_unlink.assert_called_once_with('/path/uuid/disk')
        mock_rmtree.assert_called_once_with('/path/uuid')

    def _test_build_deploy_ramdisk_options(self, mock_alnum, api_url,
                                           expected_root_device=None,
                                           expected_boot_option='netboot',
                                           expected_boot_mode='bios'):
        fake_key = '0123456789ABCDEFGHIJKLMNOPQRSTUV'
        fake_disk = 'fake-disk'

        self.config(disk_devices=fake_disk, group='pxe')

        mock_alnum.return_value = fake_key

        expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid
        expected_opts = {
            'iscsi_target_iqn': expected_iqn,
            'iscsi_portal_port': 3260,
            'deployment_id': self.node.uuid,
            'deployment_key': fake_key,
            'disk': fake_disk,
            'ironic_api_url': api_url,
            'boot_option': expected_boot_option,
            'boot_mode': expected_boot_mode,
            'coreos.configdrive': 0,
        }

        if expected_root_device:
            expected_opts['root_device'] = expected_root_device

        opts = iscsi_deploy.build_deploy_ramdisk_options(self.node)

        self.assertEqual(expected_opts, opts)
        mock_alnum.assert_called_once_with(32)
        # assert deploy_key was injected in the node
        self.assertIn('deploy_key', self.node.instance_info)

    @mock.patch.object(keystone, 'get_service_url', autospec=True)
    @mock.patch.object(utils, 'random_alnum', autospec=True)
    def test_build_deploy_ramdisk_options(self, mock_alnum, mock_get_url):
        fake_api_url = 'http://127.0.0.1:6385'
        self.config(api_url=fake_api_url, group='conductor')
        self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url)

        # As we are getting the Ironic api url from the config file
        # assert keystone wasn't called
        self.assertFalse(mock_get_url.called)

    @mock.patch.object(keystone, 'get_service_url', autospec=True)
    @mock.patch.object(utils, 'random_alnum', autospec=True)
    def test_build_deploy_ramdisk_options_keystone(self, mock_alnum,
                                                   mock_get_url):
        fake_api_url = 'http://127.0.0.1:6385'
        mock_get_url.return_value = fake_api_url
        self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url)

        # As the Ironic api url is not specified in the config file
        # assert we are getting it from keystone
        mock_get_url.assert_called_once_with()

    @mock.patch.object(keystone, 'get_service_url', autospec=True)
    @mock.patch.object(utils, 'random_alnum', autospec=True)
    def test_build_deploy_ramdisk_options_root_device(self, mock_alnum,
                                                      mock_get_url):
        self.node.properties['root_device'] = {'wwn': 123456}
        expected = 'wwn=123456'
        fake_api_url = 'http://127.0.0.1:6385'
        self.config(api_url=fake_api_url, group='conductor')
        self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url,
                                                expected_root_device=expected)

    @mock.patch.object(keystone, 'get_service_url', autospec=True)
    @mock.patch.object(utils, 'random_alnum', autospec=True)
    def test_build_deploy_ramdisk_options_boot_option(self, mock_alnum,
                                                      mock_get_url):
        self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
        expected = 'local'
        fake_api_url = 'http://127.0.0.1:6385'
        self.config(api_url=fake_api_url, group='conductor')
        self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url,
                                                expected_boot_option=expected)

    @mock.patch.object(keystone, 'get_service_url', autospec=True)
    @mock.patch.object(utils, 'random_alnum', autospec=True)
    def test_build_deploy_ramdisk_options_whole_disk_image(self, mock_alnum,
                                                           mock_get_url):
        """Tests a hack to boot_option for whole disk images.

        This hack is in place to fix bug #1441556.
        """
        self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
        dii = self.node.driver_internal_info
        dii['is_whole_disk_image'] = True
        self.node.driver_internal_info = dii
        self.node.save()
        expected = 'netboot'
        fake_api_url = 'http://127.0.0.1:6385'
        self.config(api_url=fake_api_url, group='conductor')
        self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url,
                                                expected_boot_option=expected)

    @mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
    @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
    def test_continue_deploy_fail(self, deploy_mock, power_mock,
                                  mock_image_cache, mock_disk_layout):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        deploy_mock.side_effect = iter([
            exception.InstanceDeployFailure("test deploy error")])
        self.node.provision_state = states.DEPLOYWAIT

@@ -286,39 +186,13 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
            mock_image_cache.return_value.clean_up.assert_called_once_with()
            self.assertFalse(mock_disk_layout.called)

    @mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
    @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
    def test_continue_deploy_ramdisk_fails(self, deploy_mock, power_mock,
                                           mock_image_cache, mock_disk_layout):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789',
                  'error': 'test ramdisk error'}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InstanceDeployFailure,
                              iscsi_deploy.continue_deploy,
                              task, **kwargs)
            self.assertIsNotNone(task.node.last_error)
            self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
            self.assertEqual(states.ACTIVE, task.node.target_provision_state)
            power_mock.assert_called_once_with(task, states.POWER_OFF)
            mock_image_cache.assert_called_once_with()
            mock_image_cache.return_value.clean_up.assert_called_once_with()
            self.assertFalse(deploy_mock.called)
            self.assertFalse(mock_disk_layout.called)

    @mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
    @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
    def test_continue_deploy_fail_no_root_uuid_or_disk_id(
            self, deploy_mock, power_mock, mock_image_cache, mock_disk_layout):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        deploy_mock.return_value = {}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE

@@ -345,7 +219,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
    @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
    def test_continue_deploy_fail_empty_root_uuid(
            self, deploy_mock, power_mock, mock_image_cache, mock_disk_layout):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        deploy_mock.return_value = {'root uuid': ''}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE

@@ -374,7 +248,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
    @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
    def test_continue_deploy(self, deploy_mock, power_mock, mock_image_cache,
                             mock_deploy_info, mock_log, mock_disk_layout):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()

@@ -427,7 +301,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
    def test_continue_deploy_whole_disk_image(
            self, deploy_mock, power_mock, mock_image_cache, mock_deploy_info,
            mock_log):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()

@@ -467,10 +341,9 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
            extra_instance_info = {}

        instance_info = self.node.instance_info
        instance_info['deploy_key'] = 'key'
        instance_info.update(extra_instance_info)
        self.node.instance_info = instance_info
        kwargs = {'address': '1.1.1.1', 'iqn': 'target-iqn', 'key': 'key'}
        kwargs = {'address': '1.1.1.1', 'iqn': 'target-iqn'}
        ret_val = iscsi_deploy.get_deploy_info(self.node, **kwargs)
        self.assertEqual('1.1.1.1', ret_val['address'])
        self.assertEqual('target-iqn', ret_val['iqn'])

@@ -505,13 +378,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
        self.assertEqual(3266, ret_val['port'])

    @mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True)
    @mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
                       autospec=True)
    def test_do_agent_iscsi_deploy_okay(self, build_options_mock,
                                        continue_deploy_mock):
        build_options_mock.return_value = {'deployment_key': 'abcdef',
                                           'iscsi_target_iqn': 'iqn-qweqwe',
                                           'iscsi_portal_port': 3260}
    def test_do_agent_iscsi_deploy_okay(self, continue_deploy_mock):
        agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient)
        agent_client_mock.start_iscsi_target.return_value = {
            'command_status': 'SUCCESS', 'command_error': None}

@@ -520,31 +387,25 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
        self.node.save()
        uuid_dict_returned = {'root uuid': 'some-root-uuid'}
        continue_deploy_mock.return_value = uuid_dict_returned
        expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            ret_val = iscsi_deploy.do_agent_iscsi_deploy(
                task, agent_client_mock)
            build_options_mock.assert_called_once_with(task.node)
            agent_client_mock.start_iscsi_target.assert_called_once_with(
                task.node, 'iqn-qweqwe', 3260, wipe_disk_metadata=True)
                task.node, expected_iqn, 3260, wipe_disk_metadata=True)
            continue_deploy_mock.assert_called_once_with(
                task, error=None, iqn='iqn-qweqwe', key='abcdef',
                address='1.2.3.4')
                task, iqn=expected_iqn, address='1.2.3.4')
            self.assertEqual(
                'some-root-uuid',
                task.node.driver_internal_info['root_uuid_or_disk_id'])
            self.assertEqual(ret_val, uuid_dict_returned)

    @mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True)
    @mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
                       autospec=True)
    def test_do_agent_iscsi_deploy_preserve_ephemeral(self, build_options_mock,
    def test_do_agent_iscsi_deploy_preserve_ephemeral(self,
                                                      continue_deploy_mock):
        """Ensure the disk is not wiped if preserve_ephemeral is True."""
        build_options_mock.return_value = {'deployment_key': 'abcdef',
                                           'iscsi_target_iqn': 'iqn-qweqwe',
                                           'iscsi_portal_port': 3260}
        agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient)
        agent_client_mock.start_iscsi_target.return_value = {
            'command_status': 'SUCCESS', 'command_error': None}

@@ -554,158 +415,37 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
        self.node.save()
        uuid_dict_returned = {'root uuid': 'some-root-uuid'}
        continue_deploy_mock.return_value = uuid_dict_returned
        expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.instance_info['preserve_ephemeral'] = True
            iscsi_deploy.do_agent_iscsi_deploy(
                task, agent_client_mock)
            build_options_mock.assert_called_once_with(task.node)
            agent_client_mock.start_iscsi_target.assert_called_once_with(
                task.node, 'iqn-qweqwe', 3260, wipe_disk_metadata=False)
                task.node, expected_iqn, 3260, wipe_disk_metadata=False)

    @mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
                       autospec=True)
    def test_do_agent_iscsi_deploy_start_iscsi_failure(self,
                                                       build_options_mock):
        build_options_mock.return_value = {'deployment_key': 'abcdef',
                                           'iscsi_target_iqn': 'iqn-qweqwe',
                                           'iscsi_portal_port': 3260}
    def test_do_agent_iscsi_deploy_start_iscsi_failure(self):
        agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient)
        agent_client_mock.start_iscsi_target.return_value = {
            'command_status': 'FAILED', 'command_error': 'booom'}
        self.node.provision_state = states.DEPLOYING
        self.node.target_provision_state = states.ACTIVE
        self.node.save()
        expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InstanceDeployFailure,
                              iscsi_deploy.do_agent_iscsi_deploy,
                              task, agent_client_mock)
            build_options_mock.assert_called_once_with(task.node)
            agent_client_mock.start_iscsi_target.assert_called_once_with(
                task.node, 'iqn-qweqwe', 3260, wipe_disk_metadata=True)
                task.node, expected_iqn, 3260, wipe_disk_metadata=True)
        self.node.refresh()
        self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
        self.assertEqual(states.ACTIVE, self.node.target_provision_state)
        self.assertIsNotNone(self.node.last_error)

    def test_validate_pass_bootloader_info_input(self):
        params = {'key': 'some-random-key', 'address': '1.2.3.4',
                  'error': '', 'status': 'SUCCEEDED'}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.instance_info['deploy_key'] = 'some-random-key'
            # Assert that the method doesn't raise
            iscsi_deploy.validate_pass_bootloader_info_input(task, params)

    def test_validate_pass_bootloader_info_missing_status(self):
|
||||
params = {'key': 'some-random-key', 'address': '1.2.3.4'}
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=False) as task:
|
||||
self.assertRaises(exception.MissingParameterValue,
|
||||
iscsi_deploy.validate_pass_bootloader_info_input,
|
||||
task, params)
|
||||
|
||||
def test_validate_pass_bootloader_info_missing_key(self):
|
||||
params = {'status': 'SUCCEEDED', 'address': '1.2.3.4'}
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=False) as task:
|
||||
self.assertRaises(exception.MissingParameterValue,
|
||||
iscsi_deploy.validate_pass_bootloader_info_input,
|
||||
task, params)
|
||||
|
||||
def test_validate_pass_bootloader_info_missing_address(self):
|
||||
params = {'status': 'SUCCEEDED', 'key': 'some-random-key'}
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=False) as task:
|
||||
self.assertRaises(exception.MissingParameterValue,
|
||||
iscsi_deploy.validate_pass_bootloader_info_input,
|
||||
task, params)
|
||||
|
||||
def test_validate_pass_bootloader_info_input_invalid_key(self):
|
||||
params = {'key': 'some-other-key', 'address': '1.2.3.4',
|
||||
'status': 'SUCCEEDED'}
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=False) as task:
|
||||
task.node.instance_info['deploy_key'] = 'some-random-key'
|
||||
self.assertRaises(exception.InvalidParameterValue,
|
||||
iscsi_deploy.validate_pass_bootloader_info_input,
|
||||
task, params)
|
||||
|
||||
def test_validate_bootloader_install_status(self):
|
||||
kwargs = {'key': 'abcdef', 'status': 'SUCCEEDED', 'error': ''}
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=False) as task:
|
||||
task.node.instance_info['deploy_key'] = 'abcdef'
|
||||
# Nothing much to assert except that it shouldn't raise.
|
||||
iscsi_deploy.validate_bootloader_install_status(task, kwargs)
|
||||
|
||||
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
|
||||
def test_validate_bootloader_install_status_install_failed(
|
||||
self, set_fail_state_mock):
|
||||
kwargs = {'key': 'abcdef', 'status': 'FAILED', 'error': 'some-error'}
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=False) as task:
|
||||
task.node.provision_state = states.DEPLOYING
|
||||
task.node.target_provision_state = states.ACTIVE
|
||||
task.node.instance_info['deploy_key'] = 'abcdef'
|
||||
self.assertRaises(exception.InstanceDeployFailure,
|
||||
iscsi_deploy.validate_bootloader_install_status,
|
||||
task, kwargs)
|
||||
set_fail_state_mock.assert_called_once_with(task, mock.ANY)
|
||||
|
||||
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
|
||||
autospec=True)
|
||||
def test_finish_deploy(self, notify_mock):
|
||||
self.node.provision_state = states.DEPLOYING
|
||||
self.node.target_provision_state = states.ACTIVE
|
||||
self.node.save()
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=False) as task:
|
||||
iscsi_deploy.finish_deploy(task, '1.2.3.4')
|
||||
notify_mock.assert_called_once_with('1.2.3.4')
|
||||
self.assertEqual(states.ACTIVE, task.node.provision_state)
|
||||
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
|
||||
|
||||
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
|
||||
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
|
||||
autospec=True)
|
||||
def test_finish_deploy_notify_fails(self, notify_mock,
|
||||
set_fail_state_mock):
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=False) as task:
|
||||
notify_mock.side_effect = RuntimeError()
|
||||
self.assertRaises(exception.InstanceDeployFailure,
|
||||
iscsi_deploy.finish_deploy, task, '1.2.3.4')
|
||||
set_fail_state_mock.assert_called_once_with(task, mock.ANY)
|
||||
|
||||
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
|
||||
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
|
||||
autospec=True)
|
||||
def test_finish_deploy_ssh_with_local_boot(self, notify_mock,
|
||||
node_power_mock):
|
||||
instance_info = dict(INST_INFO_DICT)
|
||||
instance_info['capabilities'] = {'boot_option': 'local'}
|
||||
n = {
|
||||
'uuid': uuidutils.generate_uuid(),
|
||||
'driver': 'fake_ssh',
|
||||
'instance_info': instance_info,
|
||||
'provision_state': states.DEPLOYING,
|
||||
'target_provision_state': states.ACTIVE,
|
||||
}
|
||||
mgr_utils.mock_the_extension_manager(driver="fake_ssh")
|
||||
node = obj_utils.create_test_node(self.context, **n)
|
||||
|
||||
with task_manager.acquire(self.context, node.uuid,
|
||||
shared=False) as task:
|
||||
iscsi_deploy.finish_deploy(task, '1.2.3.4')
|
||||
notify_mock.assert_called_once_with('1.2.3.4')
|
||||
self.assertEqual(states.ACTIVE, task.node.provision_state)
|
||||
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
|
||||
node_power_mock.assert_called_once_with(task, states.REBOOT)
|
||||
|
||||
@mock.patch.object(keystone, 'get_service_url', autospec=True)
|
||||
def test_validate_good_api_url_from_config_file(self, mock_ks):
|
||||
# not present in the keystone catalog
|
||||
@ -799,12 +539,9 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
|
||||
task.driver.boot, task)
|
||||
|
||||
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
|
||||
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
|
||||
autospec=True)
|
||||
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
|
||||
def test_prepare_node_deploying(self, mock_prepare_ramdisk,
|
||||
mock_iscsi_options, mock_agent_options):
|
||||
mock_iscsi_options.return_value = {'a': 'b'}
|
||||
mock_agent_options):
|
||||
mock_agent_options.return_value = {'c': 'd'}
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=True) as task:
|
||||
@ -812,10 +549,9 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
|
||||
|
||||
task.driver.deploy.prepare(task)
|
||||
|
||||
mock_iscsi_options.assert_called_once_with(task.node)
|
||||
mock_agent_options.assert_called_once_with(task.node)
|
||||
mock_prepare_ramdisk.assert_called_once_with(
|
||||
task.driver.boot, task, {'a': 'b', 'c': 'd'})
|
||||
task.driver.boot, task, {'c': 'd'})
|
||||
|
||||
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
|
||||
@mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
|
||||
@ -894,18 +630,6 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
|
||||
'erase_devices': 10})
|
||||
self.assertEqual(mock_steps, steps)
|
||||
|
||||
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
|
||||
autospec=True)
|
||||
def test_get_clean_steps_no_agent_url(self, mock_get_clean_steps):
|
||||
# Test getting clean steps
|
||||
self.node.driver_internal_info = {}
|
||||
self.node.save()
|
||||
with task_manager.acquire(self.context, self.node.uuid) as task:
|
||||
steps = task.driver.deploy.get_clean_steps(task)
|
||||
|
||||
self.assertEqual([], steps)
|
||||
self.assertFalse(mock_get_clean_steps.called)
|
||||
|
||||
@mock.patch.object(deploy_utils, 'agent_execute_clean_step', autospec=True)
|
||||
def test_execute_clean_step(self, agent_execute_clean_step_mock):
|
||||
with task_manager.acquire(self.context, self.node.uuid) as task:
|
||||
@ -935,218 +659,8 @@ class TestVendorPassthru(db_base.DbTestCase):
|
||||
self.task.driver = self.driver
|
||||
self.task.context = self.context
|
||||
|
||||
def test_validate_good(self):
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=True) as task:
|
||||
task.node.instance_info['deploy_key'] = 'fake-56789'
|
||||
task.driver.vendor.validate(task, method='pass_deploy_info',
|
||||
address='123456', iqn='aaa-bbb',
|
||||
key='fake-56789')
|
||||
|
||||
def test_validate_pass_deploy_info_during_cleaning(self):
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=True) as task:
|
||||
task.node.provision_state = states.CLEANWAIT
|
||||
# Assert that it doesn't raise.
|
||||
self.assertIsNone(
|
||||
task.driver.vendor.validate(task, method='pass_deploy_info',
|
||||
address='123456', iqn='aaa-bbb',
|
||||
key='fake-56789'))
|
||||
|
||||
def test_validate_fail(self):
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=True) as task:
|
||||
self.assertRaises(exception.InvalidParameterValue,
|
||||
task.driver.vendor.validate,
|
||||
task, method='pass_deploy_info',
|
||||
key='fake-56789')
|
||||
|
||||
def test_validate_key_notmatch(self):
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=True) as task:
|
||||
self.assertRaises(exception.InvalidParameterValue,
|
||||
task.driver.vendor.validate,
|
||||
task, method='pass_deploy_info',
|
||||
address='123456', iqn='aaa-bbb',
|
||||
key='fake-12345')
|
||||
|
||||
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
|
||||
'notify_conductor_resume_clean',
|
||||
autospec=True)
|
||||
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
|
||||
@mock.patch.object(iscsi_deploy, 'LOG', spec=['warning'])
|
||||
def test__initiate_cleaning(self, log_mock, set_node_cleaning_steps_mock,
|
||||
notify_mock):
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=True) as task:
|
||||
task.driver.vendor._initiate_cleaning(task)
|
||||
|
||||
log_mock.warning.assert_called_once_with(mock.ANY, mock.ANY)
|
||||
set_node_cleaning_steps_mock.assert_called_once_with(task)
|
||||
notify_mock.assert_called_once_with(self.driver.vendor, task)
|
||||
|
||||
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
|
||||
'notify_conductor_resume_clean',
|
||||
autospec=True)
|
||||
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
|
||||
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
|
||||
@mock.patch.object(iscsi_deploy, 'LOG', spec=['warning'])
|
||||
def test__initiate_cleaning_exception(
|
||||
self, log_mock, set_node_cleaning_steps_mock,
|
||||
cleaning_error_handler_mock, notify_mock):
|
||||
set_node_cleaning_steps_mock.side_effect = RuntimeError()
|
||||
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=True) as task:
|
||||
task.driver.vendor._initiate_cleaning(task)
|
||||
|
||||
log_mock.warning.assert_called_once_with(mock.ANY, mock.ANY)
|
||||
set_node_cleaning_steps_mock.assert_called_once_with(task)
|
||||
cleaning_error_handler_mock.assert_called_once_with(task, mock.ANY)
|
||||
self.assertFalse(notify_mock.called)
|
||||
|
||||
@mock.patch.object(fake.FakeBoot, 'prepare_instance', autospec=True)
|
||||
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
|
||||
autospec=True)
|
||||
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
|
||||
@mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
|
||||
def _test_pass_deploy_info_deploy(self, is_localboot, mock_deploy,
|
||||
mock_image_cache,
|
||||
notify_mock,
|
||||
fakeboot_prepare_instance_mock):
|
||||
# set local boot
|
||||
i_info = self.node.instance_info
|
||||
if is_localboot:
|
||||
i_info['capabilities'] = '{"boot_option": "local"}'
|
||||
|
||||
i_info['deploy_key'] = 'fake-56789'
|
||||
self.node.instance_info = i_info
|
||||
|
||||
self.node.power_state = states.POWER_ON
|
||||
self.node.provision_state = states.DEPLOYWAIT
|
||||
self.node.target_provision_state = states.ACTIVE
|
||||
self.node.save()
|
||||
|
||||
root_uuid = "12345678-1234-1234-1234-1234567890abcxyz"
|
||||
mock_deploy.return_value = {'root uuid': root_uuid}
|
||||
|
||||
with task_manager.acquire(self.context, self.node.uuid) as task:
|
||||
task.driver.vendor.pass_deploy_info(
|
||||
task, address='123456', iqn='aaa-bbb', key='fake-56789')
|
||||
|
||||
self.node.refresh()
|
||||
self.assertEqual(states.POWER_ON, self.node.power_state)
|
||||
self.assertIn('root_uuid_or_disk_id', self.node.driver_internal_info)
|
||||
self.assertIsNone(self.node.last_error)
|
||||
mock_image_cache.assert_called_once_with()
|
||||
mock_image_cache.return_value.clean_up.assert_called_once_with()
|
||||
notify_mock.assert_called_once_with('123456')
|
||||
fakeboot_prepare_instance_mock.assert_called_once_with(mock.ANY, task)
|
||||
|
||||
@mock.patch.object(fake.FakeBoot, 'prepare_instance', autospec=True)
|
||||
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
|
||||
autospec=True)
|
||||
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
|
||||
@mock.patch.object(deploy_utils, 'deploy_disk_image', autospec=True)
|
||||
def _test_pass_deploy_info_whole_disk_image(self, is_localboot,
|
||||
mock_deploy,
|
||||
mock_image_cache,
|
||||
notify_mock,
|
||||
fakeboot_prep_inst_mock):
|
||||
i_info = self.node.instance_info
|
||||
# set local boot
|
||||
if is_localboot:
|
||||
i_info['capabilities'] = '{"boot_option": "local"}'
|
||||
|
||||
i_info['deploy_key'] = 'fake-56789'
|
||||
self.node.instance_info = i_info
|
||||
|
||||
self.node.power_state = states.POWER_ON
|
||||
self.node.provision_state = states.DEPLOYWAIT
|
||||
self.node.target_provision_state = states.ACTIVE
|
||||
self.node.save()
|
||||
|
||||
disk_id = '0x12345678'
|
||||
mock_deploy.return_value = {'disk identifier': disk_id}
|
||||
|
||||
with task_manager.acquire(self.context, self.node.uuid) as task:
|
||||
task.node.driver_internal_info['is_whole_disk_image'] = True
|
||||
task.driver.vendor.pass_deploy_info(task, address='123456',
|
||||
iqn='aaa-bbb',
|
||||
key='fake-56789')
|
||||
|
||||
self.node.refresh()
|
||||
self.assertEqual(states.POWER_ON, self.node.power_state)
|
||||
self.assertIsNone(self.node.last_error)
|
||||
mock_image_cache.assert_called_once_with()
|
||||
mock_image_cache.return_value.clean_up.assert_called_once_with()
|
||||
notify_mock.assert_called_once_with('123456')
|
||||
fakeboot_prep_inst_mock.assert_called_once_with(mock.ANY, task)
|
||||
|
||||
def test_pass_deploy_info_deploy(self):
|
||||
self._test_pass_deploy_info_deploy(False)
|
||||
self.assertEqual(states.ACTIVE, self.node.provision_state)
|
||||
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
|
||||
|
||||
def test_pass_deploy_info_localboot(self):
|
||||
self._test_pass_deploy_info_deploy(True)
|
||||
self.assertEqual(states.DEPLOYWAIT, self.node.provision_state)
|
||||
self.assertEqual(states.ACTIVE, self.node.target_provision_state)
|
||||
|
||||
def test_pass_deploy_info_whole_disk_image(self):
|
||||
self._test_pass_deploy_info_whole_disk_image(False)
|
||||
self.assertEqual(states.ACTIVE, self.node.provision_state)
|
||||
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
|
||||
|
||||
def test_pass_deploy_info_whole_disk_image_localboot(self):
|
||||
self._test_pass_deploy_info_whole_disk_image(True)
|
||||
self.assertEqual(states.ACTIVE, self.node.provision_state)
|
||||
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
|
||||
|
||||
def test_pass_deploy_info_invalid(self):
|
||||
self.node.power_state = states.POWER_ON
|
||||
self.node.provision_state = states.AVAILABLE
|
||||
self.node.target_provision_state = states.NOSTATE
|
||||
self.node.save()
|
||||
|
||||
with task_manager.acquire(self.context, self.node.uuid) as task:
|
||||
self.assertRaises(exception.InvalidState,
|
||||
task.driver.vendor.pass_deploy_info,
|
||||
task, address='123456', iqn='aaa-bbb',
|
||||
key='fake-56789', error='test ramdisk error')
|
||||
|
||||
self.node.refresh()
|
||||
self.assertEqual(states.AVAILABLE, self.node.provision_state)
|
||||
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
|
||||
self.assertEqual(states.POWER_ON, self.node.power_state)
|
||||
|
||||
@mock.patch.object(iscsi_deploy.VendorPassthru, 'pass_deploy_info')
|
||||
def test_pass_deploy_info_lock_elevated(self, mock_deploy_info):
|
||||
with task_manager.acquire(self.context, self.node.uuid) as task:
|
||||
task.driver.vendor.pass_deploy_info(
|
||||
task, address='123456', iqn='aaa-bbb', key='fake-56789')
|
||||
|
||||
# lock elevated w/o exception
|
||||
self.assertEqual(1, mock_deploy_info.call_count,
|
||||
"pass_deploy_info was not called once.")
|
||||
|
||||
@mock.patch.object(iscsi_deploy.VendorPassthru,
|
||||
'_initiate_cleaning', autospec=True)
|
||||
def test_pass_deploy_info_cleaning(self, initiate_cleaning_mock):
|
||||
with task_manager.acquire(self.context, self.node.uuid) as task:
|
||||
task.node.provision_state = states.CLEANWAIT
|
||||
task.driver.vendor.pass_deploy_info(
|
||||
task, address='123456', iqn='aaa-bbb', key='fake-56789')
|
||||
initiate_cleaning_mock.assert_called_once_with(
|
||||
task.driver.vendor, task)
|
||||
# Asserting if we are still on CLEANWAIT state confirms that
|
||||
# we return from pass_deploy_info method after initiating
|
||||
# cleaning.
|
||||
self.assertEqual(states.CLEANWAIT, task.node.provision_state)
|
||||
|
||||
def test_vendor_routes(self):
|
||||
expected = ['heartbeat', 'pass_deploy_info',
|
||||
'pass_bootloader_install_info']
|
||||
expected = ['heartbeat']
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=True) as task:
|
||||
vendor_routes = task.driver.vendor.vendor_routes
|
||||
@ -1161,21 +675,6 @@ class TestVendorPassthru(db_base.DbTestCase):
|
||||
self.assertIsInstance(driver_routes, dict)
|
||||
self.assertEqual(sorted(expected), sorted(list(driver_routes)))
|
||||
|
||||
@mock.patch.object(iscsi_deploy, 'validate_bootloader_install_status',
|
||||
autospec=True)
|
||||
@mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
|
||||
def test_pass_bootloader_install_info(self, finish_deploy_mock,
|
||||
validate_input_mock):
|
||||
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
|
||||
self.node.provision_state = states.DEPLOYWAIT
|
||||
self.node.target_provision_state = states.ACTIVE
|
||||
self.node.save()
|
||||
with task_manager.acquire(self.context, self.node.uuid,
|
||||
shared=False) as task:
|
||||
task.driver.vendor.pass_bootloader_install_info(task, **kwargs)
|
||||
finish_deploy_mock.assert_called_once_with(task, '123456')
|
||||
validate_input_mock.assert_called_once_with(task, kwargs)
|
||||
|
||||
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
|
||||
'reboot_and_finish_deploy', autospec=True)
|
||||
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
|
||||
|
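To summarize what the updated tests above now assert, here is a minimal sketch of the agent iSCSI deploy flow after this change; the helper names and signatures below are illustrative assumptions, not the actual iscsi_deploy code. The target IQN is derived from the node UUID and no deploy key is exchanged with the ramdisk or passed to continue_deploy.

    # Illustrative sketch only; names and signatures are assumptions,
    # not ironic's real API.
    def do_agent_iscsi_deploy_sketch(task, agent_client, continue_deploy,
                                     agent_address):
        node = task.node
        # IQN is now derived from the node UUID, not from ramdisk options.
        iqn = 'iqn.2008-10.org.openstack:%s' % node.uuid
        wipe = not node.instance_info.get('preserve_ephemeral', False)
        result = agent_client.start_iscsi_target(node, iqn, 3260,
                                                 wipe_disk_metadata=wipe)
        if result.get('command_status') == 'FAILED':
            raise RuntimeError('Failed to start iSCSI target: %s'
                               % result.get('command_error'))
        # No deploy key anymore; the image is written over iSCSI here.
        return continue_deploy(task, iqn=iqn, address=agent_address)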
@ -115,7 +115,7 @@ class PXEDriversTestCase(testtools.TestCase):
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management, seamicro.Management)
self.assertIsInstance(driver.seamicro_vendor, seamicro.VendorPassthru)
self.assertIsInstance(driver.pxe_vendor, iscsi_deploy.VendorPassthru)
self.assertIsInstance(driver.iscsi_vendor, iscsi_deploy.VendorPassthru)
self.assertIsInstance(driver.vendor, utils.MixinVendorInterface)
self.assertIsInstance(driver.console, seamicro.ShellinaboxConsole)
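The SeaMicro PXE driver test above reflects that the driver now exposes the IPA-capable iSCSI vendor interface alongside the SeaMicro one, composed through a mixin vendor interface. A toy sketch of how such a composite might route vendor methods follows; it is illustrative only and is not ironic's MixinVendorInterface implementation.

    # Toy dispatcher for illustration; the real MixinVendorInterface differs.
    class CompositeVendorSketch(object):
        def __init__(self, mapping, default=None):
            self.mapping = mapping    # vendor method name -> vendor interface
            self.default = default

        def passthru(self, task, method, **kwargs):
            # Route each vendor method to the interface registered for it,
            # falling back to a default interface if one was provided.
            iface = self.mapping.get(method, self.default)
            if iface is None:
                raise ValueError('Unsupported vendor method: %s' % method)
            return getattr(iface, method)(task, **kwargs)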
@ -0,0 +1,7 @@
---
prelude: >
  Starting with this release IPA is the only deployment and inspection
  ramdisk supported by Ironic.
upgrade:
  - Support for the old ramdisk ("deploy-ironic" diskimage-builder element)
    was removed. Please switch to IPA before upgrading.