Bye-bye iSCSI deploy, you served us well

The iSCSI deploy was very easy to start with, but it has since become
apparent that it suffers from scalability and maintenance issues.
It was deprecated in the Victoria cycle and can now be removed.

Hide the guide to upgrade to hardware types since it's very outdated.

I had to remove the iBMC diagram since my SVG-fu is not enough to fix it.

Change-Id: I2cd6bf7b27fe0be2c08104b0cc37654b506b2e62
This commit is contained in:
Dmitry Tantsur 2021-05-03 17:03:29 +02:00
parent e79f163837
commit 929907d684
57 changed files with 116 additions and 4455 deletions

View File

@ -2,7 +2,7 @@
"default_bios_interface": "no-bios",
"default_boot_interface": "pxe",
"default_console_interface": "no-console",
"default_deploy_interface": "iscsi",
"default_deploy_interface": "direct",
"default_inspect_interface": "no-inspect",
"default_management_interface": "ipmitool",
"default_network_interface": "flat",
@ -21,7 +21,7 @@
"no-console"
],
"enabled_deploy_interfaces": [
"iscsi",
"ansible",
"direct"
],
"enabled_inspect_interfaces": [

View File

@ -106,7 +106,7 @@
"default_bios_interface": "no-bios",
"default_boot_interface": "pxe",
"default_console_interface": "no-console",
"default_deploy_interface": "iscsi",
"default_deploy_interface": "direct",
"default_inspect_interface": "no-inspect",
"default_management_interface": "ipmitool",
"default_network_interface": "flat",
@ -125,7 +125,7 @@
"no-console"
],
"enabled_deploy_interfaces": [
"iscsi",
"ansible",
"direct"
],
"enabled_inspect_interfaces": [

View File

@ -119,7 +119,7 @@
"console_enabled": false,
"console_interface": "no-console",
"created_at": "2016-08-18T22:28:48.643434+11:11",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"deploy_step": {},
"driver": "ipmi",
"driver_info": {

View File

@ -2,7 +2,6 @@
ipmitool [default]
ipxe [platform:dpkg default]
ipxe-bootimgs [platform:rpm default]
open-iscsi [platform:dpkg default]
socat [default]
xinetd [default]
tftpd-hpa [platform:dpkg default]

View File

@ -76,7 +76,7 @@ not compatible with them. There are three ways to deal with this situation:
baremetal node set test --driver ipmi \
--boot-interface pxe \
--deploy-interface iscsi \
--deploy-interface direct \
--management-interface ipmitool \
--power-interface ipmitool

View File

@ -312,15 +312,6 @@ boot_up_seq GET Query boot up sequence
get_raid_controller_list GET Query RAID controller summary info
======================== ============ ======================================
PXE Boot and iSCSI Deploy Process with Ironic Standalone Environment
====================================================================
.. figure:: ../../images/ironic_standalone_with_ibmc_driver.svg
:width: 960px
:align: left
:alt: Ironic standalone with iBMC driver node
.. _Huawei iBMC: https://e.huawei.com/en/products/cloud-computing-dc/servers/accessories/ibmc
.. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security
.. _HUAWEI iBMC Client library: https://pypi.org/project/python-ibmcclient/

View File

@ -96,7 +96,7 @@ Interface Supported Implementations
``bios`` ``idrac-wsman``, ``idrac-redfish``, ``no-bios``
``boot`` ``ipxe``, ``pxe``, ``idrac-redfish-virtual-media``
``console`` ``no-console``
``deploy`` ``iscsi``, ``direct``, ``ansible``, ``ramdisk``
``deploy`` ``direct``, ``ansible``, ``ramdisk``
``inspect`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``,
``inspector``, ``no-inspect``
``management`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``

View File

@ -1097,8 +1097,9 @@ Netboot with glance and swift
IPA -> Conductor [label = "Lookup node"];
Conductor -> IPA [label = "Provides node UUID"];
IPA -> Conductor [label = "Heartbeat"];
Conductor -> IPA [label = "Exposes the disk over iSCSI"];
Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"];
Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
IPA -> Swift [label = "Retrieves the user image on bare metal"];
IPA -> IPA [label = "Writes user image to disk"];
Conductor -> Conductor [label = "Generates the boot ISO"];
Conductor -> Swift [label = "Uploads the boot ISO"];
Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"];
@ -1222,8 +1223,9 @@ Netboot in swiftless deploy for intermediate images
IPA -> Conductor [label = "Lookup node"];
Conductor -> IPA [label = "Provides node UUID"];
IPA -> Conductor [label = "Heartbeat"];
Conductor -> IPA [label = "Exposes the disk over iSCSI"];
Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"];
Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
IPA -> ConductorWebserver [label = "Retrieves the user image on bare metal"];
IPA -> IPA [label = "Writes user image to root partition"];
Conductor -> Conductor [label = "Generates the boot ISO"];
Conductor -> ConductorWebserver [label = "Uploads the boot ISO"];
Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"];
@ -1303,8 +1305,9 @@ Netboot with HTTP(S) based deploy
IPA -> Conductor [label = "Lookup node"];
Conductor -> IPA [label = "Provides node UUID"];
IPA -> Conductor [label = "Heartbeat"];
Conductor -> IPA [label = "Exposes the disk over iSCSI"];
Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"];
Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
IPA -> Swift [label = "Retrieves the user image on bare metal"];
IPA -> IPA [label = "Writes user image to disk"];
Conductor -> Conductor [label = "Generates the boot ISO"];
Conductor -> Swift [label = "Uploads the boot ISO"];
Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"];
@ -1381,8 +1384,9 @@ Netboot in standalone ironic
IPA -> Conductor [label = "Lookup node"];
Conductor -> IPA [label = "Provides node UUID"];
IPA -> Conductor [label = "Heartbeat"];
Conductor -> IPA [label = "Exposes the disk over iSCSI"];
Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"];
Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
IPA -> ConductorWebserver [label = "Retrieves the user image on bare metal"];
IPA -> IPA [label = "Writes user image to root partition"];
Conductor -> Conductor [label = "Generates the boot ISO"];
Conductor -> ConductorWebserver [label = "Uploads the boot ISO"];
Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"];

View File

@ -17,26 +17,11 @@ For more information see the
Drivers
=======
Starting with the Kilo release all deploy interfaces (except for fake ones)
are using IPA. There are two types of them:
* For nodes using the :ref:`iscsi-deploy` interface, IPA exposes the root hard
drive as an iSCSI share and calls back to the ironic conductor. The
conductor mounts the share and copies an image there. It then signals back
to IPA for post-installation actions like setting up a bootloader for local
boot support.
* For nodes using the :ref:`direct-deploy` interface, the conductor prepares
a swift temporary URL for an image. IPA then handles the whole deployment
process: downloading an image from swift, putting it on the machine and doing
any post-deploy actions.
Which one to choose depends on your environment. :ref:`iscsi-deploy` puts
higher load on conductors, :ref:`direct-deploy` currently requires the whole
image to fit in the node's memory, except when using raw images. It also
requires :doc:`/install/configure-glance-swift`.
.. todo: other differences?
Starting with the Kilo release all deploy interfaces (except for fake ones) are
using IPA. For nodes using the :ref:`direct-deploy` interface, the conductor
prepares a swift temporary URL or a local HTTP URL for the image. IPA then
handles the whole deployment process: downloading an image from swift, putting
it on the machine and doing any post-deploy actions.
Requirements
------------

View File

@ -88,7 +88,7 @@ interfaces enabled for ``irmc`` hardware type.
enabled_bios_interfaces = irmc
enabled_boot_interfaces = irmc-virtual-media,irmc-pxe
enabled_console_interfaces = ipmitool-socat,ipmitool-shellinabox,no-console
enabled_deploy_interfaces = iscsi,direct
enabled_deploy_interfaces = direct
enabled_inspect_interfaces = irmc,inspector,no-inspect
enabled_management_interfaces = irmc
enabled_network_interfaces = flat,neutron

View File

@ -24,8 +24,8 @@ common, and usually requires bootstrapping using PXE first.
The ``pxe`` boot interface works by preparing a PXE/iPXE environment for a
node on the file system, then instructing the DHCP provider (for example,
the Networking service) to boot the node from it. See
:ref:`iscsi-deploy-example` and :ref:`direct-deploy-example` for a better
understanding of the whole deployment process.
:ref:`direct-deploy-example` for a better understanding of the whole deployment
process.
.. note::
Both PXE and iPXE are configured differently, when UEFI boot is used

View File

@ -105,7 +105,7 @@ section of ironic's configuration file:
[DEFAULT]
...
enabled_deploy_interfaces = iscsi,direct,ansible
enabled_deploy_interfaces = direct,ansible
...
Once enabled, you can specify this deploy interface when creating or updating
@ -133,26 +133,3 @@ Ramdisk deploy
The ramdisk interface is intended to provide a mechanism to "deploy" an
instance where the item to be deployed is in reality a ramdisk. It is
documented separately, see :doc:`/admin/ramdisk-boot`.
.. _iscsi-deploy:
iSCSI deploy
============
.. warning::
This deploy interface is deprecated and will be removed in the Xena release
cycle. Please use `direct deploy`_ instead.
With ``iscsi`` deploy interface, the deploy ramdisk publishes the node's hard
drive as an iSCSI_ share. The ironic-conductor then copies the image to this
share. See :ref:`iSCSI deploy diagram <iscsi-deploy-example>` for a detailed
explanation of how this deploy interface works.
This interface is used by default, if enabled (see
:ref:`enable-hardware-interfaces`). You can specify it explicitly
when creating or updating a node::
baremetal node create --driver ipmi --deploy-interface iscsi
baremetal node set <NODE> --deploy-interface iscsi
.. _iSCSI: https://en.wikipedia.org/wiki/ISCSI

View File

@ -41,13 +41,13 @@ BIOS, and RAID interfaces.
Agent steps
-----------
All deploy interfaces based on ironic-python-agent (i.e. ``direct``, ``iscsi``
and ``ansible`` and any derivatives) expose the following deploy steps:
All deploy interfaces based on ironic-python-agent (i.e. ``direct``,
``ansible`` and any derivatives) expose the following deploy steps:
``deploy.deploy`` (priority 100)
In this step the node is booted using a provisioning image.
``deploy.write_image`` (priority 80)
An out-of-band (``iscsi``, ``ansible``) or in-band (``direct``) step that
An out-of-band (``ansible``) or in-band (``direct``) step that
downloads and writes the image to the node.
``deploy.tear_down_agent`` (priority 40)
In this step the provisioning image is shut down.
@ -57,7 +57,7 @@ and ``ansible`` and any derivatives) expose the following deploy steps:
``deploy.boot_instance`` (priority 20)
In this step the node is booted into the user image.
Additionally, the ``iscsi`` and ``direct`` deploy interfaces have:
Additionally, the ``direct`` deploy interfaces has:
``deploy.prepare_instance_boot`` (priority 60)
In this step the boot device is configured and the bootloader is installed.

View File

@ -210,7 +210,7 @@ Example of node CRUD notification::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@ -444,7 +444,7 @@ node maintenance notification::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@ -534,7 +534,7 @@ level, "error" has ERROR. Example of node console notification::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@ -617,7 +617,7 @@ ironic-conductor is attempting to change the node::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@ -695,7 +695,7 @@ prior to the correction::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@ -787,7 +787,7 @@ indicate a node's provision states before state change, "event" is the FSM
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",

View File

@ -18,7 +18,7 @@ non-default interfaces, it must be enabled and set for a node to be utilized:
[DEFAULT]
...
enabled_deploy_interfaces = iscsi,direct,ramdisk
enabled_deploy_interfaces = direct,ramdisk
...
Once enabled and the conductor(s) have been restarted, the interface can

View File

@ -420,13 +420,10 @@ Overall:
timers to help ensure a deployment does not fail due to a short-lived
transitory network connectivity failure in the form of a switch port having
moved to a temporary blocking state. Where applicable and possible,
many of these patches have been backported to supported releases,
however users of the iSCSI deployment interface will see the least
capability for these sorts of situations to be handled
automatically. These patches also require that the switchport has an
eventual fallback to a non-bonded mode. If the port remains in a blocking
state, then traffic will be unable to flow and the deloyment is likely to
time out.
many of these patches have been backported to supported releases.
These patches also require that the switchport has an eventual fallback to a
non-bonded mode. If the port remains in a blocking state, then traffic will
be unable to flow and the deployment is likely to time out.
* If you must use LACP, consider ``passive`` LACP negotiation settings
in the network switch as opposed to ``active``. The difference being with
passive the connected workload is likely a server where it should likely
@ -543,16 +540,10 @@ Again, these sorts of cases will depend upon the exact configuration of the
deployment, but hopefully these are areas where these actions can occur.
* Conversion to raw image files upon download to the conductor, from the
``[DEFAULT]force_raw_images`` option, in particular with the ``iscsi``
deployment interface. Users using glance and the ``direct`` deployment
interface may also experience issues here as the conductor will cache
the image to be written which takes place when the
``[agent]image_download_source`` is set to ``http`` instead of ``swift``.
* Write of a QCOW2 file over the ``iscsi`` deployment interface from the
conductor to the node being deployed can result in large amounts of
"white space" to be written to be transmitted over the wire and written
to the end device.
``[DEFAULT]force_raw_images`` option. Users using Glance may also experience
issues here as the conductor will cache the image to be written which takes
place when the ``[agent]image_download_source`` is set to ``http`` instead of
``swift``.
.. note::
The QCOW2 image conversion utility does consume quite a bit of memory
@ -560,9 +551,8 @@ deployment, but hopefully these are areas where these actions can occur.
is because the files are not sequential in nature, and must be re-assembled
from an internal block mapping. Internally Ironic limits this to 1GB
of RAM. Operators performing large numbers of deployments may wish to
explore the ``direct`` deployment interface in these sorts of cases in
order to minimize the conductor becoming a limiting factor due to memory
and network IO.
disable raw images in these sorts of cases in order to minimize the
conductor becoming a limiting factor due to memory and network IO.
Why are my nodes stuck in a "wait" state?
=========================================

View File

@ -10,7 +10,7 @@ be asked by API consumers to perform work for which the underlying tools
require large amounts of memory.
The biggest example of this is image conversion. Images not in a raw format
need to be written out to disk (local files or remote in iscsi deploy) which
need to be written out to disk for conversion (when requested) which
requires the conversion process to generate an in-memory map to re-assemble
the image contents into a coherent stream of data. This entire process also
stresses the kernel buffers and cache.

View File

@ -420,8 +420,8 @@ Ironic
------
Create devstack/local.conf with minimal settings required to enable Ironic.
An example local.conf that enables both ``direct`` and ``iscsi``
:doc:`deploy interfaces </admin/interfaces/deploy>` and uses the ``ipmi``
An example local.conf that enables the ``direct``
:doc:`deploy interface </admin/interfaces/deploy>` and uses the ``ipmi``
hardware type by default::
cd devstack
@ -468,8 +468,6 @@ hardware type by default::
# interfaces, most often power and management:
#IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake
#IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake
# The 'ipmi' hardware type's default deploy interface is 'iscsi'.
# This would change the default to 'direct':
#IRONIC_DEFAULT_DEPLOY_INTERFACE=direct
# Change this to alter the default driver for nodes created by devstack.
@ -516,9 +514,8 @@ directory you cloned DevStack::
An example local.conf that enables the ironic tempest plugin and Ironic can be
found below. The ``TEMPEST_PLUGINS`` variable needs to have the absolute path
to the ironic-tempest-plugin folder, otherwise the plugin won't be installed.
Ironic will have enabled both ``direct`` and
``iscsi`` :doc:`deploy interfaces </admin/interfaces/deploy>` and uses the
``ipmi`` hardware type by default::
Ironic will have enabled the ``direct`` :doc:`deploy interface
</admin/interfaces/deploy>` and uses the ``ipmi`` hardware type by default::
cd devstack
cat >local.conf <<END
@ -564,8 +561,6 @@ Ironic will have enabled both ``direct`` and
# interfaces, most often power and management:
#IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake
#IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake
# The 'ipmi' hardware type's default deploy interface is 'iscsi'.
# This would change the default to 'direct':
#IRONIC_DEFAULT_DEPLOY_INTERFACE=direct
# Change this to alter the default driver for nodes created by devstack.

View File

@ -69,7 +69,6 @@ description for DevStack is at :ref:`deploy_devstack`.
# interfaces, most often power and management:
#IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake
#IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake
# The default deploy interface is 'iscsi', you can use 'direct' with
#IRONIC_DEFAULT_DEPLOY_INTERFACE=direct
# Change this to alter the default driver for nodes created by devstack.

View File

@ -93,7 +93,6 @@ configured in Neutron.
# interfaces, most often power and management:
#IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake
#IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake
# The default deploy interface is 'iscsi', you can use 'direct' with
#IRONIC_DEFAULT_DEPLOY_INTERFACE=direct
# Change this to alter the default driver for nodes created by devstack.

File diff suppressed because it is too large Load Diff

Before

Width:  |  Height:  |  Size: 130 KiB

View File

@ -40,7 +40,6 @@ Upgrade Guide
:maxdepth: 2
admin/upgrade-guide
admin/upgrade-to-hardware-types
User Guide
==========

View File

@ -1,5 +0,0 @@
Configuring iSCSI-based drivers
-------------------------------
Ensure that the ``qemu-img`` and ``iscsiadm`` tools are installed on the
**ironic-conductor** host(s).

View File

@ -93,10 +93,8 @@ provisioning will happen in a multi-tenant environment (which means using the
* TFTP
* egress port used for the Bare Metal service (6385 by default)
* ingress port used for ironic-python-agent (9999 by default)
* if using :ref:`iscsi-deploy`, the ingress port used for iSCSI
(3260 by default)
* if using :ref:`direct-deploy`, the egress port used for the Object
Storage service (typically 80 or 443)
Storage service or the local HTTP server (typically 80 or 443)
* if using iPXE, the egress port used for the HTTP server running
on the ironic-conductor nodes (typically 80).

View File

@ -78,7 +78,7 @@ console
deploy
defines how the image gets transferred to the target disk. See
:doc:`/admin/interfaces/deploy` for an explanation of the difference
between supported deploy interfaces ``direct`` and ``iscsi``.
between supported deploy interfaces.
The deploy interfaces can be enabled as follows:
@ -86,13 +86,10 @@ deploy
[DEFAULT]
enabled_hardware_types = ipmi,redfish
enabled_deploy_interfaces = iscsi,direct
enabled_deploy_interfaces = direct,ramdisk
Additionally,
* the ``iscsi`` deploy interface requires :doc:`configure-iscsi`
* the ``direct`` deploy interface requires the Object Storage service
.. note::
The ``direct`` deploy interface requires the Object Storage service
or an HTTP service
inspect
implements fetching hardware information from nodes. Can be implemented
@ -186,7 +183,7 @@ IPMI and Redfish, with a few additional features:
enabled_hardware_types = ipmi,redfish
enabled_boot_interfaces = pxe
enabled_console_interfaces = ipmitool-socat,no-console
enabled_deploy_interfaces = iscsi,direct
enabled_deploy_interfaces = direct
enabled_inspect_interfaces = inspector
enabled_management_interfaces = ipmitool,redfish
enabled_network_interfaces = flat,neutron
@ -222,7 +219,7 @@ respectively:
[DEFAULT]
enabled_hardware_types = redfish
enabled_deploy_interfaces = iscsi
enabled_deploy_interfaces = ansible
enabled_power_interfaces = redfish
enabled_management_interfaces = redfish
@ -241,13 +238,13 @@ respectively:
[DEFAULT]
enabled_hardware_types = redfish
enabled_deploy_interfaces = iscsi
enabled_deploy_interfaces = ansible
enabled_power_interfaces = redfish
enabled_management_interfaces = redfish
This is because the ``redfish`` hardware type will have different enabled
*deploy* interfaces on these conductors. It would have been fine, if the second
conductor had ``enabled_deploy_interfaces = direct`` instead of ``iscsi``.
conductor had ``enabled_deploy_interfaces = direct`` instead of ``ansible``.
This situation is not detected by the Bare Metal service, but it can cause
inconsistent behavior in the API, when node functionality will depend on

View File

@ -572,7 +572,7 @@ interfaces for a hardware type (for your deployment):
+-------------------------------+----------------+
| default_boot_interface | pxe |
| default_console_interface | no-console |
| default_deploy_interface | iscsi |
| default_deploy_interface | direct |
| default_inspect_interface | no-inspect |
| default_management_interface | ipmitool |
| default_network_interface | flat |
@ -581,7 +581,7 @@ interfaces for a hardware type (for your deployment):
| default_vendor_interface | no-vendor |
| enabled_boot_interfaces | pxe |
| enabled_console_interfaces | no-console |
| enabled_deploy_interfaces | iscsi, direct |
| enabled_deploy_interfaces | direct |
| enabled_inspect_interfaces | no-inspect |
| enabled_management_interfaces | ipmitool |
| enabled_network_interfaces | flat, noop |
@ -627,10 +627,10 @@ Consider the following configuration (shortened for simplicity):
[DEFAULT]
enabled_hardware_types = ipmi,redfish
enabled_console_interfaces = no-console,ipmitool-shellinabox
enabled_deploy_interfaces = iscsi,direct
enabled_deploy_interfaces = direct
enabled_management_interfaces = ipmitool,redfish
enabled_power_interfaces = ipmitool,redfish
default_deploy_interface = direct
default_deploy_interface = ansible
A new node is created with the ``ipmi`` driver and no interfaces specified:
@ -654,7 +654,7 @@ Then the defaults for the interfaces that will be used by the node in this
example are calculated as follows:
deploy
An explicit value of ``direct`` is provided for
An explicit value of ``ansible`` is provided for
``default_deploy_interface``, so it is used.
power
No default is configured. The ``ipmi`` hardware type supports only

View File

@ -99,18 +99,6 @@ implementation is available for the hardware, it is recommended using it
for better scalability and security. Otherwise, it is recommended to use iPXE,
when it is supported by target hardware.
Deploy interface
~~~~~~~~~~~~~~~~
There are two deploy interfaces in-tree, ``iscsi`` and ``direct``. See
:doc:`../../admin/interfaces/deploy` for explanation of the difference.
With the ``iscsi`` deploy method, most of the deployment operations happen on
the conductor. If the Object Storage service (swift) or RadosGW is present in
the environment, it is recommended to use the ``direct`` deploy method for
better scalability and reliability.
.. TODO(dtantsur): say something about the ansible deploy, when it's in
Hardware specifications
~~~~~~~~~~~~~~~~~~~~~~~
@ -328,11 +316,6 @@ the space requirements are different:
``image_download_source`` can also be provided in the node's
``driver_info`` or ``instance_info``. See :ref:`image_download_source`.
* The ``iscsi`` deploy method always requires caching of the whole instance
image locally during the deployment. The image has to be converted to the raw
format, which may increase the required amount of disk space, as well as the
CPU load.
* When network boot is used, the instance image kernel and ramdisk are cached
locally while the instance is active.

View File

@ -7,4 +7,3 @@ Set up the drivers for the Bare Metal service
enabling-drivers
configure-pxe
configure-ipmi
configure-iscsi

View File

@ -260,7 +260,7 @@ options.
.. _direct-deploy-example:
Example 1: PXE Boot and Direct Deploy Process
Example: PXE Boot and Direct Deploy Process
---------------------------------------------
This process is how :ref:`direct-deploy` works.
@ -318,63 +318,5 @@ This process is how :ref:`direct-deploy` works.
(From a `talk`_ and `slides`_)
.. _iscsi-deploy-example:
Example 2: PXE Boot and iSCSI Deploy Process
--------------------------------------------
This process is how the currently deprecated :ref:`iscsi-deploy` works.
.. seqdiag::
:scale: 75
diagram {
Nova; API; Conductor; Neutron; HTTPStore; "TFTP/HTTPd"; Node;
activation = none;
span_height = 1;
edge_length = 250;
default_note_color = white;
default_fontsize = 14;
Nova -> API [label = "Set instance_info\n(image_source,\nroot_gb, etc.)"];
Nova -> API [label = "Validate power and deploy\ninterfaces"];
Nova -> API [label = "Plug VIFs to the node"];
Nova -> API [label = "Set provision_state,\noptionally pass configdrive"];
API -> Conductor [label = "do_node_deploy()"];
Conductor -> Conductor [label = "Validate power and deploy interfaces"];
Conductor -> HTTPStore [label = "Store configdrive if configdrive_use_swift \noption is set"];
Conductor -> Node [label = "POWER OFF"];
Conductor -> Neutron [label = "Attach provisioning network to port(s)"];
Conductor -> Neutron [label = "Update DHCP boot options"];
Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ndeployment"];
Conductor -> Node [label = "Set PXE boot device \nthrough the BMC"];
Conductor -> Conductor [label = "Cache deploy\nkernel, ramdisk,\ninstance images"];
Conductor -> Node [label = "REBOOT"];
Node -> Neutron [label = "DHCP request"];
Neutron -> Node [label = "next-server = Conductor"];
Node -> Node [label = "Runs agent\nramdisk"];
Node -> API [label = "lookup()"];
API -> Node [label = "Pass UUID"];
Node -> API [label = "Heartbeat (UUID)"];
API -> Conductor [label = "Heartbeat"];
Conductor -> Node [label = "Send IPA a command to expose disks via iSCSI"];
Conductor -> Node [label = "iSCSI attach"];
Conductor -> Node [label = "Copies user image and configdrive, if present"];
Conductor -> Node [label = "iSCSI detach"];
Conductor -> Conductor [label = "Delete instance\nimage from cache"];
Conductor -> Node [label = "Install boot loader, if requested"];
Conductor -> Neutron [label = "Update DHCP boot options"];
Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ninstance image"];
Conductor -> Node [label = "Set boot device either to PXE or to disk"];
Conductor -> Node [label = "Collect ramdisk logs"];
Conductor -> Node [label = "POWER OFF"];
Conductor -> Neutron [label = "Detach provisioning network\nfrom port(s)"];
Conductor -> Neutron [label = "Bind tenant port"];
Conductor -> Node [label = "POWER ON"];
Conductor -> Conductor [label = "Mark node as\nACTIVE"];
}
(From a `talk`_ and `slides`_)
.. _talk: https://www.openstack.org/summit/vancouver-2015/summit-videos/presentation/isn-and-039t-it-ironic-the-bare-metal-cloud
.. _slides: http://www.slideshare.net/devananda1/isnt-it-ironic-managing-a-bare-metal-cloud-osl-tes-2015

View File

@ -2,9 +2,6 @@
# This file should be owned by (and only-writable by) the root user
[Filters]
# ironic/drivers/modules/deploy_utils.py
iscsiadm: CommandFilter, iscsiadm, root
# ironic/common/utils.py
mount: CommandFilter, mount, root
umount: CommandFilter, umount, root

View File

@ -64,8 +64,6 @@ dbapi = db_api.get_instance()
# object, in case it is lazy loaded. The attribute will be accessed when needed
# by doing getattr on the object
ONLINE_MIGRATIONS = (
# Added in Victoria, remove when removing iscsi deploy.
(dbapi, 'migrate_from_iscsi_deploy'),
# NOTE(rloo): Don't remove this; it should always be last
(dbapi, 'update_to_latest_versions'),
)

View File

@ -35,7 +35,6 @@ from ironic.conf import ilo
from ironic.conf import inspector
from ironic.conf import ipmi
from ironic.conf import irmc
from ironic.conf import iscsi
from ironic.conf import metrics
from ironic.conf import metrics_statsd
from ironic.conf import molds
@ -51,6 +50,7 @@ from ironic.conf import xclarity
CONF = cfg.CONF
agent.register_opts(CONF)
anaconda.register_opts(CONF)
ansible.register_opts(CONF)
api.register_opts(CONF)
audit.register_opts(CONF)
@ -69,8 +69,6 @@ ilo.register_opts(CONF)
inspector.register_opts(CONF)
ipmi.register_opts(CONF)
irmc.register_opts(CONF)
iscsi.register_opts(CONF)
anaconda.register_opts(CONF)
metrics.register_opts(CONF)
metrics_statsd.register_opts(CONF)
molds.register_opts(CONF)

View File

@ -1,44 +0,0 @@
# Copyright 2016 Intel Corporation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic.common.i18n import _
opts = [
cfg.PortOpt('portal_port',
default=3260,
mutable=True,
help=_('The port number on which the iSCSI portal listens '
'for incoming connections.')),
cfg.StrOpt('conv_flags',
mutable=True,
help=_('Flags that need to be sent to the dd command, '
'to control the conversion of the original file '
'when copying to the host. It can contain several '
'options separated by commas.')),
cfg.IntOpt('verify_attempts',
default=3,
min=1,
mutable=True,
help=_('Maximum attempts to verify an iSCSI connection is '
'active, sleeping 1 second between attempts. Defaults '
'to 3.')),
]
def register_opts(conf):
conf.register_opts(opts, group='iscsi')

View File

@ -34,7 +34,6 @@ _opts = [
('inspector', ironic.conf.inspector.list_opts()),
('ipmi', ironic.conf.ipmi.opts),
('irmc', ironic.conf.irmc.opts),
('iscsi', ironic.conf.iscsi.opts),
('anaconda', ironic.conf.anaconda.opts),
('metrics', ironic.conf.metrics.opts),
('metrics_statsd', ironic.conf.metrics_statsd.opts),

View File

@ -973,18 +973,6 @@ class Connection(object, metaclass=abc.ABCMeta):
of migrated objects.
"""
@abc.abstractmethod
def migrate_from_iscsi_deploy(self, context, max_count):
"""Tries to migrate away from the iscsi deploy interface.
:param context: the admin context
:param max_count: The maximum number of objects to migrate. Must be
>= 0. If zero, all the objects will be migrated.
:returns: A 2-tuple, 1. the total number of objects that need to be
migrated (at the beginning of this call) and 2. the number
of migrated objects.
"""
@abc.abstractmethod
def set_node_traits(self, node_id, traits, version):
"""Replace all of the node traits with specified list of traits.

View File

@ -1578,59 +1578,6 @@ class Connection(api.Connection):
return total_to_migrate, total_migrated
@oslo_db_api.retry_on_deadlock
def migrate_from_iscsi_deploy(self, context, max_count, force=False):
"""Tries to migrate away from the iscsi deploy interface.
:param context: the admin context
:param max_count: The maximum number of objects to migrate. Must be
>= 0. If zero, all the objects will be migrated.
:returns: A 2-tuple, 1. the total number of objects that need to be
migrated (at the beginning of this call) and 2. the number
of migrated objects.
"""
# TODO(dtantsur): maybe change to force=True by default in W?
if not force:
if 'direct' not in CONF.enabled_deploy_interfaces:
LOG.warning('The direct deploy interface is not enabled, will '
'not migrate nodes to it. Run with --option '
'force=true to override.')
return 0, 0
if CONF.default_deploy_interface == 'iscsi':
LOG.warning('The iscsi deploy interface is the default, will '
'not migrate nodes away from it. Run with '
'--option force=true to override.')
return 0, 0
if CONF.agent.image_download_source == 'swift':
LOG.warning('The direct deploy interface is using swift, will '
'not migrate nodes to it. Run with --option '
'force=true to override.')
return 0, 0
total_to_migrate = (model_query(models.Node)
.filter_by(deploy_interface='iscsi')
.count())
if not total_to_migrate:
return 0, 0
max_to_migrate = max_count or total_to_migrate
with _session_for_write():
query = (model_query(models.Node.id)
.filter_by(deploy_interface='iscsi')
.slice(0, max_to_migrate))
ids = [row[0] for row in query]
num_migrated = (model_query(models.Node)
.filter_by(deploy_interface='iscsi')
.filter(models.Node.id.in_(ids))
.update({'deploy_interface': 'direct'},
synchronize_session=False))
return total_to_migrate, num_migrated
@staticmethod
def _verify_max_traits_per_node(node_id, num_traits):
"""Verify that an operation would not exceed the per-node trait limit.

View File

@ -23,7 +23,6 @@ from ironic.drivers.modules.ansible import deploy as ansible_deploy
from ironic.drivers.modules import fake
from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules.network import flat as flat_net
from ironic.drivers.modules.network import neutron
from ironic.drivers.modules.network import noop as noop_net
@ -49,9 +48,9 @@ class GenericHardware(hardware_type.AbstractHardwareType):
@property
def supported_deploy_interfaces(self):
"""List of supported deploy interfaces."""
return [agent.AgentDeploy, iscsi_deploy.ISCSIDeploy,
ansible_deploy.AnsibleDeploy, pxe.PXERamdiskDeploy,
pxe.PXEAnacondaDeploy, agent.CustomAgentDeploy]
return [agent.AgentDeploy, ansible_deploy.AnsibleDeploy,
pxe.PXERamdiskDeploy, pxe.PXEAnacondaDeploy,
agent.CustomAgentDeploy]
@property
def supported_inspect_interfaces(self):

View File

@ -342,34 +342,6 @@ class AgentClient(object):
{'cmd': method, 'node': node.uuid})
return None
@METRICS.timer('AgentClient.start_iscsi_target')
def start_iscsi_target(self, node, iqn,
portal_port=DEFAULT_IPA_PORTAL_PORT,
wipe_disk_metadata=False):
"""Expose the node's disk as an ISCSI target.
:param node: an Ironic node object
:param iqn: iSCSI target IQN
:param portal_port: iSCSI portal port
:param wipe_disk_metadata: True if the agent should wipe first the
disk magic strings like the partition
table, RAID or filesystem signature.
:raises: IronicException when failed to issue the request or there was
a malformed response from the agent.
:raises: AgentAPIError when agent failed to execute specified command.
:raises: AgentInProgress when the command fails to execute as the agent
is presently executing the prior command.
:returns: A dict containing command response from agent.
See :func:`get_commands_status` for a command result sample.
"""
params = {'iqn': iqn,
'portal_port': portal_port,
'wipe_disk_metadata': wipe_disk_metadata}
return self._command(node=node,
method='iscsi.start_iscsi_target',
params=params,
wait=True)
@METRICS.timer('AgentClient.install_bootloader')
def install_bootloader(self, node, root_uuid, target_boot_mode,
efi_system_part_uuid=None,

View File

@ -1,813 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import glob
import os
import time
from urllib import parse as urlparse
from ironic_lib import disk_utils
from ironic_lib import metrics_utils
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import agent_base
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
DISK_LAYOUT_PARAMS = ('root_gb', 'swap_mb', 'ephemeral_gb')
def _save_disk_layout(node, i_info):
"""Saves the disk layout.
The disk layout used for deployment of the node, is saved.
:param node: the node of interest
:param i_info: instance information (a dictionary) for the node, containing
disk layout information
"""
driver_internal_info = node.driver_internal_info
driver_internal_info['instance'] = {}
for param in DISK_LAYOUT_PARAMS:
driver_internal_info['instance'][param] = i_info[param]
node.driver_internal_info = driver_internal_info
node.save()
def discovery(portal_address, portal_port):
"""Do iSCSI discovery on portal."""
utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'st',
'-p', '%s:%s' % (utils.wrap_ipv6(portal_address),
portal_port),
run_as_root=True,
attempts=5,
delay_on_retry=True)
def login_iscsi(portal_address, portal_port, target_iqn):
"""Login to an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (utils.wrap_ipv6(portal_address),
portal_port),
'-T', target_iqn,
'--login',
run_as_root=True,
attempts=5,
delay_on_retry=True)
error_occurred = False
try:
# Ensure the login complete
verify_iscsi_connection(target_iqn)
# force iSCSI initiator to re-read luns
force_iscsi_lun_update(target_iqn)
# ensure file system sees the block device
check_file_system_for_iscsi_device(portal_address,
portal_port,
target_iqn)
except (exception.InstanceDeployFailure,
processutils.ProcessExecutionError) as e:
with excutils.save_and_reraise_exception():
error_occurred = True
LOG.error("Failed to login to an iSCSI target due to %s", e)
finally:
if error_occurred:
try:
logout_iscsi(portal_address, portal_port, target_iqn)
delete_iscsi(portal_address, portal_port, target_iqn)
except processutils.ProcessExecutionError as e:
LOG.warning("An error occurred when trying to cleanup "
"failed ISCSI session error %s", e)
def check_file_system_for_iscsi_device(portal_address,
portal_port,
target_iqn):
"""Ensure the file system sees the iSCSI block device."""
check_dir = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-1" % (portal_address,
portal_port,
target_iqn)
total_checks = CONF.iscsi.verify_attempts
for attempt in range(total_checks):
if os.path.exists(check_dir):
break
time.sleep(1)
if LOG.isEnabledFor(logging.DEBUG):
existing_devs = ', '.join(glob.iglob('/dev/disk/by-path/*iscsi*'))
LOG.debug("iSCSI connection not seen by file system. Rechecking. "
"Attempt %(attempt)d out of %(total)d. Available iSCSI "
"devices: %(devs)s.",
{"attempt": attempt + 1,
"total": total_checks,
"devs": existing_devs})
else:
msg = _("iSCSI connection was not seen by the file system after "
"attempting to verify %d times.") % total_checks
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def verify_iscsi_connection(target_iqn):
"""Verify iscsi connection."""
LOG.debug("Checking for iSCSI target to become active.")
total_checks = CONF.iscsi.verify_attempts
for attempt in range(total_checks):
out, _err = utils.execute('iscsiadm',
'-m', 'node',
'-S',
run_as_root=True)
if target_iqn in out:
break
time.sleep(1)
LOG.debug("iSCSI connection not active. Rechecking. Attempt "
"%(attempt)d out of %(total)d",
{"attempt": attempt + 1, "total": total_checks})
else:
msg = _("iSCSI connection did not become active after attempting to "
"verify %d times.") % total_checks
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def force_iscsi_lun_update(target_iqn):
"""force iSCSI initiator to re-read luns."""
LOG.debug("Re-reading iSCSI luns.")
utils.execute('iscsiadm',
'-m', 'node',
'-T', target_iqn,
'-R',
run_as_root=True)
def logout_iscsi(portal_address, portal_port, target_iqn):
"""Logout from an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (utils.wrap_ipv6(portal_address),
portal_port),
'-T', target_iqn,
'--logout',
run_as_root=True,
attempts=5,
delay_on_retry=True)
def delete_iscsi(portal_address, portal_port, target_iqn):
"""Delete the iSCSI target."""
# Retry delete until it succeeds (exit code 0) or until there is
# no longer a target to delete (exit code 21).
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (utils.wrap_ipv6(portal_address),
portal_port),
'-T', target_iqn,
'-o', 'delete',
run_as_root=True,
check_exit_code=[0, 21],
attempts=5,
delay_on_retry=True)
@contextlib.contextmanager
def _iscsi_setup_and_handle_errors(address, port, iqn, lun):
"""Function that yields an iSCSI target device to work on.
:param address: The iSCSI IP address.
:param port: The iSCSI port number.
:param iqn: The iSCSI qualified name.
:param lun: The iSCSI logical unit number.
"""
dev = ("/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s"
% (address, port, iqn, lun))
discovery(address, port)
login_iscsi(address, port, iqn)
if not disk_utils.is_block_device(dev):
raise exception.InstanceDeployFailure(_("Parent device '%s' not found")
% dev)
try:
yield dev
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error("Deploy to address %s failed.", address)
LOG.error("Command: %s", err.cmd)
LOG.error("StdOut: %r", err.stdout)
LOG.error("StdErr: %r", err.stderr)
except exception.InstanceDeployFailure as e:
with excutils.save_and_reraise_exception():
LOG.error("Deploy to address %s failed.", address)
LOG.error(e)
finally:
logout_iscsi(address, port, iqn)
delete_iscsi(address, port, iqn)
def deploy_partition_image(
address, port, iqn, lun, image_path,
root_mb, swap_mb, ephemeral_mb, ephemeral_format, node_uuid,
preserve_ephemeral=False, configdrive=None,
boot_option=None, boot_mode="bios", disk_label=None,
cpu_arch=""):
"""All-in-one function to deploy a partition image to a node.
:param address: The iSCSI IP address.
:param port: The iSCSI port number.
:param iqn: The iSCSI qualified name.
:param lun: The iSCSI logical unit number.
:param image_path: Path for the instance's disk image.
:param root_mb: Size of the root partition in megabytes.
:param swap_mb: Size of the swap partition in megabytes.
:param ephemeral_mb: Size of the ephemeral partition in megabytes. If 0,
no ephemeral partition will be created.
:param ephemeral_format: The type of file system to format the ephemeral
partition.
:param node_uuid: node's uuid. Used for logging.
:param preserve_ephemeral: If True, no filesystem is written to the
ephemeral block device, preserving whatever
content it had (if the partition table has
not changed).
:param configdrive: Optional. Base64 encoded Gzipped configdrive content
or configdrive HTTP URL.
:param boot_option: Can be "local" or "netboot".
"netboot" by default.
:param boot_mode: Can be "bios" or "uefi". "bios" by default.
:param disk_label: The disk label to be used when creating the
partition table. Valid values are: "msdos",
"gpt" or None; If None ironic will figure it
out according to the boot_mode parameter.
:param cpu_arch: Architecture of the node being deployed to.
:raises: InstanceDeployFailure if image virtual size is bigger than root
partition size.
:returns: a dictionary containing the following keys:
'root uuid': UUID of root partition
'efi system partition uuid': UUID of the uefi system partition
(if boot mode is uefi).
NOTE: If key exists but value is None, it means partition doesn't
exist.
"""
# NOTE(dtantsur): CONF.default_boot_option is mutable, don't use it in
# the function signature!
boot_option = boot_option or deploy_utils.get_default_boot_option()
image_mb = disk_utils.get_image_mb(image_path)
if image_mb > root_mb:
msg = (_('Root partition is too small for requested image. Image '
'virtual size: %(image_mb)d MB, Root size: %(root_mb)d MB')
% {'image_mb': image_mb, 'root_mb': root_mb})
raise exception.InstanceDeployFailure(msg)
with _iscsi_setup_and_handle_errors(address, port, iqn, lun) as dev:
uuid_dict_returned = disk_utils.work_on_disk(
dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format, image_path,
node_uuid, preserve_ephemeral=preserve_ephemeral,
configdrive=configdrive, boot_option=boot_option,
boot_mode=boot_mode, disk_label=disk_label, cpu_arch=cpu_arch)
return uuid_dict_returned
def deploy_disk_image(address, port, iqn, lun,
image_path, node_uuid, configdrive=None,
conv_flags=None):
"""All-in-one function to deploy a whole disk image to a node.
:param address: The iSCSI IP address.
:param port: The iSCSI port number.
:param iqn: The iSCSI qualified name.
:param lun: The iSCSI logical unit number.
:param image_path: Path for the instance's disk image.
:param node_uuid: node's uuid.
:param configdrive: Optional. Base64 encoded Gzipped configdrive content
or configdrive HTTP URL.
:param conv_flags: Optional. Add a flag that will modify the behaviour of
the image copy to disk.
:returns: a dictionary containing the key 'disk identifier' to identify
the disk which was used for deployment.
"""
with _iscsi_setup_and_handle_errors(address, port, iqn,
lun) as dev:
disk_utils.populate_image(image_path, dev, conv_flags=conv_flags)
if configdrive:
disk_utils.create_config_drive_partition(node_uuid, dev,
configdrive)
disk_identifier = disk_utils.get_disk_identifier(dev)
return {'disk identifier': disk_identifier}
@METRICS.timer('check_image_size')
def check_image_size(task):
"""Check if the requested image is larger than the root partition size.
Does nothing for whole-disk images.
:param task: a TaskManager instance containing the node to act on.
:raises: InstanceDeployFailure if size of the image is greater than root
partition.
"""
if task.node.driver_internal_info['is_whole_disk_image']:
# The root partition is already created and populated, no use
# validating its size
return
i_info = deploy_utils.parse_instance_info(task.node)
image_path = deploy_utils._get_image_file_path(task.node.uuid)
image_mb = disk_utils.get_image_mb(image_path)
root_mb = 1024 * int(i_info['root_gb'])
if image_mb > root_mb:
msg = (_('Root partition is too small for requested image. Image '
'virtual size: %(image_mb)d MB, Root size: %(root_mb)d MB')
% {'image_mb': image_mb, 'root_mb': root_mb})
raise exception.InstanceDeployFailure(msg)
@METRICS.timer('get_deploy_info')
def get_deploy_info(node, address, iqn, port=None, lun='1', conv_flags=None):
"""Returns the information required for doing iSCSI deploy in a dictionary.
:param node: ironic node object
:param address: iSCSI address
:param iqn: iSCSI iqn for the target disk
:param port: iSCSI port, defaults to one specified in the configuration
:param lun: iSCSI lun, defaults to '1'
:param conv_flags: flag that will modify the behaviour of the image copy
to disk.
:raises: MissingParameterValue, if some required parameters were not
passed.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
i_info = deploy_utils.parse_instance_info(node)
params = {
'address': address,
'port': port or CONF.iscsi.portal_port,
'iqn': iqn,
'lun': lun,
'image_path': deploy_utils._get_image_file_path(node.uuid),
'node_uuid': node.uuid}
is_whole_disk_image = node.driver_internal_info['is_whole_disk_image']
if not is_whole_disk_image:
params.update({'root_mb': i_info['root_mb'],
'swap_mb': i_info['swap_mb'],
'ephemeral_mb': i_info['ephemeral_mb'],
'preserve_ephemeral': i_info['preserve_ephemeral'],
'boot_option': deploy_utils.get_boot_option(node),
'boot_mode': boot_mode_utils.get_boot_mode(node)})
cpu_arch = node.properties.get('cpu_arch')
if cpu_arch is not None:
params['cpu_arch'] = cpu_arch
# Append disk label if specified
disk_label = deploy_utils.get_disk_label(node)
if disk_label is not None:
params['disk_label'] = disk_label
missing = [key for key in params if params[key] is None]
if missing:
raise exception.MissingParameterValue(
_("Parameters %s were not passed to ironic"
" for deploy.") % missing)
# configdrive is nullable
params['configdrive'] = i_info.get('configdrive')
if is_whole_disk_image:
return params
if conv_flags:
params['conv_flags'] = conv_flags
# ephemeral_format is nullable
params['ephemeral_format'] = i_info.get('ephemeral_format')
return params
@METRICS.timer('continue_deploy')
def continue_deploy(task, **kwargs):
"""Resume a deployment upon getting POST data from deploy ramdisk.
This method raises no exceptions because it is intended to be
invoked asynchronously as a callback from the deploy ramdisk.
:param task: a TaskManager instance containing the node to act on.
:param kwargs: the kwargs to be passed to deploy.
:raises: InvalidState if the event is not allowed by the associated
state machine.
:returns: a dictionary containing the following keys:
For partition image:
* 'root uuid': UUID of root partition
* 'efi system partition uuid': UUID of the uefi system partition
(if boot mode is uefi).
.. note:: If key exists but value is None, it means partition
doesn't exist.
For whole disk image:
* 'disk identifier': ID of the disk to which image was deployed.
"""
node = task.node
params = get_deploy_info(node, **kwargs)
def _fail_deploy(task, msg, raise_exception=True):
"""Fail the deploy after logging and setting error states."""
if isinstance(msg, Exception):
msg = (_('Deploy failed for instance %(instance)s. '
'Error: %(error)s') %
{'instance': node.instance_uuid, 'error': msg})
deploy_utils.set_failed_state(task, msg)
deploy_utils.destroy_images(task.node.uuid)
if raise_exception:
raise exception.InstanceDeployFailure(msg)
# NOTE(lucasagomes): Let's make sure we don't log the full content
# of the config drive here because it can be up to 64MB in size,
# so instead let's log "***" in case config drive is enabled.
if LOG.isEnabledFor(logging.logging.DEBUG):
log_params = {
k: params[k] if k != 'configdrive' else '***'
for k in params
}
LOG.debug('Continuing deployment for node %(node)s, params %(params)s',
{'node': node.uuid, 'params': log_params})
uuid_dict_returned = {}
try:
if node.driver_internal_info['is_whole_disk_image']:
uuid_dict_returned = deploy_disk_image(**params)
else:
uuid_dict_returned = deploy_partition_image(**params)
except exception.IronicException as e:
with excutils.save_and_reraise_exception():
LOG.error('Deploy of instance %(instance)s on node %(node)s '
'failed: %(error)s', {'instance': node.instance_uuid,
'node': node.uuid, 'error': e})
_fail_deploy(task, e, raise_exception=False)
except Exception as e:
LOG.exception('Deploy of instance %(instance)s on node %(node)s '
'failed with exception',
{'instance': node.instance_uuid, 'node': node.uuid})
_fail_deploy(task, e)
root_uuid_or_disk_id = uuid_dict_returned.get(
'root uuid', uuid_dict_returned.get('disk identifier'))
if not root_uuid_or_disk_id:
msg = (_("Couldn't determine the UUID of the root "
"partition or the disk identifier after deploying "
"node %s") % node.uuid)
LOG.error(msg)
_fail_deploy(task, msg)
if params.get('preserve_ephemeral', False):
# Save disk layout information, to check that they are unchanged
# for any future rebuilds
_save_disk_layout(node, deploy_utils.parse_instance_info(node))
deploy_utils.destroy_images(node.uuid)
return uuid_dict_returned
@METRICS.timer('do_agent_iscsi_deploy')
def do_agent_iscsi_deploy(task, agent_client):
"""Method invoked when deployed with the agent ramdisk.
This method is invoked by drivers for doing iSCSI deploy
using agent ramdisk. This method assumes that the agent
is booted up on the node and is heartbeating.
:param task: a TaskManager object containing the node.
:param agent_client: an instance of agent_client.AgentClient
which will be used during iscsi deploy
(for exposing node's target disk via iSCSI,
for install boot loader, etc).
:returns: a dictionary containing the following keys:
For partition image:
* 'root uuid': UUID of root partition
* 'efi system partition uuid': UUID of the uefi system partition
(if boot mode is uefi).
.. note:: If key exists but value is None, it means partition
doesn't exist.
For whole disk image:
* 'disk identifier': ID of the disk to which image was deployed.
:raises: InstanceDeployFailure if it encounters some error
during the deploy.
"""
node = task.node
i_info = deploy_utils.parse_instance_info(node)
wipe_disk_metadata = not i_info['preserve_ephemeral']
iqn = 'iqn.2008-10.org.openstack:%s' % node.uuid
portal_port = CONF.iscsi.portal_port
conv_flags = CONF.iscsi.conv_flags
result = agent_client.start_iscsi_target(
node, iqn,
portal_port,
wipe_disk_metadata=wipe_disk_metadata)
if result['command_status'] == 'FAILED':
msg = (_("Failed to start the iSCSI target to deploy the "
"node %(node)s. Error: %(error)s") %
{'node': node.uuid, 'error': result['command_error']})
deploy_utils.set_failed_state(task, msg)
raise exception.InstanceDeployFailure(reason=msg)
address = urlparse.urlparse(node.driver_internal_info['agent_url'])
address = address.hostname
uuid_dict_returned = continue_deploy(task, iqn=iqn, address=address,
conv_flags=conv_flags)
root_uuid_or_disk_id = uuid_dict_returned.get(
'root uuid', uuid_dict_returned.get('disk identifier'))
# TODO(lucasagomes): Move this bit saving the root_uuid to
# continue_deploy()
driver_internal_info = node.driver_internal_info
driver_internal_info['root_uuid_or_disk_id'] = root_uuid_or_disk_id
node.driver_internal_info = driver_internal_info
node.save()
return uuid_dict_returned
@METRICS.timer('validate')
def validate(task):
"""Validates the pre-requisites for iSCSI deploy.
Validates whether node in the task provided has some ports enrolled.
This method validates whether conductor url is available either from CONF
file or from keystone.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if the URL of the Ironic API service is not
configured in config file and is not accessible via Keystone
catalog.
:raises: MissingParameterValue if no ports are enrolled for the given node.
"""
# TODO(lucasagomes): Validate the format of the URL
deploy_utils.get_ironic_api_url()
# Validate the root device hints
deploy_utils.get_root_device_for_deploy(task.node)
deploy_utils.parse_instance_info(task.node)
class ISCSIDeploy(agent_base.AgentDeployMixin, agent_base.AgentBaseMixin,
base.DeployInterface):
"""iSCSI Deploy Interface for deploy-related actions."""
has_decomposed_deploy_steps = True
supported = False
def get_properties(self):
return agent_base.VENDOR_PROPERTIES
@METRICS.timer('ISCSIDeploy.validate')
def validate(self, task):
"""Validate the deployment information for the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue.
:raises: MissingParameterValue
"""
task.driver.boot.validate(task)
node = task.node
# Check the boot_mode, boot_option and disk_label capabilities values.
deploy_utils.validate_capabilities(node)
# Edit early if we are not writing a volume as the validate
# tasks evaluate root device hints.
if not task.driver.storage.should_write_image(task):
LOG.debug('Skipping complete deployment interface validation '
'for node %s as it is set to boot from a remote '
'volume.', node.uuid)
return
# TODO(rameshg87): iscsi_ilo driver used to call this function. Remove
# and copy-paste it's contents here.
validate(task)
@METRICS.timer('ISCSIDeploy.deploy')
@base.deploy_step(priority=100)
@task_manager.require_exclusive_lock
def deploy(self, task):
"""Start deployment of the task's node.
Fetches instance image, updates the DHCP port options for next boot,
and issues a reboot request to the power driver.
This causes the node to boot into the deployment ramdisk and triggers
the next phase of PXE-based deployment via agent heartbeats.
:param task: a TaskManager instance containing the node to act on.
:returns: deploy state DEPLOYWAIT.
"""
node = task.node
if manager_utils.is_fast_track(task):
# NOTE(mgoddard): For fast track we can mostly skip this step and
# proceed to the next step (i.e. write_image).
LOG.debug('Performing a fast track deployment for %(node)s.',
{'node': task.node.uuid})
deploy_utils.cache_instance_image(task.context, node)
check_image_size(task)
# NOTE(dtantsur): while the node is up and heartbeating, we don't
# necessary have the deploy steps cached. Force a refresh here.
self.refresh_steps(task, 'deploy')
elif task.driver.storage.should_write_image(task):
# Standard deploy process
deploy_utils.cache_instance_image(task.context, node)
check_image_size(task)
# Check if the driver has already performed a reboot in a previous
# deploy step.
if not task.node.driver_internal_info.get('deployment_reboot',
False):
manager_utils.node_power_action(task, states.REBOOT)
info = task.node.driver_internal_info
info.pop('deployment_reboot', None)
info.pop('deployment_uuids', None)
task.node.driver_internal_info = info
task.node.save()
return states.DEPLOYWAIT
@METRICS.timer('ISCSIDeploy.write_image')
@base.deploy_step(priority=80)
@task_manager.require_exclusive_lock
def write_image(self, task):
"""Method invoked when deployed using iSCSI.
This method is invoked during a heartbeat from an agent when
the node is in wait-call-back state. This deploys the image on
the node and then configures the node to boot according to the
desired boot option (netboot or localboot).
:param task: a TaskManager object containing the node.
:param kwargs: the kwargs passed from the heartbeat method.
:raises: InstanceDeployFailure, if it encounters some error during
the deploy.
"""
if not task.driver.storage.should_write_image(task):
LOG.debug('Skipping write_image for node %s', task.node.uuid)
return
node = task.node
LOG.debug('Continuing the deployment on node %s', node.uuid)
if utils.is_memory_insufficent():
# Insufficent memory, but we can just let the agent heartbeat
# again in order to initiate deployment when the situation has
# changed.
LOG.warning('Insufficent memory to write image for node '
'%(node)s. Skipping until next heartbeat.',
{'node': node.uuid})
info = node.driver_internal_info
info['skip_current_deploy_step'] = False
node.driver_internal_info = info
node.last_error = "Deploy delayed due to insufficent memory"
node.save()
return states.DEPLOYWAIT
uuid_dict_returned = do_agent_iscsi_deploy(task, self._client)
utils.set_node_nested_field(node, 'driver_internal_info',
'deployment_uuids', uuid_dict_returned)
node.save()
@METRICS.timer('ISCSIDeploy.prepare_instance_boot')
@base.deploy_step(priority=60)
def prepare_instance_boot(self, task):
if not task.driver.storage.should_write_image(task):
task.driver.boot.prepare_instance(task)
return
node = task.node
try:
uuid_dict_returned = node.driver_internal_info['deployment_uuids']
except KeyError:
raise exception.InstanceDeployFailure(
_('Invalid internal state: the write_image deploy step has '
'not been called before prepare_instance_boot'))
root_uuid = uuid_dict_returned.get('root uuid')
efi_sys_uuid = uuid_dict_returned.get('efi system partition uuid')
prep_boot_part_uuid = uuid_dict_returned.get(
'PrEP Boot partition uuid')
self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid,
prep_boot_part_uuid=prep_boot_part_uuid)
@METRICS.timer('ISCSIDeploy.prepare')
@task_manager.require_exclusive_lock
def prepare(self, task):
"""Prepare the deployment environment for this task's node.
Generates the TFTP configuration for PXE-booting both the deployment
and user images, fetches the TFTP image from Glance and add it to the
local cache.
:param task: a TaskManager instance containing the node to act on.
:raises: NetworkError: if the previous cleaning ports cannot be removed
or if new cleaning ports cannot be created.
:raises: InvalidParameterValue when the wrong power state is specified
or the wrong driver info is specified for power management.
:raises: StorageError If the storage driver is unable to attach the
configured volumes.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
:raises: any boot interface's prepare_ramdisk exceptions.
"""
node = task.node
deploy_utils.populate_storage_driver_internal_info(task)
if node.provision_state in [states.ACTIVE, states.ADOPTING]:
task.driver.boot.prepare_instance(task)
else:
if node.provision_state == states.DEPLOYING:
fast_track_deploy = manager_utils.is_fast_track(task)
if fast_track_deploy:
# The agent has already recently checked in and we are
# configured to take that as an indicator that we can
# skip ahead.
LOG.debug('The agent for node %(node)s has recently '
'checked in, and the node power will remain '
'unmodified.',
{'node': task.node.uuid})
else:
# Adding the node to provisioning network so that the dhcp
# options get added for the provisioning port.
manager_utils.node_power_action(task, states.POWER_OFF)
# NOTE(vdrok): in case of rebuild, we have tenant network
# already configured, unbind tenant ports if present
if task.driver.storage.should_write_image(task):
if not fast_track_deploy:
power_state_to_restore = (
manager_utils.power_on_node_if_needed(task))
task.driver.network.unconfigure_tenant_networks(task)
task.driver.network.add_provisioning_network(task)
if not fast_track_deploy:
manager_utils.restore_power_state_if_needed(
task, power_state_to_restore)
task.driver.storage.attach_volumes(task)
if (not task.driver.storage.should_write_image(task)
or fast_track_deploy):
# We have nothing else to do as this is handled in the
# backend storage system, and we can return to the caller
# as we do not need to boot the agent to deploy.
# Alternatively, we are in a fast track deployment
# and have nothing else to do.
return
deploy_opts = deploy_utils.build_agent_options(node)
task.driver.boot.prepare_ramdisk(task, deploy_opts)
@METRICS.timer('ISCSIDeploy.clean_up')
def clean_up(self, task):
"""Clean up the deployment environment for the task's node.
Unlinks TFTP and instance images and triggers image cache cleanup.
Removes the TFTP configuration files for this node.
:param task: a TaskManager instance containing the node to act on.
"""
deploy_utils.destroy_images(task.node.uuid)
super(ISCSIDeploy, self).clean_up(task)
if utils.pop_node_nested_field(task.node, 'driver_internal_info',
'deployment_uuids'):
task.node.save()

View File

@ -163,7 +163,7 @@ class Conductor(base.IronicObject, object_base.VersionedObjectDictCompat):
be a dictionary conaining "hardware_type", "interface_type",
"interface_name" and "default", e.g.
{'hardware_type': 'hardware-type', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True}
'interface_name': 'direct', 'default': True}
"""
self.dbapi.register_conductor_hardware_interfaces(self.id, interfaces)

View File

@ -159,7 +159,7 @@ class TestCase(oslo_test_base.BaseTestCase):
values = ['fake']
if iface == 'deploy':
values.extend(['iscsi', 'direct', 'anaconda'])
values.extend(['direct', 'anaconda'])
elif iface == 'boot':
values.append('pxe')
elif iface == 'storage':

View File

@ -46,14 +46,14 @@ class TestListDrivers(base.BaseApiTest):
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': self.hw1, 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': False},
'interface_name': 'ansible', 'default': False},
{'hardware_type': self.hw1, 'interface_type': 'deploy',
'interface_name': 'direct', 'default': True}]
)
self.dbapi.register_conductor_hardware_interfaces(
c1.id,
[{'hardware_type': self.hw2, 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': False},
'interface_name': 'ansible', 'default': False},
{'hardware_type': self.hw2, 'interface_type': 'deploy',
'interface_name': 'direct', 'default': True}]
)
@ -124,7 +124,7 @@ class TestListDrivers(base.BaseApiTest):
{
'hardware_type': self.hw1,
'interface_type': 'deploy',
'interface_name': 'iscsi',
'interface_name': 'ansible',
'default': False,
},
{
@ -238,7 +238,7 @@ class TestListDrivers(base.BaseApiTest):
{
'hardware_type': self.hw1,
'interface_type': 'deploy',
'interface_name': 'iscsi',
'interface_name': 'ansible',
'default': False,
},
{

View File

@ -61,7 +61,7 @@ class HashRingManagerTestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'hardware-type', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'hardware-type', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)
@ -107,7 +107,7 @@ class HashRingManagerTestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c1.id,
[{'hardware_type': 'hardware-type', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'hardware-type', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)
@ -118,7 +118,7 @@ class HashRingManagerTestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c2.id,
[{'hardware_type': 'hardware-type', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'hardware-type', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)

View File

@ -411,14 +411,14 @@ class RegisterInterfacesTestCase(mgr_utils.ServiceSetUpMixin,
esi_mock.side_effect = [
collections.OrderedDict((
('management', ['fake', 'noop']),
('deploy', ['agent', 'iscsi']),
('deploy', ['direct', 'ansible']),
)),
collections.OrderedDict((
('management', ['fake']),
('deploy', ['agent', 'fake']),
('deploy', ['direct', 'fake']),
)),
]
default_mock.side_effect = ('fake', 'agent', 'fake', 'agent')
default_mock.side_effect = ('fake', 'direct', 'fake', 'direct')
expected_calls = [
mock.call(
mock.ANY,
@ -432,11 +432,11 @@ class RegisterInterfacesTestCase(mgr_utils.ServiceSetUpMixin,
'default': False},
{'hardware_type': 'fake-hardware',
'interface_type': 'deploy',
'interface_name': 'agent',
'interface_name': 'direct',
'default': True},
{'hardware_type': 'fake-hardware',
'interface_type': 'deploy',
'interface_name': 'iscsi',
'interface_name': 'ansible',
'default': False},
{'hardware_type': 'manual-management',
'interface_type': 'management',
@ -444,7 +444,7 @@ class RegisterInterfacesTestCase(mgr_utils.ServiceSetUpMixin,
'default': True},
{'hardware_type': 'manual-management',
'interface_type': 'deploy',
'interface_name': 'agent',
'interface_name': 'direct',
'default': True},
{'hardware_type': 'manual-management',
'interface_type': 'deploy',
@ -471,7 +471,7 @@ class RegisterInterfacesTestCase(mgr_utils.ServiceSetUpMixin,
esi_mock.side_effect = [
collections.OrderedDict((
('management', ['fake', 'noop']),
('deploy', ['agent', 'iscsi']),
('deploy', ['direct', 'ansible']),
)),
]
default_mock.side_effect = exception.NoValidDefaultForInterface("boo")

View File

@ -713,7 +713,7 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
IFACE_UPDATE_DICT = {
'boot_interface': UpdateInterfaces('pxe', 'fake'),
'console_interface': UpdateInterfaces('no-console', 'fake'),
'deploy_interface': UpdateInterfaces('iscsi', 'fake'),
'deploy_interface': UpdateInterfaces('direct', 'fake'),
'inspect_interface': UpdateInterfaces('no-inspect', 'fake'),
'management_interface': UpdateInterfaces(None, 'fake'),
'network_interface': UpdateInterfaces('noop', 'flat'),
@ -984,7 +984,7 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
deploy_interface='fake',
extra={'test': 'one'})
node.deploy_interface = 'iscsi'
node.deploy_interface = 'direct'
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_node,
self.context, node)

View File

@ -84,7 +84,7 @@ class RPCAPITestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)
@ -101,7 +101,7 @@ class RPCAPITestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'other-driver', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'other-driver', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)
@ -124,7 +124,7 @@ class RPCAPITestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)
@ -143,7 +143,7 @@ class RPCAPITestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)
@ -160,7 +160,7 @@ class RPCAPITestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)
@ -183,7 +183,7 @@ class RPCAPITestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)
@ -198,7 +198,7 @@ class RPCAPITestCase(db_base.DbTestCase):
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True},
'interface_name': 'ansible', 'default': True},
{'hardware_type': 'fake-driver', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False}]
)

View File

@ -237,73 +237,3 @@ class UpdateToLatestVersionsTestCase(base.DbTestCase):
for uuid in nodes:
node = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(self.node_ver, node.version)
class MigrateFromIscsiTestCase(base.DbTestCase):
def setUp(self):
super(MigrateFromIscsiTestCase, self).setUp()
self.context = context.get_admin_context()
self.dbapi = db_api.get_instance()
self.config(enabled_deploy_interfaces='direct')
def test_empty_db(self):
self.assertEqual(
(0, 0), self.dbapi.migrate_from_iscsi_deploy(self.context, 10))
def test_already_direct_exists(self):
utils.create_test_node(deploy_interface='direct')
self.assertEqual(
(0, 0), self.dbapi.update_to_latest_versions(self.context, 10))
def test_migrate_by_2(self):
utils.create_test_node(deploy_interface='direct')
for _i in range(3):
uuid = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuid, deploy_interface='iscsi')
self.assertEqual(
(3, 2), self.dbapi.migrate_from_iscsi_deploy(self.context, 2))
self.assertEqual(
(1, 1), self.dbapi.migrate_from_iscsi_deploy(self.context, 2))
def test_migrate_all(self):
utils.create_test_node(deploy_interface='direct')
for _i in range(3):
uuid = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuid, deploy_interface='iscsi')
self.assertEqual(
(3, 3), self.dbapi.migrate_from_iscsi_deploy(self.context, 0))
def test_migration_impossible(self):
self.config(enabled_deploy_interfaces='iscsi')
for _i in range(3):
uuid = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuid, deploy_interface='iscsi')
self.assertEqual(
(0, 0), self.dbapi.migrate_from_iscsi_deploy(self.context, 0))
def test_migration_impossible2(self):
self.config(image_download_source='swift', group='agent')
for _i in range(3):
uuid = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuid, deploy_interface='iscsi')
self.assertEqual(
(0, 0), self.dbapi.migrate_from_iscsi_deploy(self.context, 0))
def test_migration_impossible3(self):
self.config(default_deploy_interface='iscsi')
for _i in range(3):
uuid = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuid, deploy_interface='iscsi')
self.assertEqual(
(0, 0), self.dbapi.migrate_from_iscsi_deploy(self.context, 0))
def test_force_migration(self):
self.config(enabled_deploy_interfaces='iscsi')
utils.create_test_node(deploy_interface='direct')
for _i in range(3):
uuid = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuid, deploy_interface='iscsi')
self.assertEqual(
(3, 3), self.dbapi.migrate_from_iscsi_deploy(self.context, 0,
force=True))

View File

@ -59,7 +59,7 @@ class DbConductorTestCase(base.DbTestCase):
def test_register_conductor_hardware_interfaces(self):
c = self._create_test_cdr()
interfaces = ['direct', 'iscsi']
interfaces = ['direct', 'ansible']
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'generic', 'interface_type': 'deploy',
@ -74,10 +74,10 @@ class DbConductorTestCase(base.DbTestCase):
self.assertEqual('generic', ci2.hardware_type)
self.assertEqual('deploy', ci1.interface_type)
self.assertEqual('deploy', ci2.interface_type)
self.assertEqual('direct', ci1.interface_name)
self.assertEqual('iscsi', ci2.interface_name)
self.assertFalse(ci1.default)
self.assertTrue(ci2.default)
self.assertEqual('ansible', ci1.interface_name)
self.assertEqual('direct', ci2.interface_name)
self.assertTrue(ci1.default)
self.assertFalse(ci2.default)
def test_register_conductor_hardware_interfaces_duplicate(self):
c = self._create_test_cdr()
@ -85,7 +85,7 @@ class DbConductorTestCase(base.DbTestCase):
{'hardware_type': 'generic', 'interface_type': 'deploy',
'interface_name': 'direct', 'default': False},
{'hardware_type': 'generic', 'interface_type': 'deploy',
'interface_name': 'iscsi', 'default': True}
'interface_name': 'ansible', 'default': True}
]
self.dbapi.register_conductor_hardware_interfaces(c.id, interfaces)
ifaces = self.dbapi.list_conductor_hardware_interfaces(c.id)
@ -100,7 +100,7 @@ class DbConductorTestCase(base.DbTestCase):
def test_unregister_conductor_hardware_interfaces(self):
c = self._create_test_cdr()
interfaces = ['direct', 'iscsi']
interfaces = ['direct', 'ansible']
self.dbapi.register_conductor_hardware_interfaces(
c.id,
[{'hardware_type': 'generic', 'interface_type': 'deploy',

View File

@ -1387,7 +1387,7 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
self.config(enabled_hardware_types=['ilo5'],
enabled_boot_interfaces=['ilo-uefi-https'],
enabled_console_interfaces=['ilo'],
enabled_deploy_interfaces=['iscsi'],
enabled_deploy_interfaces=['direct'],
enabled_inspect_interfaces=['ilo'],
enabled_management_interfaces=['ilo5'],
enabled_power_interfaces=['ilo'],

View File

@ -1535,7 +1535,7 @@ class Ilo5ManagementTestCase(db_base.DbTestCase):
self.config(enabled_hardware_types=['ilo5'],
enabled_boot_interfaces=['ilo-virtual-media'],
enabled_console_interfaces=['ilo'],
enabled_deploy_interfaces=['iscsi'],
enabled_deploy_interfaces=['direct'],
enabled_inspect_interfaces=['ilo'],
enabled_management_interfaces=['ilo5'],
enabled_power_interfaces=['ilo'],

View File

@ -53,7 +53,7 @@ class Ilo5RAIDTestCase(db_base.DbTestCase):
self.config(enabled_hardware_types=['ilo5'],
enabled_boot_interfaces=['ilo-virtual-media'],
enabled_console_interfaces=['ilo'],
enabled_deploy_interfaces=['iscsi'],
enabled_deploy_interfaces=['direct'],
enabled_inspect_interfaces=['ilo'],
enabled_management_interfaces=['ilo5'],
enabled_power_interfaces=['ilo'],

View File

@ -464,46 +464,6 @@ class TestAgentClient(base.TestCase):
timeout=CONF.agent.command_timeout,
verify='/path/to/agent.crt')
def test_start_iscsi_target(self):
self.client._command = mock.MagicMock(spec_set=[])
iqn = 'fake-iqn'
port = agent_client.DEFAULT_IPA_PORTAL_PORT
wipe_disk_metadata = False
params = {'iqn': iqn, 'portal_port': port,
'wipe_disk_metadata': wipe_disk_metadata}
self.client.start_iscsi_target(self.node, iqn)
self.client._command.assert_called_once_with(
node=self.node, method='iscsi.start_iscsi_target',
params=params, wait=True)
def test_start_iscsi_target_custom_port(self):
self.client._command = mock.MagicMock(spec_set=[])
iqn = 'fake-iqn'
port = 3261
wipe_disk_metadata = False
params = {'iqn': iqn, 'portal_port': port,
'wipe_disk_metadata': wipe_disk_metadata}
self.client.start_iscsi_target(self.node, iqn, portal_port=port)
self.client._command.assert_called_once_with(
node=self.node, method='iscsi.start_iscsi_target',
params=params, wait=True)
def test_start_iscsi_target_wipe_disk_metadata(self):
self.client._command = mock.MagicMock(spec_set=[])
iqn = 'fake-iqn'
port = agent_client.DEFAULT_IPA_PORTAL_PORT
wipe_disk_metadata = True
params = {'iqn': iqn, 'portal_port': port,
'wipe_disk_metadata': wipe_disk_metadata}
self.client.start_iscsi_target(self.node, iqn,
wipe_disk_metadata=wipe_disk_metadata)
self.client._command.assert_called_once_with(
node=self.node, method='iscsi.start_iscsi_target',
params=params, wait=True)
def _test_install_bootloader(self, root_uuid, efi_system_part_uuid=None,
prep_boot_part_uuid=None):
self.client._command = mock.MagicMock(spec_set=[])

File diff suppressed because it is too large Load Diff

View File

@ -79,8 +79,7 @@ class PXEBootTestCase(db_base.DbTestCase):
self.config(enabled_boot_interfaces=[self.boot_interface,
'ipxe', 'fake'])
self.config(enabled_deploy_interfaces=['fake', 'direct', 'iscsi',
'anaconda'])
self.config(enabled_deploy_interfaces=['fake', 'direct', 'anaconda'])
self.node = obj_utils.create_test_node(
self.context,
driver=self.driver,

View File

@ -166,9 +166,9 @@ class TestConductorObject(db_base.DbTestCase):
def test_register_hardware_interfaces(self):
host = self.fake_conductor['hostname']
self.config(default_deploy_interface='iscsi')
self.config(default_deploy_interface='ansible')
arg = [{"hardware_type": "hardware-type", "interface_type": "deploy",
"interface_name": "iscsi", "default": True},
"interface_name": "ansible", "default": True},
{"hardware_type": "hardware-type", "interface_type": "deploy",
"interface_name": "direct", "default": False}]
with mock.patch.object(self.dbapi, 'get_conductor',

View File

@ -0,0 +1,5 @@
---
upgrade:
- |
The deprecated ``iscsi`` deploy interface has been removed. Please update
to a different deploy interface before upgrading.

View File

@ -90,7 +90,6 @@ ironic.hardware.interfaces.deploy =
custom-agent = ironic.drivers.modules.agent:CustomAgentDeploy
direct = ironic.drivers.modules.agent:AgentDeploy
fake = ironic.drivers.modules.fake:FakeDeploy
iscsi = ironic.drivers.modules.iscsi_deploy:ISCSIDeploy
ramdisk = ironic.drivers.modules.pxe:PXERamdiskDeploy
ironic.hardware.interfaces.inspect =