Bye-bye iSCSI deploy, you served us well

The iSCSI deploy was very easy to start with, but it has since become
apparent that it suffers from scalability and maintenance issues.
It was deprecated in the Victoria cycle and can now be removed.

Hide the guide to upgrade to hardware types since it's very outdated.

I had to remove the iBMC diagram since my SVG-fu is not enough to fix it.

Change-Id: I2cd6bf7b27fe0be2c08104b0cc37654b506b2e62
changes/82/789382/5
Dmitry Tantsur 2 years ago
parent e79f163837
commit 929907d684

@@ -2,7 +2,7 @@
"default_bios_interface": "no-bios",
"default_boot_interface": "pxe",
"default_console_interface": "no-console",
"default_deploy_interface": "iscsi",
"default_deploy_interface": "direct",
"default_inspect_interface": "no-inspect",
"default_management_interface": "ipmitool",
"default_network_interface": "flat",
@@ -21,7 +21,7 @@
"no-console"
],
"enabled_deploy_interfaces": [
"iscsi",
"ansible",
"direct"
],
"enabled_inspect_interfaces": [

@@ -106,7 +106,7 @@
"default_bios_interface": "no-bios",
"default_boot_interface": "pxe",
"default_console_interface": "no-console",
"default_deploy_interface": "iscsi",
"default_deploy_interface": "direct",
"default_inspect_interface": "no-inspect",
"default_management_interface": "ipmitool",
"default_network_interface": "flat",
@@ -125,7 +125,7 @@
"no-console"
],
"enabled_deploy_interfaces": [
"iscsi",
"ansible",
"direct"
],
"enabled_inspect_interfaces": [

@@ -119,7 +119,7 @@
"console_enabled": false,
"console_interface": "no-console",
"created_at": "2016-08-18T22:28:48.643434+11:11",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"deploy_step": {},
"driver": "ipmi",
"driver_info": {

@@ -2,7 +2,6 @@
ipmitool [default]
ipxe [platform:dpkg default]
ipxe-bootimgs [platform:rpm default]
open-iscsi [platform:dpkg default]
socat [default]
xinetd [default]
tftpd-hpa [platform:dpkg default]

@@ -76,7 +76,7 @@ not compatible with them. There are three ways to deal with this situation:
baremetal node set test --driver ipmi \
--boot-interface pxe \
--deploy-interface iscsi \
--deploy-interface direct \
--management-interface ipmitool \
--power-interface ipmitool
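
For reference, the same switch can be scripted. A minimal openstacksdk
sketch (the cloud name ``mycloud`` and the node name ``test`` are
illustrative assumptions, not part of this change)::

    import openstack

    # Connect using a clouds.yaml entry named "mycloud" (illustrative).
    conn = openstack.connect(cloud="mycloud")

    # Look up the node and switch it to the direct deploy interface,
    # mirroring the "baremetal node set" command above.
    node = conn.baremetal.find_node("test", ignore_missing=False)
    node = conn.baremetal.update_node(node, deploy_interface="direct")
    print(node.deploy_interface)  # expected: "direct"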

@@ -312,15 +312,6 @@ boot_up_seq GET Query boot up sequence
get_raid_controller_list GET Query RAID controller summary info
======================== ============ ======================================
PXE Boot and iSCSI Deploy Process with Ironic Standalone Environment
====================================================================
.. figure:: ../../images/ironic_standalone_with_ibmc_driver.svg
:width: 960px
:align: left
:alt: Ironic standalone with iBMC driver node
.. _Huawei iBMC: https://e.huawei.com/en/products/cloud-computing-dc/servers/accessories/ibmc
.. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security
.. _HUAWEI iBMC Client library: https://pypi.org/project/python-ibmcclient/

@@ -96,7 +96,7 @@ Interface Supported Implementations
``bios`` ``idrac-wsman``, ``idrac-redfish``, ``no-bios``
``boot`` ``ipxe``, ``pxe``, ``idrac-redfish-virtual-media``
``console`` ``no-console``
``deploy`` ``iscsi``, ``direct``, ``ansible``, ``ramdisk``
``deploy`` ``direct``, ``ansible``, ``ramdisk``
``inspect`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``,
``inspector``, ``no-inspect``
``management`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``

@@ -1097,8 +1097,9 @@ Netboot with glance and swift
IPA -> Conductor [label = "Lookup node"];
Conductor -> IPA [label = "Provides node UUID"];
IPA -> Conductor [label = "Heartbeat"];
Conductor -> IPA [label = "Exposes the disk over iSCSI"];
Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"];
Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
IPA -> Swift [label = "Retrieves the user image on bare metal"];
IPA -> IPA [label = "Writes user image to disk"];
Conductor -> Conductor [label = "Generates the boot ISO"];
Conductor -> Swift [label = "Uploads the boot ISO"];
Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"];
@@ -1222,8 +1223,9 @@ Netboot in swiftless deploy for intermediate images
IPA -> Conductor [label = "Lookup node"];
Conductor -> IPA [label = "Provides node UUID"];
IPA -> Conductor [label = "Heartbeat"];
Conductor -> IPA [label = "Exposes the disk over iSCSI"];
Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"];
Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
IPA -> ConductorWebserver [label = "Retrieves the user image on bare metal"];
IPA -> IPA [label = "Writes user image to root partition"];
Conductor -> Conductor [label = "Generates the boot ISO"];
Conductor -> ConductorWebserver [label = "Uploads the boot ISO"];
Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"];
@@ -1303,8 +1305,9 @@ Netboot with HTTP(S) based deploy
IPA -> Conductor [label = "Lookup node"];
Conductor -> IPA [label = "Provides node UUID"];
IPA -> Conductor [label = "Heartbeat"];
Conductor -> IPA [label = "Exposes the disk over iSCSI"];
Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"];
Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
IPA -> Swift [label = "Retrieves the user image on bare metal"];
IPA -> IPA [label = "Writes user image to disk"];
Conductor -> Conductor [label = "Generates the boot ISO"];
Conductor -> Swift [label = "Uploads the boot ISO"];
Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"];
@@ -1381,8 +1384,9 @@ Netboot in standalone ironic
IPA -> Conductor [label = "Lookup node"];
Conductor -> IPA [label = "Provides node UUID"];
IPA -> Conductor [label = "Heartbeat"];
Conductor -> IPA [label = "Exposes the disk over iSCSI"];
Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"];
Conductor -> IPA [label = "Sends the user image HTTP(S) URL"];
IPA -> ConductorWebserver [label = "Retrieves the user image on bare metal"];
IPA -> IPA [label = "Writes user image to root partition"];
Conductor -> Conductor [label = "Generates the boot ISO"];
Conductor -> ConductorWebserver [label = "Uploads the boot ISO"];
Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"];

@@ -17,26 +17,11 @@ For more information see the
Drivers
=======
Starting with the Kilo release all deploy interfaces (except for fake ones)
are using IPA. There are two types of them:
* For nodes using the :ref:`iscsi-deploy` interface, IPA exposes the root hard
drive as an iSCSI share and calls back to the ironic conductor. The
conductor mounts the share and copies an image there. It then signals back
to IPA for post-installation actions like setting up a bootloader for local
boot support.
* For nodes using the :ref:`direct-deploy` interface, the conductor prepares
a swift temporary URL for an image. IPA then handles the whole deployment
process: downloading an image from swift, putting it on the machine and doing
any post-deploy actions.
Which one to choose depends on your environment. :ref:`iscsi-deploy` puts
higher load on conductors, :ref:`direct-deploy` currently requires the whole
image to fit in the node's memory, except when using raw images. It also
requires :doc:`/install/configure-glance-swift`.
.. todo: other differences?
Starting with the Kilo release all deploy interfaces (except for fake ones) are
using IPA. For nodes using the :ref:`direct-deploy` interface, the conductor
prepares a swift temporary URL or a local HTTP URL for the image. IPA then
handles the whole deployment process: downloading the image from the provided
URL, putting it on the machine and doing any post-deploy actions.
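
To illustrate the temporary URL mechanism the conductor relies on, here is a
short sketch of Swift's documented tempURL signature scheme (the key, account
and object names below are made up)::

    import hmac
    import time
    from hashlib import sha1

    method = "GET"
    expires = int(time.time()) + 3600          # URL valid for one hour
    path = "/v1/AUTH_demo/glance/my-image"     # illustrative object path
    key = b"secret-temp-url-key"               # the account's temp URL key

    # Swift validates an HMAC-SHA1 over "METHOD\nEXPIRES\nPATH".
    sig = hmac.new(key, f"{method}\n{expires}\n{path}".encode(), sha1).hexdigest()
    print(f"https://swift.example.com{path}"
          f"?temp_url_sig={sig}&temp_url_expires={expires}")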
Requirements
------------

@@ -88,7 +88,7 @@ interfaces enabled for ``irmc`` hardware type.
enabled_bios_interfaces = irmc
enabled_boot_interfaces = irmc-virtual-media,irmc-pxe
enabled_console_interfaces = ipmitool-socat,ipmitool-shellinabox,no-console
enabled_deploy_interfaces = iscsi,direct
enabled_deploy_interfaces = direct
enabled_inspect_interfaces = irmc,inspector,no-inspect
enabled_management_interfaces = irmc
enabled_network_interfaces = flat,neutron

@@ -24,8 +24,8 @@ common, and usually requires bootstrapping using PXE first.
The ``pxe`` boot interface works by preparing a PXE/iPXE environment for a
node on the file system, then instructing the DHCP provider (for example,
the Networking service) to boot the node from it. See
:ref:`iscsi-deploy-example` and :ref:`direct-deploy-example` for a better
understanding of the whole deployment process.
:ref:`direct-deploy-example` for a better understanding of the whole deployment
process.
.. note::
Both PXE and iPXE are configured differently, when UEFI boot is used

@@ -105,7 +105,7 @@ section of ironic's configuration file:
[DEFAULT]
...
enabled_deploy_interfaces = iscsi,direct,ansible
enabled_deploy_interfaces = direct,ansible
...
Once enabled, you can specify this deploy interface when creating or updating
@@ -133,26 +133,3 @@ Ramdisk deploy
The ramdisk interface is intended to provide a mechanism to "deploy" an
instance where the item to be deployed is in reality a ramdisk. It is
documented separately, see :doc:`/admin/ramdisk-boot`.
.. _iscsi-deploy:
iSCSI deploy
============
.. warning::
This deploy interface is deprecated and will be removed in the Xena release
cycle. Please use `direct deploy`_ instead.
With ``iscsi`` deploy interface, the deploy ramdisk publishes the node's hard
drive as an iSCSI_ share. The ironic-conductor then copies the image to this
share. See :ref:`iSCSI deploy diagram <iscsi-deploy-example>` for a detailed
explanation of how this deploy interface works.
This interface is used by default, if enabled (see
:ref:`enable-hardware-interfaces`). You can specify it explicitly
when creating or updating a node::
baremetal node create --driver ipmi --deploy-interface iscsi
baremetal node set <NODE> --deploy-interface iscsi
.. _iSCSI: https://en.wikipedia.org/wiki/ISCSI

@@ -41,13 +41,13 @@ BIOS, and RAID interfaces.
Agent steps
-----------
All deploy interfaces based on ironic-python-agent (i.e. ``direct``, ``iscsi``
and ``ansible`` and any derivatives) expose the following deploy steps:
All deploy interfaces based on ironic-python-agent (i.e. ``direct``,
``ansible`` and any derivatives) expose the following deploy steps:
``deploy.deploy`` (priority 100)
In this step the node is booted using a provisioning image.
``deploy.write_image`` (priority 80)
An out-of-band (``iscsi``, ``ansible``) or in-band (``direct``) step that
An out-of-band (``ansible``) or in-band (``direct``) step that
downloads and writes the image to the node.
``deploy.tear_down_agent`` (priority 40)
In this step the provisioning image is shut down.
@@ -57,7 +57,7 @@ and ``ansible`` and any derivatives) expose the following deploy steps:
``deploy.boot_instance`` (priority 20)
In this step the node is booted into the user image.
Additionally, the ``iscsi`` and ``direct`` deploy interfaces have:
Additionally, the ``direct`` deploy interface has:
``deploy.prepare_instance_boot`` (priority 60)
In this step the boot device is configured and the bootloader is installed.
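
To make the ordering concrete, here is a self-contained sketch of how
priority-ordered steps like the above can be modelled (a stand-in decorator,
not ironic's actual implementation)::

    def deploy_step(priority):
        # Stand-in for ironic's decorator; higher priority runs earlier.
        def decorator(func):
            func._deploy_priority = priority
            return func
        return decorator

    class DirectDeploy:
        @deploy_step(priority=100)
        def deploy(self, task):
            """Boot the node using a provisioning image."""

        @deploy_step(priority=80)
        def write_image(self, task):
            """In-band: the agent downloads and writes the user image."""

        @deploy_step(priority=60)
        def prepare_instance_boot(self, task):
            """Configure the boot device and install the bootloader."""

    steps = sorted(
        (m for m in vars(DirectDeploy).values()
         if hasattr(m, "_deploy_priority")),
        key=lambda m: m._deploy_priority, reverse=True)
    print([s.__name__ for s in steps])
    # ['deploy', 'write_image', 'prepare_instance_boot']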

@@ -210,7 +210,7 @@ Example of node CRUD notification::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@@ -444,7 +444,7 @@ node maintenance notification::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@@ -534,7 +534,7 @@ level, "error" has ERROR. Example of node console notification::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@@ -617,7 +617,7 @@ ironic-conductor is attempting to change the node::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@@ -695,7 +695,7 @@ prior to the correction::
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",
@@ -787,7 +787,7 @@ indicate a node's provision states before state change, "event" is the FSM
"bios_interface": "no-bios",
"boot_interface": "pxe",
"console_interface": "no-console",
"deploy_interface": "iscsi",
"deploy_interface": "direct",
"inspect_interface": "no-inspect",
"management_interface": "ipmitool",
"network_interface": "flat",

@@ -18,7 +18,7 @@ non-default interfaces, it must be enabled and set for a node to be utilized:
[DEFAULT]
...
enabled_deploy_interfaces = iscsi,direct,ramdisk
enabled_deploy_interfaces = direct,ramdisk
...
Once enabled and the conductor(s) have been restarted, the interface can

@@ -420,13 +420,10 @@ Overall:
timers to help ensure a deployment does not fail due to a short-lived
transitory network connectivity failure in the form of a switch port having
moved to a temporary blocking state. Where applicable and possible,
many of these patches have been backported to supported releases,
however users of the iSCSI deployment interface will see the least
capability for these sorts of situations to be handled
automatically. These patches also require that the switchport has an
eventual fallback to a non-bonded mode. If the port remains in a blocking
state, then traffic will be unable to flow and the deloyment is likely to
time out.
many of these patches have been backported to supported releases.
These patches also require that the switchport has an eventual fallback to a
non-bonded mode. If the port remains in a blocking state, then traffic will
be unable to flow and the deployment is likely to time out.
* If you must use LACP, consider ``passive`` LACP negotiation settings
in the network switch as opposed to ``active``. The difference being with
passive the connected workload is likely a server where it should likely
@@ -543,16 +540,10 @@ Again, these sorts of cases will depend upon the exact configuration of the
deployment, but hopefully these are areas where these actions can occur.
* Conversion to raw image files upon download to the conductor, from the
``[DEFAULT]force_raw_images`` option, in particular with the ``iscsi``
deployment interface. Users using glance and the ``direct`` deployment
interface may also experience issues here as the conductor will cache
the image to be written which takes place when the
``[agent]image_download_source`` is set to ``http`` instead of ``swift``.
* Write of a QCOW2 file over the ``iscsi`` deployment interface from the
conductor to the node being deployed can result in large amounts of
"white space" to be written to be transmitted over the wire and written
to the end device.
``[DEFAULT]force_raw_images`` option. Users of Glance may also experience
issues here, as the conductor caches the image to be written when the
``[agent]image_download_source`` option is set to ``http`` instead of
``swift``.
.. note::
The QCOW2 image conversion utility does consume quite a bit of memory
@@ -560,9 +551,8 @@ deployment, but hopefully these are areas where these actions can occur.
is because the files are not sequential in nature, and must be re-assembled
from an internal block mapping. Internally Ironic limits this to 1GB
of RAM. Operators performing large numbers of deployments may wish to
explore the ``direct`` deployment interface in these sorts of cases in
order to minimize the conductor becoming a limiting factor due to memory
and network IO.
disable raw images in these sorts of cases in order to minimize the
conductor becoming a limiting factor due to memory and network IO.
Why are my nodes stuck in a "wait" state?
=========================================

@@ -10,7 +10,7 @@ be asked by API consumers to perform work for which the underlying tools
require large amounts of memory.
The biggest example of this is image conversion. Images not in a raw format
need to be written out to disk (local files or remote in iscsi deploy) which
need to be written out to disk for conversion (when requested) which
requires the conversion process to generate an in-memory map to re-assemble
the image contents into a coherent stream of data. This entire process also
stresses the kernel buffers and cache.
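
For context, the conversion in question boils down to a ``qemu-img``
invocation; a hedged sketch (the paths are illustrative and the ``qemu-img``
binary must be present on the host)::

    import subprocess

    def convert_to_raw(src: str, dst: str) -> None:
        # qemu-img must re-assemble qcow2 clusters into a sequential raw
        # stream; building that in-memory block map is what consumes RAM.
        subprocess.run(["qemu-img", "convert", "-O", "raw", src, dst],
                       check=True)

    convert_to_raw("/var/lib/ironic/images/disk.qcow2",
                   "/var/lib/ironic/images/disk.raw")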

@@ -420,8 +420,8 @@ Ironic
------
Create devstack/local.conf with minimal settings required to enable Ironic.
An example local.conf that enables both ``direct`` and ``iscsi``
:doc:`deploy interfaces </admin/interfaces/deploy>` and uses the ``ipmi``
An example local.conf that enables the ``direct``
:doc:`deploy interface </admin/interfaces/deploy>` and uses the ``ipmi``
hardware type by default::
cd devstack
@@ -468,8 +468,6 @@ hardware type by default::
# interfaces, most often power and management:
#IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake
#IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake
# The 'ipmi' hardware type's default deploy interface is 'iscsi'.
# This would change the default to 'direct':
#IRONIC_DEFAULT_DEPLOY_INTERFACE=direct
# Change this to alter the default driver for nodes created by devstack.
@@ -516,9 +514,8 @@ directory you cloned DevStack::
An example local.conf that enables the ironic tempest plugin and Ironic can be
found below. The ``TEMPEST_PLUGINS`` variable needs to have the absolute path
to the ironic-tempest-plugin folder, otherwise the plugin won't be installed.
Ironic will have enabled both ``direct`` and
``iscsi`` :doc:`deploy interfaces </admin/interfaces/deploy>` and uses the
``ipmi`` hardware type by default::
Ironic will have the ``direct`` :doc:`deploy interface
</admin/interfaces/deploy>` enabled and will use the ``ipmi`` hardware type by
default::
cd devstack
cat >local.conf <<END
@@ -564,8 +561,6 @@ Ironic will have enabled both ``direct`` and
# interfaces, most often power and management:
#IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake
#IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake
# The 'ipmi' hardware type's default deploy interface is 'iscsi'.
# This would change the default to 'direct':
#IRONIC_DEFAULT_DEPLOY_INTERFACE=direct
# Change this to alter the default driver for nodes created by devstack.

@@ -69,7 +69,6 @@ description for DevStack is at :ref:`deploy_devstack`.
# interfaces, most often power and management:
#IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake
#IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake
# The default deploy interface is 'iscsi', you can use 'direct' with
#IRONIC_DEFAULT_DEPLOY_INTERFACE=direct
# Change this to alter the default driver for nodes created by devstack.

@@ -93,7 +93,6 @@ configured in Neutron.
# interfaces, most often power and management:
#IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake
#IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake
# The default deploy interface is 'iscsi', you can use 'direct' with
#IRONIC_DEFAULT_DEPLOY_INTERFACE=direct
# Change this to alter the default driver for nodes created by devstack.

File diff suppressed because it is too large

(deleted image: ironic_standalone_with_ibmc_driver.svg, 130 KiB)

@@ -40,7 +40,6 @@ Upgrade Guide
:maxdepth: 2
admin/upgrade-guide
admin/upgrade-to-hardware-types
User Guide
==========

@@ -1,5 +0,0 @@
Configuring iSCSI-based drivers
-------------------------------
Ensure that the ``qemu-img`` and ``iscsiadm`` tools are installed on the
**ironic-conductor** host(s).

@@ -93,10 +93,8 @@ provisioning will happen in a multi-tenant environment (which means using the
* TFTP
* egress port used for the Bare Metal service (6385 by default)
* ingress port used for ironic-python-agent (9999 by default)
* if using :ref:`iscsi-deploy`, the ingress port used for iSCSI
(3260 by default)
* if using :ref:`direct-deploy`, the egress port used for the Object
Storage service (typically 80 or 443)
Storage service or the local HTTP server (typically 80 or 443)
* if using iPXE, the egress port used for the HTTP server running
on the ironic-conductor nodes (typically 80).

@@ -78,7 +78,7 @@ console
deploy
defines how the image gets transferred to the target disk. See
:doc:`/admin/interfaces/deploy` for an explanation of the difference
between supported deploy interfaces ``direct`` and ``iscsi``.
between supported deploy interfaces.
The deploy interfaces can be enabled as follows:
@@ -86,13 +86,10 @@ deploy
[DEFAULT]
enabled_hardware_types = ipmi,redfish
enabled_deploy_interfaces = iscsi,direct
enabled_deploy_interfaces = direct,ramdisk
Additionally,
* the ``iscsi`` deploy interface requires :doc:`configure-iscsi`
* the ``direct`` deploy interface requires the Object Storage service
.. note::
The ``direct`` deploy interface requires the Object Storage service
or an HTTP service
inspect
implements fetching hardware information from nodes. Can be implemented
@@ -186,7 +183,7 @@ IPMI and Redfish, with a few additional features:
enabled_hardware_types = ipmi,redfish
enabled_boot_interfaces = pxe
enabled_console_interfaces = ipmitool-socat,no-console
enabled_deploy_interfaces = iscsi,direct
enabled_deploy_interfaces = direct
enabled_inspect_interfaces = inspector
enabled_management_interfaces = ipmitool,redfish
enabled_network_interfaces = flat,neutron
@@ -222,7 +219,7 @@ respectively:
[DEFAULT]
enabled_hardware_types = redfish
enabled_deploy_interfaces = iscsi
enabled_deploy_interfaces = ansible
enabled_power_interfaces = redfish
enabled_management_interfaces = redfish
@@ -241,13 +238,13 @@ respectively:
[DEFAULT]
enabled_hardware_types = redfish
enabled_deploy_interfaces = iscsi
enabled_deploy_interfaces = ansible
enabled_power_interfaces = redfish
enabled_management_interfaces = redfish
This is because the ``redfish`` hardware type will have different enabled
*deploy* interfaces on these conductors. It would have been fine if the second
conductor had ``enabled_deploy_interfaces = direct`` instead of ``iscsi``.
conductor had ``enabled_deploy_interfaces = direct`` instead of ``ansible``.
This situation is not detected by the Bare Metal service, but it can cause
inconsistent behavior in the API, when node functionality will depend on

@@ -572,7 +572,7 @@ interfaces for a hardware type (for your deployment):
+-------------------------------+----------------+
| default_boot_interface | pxe |
| default_console_interface | no-console |
| default_deploy_interface | iscsi |
| default_deploy_interface | direct |
| default_inspect_interface | no-inspect |
| default_management_interface | ipmitool |
| default_network_interface | flat |
@@ -581,7 +581,7 @@ interfaces for a hardware type (for your deployment):
| default_vendor_interface | no-vendor |
| enabled_boot_interfaces | pxe |
| enabled_console_interfaces | no-console |
| enabled_deploy_interfaces | iscsi, direct |
| enabled_deploy_interfaces | direct |
| enabled_inspect_interfaces | no-inspect |
| enabled_management_interfaces | ipmitool |
| enabled_network_interfaces | flat, noop |
@@ -627,10 +627,10 @@ Consider the following configuration (shortened for simplicity):
[DEFAULT]
enabled_hardware_types = ipmi,redfish
enabled_console_interfaces = no-console,ipmitool-shellinabox
enabled_deploy_interfaces = iscsi,direct
enabled_deploy_interfaces = direct
enabled_management_interfaces = ipmitool,redfish
enabled_power_interfaces = ipmitool,redfish
default_deploy_interface = direct
default_deploy_interface = ansible
A new node is created with the ``ipmi`` driver and no interfaces specified:
@@ -654,7 +654,7 @@ Then the defaults for the interfaces that will be used by the node in this
example are calculated as follows:
deploy
An explicit value of ``direct`` is provided for
An explicit value of ``ansible`` is provided for
``default_deploy_interface``, so it is used.
power
No default is configured. The ``ipmi`` hardware type supports only

@@ -99,18 +99,6 @@ implementation is available for the hardware, it is recommended to use it
for better scalability and security. Otherwise, it is recommended to use iPXE
when it is supported by target hardware.
Deploy interface
~~~~~~~~~~~~~~~~
There are two deploy interfaces in-tree, ``iscsi`` and ``direct``. See
:doc:`../../admin/interfaces/deploy` for explanation of the difference.
With the ``iscsi`` deploy method, most of the deployment operations happen on
the conductor. If the Object Storage service (swift) or RadosGW is present in
the environment, it is recommended to use the ``direct`` deploy method for
better scalability and reliability.
.. TODO(dtantsur): say something about the ansible deploy, when it's in
Hardware specifications
~~~~~~~~~~~~~~~~~~~~~~~
@@ -328,11 +316,6 @@ the space requirements are different:
``image_download_source`` can also be provided in the node's
``driver_info`` or ``instance_info``. See :ref:`image_download_source`.
* The ``iscsi`` deploy method always requires caching of the whole instance
image locally during the deployment. The image has to be converted to the raw
format, which may increase the required amount of disk space, as well as the
CPU load.
* When network boot is used, the instance image kernel and ramdisk are cached
locally while the instance is active.

@@ -7,4 +7,3 @@ Set up the drivers for the Bare Metal service
enabling-drivers
configure-pxe
configure-ipmi
configure-iscsi

@@ -260,7 +260,7 @@ options.
.. _direct-deploy-example:
Example 1: PXE Boot and Direct Deploy Process
Example: PXE Boot and Direct Deploy Process
---------------------------------------------
This process is how :ref:`direct-deploy` works.
@@ -318,63 +318,5 @@ This process is how :ref:`direct-deploy` works.
(From a `talk`_ and `slides`_)
.. _iscsi-deploy-example:
Example 2: PXE Boot and iSCSI Deploy Process
--------------------------------------------
This process is how the currently deprecated :ref:`iscsi-deploy` works.
.. seqdiag::
:scale: 75
diagram {
Nova; API; Conductor; Neutron; HTTPStore; "TFTP/HTTPd"; Node;
activation = none;
span_height = 1;
edge_length = 250;
default_note_color = white;
default_fontsize = 14;
Nova -> API [label = "Set instance_info\n(image_source,\nroot_gb, etc.)"];
Nova -> API [label = "Validate power and deploy\ninterfaces"];
Nova -> API [label = "Plug VIFs to the node"];
Nova -> API [label = "Set provision_state,\noptionally pass configdrive"];
API -> Conductor [label = "do_node_deploy()"];
Conductor -> Conductor [label = "Validate power and deploy interfaces"];
Conductor -> HTTPStore [label = "Store configdrive if configdrive_use_swift \noption is set"];
Conductor -> Node [label = "POWER OFF"];
Conductor -> Neutron [label = "Attach provisioning network to port(s)"];
Conductor -> Neutron [label = "Update DHCP boot options"];
Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ndeployment"];
Conductor -> Node [label = "Set PXE boot device \nthrough the BMC"];
Conductor -> Conductor [label = "Cache deploy\nkernel, ramdisk,\ninstance images"];
Conductor -> Node [label = "REBOOT"];
Node -> Neutron [label = "DHCP request"];
Neutron -> Node [label = "next-server = Conductor"];
Node -> Node [label = "Runs agent\nramdisk"];
Node -> API [label = "lookup()"];
API -> Node [label = "Pass UUID"];
Node -> API [label = "Heartbeat (UUID)"];
API -> Conductor [label = "Heartbeat"];
Conductor -> Node [label = "Send IPA a command to expose disks via iSCSI"];
Conductor -> Node [label = "iSCSI attach"];
Conductor -> Node [label = "Copies user image and configdrive, if present"];
Conductor -> Node [label = "iSCSI detach"];
Conductor -> Conductor [label = "Delete instance\nimage from cache"];
Conductor -> Node [label = "Install boot loader, if requested"];
Conductor -> Neutron [label = "Update DHCP boot options"];
Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ninstance image"];
Conductor -> Node [label = "Set boot device either to PXE or to disk"];
Conductor -> Node [label = "Collect ramdisk logs"];
Conductor -> Node [label = "POWER OFF"];
Conductor -> Neutron [label = "Detach provisioning network\nfrom port(s)"];
Conductor -> Neutron [label = "Bind tenant port"];
Conductor -> Node [label = "POWER ON"];
Conductor -> Conductor [label = "Mark node as\nACTIVE"];
}
(From a `talk`_ and `slides`_)
.. _talk: https://www.openstack.org/summit/vancouver-2015/summit-videos/presentation/isn-and-039t-it-ironic-the-bare-metal-cloud
.. _slides: http://www.slideshare.net/devananda1/isnt-it-ironic-managing-a-bare-metal-cloud-osl-tes-2015

@@ -2,9 +2,6 @@
# This file should be owned by (and only-writable by) the root user
[Filters]
# ironic/drivers/modules/deploy_utils.py
iscsiadm: CommandFilter, iscsiadm, root
# ironic/common/utils.py
mount: CommandFilter, mount, root
umount: CommandFilter, umount, root

@@ -64,8 +64,6 @@ dbapi = db_api.get_instance()
# object, in case it is lazy loaded. The attribute will be accessed when needed
# by doing getattr on the object
ONLINE_MIGRATIONS = (
# Added in Victoria, remove when removing iscsi deploy.
(dbapi, 'migrate_from_iscsi_deploy'),
# NOTE(rloo): Don't remove this; it should always be last
(dbapi, 'update_to_latest_versions'),
)
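
For background, each entry in this tuple is driven the same way: the callable
takes ``(context, max_count)`` and returns a 2-tuple of totals, per the
docstrings further down. An illustrative runner (not ironic's actual dbsync
code)::

    def run_online_migrations(migrations, context=None, max_count=50):
        # Each callable returns (total_to_migrate, num_migrated).
        for obj, name in migrations:
            total, done = getattr(obj, name)(context, max_count)
            print(f"{name}: migrated {done} of {total}")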

@@ -35,7 +35,6 @@ from ironic.conf import ilo
from ironic.conf import inspector
from ironic.conf import ipmi
from ironic.conf import irmc
from ironic.conf import iscsi
from ironic.conf import metrics
from ironic.conf import metrics_statsd
from ironic.conf import molds
@@ -51,6 +50,7 @@ from ironic.conf import xclarity
CONF = cfg.CONF
agent.register_opts(CONF)
anaconda.register_opts(CONF)
ansible.register_opts(CONF)
api.register_opts(CONF)
audit.register_opts(CONF)
@@ -69,8 +69,6 @@ ilo.register_opts(CONF)
inspector.register_opts(CONF)
ipmi.register_opts(CONF)
irmc.register_opts(CONF)
iscsi.register_opts(CONF)
anaconda.register_opts(CONF)
metrics.register_opts(CONF)
metrics_statsd.register_opts(CONF)
molds.register_opts(CONF)

@@ -1,44 +0,0 @@
# Copyright 2016 Intel Corporation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic.common.i18n import _
opts = [
cfg.PortOpt('portal_port',
default=3260,
mutable=True,
help=_('The port number on which the iSCSI portal listens '
'for incoming connections.')),
cfg.StrOpt('conv_flags',
mutable=True,
help=_('Flags that need to be sent to the dd command, '
'to control the conversion of the original file '
'when copying to the host. It can contain several '
'options separated by commas.')),
cfg.IntOpt('verify_attempts',
default=3,
min=1,
mutable=True,
help=_('Maximum attempts to verify an iSCSI connection is '
'active, sleeping 1 second between attempts. Defaults '
'to 3.')),
]
def register_opts(conf):
conf.register_opts(opts, group='iscsi')

@@ -34,7 +34,6 @@ _opts = [
('inspector', ironic.conf.inspector.list_opts()),
('ipmi', ironic.conf.ipmi.opts),
('irmc', ironic.conf.irmc.opts),
('iscsi', ironic.conf.iscsi.opts),
('anaconda', ironic.conf.anaconda.opts),
('metrics', ironic.conf.metrics.opts),
('metrics_statsd', ironic.conf.metrics_statsd.opts),

@@ -973,18 +973,6 @@ class Connection(object, metaclass=abc.ABCMeta):
of migrated objects.
"""
@abc.abstractmethod
def migrate_from_iscsi_deploy(self, context, max_count):
"""Tries to migrate away from the iscsi deploy interface.
:param context: the admin context
:param max_count: The maximum number of objects to migrate. Must be
>= 0. If zero, all the objects will be migrated.
:returns: A 2-tuple, 1. the total number of objects that need to be
migrated (at the beginning of this call) and 2. the number
of migrated objects.
"""
@abc.abstractmethod
def set_node_traits(self, node_id, traits, version):
"""Replace all of the node traits with specified list of traits.

@@ -1578,59 +1578,6 @@ class Connection(api.Connection):
return total_to_migrate, total_migrated
@oslo_db_api.retry_on_deadlock
def migrate_from_iscsi_deploy(self, context, max_count, force=False):
"""Tries to migrate away from the iscsi deploy interface.
:param context: the admin context
:param max_count: The maximum number of objects to migrate. Must be
>= 0. If zero, all the objects will be migrated.
:returns: A 2-tuple, 1. the total number of objects that need to be
migrated (at the beginning of this call) and 2. the number
of migrated objects.
"""
# TODO(dtantsur): maybe change to force=True by default in W?
if not force:
if 'direct' not in CONF.enabled_deploy_interfaces:
LOG.warning('The direct deploy interface is not enabled, will '
'not migrate nodes to it. Run with --option '
'force=true to override.')
return 0, 0
if CONF.default_deploy_interface == 'iscsi':
LOG.warning('The iscsi deploy interface is the default, will '
'not migrate nodes away from it. Run with '
'--option force=true to override.')
return 0, 0
if CONF.agent.image_download_source == 'swift':
LOG.warning('The direct deploy interface is using swift, will '
'not migrate nodes to it. Run with --option '
'force=true to override.')
return 0, 0
total_to_migrate = (model_query(models.Node)
.filter_by(deploy_interface='iscsi')
.count())
if not total_to_migrate:
return 0, 0
max_to_migrate = max_count or total_to_migrate
with _session_for_write():
query = (model_query(models.Node.id)
.filter_by(deploy_interface='iscsi')
.slice(0, max_to_migrate))
ids = [row[0] for row in query]
num_migrated = (model_query(models.Node)
.filter_by(deploy_interface='iscsi')
.filter(models.Node.id.in_(ids))
.update({'deploy_interface': 'direct'},
synchronize_session=False))
return total_to_migrate, num_migrated
@staticmethod
def _verify_max_traits_per_node(node_id, num_traits):
"""Verify that an operation would not exceed the per-node trait limit.

@@ -23,7 +23,6 @@ from ironic.drivers.modules.ansible import deploy as ansible_deploy
from ironic.drivers.modules import fake
from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules.network import flat as flat_net
from ironic.drivers.modules.network import neutron
from ironic.drivers.modules.network import noop as noop_net
@@ -49,9 +48,9 @@ class GenericHardware(hardware_type.AbstractHardwareType):
@property
def supported_deploy_interfaces(self):
"""List of supported deploy interfaces."""
return [agent.AgentDeploy, iscsi_deploy.ISCSIDeploy,
ansible_deploy.AnsibleDeploy, pxe.PXERamdiskDeploy,
pxe.PXEAnacondaDeploy, agent.CustomAgentDeploy]
return [agent.AgentDeploy, ansible_deploy.AnsibleDeploy,
pxe.PXERamdiskDeploy, pxe.PXEAnacondaDeploy,
agent.CustomAgentDeploy]
@property
def supported_inspect_interfaces(self):

@@ -342,34 +342,6 @@ class AgentClient(object):
{'cmd': method, 'node': node.uuid})
return None
@METRICS.timer('AgentClient.start_iscsi_target')
def start_iscsi_target(self, node, iqn,
portal_port=DEFAULT_IPA_PORTAL_PORT,
wipe_disk_metadata=False):
"""Expose the node's disk as an ISCSI target.
:param node: an Ironic node object
:param iqn: iSCSI target IQN
:param portal_port: iSCSI portal port
:param wipe_disk_metadata: True if the agent should wipe first the
disk magic strings like the partition
table, RAID or filesystem signature.
:raises: IronicException when failed to issue the request or there was
a malformed response from the agent.
:raises: AgentAPIError when agent failed to execute specified command.
:raises: AgentInProgress when the command fails to execute as the agent
is presently executing the prior command.
:returns: A dict containing command response from agent.
See :func:`get_commands_status` for a command result sample.
"""
params = {'iqn': iqn,
'portal_port': portal_port,
'wipe_disk_metadata': wipe_disk_metadata}
return self._command(node=node,
method='iscsi.start_iscsi_target',
params=params,
wait=True)
@METRICS.timer('AgentClient.install_bootloader')
def install_bootloader(self, node, root_uuid, target_boot_mode,
efi_system_part_uuid=None,

@@ -1,813 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import glob
import os
import time
from urllib import parse as urlparse
from ironic_lib import disk_utils
from ironic_lib import metrics_utils
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import agent_base
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
DISK_LAYOUT_PARAMS = ('root_gb', 'swap_mb', 'ephemeral_gb')
def _save_disk_layout(node, i_info):
"""Saves the disk layout.
The disk layout used for deployment of the node, is saved.
:param node: the node of interest
:param i_info: instance information (a dictionary) for the node, containing
disk layout information
"""
driver_internal_info = node.driver_internal_info
driver_internal_info['instance'] = {}
for param in DISK_LAYOUT_PARAMS:
driver_internal_info['instance'][param] = i_info[param]
node.driver_internal_info = driver_internal_info
node.save()
def discovery(portal_address, portal_port):
"""Do iSCSI discovery on portal."""
utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'st',
'-p', '%s:%s' % (utils.wrap_ipv6(portal_address),
portal_port),
run_as_root=True,
attempts=5,
delay_on_retry=True)
def login_iscsi(portal_address, portal_port, target_iqn):
"""Login to an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (utils.wrap_ipv6(portal_address),
portal_port),
'-T', target_iqn,
'--login',
run_as_root=True,
attempts=5,
delay_on_retry=True)
error_occurred = False
try:
# Ensure the login complete
verify_iscsi_connection(target_iqn)
# force iSCSI initiator to re-read luns
force_iscsi_lun_update(target_iqn)
# ensure file system sees the block device
check_file_system_for_iscsi_device(portal_address,
portal_port,
target_iqn)
except (exception.InstanceDeployFailure,
processutils.ProcessExecutionError) as e:
with excutils.save_and_reraise_exception():
error_occurred = True
LOG.error("Failed to login to an iSCSI target due to %s", e)
finally:
if error_occurred:
try:
logout_iscsi(portal_address, portal_port, target_iqn)
delete_iscsi(portal_address, portal_port, target_iqn)
except processutils.ProcessExecutionError as e:
LOG.warning("An error occurred when trying to cleanup "
"failed ISCSI session error %s", e)
def check_file_system_for_iscsi_device(portal_address,
portal_port,
target_iqn):
"""Ensure the file system sees the iSCSI block device."""
check_dir = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-1" % (portal_address,
portal_port,
target_iqn)
total_checks = CONF.iscsi.verify_attempts
for attempt in range(total_checks):
if os.path.exists(check_dir):
break
time.sleep(1)
if LOG.isEnabledFor(logging.DEBUG):
existing_devs = ', '.join(glob.iglob('/dev/disk/by-path/*iscsi*'))
LOG.debug("iSCSI connection not seen by file system. Rechecking. "
"Attempt %(attempt)d out of %(total)d. Available iSCSI "
"devices: %(devs)s.",
{"attempt": attempt + 1,
"total": total_checks,
"devs": existing_devs})
else:
msg = _("iSCSI connection was not seen by the file system after "
"attempting to verify %d times.") % total_checks
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def verify_iscsi_connection(target_iqn):
"""Verify iscsi connection."""
LOG.debug("Checking for iSCSI target to become active.")
total_checks = CONF.iscsi.verify_attempts
for attempt in range(total_checks):
out, _err = utils.execute('iscsiadm',
'-m', 'node',
'-S',
run_as_root=True)
if target_iqn in out:
break
time.sleep(1)
LOG.debug("iSCSI connection not active. Rechecking. Attempt "
"%(attempt)d out of %(total)d",
{"attempt": attempt + 1, "total": total_checks})
else:
msg = _("iSCSI connection did not become active after attempting to "
"verify %d times.") % total_checks
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def force_iscsi_lun_update(target_iqn):
"""force iSCSI initiator to re-read luns."""
LOG.debug("Re-reading iSCSI luns.")
utils.execute('iscsiadm',
'-m', 'node',
'-T', target_iqn,
'-R',
run_as_root=True)
def logout_iscsi(portal_address, portal_port, target_iqn):
"""Logout from an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (utils.wrap_ipv6(portal_address),
portal_port),
'-T', target_iqn,
'--logout',
run_as_root=True,
attempts=5,
delay_on_retry=True)
def delete_iscsi(portal_address, portal_port, target_iqn):
"""Delete the iSCSI target."""
# Retry delete until it succeeds (exit code 0) or until there is
# no longer a target to delete (exit code 21).
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (utils.wrap_ipv6(portal_address),
portal_port),
'-T', target_iqn,
'-o', 'delete',
run_as_root=True,
check_exit_code=[0, 21],
attempts=5,
delay_on_retry=True)
@contextlib.contextmanager
def _iscsi_setup_and_handle_errors(address, port, iqn, lun):
"""Function that yields an iSCSI target device to work on.
:param address: The iSCSI IP address.
:param port: The iSCSI port number.
:param iqn: The iSCSI qualified name.
:param lun: The iSCSI logical unit number.
"""
dev = ("/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s"
% (address, port, iqn, lun))
discovery(address, port)
login_iscsi(address, port, iqn)
if not disk_utils.is_block_device(dev):
raise exception.InstanceDeployFailure(_("Parent device '%s' not found")
% dev)
try:
yield dev
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error("Deploy to address %s failed.", address)
LOG.error("Command: %s", err.cmd)
LOG.error("StdOut: %r", err.stdout)
LOG.error("StdErr: %r", err.stderr)
except exception.InstanceDeployFailure as e:
with excutils.save_and_reraise_exception():
LOG.error("Deploy to address %s failed.", address)
LOG.error(e)
finally:
logout_iscsi(address, port, iqn)
delete_iscsi(address, port, iqn)
def deploy_partition_image(
address, port, iqn, lun, image_path,
root_mb, swap_mb, ephemeral_mb, ephemeral_format, node_uuid,
preserve_ephemeral=False, configdrive=None,
boot_option=None, boot_mode="bios", disk_label=None,
cpu_arch=""):
"""All-in-one function to deploy a partition image to a node.
:param address: The iSCSI IP address.
:param port: The iSCSI port number.
:param iqn: The iSCSI qualified name.
:param lun: The iSCSI logical unit number.
:param image_path: Path for the instance's disk image.
:param root_mb: Size of the root partition in megabytes.
:param swap_mb: Size of the swap partition in megabytes.
:param ephemeral_mb: Size of the ephemeral partition in megabytes. If 0,
no ephemeral partition will be created.
:param ephemeral_format: The type of file system to format the ephemeral
partition.
:param node_uuid: node's uuid. Used for logging.
:param preserve_ephemeral: If True, no filesystem is written to the
ephemeral block device, preserving whatever
content it had (if the partition table has
not changed).
:param configdrive: Optional. Base64 encoded Gzipped configdrive content
or configdrive HTTP URL.
:param boot_option: Can be "local" or "netboot".
"netboot" by default.
:param boot_mode: Can be "bios" or "uefi". "bios" by default.
:param disk_label: The disk label to be used when creating the
partition table. Valid values are: "msdos",
"gpt" or None; If None ironic will figure it
out according to the boot_mode parameter.
:param cpu_arch: Architecture of the node being deployed to.
:raises: InstanceDeployFailure if image virtual size is bigger than root
partition size.
:returns: a dictionary containing the following keys:
'root uuid': UUID of root partition
'efi system partition uuid': UUID of the uefi system partition
(if boot mode is uefi).
NOTE: If key exists but value is None, it means partition doesn't
exist.
"""
# NOTE(dtantsur): CONF.default_boot_option is mutable, don't use it in
# the function signature!
boot_option = boot_option or deploy_utils.get_default_boot_option()
image_mb = disk_utils.get_image_mb(image_path)
if image_mb > root_mb:
msg = (_('Root partition is too small for requested image. Image '
'virtual size: %(image_mb)d MB, Root size: %(root_mb)d MB')
% {'image_mb': image_mb, 'root_mb': root_mb})
raise exception.InstanceDeployFailure(msg)
with _iscsi_setup_and_handle_errors(address, port, iqn, lun) as dev:
uuid_dict_returned = disk_utils.work_on_disk(
dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format, image_path,
node_uuid, preserve_ephemeral=preserve_ephemeral,
configdrive=configdrive, boot_option=boot_option,
boot_mode=boot_mode, disk_label=disk_label, cpu_arch=cpu_arch)
return uuid_dict_returned
def deploy_disk_image(address, port, iqn, lun,
image_path, node_uuid, configdrive=None,
conv_flags=None):
"""All-in-one function to deploy a whole disk image to a node.
:param address: The iSCSI IP address.
:param port: The iSCSI port number.
:param iqn: The iSCSI qualified name.
:param lun: The iSCSI logical unit number.
:param image_path: Path for the instance's disk image.
:param node_uuid: node's uuid.
:param configdrive: Optional. Base64 encoded Gzipped configdrive content
or configdrive HTTP URL.
:param conv_flags: Optional. Add a flag that will modify the behaviour of
the image copy to disk.
:returns: a dictionary containing the key 'disk identifier' to identify
the disk which was used for deployment.
"""
with _iscsi_setup_and_handle_errors(address, port, iqn,
lun) as dev:
disk_utils.populate_image(image_path, dev, conv_flags=conv_flags)
if configdrive:
disk_utils.create_config_drive_partition(node_uuid, dev,
configdrive)
disk_identifier = disk_utils.get_disk_identifier(dev)
return {'disk identifier': disk_identifier}
@METRICS.timer('check_image_size')
def check_image_size(task):
"""Check if the requested image is larger than the root partition size.
Does nothing for whole-disk images.
:param task: a TaskManager instance containing the node to act on.
:raises: InstanceDeployFailure if size of the image is greater than root
partition.
"""
if task.node.driver_internal_info['is_whole_disk_image']:
# The root partition is already created and populated, no use
# validating its size
return
i_info = deploy_utils.parse_instance_info(task.node)
image_path = deploy_utils._get_image_file_path(task.node.uuid)
image_mb = disk_utils.get_image_mb(image_path)
root_mb = 1024 * int(i_info['root_gb'])
if image_mb > root_mb: