Remove Ansible-deploy interface
It is now part of ironic itself, and the version in ironic-staging-drivers conflicts with the one in ironic, blocking ironic-conductor from starting when ironic-staging-drivers is installed.

Change-Id: I917b7399b3249143ba6cf75f61d96a1c64f94e7d
parent e3cb506e2e
commit 725b85484f
.zuul.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
- job:
    name: ironic-staging-drivers-dsvm-all-drivers
    parent: legacy-dsvm-base
    run: playbooks/legacy/ironic-staging-drivers-dsvm-all-drivers/run.yaml
    post-run: playbooks/legacy/ironic-staging-drivers-dsvm-all-drivers/post.yaml
    timeout: 4800
    irrelevant-files:
      - ^test-requirements.txt$
      - ^setup.cfg$
      - ^doc/.*$
      - ^releasenotes/.*$
      - ^ironic-staging-drivers/tests/.*$
    required-projects:
      - openstack-infra/devstack-gate
      - openstack/ironic
      - openstack/ironic-staging-drivers

- project:
    name: openstack/ironic-staging-drivers
    check:
      jobs:
        - ironic-staging-drivers-dsvm-all-drivers
    gate:
      jobs:
        - ironic-staging-drivers-dsvm-all-drivers
@@ -3,7 +3,9 @@

IRONIC_STAGING_DRIVERS_DIR=$DEST/ironic-staging-drivers
IRONIC_DRIVERS_EXCLUDED_DIRS='tests common'
IRONIC_STAGING_DRIVER=${IRONIC_STAGING_DRIVER:-}
# NOTE(pas-ha) change this back when there is any other than the former
# ansible-deploy driver that can be set up by this devstack plugin
IRONIC_STAGING_DRIVER=""
# NOTE(pas-ha) skip iboot drivers by default as they require a package not available on PyPI
IRONIC_STAGING_DRIVERS_SKIPS=${IRONIC_STAGING_DRIVERS_SKIPS:-"iboot"}
IRONIC_STAGING_DRIVERS_FILTERS=${IRONIC_STAGING_DRIVERS_FILTERS:-}

@@ -15,6 +17,7 @@ if [[ -n "$IRONIC_STAGING_DRIVERS_FILTERS" ]]; then
    IRONIC_STAGING_LIST_EP_CMD+=" -f $IRONIC_STAGING_DRIVERS_FILTERS"
fi

function setup_ironic_enabled_interfaces_for {

    local iface=$1

@@ -91,88 +94,11 @@ function install_drivers_dependencies {
}

function configure_ironic_testing_driver {
    if [[ "$IRONIC_STAGING_DRIVER" =~ "ansible" && \
          "$IRONIC_STAGING_DRIVER" =~ "ipmi" ]]; then
        echo_summary "Configuring ansible deploy driver interface"
        configure_ansible_deploy_driver
    else
        die $LINENO "Failed to configure ${IRONIC_STAGING_DRIVER} driver/hw type: not supported by devstack plugin or other pre-conditions not met"
    fi
}

function configure_ansible_deploy_driver {
    # NOTE(pas-ha) DevStack now defaults to tls-proxy being enabled.
    # Using a custom CA bundle is not that easy with TinyCore,
    # requiring extra rebuild steps and resulting in a bigger image,
    # so just disable validating SSL certs for now in DevStack,
    # similar to what ironic does for IPA by default in DevStack
    iniset $IRONIC_CONF_FILE ansible image_store_insecure True

    # set logging for ansible-deploy
    # NOTE(pas-ha) w/o systemd or syslog, there will be no output
    # of single ansible tasks to the ironic log,
    # only in the stdout returned by processutils
    if [[ "$USE_SYSTEMD" == "True" ]]; then
        iniset $IRONIC_STAGING_DRIVERS_DIR/ironic_staging_drivers/ansible/playbooks/callback_plugins/ironic_log.ini ironic use_journal "True"
    elif [[ "$SYSLOG" == "True" ]]; then
        iniset $IRONIC_STAGING_DRIVERS_DIR/ironic_staging_drivers/ansible/playbooks/callback_plugins/ironic_log.ini ironic use_syslog "True"
    fi
    die $LINENO "Failed to configure ${IRONIC_STAGING_DRIVER} driver/hw type: not supported by devstack plugin or other pre-conditions not met"
}

function set_ironic_testing_driver {
    if [[ "$IRONIC_STAGING_DRIVER" =~ "ansible" && \
          "$IRONIC_STAGING_DRIVER" =~ "ipmi" && \
          "$IRONIC_DEPLOY_DRIVER" == "agent_ipmitool" && \
          "$IRONIC_RAMDISK_TYPE" == "tinyipa" ]]; then
        echo_summary "Setting nodes to use 'staging-ansible-ipmi' hardware type with 'staging-ansible' deploy interface"
        set_ansible_deploy_driver
    else
        die $LINENO "Failed to configure ironic to use ${IRONIC_STAGING_DRIVER} driver/hw type: not supported by devstack plugin or other pre-conditions not met"
    fi
}

function set_ansible_deploy_driver {
    local tinyipa_ramdisk_name
    local ansible_key_file
    local ansible_ramdisk_id

    # ensure the tinyipa ramdisk is present in Glance
    tinyipa_ramdisk_name=$(openstack --os-cloud devstack-admin image show ${IRONIC_DEPLOY_RAMDISK_ID} -f value -c name)
    if [ -z $tinyipa_ramdisk_name ]; then
        die $LINENO "Failed to find ironic deploy ramdisk ${IRONIC_DEPLOY_RAMDISK_ID}"
    fi

    cd $IRONIC_STAGING_DRIVERS_DIR/imagebuild/tinyipa-ansible
    # download original tinyipa ramdisk from Glance
    openstack --os-cloud devstack-admin image save ${IRONIC_DEPLOY_RAMDISK_ID} --file ${tinyipa_ramdisk_name}
    export TINYIPA_RAMDISK_FILE="${PWD}/${tinyipa_ramdisk_name}"
    # generate SSH keys for deploy ramdisk and ansible driver
    mkdir -p ${IRONIC_DATA_DIR}/ssh_keys
    ansible_key_file="${IRONIC_DATA_DIR}/ssh_keys/ansible_key"
    ssh-keygen -q -t rsa -N "" -f ${ansible_key_file}
    export SSH_PUBLIC_KEY=${ansible_key_file}.pub
    # rebuild ramdisk, produces ansible-${tinyipa_ramdisk_name} file
    make
    # upload rebuilt ramdisk to Glance
    ansible_ramdisk_id=$(openstack --os-cloud devstack-admin image create "ansible-${tinyipa_ramdisk_name}" \
        --file "${PWD}/ansible-${tinyipa_ramdisk_name}" \
        --disk-format ari --container-format ari \
        --public \
        -f value -c id)

    for node in $(openstack --os-cloud devstack baremetal node list -f value -c UUID); do
        # switch driver to ansible-enabled hardware type, use minimal API version that supports setting driver interfaces,
        # set nodes to use the uploaded ramdisk and appropriate SSH creds.
        # TODO(pas-ha) remove API version when OSC defaults to 'latest'
        # TODO(pas-ha) change the job definition in project-config to set the HW type
        # when stable/pike is no longer supported
        openstack --os-cloud devstack-admin --os-baremetal-api-version 1.31 baremetal node set $node \
            --driver staging-ansible-ipmi \
            --deploy-interface staging-ansible \
            --driver-info deploy_ramdisk=$ansible_ramdisk_id \
            --driver-info ansible_deploy_username=tc \
            --driver-info ansible_deploy_key_file=$ansible_key_file
    done
    die $LINENO "Failed to configure ironic to use ${IRONIC_STAGING_DRIVER} driver/hw type: not supported by devstack plugin or other pre-conditions not met"
}

echo_summary "ironic-staging-drivers plugin.sh was called..."
@@ -12,4 +12,3 @@ Available drivers
   drivers/iboot
   drivers/libvirt
   drivers/intel_nm
   drivers/ansible
@@ -1,546 +0,0 @@
.. _ansible:

#####################
Ansible-deploy driver
#####################

Ansible is an already mature and popular automation tool, written in Python
and requiring no agents running on the node being configured.
All communications with the node are by default performed over secure SSH
transport.

The Ansible-deploy deployment driver uses Ansible playbooks to define the
deployment logic. It is not based on `Ironic Python Agent`_ (IPA)
and does not generally need IPA to be running in the deploy ramdisk.

.. note::
    The "playbook API", that is, the set and structure of variables passed
    into playbooks from the driver, is not stable yet and will most probably
    change in future versions.

Overview
========

The main advantage of this driver is extended flexibility in changing
and adapting node deployment logic to a particular use case,
using tooling already familiar to operators.

It also shortens the usual feature development cycle of

* implementing logic in ironic,
* implementing logic in IPA,
* rebuilding the deploy ramdisk,
* uploading it to Glance/HTTP storage,
* reassigning the deploy ramdisk to nodes,
* restarting the ironic service, and
* running a test deployment

by using a more "stable" deploy ramdisk and not requiring
ironic-conductor restarts (see `Extending playbooks`_).

The main disadvantage is the synchronous manner of performing
deployment/cleaning tasks, as Ansible is invoked as the ``ansible-playbook``
CLI command via Python's ``subprocess`` library.

Each action (deploy, clean) is described by a single playbook with roles,
which is run as a whole during deployment, or tag-wise during cleaning.
Control of cleaning steps is through tags and an auxiliary clean steps file.
The playbooks for actions can be set per-node, as can the cleaning steps
file.

Features
--------

Two modes for continuing deployment are supported (configured in driver
options, see `Configuration file`_):

- having the deploy ramdisk call back to ironic API's
  ``heartbeat`` endpoint (default)
- polling the node until the SSH port is open as part of a playbook
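
For the polling mode, the bundled ``wait`` role does essentially the
following. This is a minimal sketch only: the task parameters are
illustrative, and the ``ip`` variable is assumed to be the per-node access
info set by the ``add-ironic-nodes.yaml`` play described below:

.. code-block:: yaml

    # illustrative sketch, not the actual 'wait' role contents
    - name: wait for SSH to come up on the node
      wait_for:
        host: "{{ ip }}"
        port: 22
        timeout: 600
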
User images
~~~~~~~~~~~

Supports whole-disk images and partition images:

- compressed images are downloaded to RAM and converted to the disk device;
- raw images are streamed to disk directly.

For partition images the driver will create the root partition, and,
if requested, ephemeral and swap partitions as set in the node's
``instance_info`` by nova or the operator.
The partition table created will be of ``msdos`` type by default;
the node's ``disk_label`` capability is honored if it is set in the node's
``instance_info``.

Configdrive partition
~~~~~~~~~~~~~~~~~~~~~

Creating a configdrive partition is supported for both whole-disk
and partition images, on both ``msdos`` and ``GPT`` labeled disks.

Root device hints
~~~~~~~~~~~~~~~~~

Root device hints are currently supported in their basic form only
(with exact matches, without oslo.utils operators).
If no root device hint is provided for the node, the first device returned
as part of the ``ansible_devices`` fact is used as the root device to create
partitions on or write the whole-disk image to.

Node cleaning
~~~~~~~~~~~~~

Cleaning is supported, both automated and manual.
Currently the driver has two default clean steps:

- wiping device metadata
- disk shredding

Their priority can be overridden via options in the ironic configuration
file's ``[deploy]`` section, the same as for IPA-based drivers.

As all cleaning steps for this driver are known to the conductor,
booting the deploy ramdisk is completely skipped when
there are no cleaning steps to perform.

Aborting cleaning tasks is not supported.

Logging
~~~~~~~

Logging is implemented as a custom Ansible callback module
that makes use of the ``oslo.log`` and ``oslo.config`` libraries
and can re-use logging configuration defined in the main ironic configuration
file (``/etc/ironic/ironic.conf`` by default) to set logging for Ansible
events, or use a separate file for this purpose.

.. note::
    Currently this has some quirks in DevStack - due to the default
    logging system there, the ``log_file`` must be set explicitly in
    ``$playbooks_path/callback_plugins/ironic_log.ini`` when running
    DevStack in 'developer' mode using ``screen``.


Requirements
============

ironic
    Requires ironic version >= 8.0.0 (Pike release or newer).

Ansible
    Tested with and targets Ansible ≥ 2.1

Bootstrap image requirements
----------------------------

- password-less sudo permissions for the user used by Ansible
- python 2.7.x
- openssh-server
- GNU coreutils
- util-linux
- parted
- gdisk
- qemu-utils
- python-requests (for ironic callback and streaming image download)
- python-netifaces (for ironic callback)

A set of scripts to build a suitable deploy ramdisk based on TinyCore Linux,
and an element for ``diskimage-builder``, are provided.

Setting up your environment
===========================

#. Install ironic (either as part of OpenStack/DevStack or standalone)
#. Install Ansible (``pip install ansible`` should suffice).
#. Install ``ironic-staging-drivers``
#. Edit the ironic configuration file

   A. Add one of the Ansible-enabled drivers to the ``enabled_drivers``
      option (see `Available drivers and options`_).
   B. Add an ``[ansible]`` config section and configure it if needed
      (see `Configuration file`_).

#. (Re)start the ironic-conductor service
#. Build suitable deploy kernel and ramdisk images
#. Upload them to Glance or put them in your HTTP storage
#. Create new or update existing nodes to use the enabled driver
   of your choice and populate `Driver properties for the Node`_ when
   different from the defaults.
#. Deploy the node as usual.

Available drivers and options
=============================

Three drivers are provided:

pxe_ipmitool_ansible
    Uses PXE/iPXE to boot the nodes, and ``ipmitool`` for Power/Management.
    This is the driver to use with real hardware nodes.

pxe_ssh_ansible
    Uses PXE/iPXE to boot the nodes, and ironic's SSH driver for
    Power/Management. Used only in testing environments.

pxe_libvirt_ansible
    Alternative to ``pxe_ssh_ansible``, uses a LibVirt-based driver for
    Power/Management (part of ``ironic-staging-drivers``).
    Can be used for bigger CI environments, where it has better
    performance than ironic's SSH driver.

Ansible-deploy options
----------------------

Configuration file
~~~~~~~~~~~~~~~~~~

Driver options are configured in the ``[ansible]`` section of the ironic
configuration file.

use_ramdisk_callback
    Whether to expect the callback from the deploy ramdisk when it is
    ready to accept commands, or to use passive polling for a running SSH
    daemon on the node as part of running playbooks.
    Note that setting it to False *requires* Neutron to resolve the IP
    of the node for Ansible to attempt connection to, and thus is not
    suitable for standalone deployment.
    Default is True.

verbosity
    None, 0-4. Corresponds to the number of 'v's passed to
    ``ansible-playbook``.
    Default (None) will pass 'vvvv' when global debug is enabled in ironic,
    and nothing otherwise.

ansible_playbook_script
    Full path to the ``ansible-playbook`` script. Useful mostly for
    testing environments when you e.g. run Ansible from source instead
    of installing it.
    Default (None) will search in the ``$PATH`` of the user running the
    ironic-conductor service.

playbooks_path
    Path to the folder that contains all the Ansible-related files
    (Ansible inventory, deployment/cleaning playbooks, roles etc).
    Default is to use the playbooks provided with ``ironic-staging-drivers``
    from where it is installed.

config_file_path
    Path to Ansible's config file. When set to None will use the global
    system default (usually ``/etc/ansible/ansible.cfg``).
    Default is ``playbooks_path``/ansible.cfg

ansible_extra_args
    Extra arguments to pass to ``ansible-playbook`` on each invocation.
    Default is None.

extra_memory
    Memory overhead (in MiB) for the Ansible-related processes
    in the deploy ramdisk.
    Affects the decision whether the downloaded user image will fit into
    the RAM of the node.
    Default is 10.

post_deploy_get_power_state_retries
    Number of times to retry getting the power state to check if the
    bare metal node has been powered off after a soft poweroff.
    Default is 6.

post_deploy_get_power_state_retry_interval
    Amount of time (in seconds) to wait between polling power state
    after triggering a soft poweroff.
    Default is 5.

image_store_insecure
    Boolean to disable validation of the server SSL certificate of
    the image store when downloading the image and configdrive.
    Default is False.

image_store_cafile
    Path to a custom PEM CA bundle to use for validation of the server SSL
    certificate of the image store when downloading the image or configdrive.
    Not currently used by the default playbooks included with the driver.

image_store_certfile
    Path to a client certificate file to use for client SSL authentication
    to the image store when downloading the image or configdrive.
    Not currently used by the default playbooks included with the driver.

image_store_keyfile
    Path to a private key file to use for client SSL authentication
    to the image store when downloading the image or configdrive.
    Not currently used by the default playbooks included with the driver.

Driver properties for the Node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Set them per-node via:

.. code-block:: shell

    ironic node-update <node> <op> driver_info/<key>=<value>

or:

.. code-block:: shell

    openstack baremetal node set <node> --driver-info <key>=<value>


ansible_deploy_username
    User name to use for Ansible to access the node (default is ``ansible``).

ansible_deploy_key_file
    Private SSH key used to access the node. If none is provided (default),
    Ansible will use the default SSH keys configured for the user running
    the ironic-conductor service.
    Also note that private keys with a password must be pre-loaded
    into ``ssh-agent``.

ansible_deploy_playbook
    Name of the playbook file inside the ``playbooks_path`` folder
    to use when deploying this node.
    Default is ``deploy.yaml``.

ansible_shutdown_playbook
    Name of the playbook file inside the ``playbooks_path`` folder
    to use to gracefully shut down the node in-band.
    Default is ``shutdown.yaml``.

ansible_clean_playbook
    Name of the playbook file inside the ``playbooks_path`` folder
    to use when cleaning the node.
    Default is ``clean.yaml``.

ansible_clean_steps_config
    Name of the YAML file inside the ``playbooks_path`` folder
    that holds the description of cleaning steps used by this node,
    and defines playbook tags in the ``ansible_clean_playbook`` file
    corresponding to each cleaning step.
    Default is ``clean_steps.yaml``.


Customizing the deployment logic
================================


Expected playbooks directory layout
-----------------------------------

The ``playbooks_path`` configured in the ironic config is expected
to have a standard layout for an Ansible project with some additions::

    <playbooks_path>
    |
    \_ inventory
    \_ add-ironic-nodes.yaml
    \_ roles
     \_ role1
     \_ role2
     \_ ...
    |
    \_ callback_plugins
     \_ ...
    |
    \_ library
     \_ ...


The extra files relied upon by this driver are:

inventory
    Ansible inventory file containing a single entry of
    ``conductor ansible_connection=local``.
    This basically defines an alias to ``localhost``.
    Its purpose is to make logging for tasks performed by Ansible locally,
    and referencing the localhost in playbooks, more intuitive.
    This also suppresses warnings produced by Ansible about the ``hosts``
    file being empty.

add-ironic-nodes.yaml
    This file contains an Ansible play that populates the in-memory Ansible
    inventory with access info received from the ansible-deploy driver,
    as well as some per-node variables.
    Include it in all your custom playbooks as the first play.

The default ``deploy.yaml`` playbook uses several smaller roles that
correspond to particular stages of the deployment process:

- ``discover`` - e.g. set the root device and image target
- ``prepare`` - if needed, prepare the system, for example create partitions
- ``deploy`` - download/convert/write the user image and configdrive
- ``configure`` - post-deployment steps, e.g. installing the bootloader

Some more included roles are:

- ``wait`` - used when the driver is configured to not use callback from
  the node to start the deployment. This role waits for the OpenSSH server
  to become available on the node to connect to.
- ``shutdown`` - used to gracefully power the node off in-band
- ``clean`` - defines the cleaning procedure, with each clean step defined
  as a separate playbook tag.

Extending playbooks
-------------------

Most probably you'd start experimenting like this (a sketch of the
resulting playbook follows this list):

#. Create a copy of the ``deploy.yaml`` playbook, named distinctively.
#. Create Ansible roles with your customized logic in the ``roles`` folder.

   A. In your custom deploy playbook, replace the ``prepare`` role
      with your own one that defines steps to be run
      *before* image download/writing.
      This is a good place to set facts overriding those provided/omitted
      by the driver, like ``ironic_partitions`` or ``ironic_root_device``,
      and to create custom partitions or (software) RAIDs.
   B. In your custom deploy playbook, replace the ``configure`` role
      with your own one that defines steps to be run
      *after* the image is written to disk.
      This is a good place, for example, to configure the bootloader and
      add kernel options to avoid additional reboots.

#. Assign the custom deploy playbook you've created to the node's
   ``driver_info/ansible_deploy_playbook`` field.
#. Run deployment.

   A. No ironic-conductor restart is necessary.
   B. A new deploy ramdisk must be built and assigned to nodes only when
      you want to use a command/script/package not present in the current
      deploy ramdisk and you can not or do not want
      to install those at runtime.
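
Putting these steps together, here is a minimal sketch of such a custom
playbook. The file name ``my-deploy.yaml`` and the ``my_*`` role names are
hypothetical; the hosts group and the remaining roles follow the defaults
described above:

.. code-block:: yaml

    ---
    # always populate the in-memory inventory first
    - include: add-ironic-nodes.yaml

    - hosts: ironic
      roles:
        - discover
        - my_prepare    # custom replacement for the 'prepare' role
        - deploy
        - my_configure  # custom replacement for the 'configure' role
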
Variables you have access to
----------------------------

This driver passes a single JSON-ified extra vars argument to
Ansible (as ``ansible-playbook -e ..``).
Those values are then accessible in your plays as well
(some of them are optional and might not be defined):

.. code-block:: yaml

    ironic:
      nodes:
      - ip: <IPADDRESS>
        name: <NODE_UUID>
        user: <USER ANSIBLE WILL USE>
        extra: <COPY OF NODE's EXTRA FIELD>
      image:
        url: <URL TO FETCH THE USER IMAGE FROM>
        disk_format: <qcow2|raw|...>
        container_format: <bare|...>
        checksum: <hash-algo:hashstring>
        mem_req: <REQUIRED FREE MEMORY TO DOWNLOAD IMAGE TO RAM>
        tags: <LIST OF IMAGE TAGS AS DEFINED IN GLANCE>
        properties: <DICT OF IMAGE PROPERTIES AS DEFINED IN GLANCE>
      configdrive:
        type: <url|file>
        location: <URL OR PATH ON CONDUCTOR>
      partition_info:
        label: <msdos|gpt>
        preserve_ephemeral: <bool>
        ephemeral_format: <FILESYSTEM TO CREATE ON EPHEMERAL PARTITION>
        partitions: <LIST OF PARTITIONS IN FORMAT EXPECTED BY PARTED MODULE>


Some more explanations:

``ironic.nodes``
    List of dictionaries (currently of only one element) that will be used
    by the ``add-ironic-nodes.yaml`` play to populate the in-memory
    inventory.
    It also contains a copy of the node's ``extra`` field so you can access
    it in the playbooks. The Ansible host is set to the node's UUID.

``ironic.image``
    All fields of the node's ``instance_info`` that start with ``image_``
    are passed inside this variable. Some extra notes and fields:

    - ``mem_req`` is calculated from the image size (if available) and the
      config option ``[ansible]extra_memory``.
    - if ``checksum`` initially does not start with ``hash-algo:``, the
      hashing algorithm is assumed to be ``md5`` (the default in Glance).
    - ``validate_certs`` - boolean (``yes/no``) flag that turns validating
      the image store SSL certificate on or off (default is 'yes').
      Governed by the ``[ansible]image_store_insecure`` option
      in the ironic configuration file.
    - ``cafile`` - custom CA bundle to use for validating the image store
      SSL certificate.
      Takes the value of ``[ansible]image_store_cafile`` if that is defined.
      Currently not used by the default playbooks, as Ansible has no way to
      specify a custom CA bundle for single HTTPS actions;
      however, you can use this value in your custom playbooks, for example
      to upload and register this CA in the ramdisk at deploy time.
    - ``client_cert`` - cert file for client-side SSL authentication.
      Takes the value of the ``[ansible]image_store_certfile`` option if
      defined.
      Currently not used by the default playbooks as it is generally
      available only since Ansible 2.4;
      however, you can use this value in your custom playbooks.
    - ``client_key`` - private key file for client-side SSL authentication.
      Takes the value of the ``[ansible]image_store_keyfile`` option if
      defined.
      Currently not used by the default playbooks as it is generally
      available only since Ansible 2.4;
      however, you can use this value in your custom playbooks.

``ironic.partition_info.partitions``
    Optional. List of dictionaries defining partitions to create on the node
    in the form:

    .. code-block:: yaml

        partitions:
        - name: <NAME OF PARTITION>
          unit: <UNITS FOR SIZE>
          size: <SIZE OF THE PARTITION>
          type: <primary|extended|logical>
          align: <ONE OF PARTED_SUPPORTED OPTIONS>
          format: <PARTITION TYPE TO SET>
          flags:
            flag_name: <bool>
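
    For instance, a purely illustrative concrete value (the names, sizes
    and flags here are made up, not defaults):

    .. code-block:: yaml

        partitions:
        - name: root
          unit: GiB
          size: 10
          type: primary
          format: ext4
          flags:
            boot: yes
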
    The driver will populate this list from the ``root_gb``, ``swap_mb`` and
    ``ephemeral_gb`` fields of ``instance_info``.
    The driver will also prepend the ``bios_grub``-labeled partition
    when deploying on a GPT-labeled disk,
    and pre-create a 64MiB partition for the configdrive if it is set in
    ``instance_info``.

    Please read the documentation included in the ``ironic_parted`` module's
    source for more info on the module and its arguments.

``ironic.partition_info.ephemeral_format``
    Optional. Taken from ``instance_info``, it defines the file system to be
    created on the ephemeral partition.
    Defaults to the value of the ``[pxe]default_ephemeral_format`` option
    in the ironic configuration file.

``ironic.partition_info.preserve_ephemeral``
    Optional. Taken from ``instance_info``, it specifies whether the
    ephemeral partition must be preserved or rebuilt. Defaults to ``no``.

As usual for Ansible playbooks, you also have access to the standard
Ansible facts discovered by the ``setup`` module.
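
As an illustration of consuming these variables in a custom play, a hedged
sketch follows; the task itself is hypothetical, only the variable paths
come from the structure above:

.. code-block:: yaml

    - name: fetch the configdrive when it is served over HTTP
      get_url:
        url: "{{ ironic.configdrive.location }}"
        dest: /tmp/configdrive
        validate_certs: "{{ ironic.image.validate_certs }}"
      when:
        - ironic.configdrive is defined
        - ironic.configdrive.type == 'url'
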
Included custom Ansible modules
-------------------------------

The provided ``playbooks_path/library`` folder includes several custom
Ansible modules used by the default implementation of the ``deploy`` and
``prepare`` roles.
You can use these modules in your playbooks as well.

``stream_url``
    Streaming download from an HTTP(S) source directly to a disk device;
    tries to be compatible with Ansible's ``get_url`` module in terms of
    module arguments.
    Due to the low-level nature of this operation it is not idempotent.

``ironic_parted``
    Creates partition tables and partitions with the ``parted`` utility.
    Due to the low-level nature of this operation it is not idempotent.
    Please read the documentation included in the module's source
    for more information about this module and its arguments.
    The name is chosen so that the ``parted`` module included in Ansible 2.3
    is not shadowed.
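
As a sketch of using ``stream_url`` in a custom play; the argument names
are assumed from its stated ``get_url`` compatibility, and
``ironic_root_device`` is the fact mentioned under `Extending playbooks`_:

.. code-block:: yaml

    - name: stream the raw user image directly to the root disk
      stream_url:
        url: "{{ ironic.image.url }}"
        dest: "{{ ironic_root_device }}"
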
.. _Ironic Python Agent: http://docs.openstack.org/developer/ironic-python-agent
@@ -1,22 +0,0 @@
==============
ironic-ansible
==============

Builds a ramdisk for the Ironic Ansible deploy driver.

This element is based on the following elements:

- ``devuser`` to create and configure a user for Ansible to access the node
- ``ironic-agent`` to provide Ironic API lookup and heartbeats via IPA

Consult the docs for those elements for available options.

Additionally this element:

- ensures OpenSSH is installed and configured properly
- correctly sets the hostname to avoid some Ansible problems with elevation

Note: compared to the ``devuser`` element, this element **always** gives
the configured user password-less sudo permissions (*unconfigurable*).

Requires Ironic API >= 1.22.
@@ -1,3 +0,0 @@
ironic-agent
devuser
openssh-server

@@ -1 +0,0 @@
ironic-ansible-deploy

@@ -1 +0,0 @@
export DIB_DEV_USER_PWDLESS_SUDO="yes"

@@ -1,23 +0,0 @@
#!/bin/bash

if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

ANSIBLE_DEPLOY_HOSTAME="ironic-ansible-deploy"

echo $ANSIBLE_DEPLOY_HOSTAME > /etc/hostname

# not having a hostname in hosts produces an extra output
# on every "sudo" command like the following:
#
# sudo: unable to resolve host <HOSTNAME>\r\n
#
# which as of Ansible 2.0.1.0 fails JSON parsing
# in case of tasks using become+async.
# Ansible issues #13965 (fixed in 2.0.1.0), #14568, #14714

# ensure /etc/hosts has hostname in it
echo "127.0.0.1 $ANSIBLE_DEPLOY_HOSTAME" >> /etc/hosts

@@ -1 +0,0 @@
fedora

@@ -1 +0,0 @@
ramdisk

@@ -1,2 +0,0 @@
# Pin to this mirror because the roundrobin is fairly unreliable
export DIB_DISTRIBUTION_MIRROR=http://dl.fedoraproject.org/pub/fedora/linux
imagebuild/tinyipa-ansible/.gitignore (vendored, 5 lines)
@@ -1,5 +0,0 @@
build_files/cache
rebuild/
*.gz
*.initramfs
*.vmlinuz

@@ -1,13 +0,0 @@
.PHONY: all dependencies rebuild clean
all: dependencies rebuild

dependencies:
	./install-deps.sh
rebuild:
	./rebuild-tinyipa.sh
clean:
	sudo -v
	sudo rm -rf rebuild
	rm -f *.initramfs
	rm -f *.gz
	rm -rf build_files/cache/*
@@ -1,87 +0,0 @@
###################################################
TinyIPA image compatible with Ansible-deploy driver
###################################################

It is possible to rebuild the pre-built tinyipa ramdisk available from
http://tarballs.openstack.org/ironic-python-agent/tinyipa
to make it usable with the Ansible-deploy driver.

Rebuilding TinyIPA
==================

#. Run the provided ``rebuild-tinyipa.sh`` script,
   setting environment variables as explained in `Build options`_.

#. Running this script will create a rebuilt ramdisk as
   ``ansible-<original-tinyipa-ramdisk-name>``.
   That file must be uploaded to Glance as an ARI image.

   * If the tinyipa kernel is not in Glance yet, an appropriate version can
     be downloaded from tarballs.openstack.org and
     uploaded to Glance as an AKI image.

#. Update nodes that use the ``*_ansible`` driver:

   * Assign the ramdisk uploaded in the previous step as
     ``driver_info/deploy_ramdisk``.

   * The kernel image created during the TinyIPA build
     (``tinyipa[-branch_name].vmlinuz``) should be used as
     ``driver_info/deploy_kernel`` if not set yet.

   * Set ``tc`` as ``driver_info/ansible_deploy_username``.

     + If you have used a custom ``SSH_PUBLIC_KEY``, specify it as
       ``driver_info/ansible_deploy_key_file``

   * Ensure that the private SSH key file has correct ``600`` or ``400``
     exclusive permissions for the user running the ironic-conductor
     process.

#. You can also assign the ramdisk created to other nodes that use
   ``IPA``-based ramdisks as ``driver_info/deploy_ramdisk`` to have a
   unified deploy image for all nodes.
   It should work for them the same as the original tinyipa ramdisk.

Build options
-------------

#. If rebuilding an existing tinyipa ramdisk file, set the
   ``TINYIPA_RAMDISK_FILE`` environment variable to the absolute path to
   this file before running this script::

       export TINYIPA_RAMDISK_FILE=</full/path/to/tinyipa-ramdisk-file>

#. When not provided with an existing file, this script will rebuild the
   tinyipa master branch build.
   To use a stable branch, set the ``BRANCH_PATH`` environment variable
   (``master`` by default) before running the rebuild script accordingly.
   Branch names for stable releases must be in the form
   ``stable-<release>``, for example::

       export BRANCH_PATH=stable-newton

   Consult https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/
   for currently available versions.

#. By default, the script will bake the ``id_rsa`` or ``id_dsa`` public SSH
   keys of the user running the build into the ramdisk as authorized_keys
   for the ``tc`` user.
   To provide a custom key, set the absolute path to it as the
   ``SSH_PUBLIC_KEY`` environment variable before running this script::

       export SSH_PUBLIC_KEY=<path-to-public-ssh-key>

Using Makefile
--------------

For simplified configuration, a Makefile is provided to use ``make`` for
some standard operations.

make
    will install required dependencies and run the ``rebuild-tinyipa``
    script without arguments, downloading and rebuilding the image available
    at https://tarballs.openstack.org
    All customizations through environment variables still apply.

make clean
    will clean up temporary files and images created during the build
@@ -1,104 +0,0 @@
#!/bin/sh
S="Linux"
N="box"
R="4.2.9-tinycore64"
P="unknown"
V="#777 SMP (2016-02-29)"
M="x86_64"
I="unknown"
O="GNU/Linux"

OPT_A=false
OPT_S=false
OPT_N=false
OPT_R=false
OPT_P=false
OPT_V=false
OPT_M=false
OPT_I=false
OPT_O=false

if [ -z "$1" ]; then
    echo "-ASNRPVMIO"
    exit 1
fi

while :; do
    case $1 in
        -a)
            OPT_A=true
            shift
            ;;
        -s)
            OPT_S=true
            shift
            ;;
        -n)
            OPT_N=true
            shift
            ;;
        -r)
            OPT_R=true
            shift
            ;;
        -p)
            OPT_P=true
            shift
            ;;
        -v)
            OPT_V=true
            shift
            ;;
        -m)
            OPT_M=true
            shift
            ;;
        -i)
            OPT_I=true
            shift
            ;;
        -o)
            OPT_O=true
            shift
            ;;
        *)
            if [ ! -z "$1" ]; then
                echo "uname -asnrpvmio"
                exit 1
            fi
            break
            ;;
    esac
done

if $OPT_A; then
    echo "$S $N $R $V $M $O"
    exit 0
fi

string=''
if $OPT_S; then
    string="$string $S"
fi
if $OPT_N; then
    string="$string $N"
fi
if $OPT_R; then
    string="$string $R"
fi
if $OPT_P; then
    string="$string $P"
fi
if $OPT_V; then
    string="$string $V"
fi
if $OPT_M; then
    string="$string $M"
fi
if $OPT_I; then
    string="$string $I"
fi
if $OPT_O; then
    string="$string $O"
fi
echo $string
@@ -1,53 +0,0 @@

#NOTE(pas-ha)
# The first URL is the official TC repo,
# the rest of the list is taken from
# http://wiki.tinycorelinux.net/wiki:mirrors
# as of time of this writing.
# Only HTTP mirrors were considered with the following ordering
# - those that were unavailable are moved to the bottom of the list
# - those that already responded with 404 are moved to the very bottom

# List generated on 12-Dec-2016
TC_MIRRORS="http://repo.tinycorelinux.net
http://distro.ibiblio.org/tinycorelinux
http://mirror.cedia.org.ec/tinycorelinux
http://mirror.epn.edu.ec/tinycorelinux
http://mirrors.163.com/tinycorelinux
http://kambing.ui.ac.id/tinycorelinux
http://ftp.nluug.nl/os/Linux/distr/tinycorelinux
http://ftp.vim.org/os/Linux/distr/tinycorelinux
http://www.gtlib.gatech.edu/pub/tinycore
http://tinycore.mirror.uber.com.au
http://l4u-00.jinr.ru/LinuxArchive/Ftp/tinycorelinux"

function probe_url {
    wget -q --spider --tries 1 --timeout 10 "$1" 2>&1
}

function choose_tc_mirror {
    if [ -z ${TINYCORE_MIRROR_URL} ]; then
        for url in ${TC_MIRRORS}; do
            echo "Checking Tiny Core Linux mirror ${url}"
            if probe_url ${url} ; then
                echo "Check succeeded: ${url} is responding."
                TINYCORE_MIRROR_URL=${url}
                break
            else
                echo "Check failed: ${url} is not responding"
            fi
        done
        if [ -z ${TINYCORE_MIRROR_URL} ]; then
            echo "Failed to find working Tiny Core Linux mirror"
            exit 1
        fi
    else
        echo "Probing provided Tiny Core Linux mirror ${TINYCORE_MIRROR_URL}"
        if probe_url ${TINYCORE_MIRROR_URL} ; then
            echo "Check succeeded: ${TINYCORE_MIRROR_URL} is responding."
        else
            echo "Check failed: ${TINYCORE_MIRROR_URL} is not responding"
            exit 1
        fi
    fi
}
@@ -1,17 +0,0 @@
#!/bin/bash

PACKAGES="wget unzip sudo"

echo "Installing dependencies:"

if [ -x "/usr/bin/apt-get" ]; then
    sudo -E apt-get update
    sudo -E apt-get install -y $PACKAGES
elif [ -x "/usr/bin/dnf" ]; then
    sudo -E dnf install -y $PACKAGES
elif [ -x "/usr/bin/yum" ]; then
    sudo -E yum install -y $PACKAGES
else
    echo "No supported package manager installed on system. Supported: apt, yum, dnf"
    exit 1
fi
@@ -1,223 +0,0 @@
#!/bin/bash

# Rebuild upstream pre-built tinyipa to be usable with ansible-deploy.
#
# Downloads the pre-built tinyipa ramdisk from tarballs.openstack.org or
# rebuilds a ramdisk under the path provided as the first script argument

# During rebuild this script installs and configures an OpenSSH server and
# makes required changes for Ansible + Python to work in a compiled/optimized
# Python environment.
#
# By default, id_rsa or id_dsa keys of the user performing the build
# are baked into the image as authorized_keys for the 'tc' user.
# To supply a different public ssh key, before running this script set the
# SSH_PUBLIC_KEY environment variable to point to the absolute path to the key.
#
# This script produces an "ansible-<tinyipa-ramdisk-name>" ramdisk that can
# serve as the ramdisk for both the ansible-deploy driver and agent-based
# Ironic drivers.

set -ex
WORKDIR=$(readlink -f $0 | xargs dirname)
SSH_PUBLIC_KEY=${SSH_PUBLIC_KEY:-}
source ${WORKDIR}/build_files/tc-mirror.sh
TINYCORE_MIRROR_URL=${TINYCORE_MIRROR_URL:-}
BRANCH_PATH=${BRANCH_PATH:-master}
TINYIPA_RAMDISK_FILE=${TINYIPA_RAMDISK_FILE:-}

TC=1001
STAFF=50

REBUILDDIR="$WORKDIR/rebuild"
CHROOT_PATH="/tmp/overides:/usr/local/sbin:/usr/local/bin:/apps/bin:/usr/sbin:/usr/bin:/sbin:/bin"
CHROOT_CMD="sudo chroot $REBUILDDIR /usr/bin/env -i PATH=$CHROOT_PATH http_proxy=$http_proxy https_proxy=$https_proxy no_proxy=$no_proxy"
TC_CHROOT_CMD="sudo chroot --userspec=$TC:$STAFF $REBUILDDIR /usr/bin/env -i PATH=$CHROOT_PATH http_proxy=$http_proxy https_proxy=$https_proxy no_proxy=$no_proxy"

function validate_params {
    echo "Validating location of public SSH key"
    if [ -n "$SSH_PUBLIC_KEY" ]; then
        if [ -r "$SSH_PUBLIC_KEY" ]; then
            _found_ssh_key="$SSH_PUBLIC_KEY"
        fi
    else
        for fmt in rsa dsa; do
            if [ -r "$HOME/.ssh/id_$fmt.pub" ]; then
                _found_ssh_key="$HOME/.ssh/id_$fmt.pub"
                break
            fi
        done
    fi

    if [ -z $_found_ssh_key ]; then
        echo "Failed to find either the provided or a default SSH key"
        exit 1
    fi

    choose_tc_mirror
}

function get_tinyipa {
    if [ -z $TINYIPA_RAMDISK_FILE ]; then
        mkdir -p $WORKDIR/build_files/cache
        cd $WORKDIR/build_files/cache
        wget -N https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/tinyipa-${BRANCH_PATH}.gz
        TINYIPA_RAMDISK_FILE="$WORKDIR/build_files/cache/tinyipa-${BRANCH_PATH}.gz"
    fi
}

function unpack_ramdisk {

    if [ -d "$REBUILDDIR" ]; then
        sudo rm -rf "$REBUILDDIR"
    fi

    mkdir -p "$REBUILDDIR"

    # Extract rootfs from .gz file
    ( cd "$REBUILDDIR" && zcat "$TINYIPA_RAMDISK_FILE" | sudo cpio -i -H newc -d )

}

function prepare_chroot {
    sudo cp $REBUILDDIR/etc/resolv.conf $REBUILDDIR/etc/resolv.conf.old
    sudo cp /etc/resolv.conf $REBUILDDIR/etc/resolv.conf

    sudo cp -a $REBUILDDIR/opt/tcemirror $REBUILDDIR/opt/tcemirror.old
    sudo sh -c "echo $TINYCORE_MIRROR_URL > $REBUILDDIR/opt/tcemirror"

    mkdir -p $REBUILDDIR/tmp/builtin/optional
    $CHROOT_CMD chown -R tc.staff /tmp/builtin
    $CHROOT_CMD chmod -R a+w /tmp/builtin
    $CHROOT_CMD ln -sf /tmp/builtin /etc/sysconfig/tcedir
    echo "tc" | $CHROOT_CMD tee -a /etc/sysconfig/tcuser
    $CHROOT_CMD mkdir -p /usr/local/tce.installed
    $CHROOT_CMD chmod 777 /usr/local/tce.installed

    mkdir -p $REBUILDDIR/tmp/overides
    sudo cp -f $WORKDIR/build_files/fakeuname $REBUILDDIR/tmp/overides/uname

    trap "sudo umount $REBUILDDIR/proc" EXIT
    # Mount /proc for chroot commands
    sudo mount --bind /proc "$REBUILDDIR/proc"
}

function clean_up_chroot {
    # Unmount /proc and clean up everything
    sudo umount $REBUILDDIR/proc
    # all went well, remove the trap
    trap - EXIT
    sudo rm $REBUILDDIR/etc/sysconfig/tcuser
    sudo rm $REBUILDDIR/etc/sysconfig/tcedir
    sudo rm -rf $REBUILDDIR/usr/local/tce.installed
    sudo rm -rf $REBUILDDIR/tmp/builtin
    sudo rm -rf $REBUILDDIR/tmp/tcloop
    sudo rm -rf $REBUILDDIR/tmp/overides
    sudo mv $REBUILDDIR/opt/tcemirror.old $REBUILDDIR/opt/tcemirror
    sudo mv $REBUILDDIR/etc/resolv.conf.old $REBUILDDIR/etc/resolv.conf
}

function install_ssh {
    if [ ! -f "$REBUILDDIR/usr/local/etc/ssh/sshd_config" ]; then
        # tinyipa was built without SSH server installed
        # Install and configure bare minimum for SSH access
        $TC_CHROOT_CMD tce-load -wic openssh
        # Configure OpenSSH
        $CHROOT_CMD cp /usr/local/etc/ssh/sshd_config.orig /usr/local/etc/ssh/sshd_config
        echo "PasswordAuthentication no" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
        # Generate and configure host keys - RSA, DSA, Ed25519
        # NOTE(pas-ha) ECDSA host key will still be re-generated fresh on every image boot
        $CHROOT_CMD ssh-keygen -q -t rsa -N "" -f /usr/local/etc/ssh/ssh_host_rsa_key
        $CHROOT_CMD ssh-keygen -q -t dsa -N "" -f /usr/local/etc/ssh/ssh_host_dsa_key
        $CHROOT_CMD ssh-keygen -q -t ed25519 -N "" -f /usr/local/etc/ssh/ssh_host_ed25519_key
        echo "HostKey /usr/local/etc/ssh/ssh_host_rsa_key" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
        echo "HostKey /usr/local/etc/ssh/ssh_host_dsa_key" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
        echo "HostKey /usr/local/etc/ssh/ssh_host_ed25519_key" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
    fi

    # setup new user SSH keys anyway
    $CHROOT_CMD mkdir -p /home/tc
    $CHROOT_CMD chown -R tc.staff /home/tc
    $TC_CHROOT_CMD mkdir -p /home/tc/.ssh
    cat $_found_ssh_key | $TC_CHROOT_CMD tee /home/tc/.ssh/authorized_keys
    $CHROOT_CMD chown tc.staff /home/tc/.ssh/authorized_keys
    $TC_CHROOT_CMD chmod 600 /home/tc/.ssh/authorized_keys
}

function install_packages {
    if [ -f "$WORKDIR/build_files/rebuildreqs.lst" ]; then
        while read line; do
            $TC_CHROOT_CMD tce-load -wic $line
        done < $WORKDIR/build_files/rebuildreqs.lst
    fi
}

function fix_python_optimize {
    if grep -q "PYTHONOPTIMIZE=1" "$REBUILDDIR/opt/bootlocal.sh"; then
        # tinyipa was built with optimized Python environment, apply fixes
        echo "PYTHONOPTIMIZE=1" | $TC_CHROOT_CMD tee -a /home/tc/.ssh/environment
        echo "PermitUserEnvironment yes" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
        echo 'Defaults env_keep += "PYTHONOPTIMIZE"' | $CHROOT_CMD tee -a /etc/sudoers
    fi
}

function make_symlinks {

    set +x
    echo "Symlink all from /usr/local/sbin to /usr/sbin"
    cd "$REBUILDDIR/usr/local/sbin"
    for target in *
    do
        if [ ! -f "$REBUILDDIR/usr/sbin/$target" ]
        then
            $CHROOT_CMD ln -s "/usr/local/sbin/$target" "/usr/sbin/$target"
        fi
    done
    echo "Symlink all from /usr/local/bin to /usr/bin"
    # this also includes symlinking Python to the place expected by Ansible
    cd "$REBUILDDIR/usr/local/bin"
    for target in *
    do
        if [ ! -f "$REBUILDDIR/usr/bin/$target" ]
        then
            $CHROOT_CMD ln -s "/usr/local/bin/$target" "/usr/bin/$target"
        fi
    done
    set -x
}

function rebuild_ramdisk {
    # Rebuild build directory into gz file
    ansible_basename="ansible-$(basename $TINYIPA_RAMDISK_FILE)"
    ( cd "$REBUILDDIR" && sudo find | sudo cpio -o -H newc | gzip -9 > "$WORKDIR/${ansible_basename}" )
    # Output the file created by this script and its size
    cd "$WORKDIR"
    echo "Produced files:"
    du -h "${ansible_basename}"
}

sudo -v

validate_params
get_tinyipa
unpack_ramdisk
prepare_chroot

# NOTE (pas-ha) default tinyipa is built without SSH access, enable it here
install_ssh
# NOTE (pas-ha) allow installing some extra pkgs by placing 'rebuildreqs.lst'
# file in the 'build_files' folder
install_packages
# NOTE(pas-ha) default tinyipa is built with PYOPTIMIZE_TINYIPA=true and
# for Ansible+python to work we need to ensure that PYTHONOPTIMIZE=1 is
# set for all sessions from 'tc' user including those that are escalated
# with 'sudo' afterwards
fix_python_optimize
# NOTE(pas-ha) Apparently on TinyCore Ansible's 'command' module is
# not searching for executables in the '/usr/local/(s)bin' paths.
# Thus we need to have everything from there symlinked to '/usr/(s)bin'
# which is being searched, so that 'command' module picks full utilities
# installed by 'util-linux' instead of built-in simplified BusyBox ones.
make_symlinks

clean_up_chroot
rebuild_ramdisk
@@ -1,74 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ironic.drivers import base
from ironic.drivers import ipmi
from ironic.drivers.modules import fake
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules import pxe
from oslo_log import log as logging

from ironic_staging_drivers.ansible import deploy as ansible_deploy
from ironic_staging_drivers.libvirt import power as libvirt_power


LOG = logging.getLogger(__name__)


class AnsibleAndIPMIToolDriver(base.BaseDriver):
    """Ansible + Ipmitool driver."""

    def __init__(self):
        LOG.warning("This driver is deprecated and will be removed "
                    "in the Rocky release. "
                    "Use 'staging-ansible-ipmi' hardware type instead.")
        self.power = ipmitool.IPMIPower()
        self.boot = pxe.PXEBoot()
        self.deploy = ansible_deploy.AnsibleDeploy()
        self.management = ipmitool.IPMIManagement()
        self.vendor = ipmitool.VendorPassthru()


class FakeAnsibleDriver(base.BaseDriver):
    """Ansible + Fake driver"""

    def __init__(self):
        self.power = fake.FakePower()
        self.boot = pxe.PXEBoot()
        self.deploy = ansible_deploy.AnsibleDeploy()
        self.management = fake.FakeManagement()


class AnsibleAndLibvirtDriver(base.BaseDriver):
    """Ansible + Libvirt driver.

    NOTE: This driver is meant only for testing environments.
    """

    def __init__(self):
        LOG.warning("This driver is deprecated and will be removed "
                    "in the Rocky release. "
                    "Use 'staging-libvirt' hardware type instead.")
        self.power = libvirt_power.LibvirtPower()
        self.boot = pxe.PXEBoot()
        self.deploy = ansible_deploy.AnsibleDeploy()
        self.management = libvirt_power.LibvirtManagement()


# NOTE(yuriyz): This class is not "real" hardware.
# Added to support the ansible deploy interface in 'ipmi' hardware
class AnsibleDeployIPMI(ipmi.IPMIHardware):

    @property
    def supported_deploy_interfaces(self):
        """List of supported deploy interfaces."""
        return (super(AnsibleDeployIPMI, self).supported_deploy_interfaces +
                [ansible_deploy.AnsibleDeploy])
@ -1,776 +0,0 @@
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Ansible deploy driver
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import shlex
|
||||
|
||||
from ironic_lib import metrics_utils
|
||||
from ironic_lib import utils as irlib_utils
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_utils import strutils
|
||||
from oslo_utils import units
|
||||
import retrying
|
||||
import six
|
||||
import six.moves.urllib.parse as urlparse
|
||||
import yaml
|
||||
|
||||
from ironic.common import dhcp_factory
|
||||
from ironic.common import exception
|
||||
from ironic.common.i18n import _
|
||||
from ironic.common import images
|
||||
from ironic.common import states
|
||||
from ironic.common import utils
|
||||
from ironic.conductor import task_manager
|
||||
from ironic.conductor import utils as manager_utils
|
||||
from ironic.conf import CONF
|
||||
from ironic.drivers import base
|
||||
from ironic.drivers.modules import agent_base_vendor as agent_base
|
||||
from ironic.drivers.modules import deploy_utils
|
||||
|
||||
|
||||
ansible_opts = [
    cfg.StrOpt('ansible_extra_args',
               help=_('Extra arguments to pass on every '
                      'invocation of Ansible.')),
    cfg.IntOpt('verbosity',
               min=0,
               max=4,
               help=_('Set ansible verbosity level requested when invoking '
                      '"ansible-playbook" command. '
                      '4 includes detailed SSH session logging. '
                      'Default is 4 when global debug is enabled '
                      'and 0 otherwise.')),
    cfg.StrOpt('ansible_playbook_script',
               default='ansible-playbook',
               help=_('Path to "ansible-playbook" script. '
                      'Default will search the $PATH configured for the '
                      'user running the ironic-conductor process. '
                      'Provide the full path when ansible-playbook is not '
                      'in $PATH or is installed in a non-default '
                      'location.')),
    cfg.StrOpt('playbooks_path',
               default=os.path.join(os.path.dirname(__file__), 'playbooks'),
               help=_('Path to directory with playbooks, roles and '
                      'local inventory.')),
    cfg.StrOpt('config_file_path',
               default=os.path.join(
                   os.path.dirname(__file__), 'playbooks', 'ansible.cfg'),
               help=_('Path to ansible configuration file. If set to empty, '
                      'system default will be used.')),
    cfg.IntOpt('post_deploy_get_power_state_retries',
               min=0,
               default=6,
               help=_('Number of times to retry getting power state to check '
                      'if bare metal node has been powered off after a soft '
                      'power off.')),
    cfg.IntOpt('post_deploy_get_power_state_retry_interval',
               min=0,
               default=5,
               help=_('Amount of time (in seconds) to wait between polling '
                      'power state after triggering soft poweroff.')),
    cfg.IntOpt('extra_memory',
               default=10,
               help=_('Extra amount of memory in MiB expected to be consumed '
                      'by Ansible-related processes on the node. Affects '
                      'decision whether image will fit into RAM.')),
    cfg.BoolOpt('use_ramdisk_callback',
                default=True,
                help=_('Use callback request from ramdisk to start deploy or '
                       'cleaning. Disable it when using custom ramdisk '
                       'without callback script. '
                       'When callback is disabled, Neutron is mandatory.')),
    cfg.BoolOpt('image_store_insecure',
                default=False,
                help=_('Skip verifying SSL connections to the image store '
                       'when downloading the image. '
                       'Setting it to "True" is only recommended for testing '
                       'environments that use self-signed certificates.')),
    cfg.StrOpt('image_store_cafile',
               help=_('Specific CA bundle to use for validating '
                      'SSL connections to the image store. '
                      'If not specified, CA available in the ramdisk '
                      'will be used. '
                      'Is not used by default playbooks included with '
                      'the driver. '
                      'Suitable for environments that use self-signed '
                      'certificates.')),
    cfg.StrOpt('image_store_certfile',
               help=_('Client cert to use for SSL connections '
                      'to image store. '
                      'Is not used by default playbooks included with '
                      'the driver. '
                      'Can be used in custom playbooks with Ansible>=2.4.')),
    cfg.StrOpt('image_store_keyfile',
               help=_('Client key to use for SSL connections '
                      'to image store. '
                      'Is not used by default playbooks included with '
                      'the driver. '
                      'Can be used in custom playbooks with Ansible>=2.4.')),
]

CONF.register_opts(ansible_opts, group='ansible')

LOG = log.getLogger(__name__)

METRICS = metrics_utils.get_metrics_logger(__name__)

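# Example (hypothetical values, for illustration only): these options live
# in the [ansible] section of the ironic configuration file, e.g.:
#
#   [ansible]
#   verbosity = 1
#   ansible_extra_args = --forks=10
#   image_store_insecure = True
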
DEFAULT_PLAYBOOKS = {
    'deploy': 'deploy.yaml',
    'shutdown': 'shutdown.yaml',
    'clean': 'clean.yaml'
}
DEFAULT_CLEAN_STEPS = 'clean_steps.yaml'

OPTIONAL_PROPERTIES = {
    'ansible_deploy_username': _('Deploy ramdisk username for Ansible. '
                                 'This user must have passwordless sudo '
                                 'permissions. Default is "ansible". '
                                 'Optional.'),
    'ansible_deploy_key_file': _('Path to private key file. If not specified, '
                                 'default keys for user running '
                                 'ironic-conductor process will be used. '
                                 'Note that for keys with password, those '
                                 'must be pre-loaded into ssh-agent. '
                                 'Optional.'),
    'ansible_deploy_playbook': _('Name of the Ansible playbook used for '
                                 'deployment. Default is %s. Optional.'
                                 ) % DEFAULT_PLAYBOOKS['deploy'],
    'ansible_shutdown_playbook': _('Name of the Ansible playbook used to '
                                   'power off the node in-band. '
                                   'Default is %s. Optional.'
                                   ) % DEFAULT_PLAYBOOKS['shutdown'],
    'ansible_clean_playbook': _('Name of the Ansible playbook used for '
                                'cleaning. Default is %s. Optional.'
                                ) % DEFAULT_PLAYBOOKS['clean'],
    'ansible_clean_steps_config': _('Name of the file with default cleaning '
                                    'steps configuration. Default is %s. '
                                    'Optional.'
                                    ) % DEFAULT_CLEAN_STEPS
}
COMMON_PROPERTIES = OPTIONAL_PROPERTIES

INVENTORY_FILE = os.path.join(CONF.ansible.playbooks_path, 'inventory')


class PlaybookNotFound(exception.IronicException):
    _msg_fmt = _('Failed to set ansible playbook for action %(action)s')


def _parse_ansible_driver_info(node, action='deploy'):
    user = node.driver_info.get('ansible_deploy_username', 'ansible')
    key = node.driver_info.get('ansible_deploy_key_file')
    playbook = node.driver_info.get('ansible_%s_playbook' % action,
                                    DEFAULT_PLAYBOOKS.get(action))
    if not playbook:
        raise PlaybookNotFound(action=action)
    return playbook, user, key


def _get_configdrive_path(basename):
    return os.path.join(CONF.tempdir, basename + '.cndrive')


def _get_node_ip_dhcp(task):
    """Get node IP from DHCP provider."""
    api = dhcp_factory.DHCPFactory().provider
    ip_addrs = api.get_ip_addresses(task)
    if not ip_addrs:
        raise exception.FailedToGetIPAddressOnPort(_(
            "Failed to get IP address for any port on node %s.") %
            task.node.uuid)
    if len(ip_addrs) > 1:
        error = _("Ansible driver does not support multiple IP addresses "
                  "during deploy or cleaning")
        raise exception.InstanceDeployFailure(reason=error)

    return ip_addrs[0]


def _get_node_ip_heartbeat(task):
    callback_url = task.node.driver_internal_info.get('agent_url', '')
    return urlparse.urlparse(callback_url).netloc.split(':')[0]


def _get_node_ip(task):
    if CONF.ansible.use_ramdisk_callback:
        return _get_node_ip_heartbeat(task)
    else:
        return _get_node_ip_dhcp(task)


def _prepare_extra_vars(host_list, variables=None):
    nodes_var = []
    for node_uuid, ip, user, extra in host_list:
        nodes_var.append(dict(name=node_uuid, ip=ip, user=user, extra=extra))
    extra_vars = dict(nodes=nodes_var)
    if variables:
        extra_vars.update(variables)
    return extra_vars


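# Example (hypothetical values): for a single node the helper above
# produces a structure like
#
#   {'nodes': [{'name': '<node-uuid>', 'ip': '10.0.0.5',
#               'user': 'ansible', 'extra': {}}],
#    'image': {...}}
#
# where any keys besides 'nodes' come from the optional "variables"
# argument.

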
def _run_playbook(name, extra_vars, key, tags=None, notags=None):
    """Execute ansible-playbook."""
    playbook = os.path.join(CONF.ansible.playbooks_path, name)
    ironic_vars = {'ironic': extra_vars}
    args = [CONF.ansible.ansible_playbook_script, playbook,
            '-i', INVENTORY_FILE,
            '-e', json.dumps(ironic_vars),
            ]

    if CONF.ansible.config_file_path:
        env = ['env', 'ANSIBLE_CONFIG=%s' % CONF.ansible.config_file_path]
        args = env + args

    if tags:
        args.append('--tags=%s' % ','.join(tags))

    if notags:
        args.append('--skip-tags=%s' % ','.join(notags))

    if key:
        args.append('--private-key=%s' % key)

    verbosity = CONF.ansible.verbosity
    if verbosity is None and CONF.debug:
        verbosity = 4
    if verbosity:
        args.append('-' + 'v' * verbosity)

    if CONF.ansible.ansible_extra_args:
        args.extend(shlex.split(CONF.ansible.ansible_extra_args))

    try:
        out, err = utils.execute(*args)
        return out, err
    except processutils.ProcessExecutionError as e:
        raise exception.InstanceDeployFailure(reason=e)


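# Example (hypothetical paths and values): with the default options the
# call above roughly amounts to executing
#
#   env ANSIBLE_CONFIG=<playbooks_path>/ansible.cfg \
#       ansible-playbook <playbooks_path>/deploy.yaml \
#       -i <playbooks_path>/inventory \
#       -e '{"ironic": {"nodes": [...]}}' \
#       --private-key=<key> -vvvv
#
# (the -vvvv flag appears only when verbosity resolves to 4).

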
def _calculate_memory_req(task):
    image_source = task.node.instance_info['image_source']
    image_size = images.download_size(task.context, image_source)
    return image_size // units.Mi + CONF.ansible.extra_memory


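# Example: for a 2 GiB image and the default extra_memory of 10, the
# helper above returns 2048 + 10 = 2058 (MiB).

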
def _parse_partitioning_info(node):

    info = node.instance_info
    i_info = {}
    partitions = []
    i_info['label'] = deploy_utils.get_disk_label(node) or 'msdos'

    # prepend 1MiB bios_grub partition for GPT so that grub(2) installs
    if i_info['label'] == 'gpt':
        bios_partition = {'name': 'bios',
                          'size': 1,
                          'unit': 'MiB',
                          'flags': {'bios_grub': 'yes'}}
        partitions.append(bios_partition)

    ephemeral_mb = info['ephemeral_mb']
    if ephemeral_mb:
        i_info['ephemeral_format'] = info['ephemeral_format']
        ephemeral_partition = {'name': 'ephemeral',
                               'size': ephemeral_mb,
                               'unit': 'MiB',
                               'format': i_info['ephemeral_format']}
        partitions.append(ephemeral_partition)

        i_info['preserve_ephemeral'] = (
            'yes' if info['preserve_ephemeral'] else 'no')

    swap_mb = info['swap_mb']
    if swap_mb:
        swap_partition = {'name': 'swap',
                          'size': swap_mb,
                          'unit': 'MiB',
                          'format': 'linux-swap'}
        partitions.append(swap_partition)

    # pre-create partition for configdrive
    configdrive = info.get('configdrive')
    if configdrive:
        configdrive_partition = {'name': 'configdrive',
                                 'size': 64,
                                 'unit': 'MiB',
                                 'format': 'fat32'}
        partitions.append(configdrive_partition)

    # NOTE(pas-ha) make the root partition last so that
    # e.g. cloud-init can grow it on first start
    root_partition = {'name': 'root',
                      'size': info['root_mb'],
                      'unit': 'MiB'}
    if i_info['label'] == 'msdos':
        root_partition['flags'] = {'boot': 'yes'}

    partitions.append(root_partition)

    i_info['partitions'] = partitions
    return {'partition_info': i_info}


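# Example (hypothetical values): for a node with root_mb=4096,
# swap_mb=512, no ephemeral partition and an msdos label, the helper
# above returns
#
#   {'partition_info': {
#        'label': 'msdos',
#        'partitions': [
#            {'name': 'swap', 'size': 512, 'unit': 'MiB',
#             'format': 'linux-swap'},
#            {'name': 'root', 'size': 4096, 'unit': 'MiB',
#             'flags': {'boot': 'yes'}}]}}

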
def _parse_root_device_hints(node):
    """Convert string with hints to dict."""
    root_device = node.properties.get('root_device')
    if not root_device:
        return {}
    try:
        parsed_hints = irlib_utils.parse_root_device_hints(root_device)
    except ValueError as e:
        raise exception.InvalidParameterValue(
            _('Failed to validate the root device hints for node %(node)s. '
              'Error: %(error)s') % {'node': node.uuid, 'error': e})
    root_device_hints = {}
    advanced = {}
    for hint, value in parsed_hints.items():
        if isinstance(value, six.string_types):
            if value.startswith('== '):
                root_device_hints[hint] = int(value[3:])
            elif value.startswith('s== '):
                root_device_hints[hint] = urlparse.unquote(value[4:])
            else:
                advanced[hint] = value
        else:
            root_device_hints[hint] = value
    if advanced:
        raise exception.InvalidParameterValue(
            _('Ansible-deploy does not support advanced root device hints '
              'based on oslo.utils operators. '
              'Present advanced hints for node %(node)s are %(hints)s.') % {
                'node': node.uuid, 'hints': advanced})
    return root_device_hints


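# Example (hypothetical values): for a node with
# properties['root_device'] = {'size': 100, 'model': 'INTEL SSD'},
# ironic-lib normalizes the hints to roughly
# {'size': '== 100', 'model': 's== INTEL%20SSD'}, which the helper above
# converts back to plain values: {'size': 100, 'model': 'INTEL SSD'}.

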
def _add_ssl_image_options(image):
    image['validate_certs'] = ('no' if CONF.ansible.image_store_insecure
                               else 'yes')
    if CONF.ansible.image_store_cafile:
        image['cafile'] = CONF.ansible.image_store_cafile
    if CONF.ansible.image_store_certfile and CONF.ansible.image_store_keyfile:
        image['client_cert'] = CONF.ansible.image_store_certfile
        image['client_key'] = CONF.ansible.image_store_keyfile


def _prepare_variables(task):
    node = task.node
    i_info = node.instance_info
    image = {}
    for i_key, i_value in i_info.items():
        if i_key.startswith('image_'):
            image[i_key[6:]] = i_value
    image['mem_req'] = _calculate_memory_req(task)

    checksum = image.get('checksum')
    if checksum:
        # NOTE(pas-ha) checksum can be in <algo>:<checksum> format
        # as supported by various Ansible modules, mostly good for
        # standalone Ironic case when instance_info is populated manually.
        # With no <algo> we take that instance_info is populated from Glance,
        # where API reports checksum as MD5 always.
        if ':' not in checksum:
            image['checksum'] = 'md5:%s' % checksum
    _add_ssl_image_options(image)
    variables = {'image': image}
    configdrive = i_info.get('configdrive')
    if configdrive:
        if urlparse.urlparse(configdrive).scheme in ('http', 'https'):
            cfgdrv_type = 'url'
            cfgdrv_location = configdrive
        else:
            cfgdrv_location = _get_configdrive_path(node.uuid)
            with open(cfgdrv_location, 'w') as f:
                f.write(configdrive)
            cfgdrv_type = 'file'
        variables['configdrive'] = {'type': cfgdrv_type,
                                    'location': cfgdrv_location}

    root_device_hints = _parse_root_device_hints(node)
    if root_device_hints:
        variables['root_device_hints'] = root_device_hints

    return variables


def _validate_clean_steps(steps, node_uuid):
    missing = []
    for step in steps:
        name = step.get('name')
        if not name:
            missing.append({'name': 'undefined', 'field': 'name'})
            continue
        if 'interface' not in step:
            missing.append({'name': name, 'field': 'interface'})
        args = step.get('args', {})
        for arg_name, arg in args.items():
            if arg.get('required', False) and 'value' not in arg:
                missing.append({'name': name,
                                'field': '%s.value' % arg_name})
    if missing:
        err_string = ', '.join(
            'name %(name)s, field %(field)s' % i for i in missing)
        msg = _("Malformed clean_steps file: %s") % err_string
        LOG.error(msg)
        raise exception.NodeCleaningFailure(node=node_uuid,
                                            reason=msg)
    if len(set(s['name'] for s in steps)) != len(steps):
        msg = _("Cleaning steps do not have unique names.")
        LOG.error(msg)
        raise exception.NodeCleaningFailure(node=node_uuid,
                                            reason=msg)


def _get_clean_steps(node, interface=None, override_priorities=None):
    """Get cleaning steps."""
    clean_steps_file = node.driver_info.get('ansible_clean_steps_config',
                                            DEFAULT_CLEAN_STEPS)
    path = os.path.join(CONF.ansible.playbooks_path, clean_steps_file)
    try:
        with open(path) as f:
            internal_steps = yaml.safe_load(f)
    except Exception as e:
        msg = _('Failed to load clean steps from file '
                '%(file)s: %(exc)s') % {'file': path, 'exc': e}
        raise exception.NodeCleaningFailure(node=node.uuid, reason=msg)

    _validate_clean_steps(internal_steps, node.uuid)

    steps = []
    override = override_priorities or {}
    for params in internal_steps:
        name = params['name']
        clean_if = params['interface']
        if interface is not None and interface != clean_if:
            continue
        new_priority = override.get(name)
        priority = (new_priority if new_priority is not None else
                    params.get('priority', 0))
        args = {}
        argsinfo = params.get('args', {})
        for arg, arg_info in argsinfo.items():
            args[arg] = arg_info.pop('value', None)
        step = {
            'interface': clean_if,
            'step': name,
            'priority': priority,
            'abortable': False,
            'argsinfo': argsinfo,
            'args': args
        }
        steps.append(step)

    return steps


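# Example: the erase_devices entry of the default clean_steps.yaml
# (shipped with the driver, see below) is translated by _get_clean_steps()
# into roughly
#
#   {'interface': 'deploy', 'step': 'erase_devices', 'priority': 10,
#    'abortable': False,
#    'argsinfo': {'tags': {'required': True, 'description': '...'}},
#    'args': {'tags': ['shred']}}
#
# unless the priority is overridden by the caller.

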
class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
    """Interface for deploy-related actions."""

    def __init__(self):
        super(AnsibleDeploy, self).__init__()
        # NOTE(pas-ha) overriding agent creation as we won't be
        # communicating with it, only processing heartbeats
        self._client = None

    def get_properties(self):
        """Return the properties of the interface."""
        props = COMMON_PROPERTIES.copy()
        # NOTE(pas-ha) this is to get the deploy_forces_oob_reboot property
        props.update(agent_base.VENDOR_PROPERTIES)
        return props

    @METRICS.timer('AnsibleDeploy.validate')
    def validate(self, task):
        """Validate the driver-specific Node deployment info."""
        task.driver.boot.validate(task)

        node = task.node
        iwdi = node.driver_internal_info.get('is_whole_disk_image')
        if not iwdi and deploy_utils.get_boot_option(node) == "netboot":
            raise exception.InvalidParameterValue(_(
                "Node %(node)s is configured to use the %(driver)s driver "
                "which does not support netboot.") % {'node': node.uuid,
                                                      'driver': node.driver})

        params = {}
        image_source = node.instance_info.get('image_source')
        params['instance_info.image_source'] = image_source
        error_msg = _('Node %s failed to validate deploy image info. Some '
                      'parameters were missing') % node.uuid
        deploy_utils.check_for_missing_params(params, error_msg)
        # validate root device hints, proper exceptions are raised from there
        _parse_root_device_hints(node)

    def _ansible_deploy(self, task, node_address):
        """Internal function for deployment to a node."""
        notags = ['wait'] if CONF.ansible.use_ramdisk_callback else []
        node = task.node
        LOG.debug('IP of node %(node)s is %(ip)s',
                  {'node': node.uuid, 'ip': node_address})
        variables = _prepare_variables(task)
        if not node.driver_internal_info.get('is_whole_disk_image'):
            variables.update(_parse_partitioning_info(task.node))
        playbook, user, key = _parse_ansible_driver_info(task.node)
        node_list = [(node.uuid, node_address, user, node.extra)]
        extra_vars = _prepare_extra_vars(node_list, variables=variables)

        LOG.debug('Starting deploy on node %s', node.uuid)
        # any caller should manage exceptions raised from here
        _run_playbook(playbook, extra_vars, key, notags=notags)

    @METRICS.timer('AnsibleDeploy.deploy')
    @task_manager.require_exclusive_lock
    def deploy(self, task):
        """Perform a deployment to a node."""
        manager_utils.node_power_action(task, states.REBOOT)
        if CONF.ansible.use_ramdisk_callback:
            return states.DEPLOYWAIT

        node = task.node
        ip_addr = _get_node_ip_dhcp(task)
        try:
            self._ansible_deploy(task, ip_addr)
        except Exception as e:
            error = _('Deploy failed for node %(node)s: '
                      'Error: %(exc)s') % {'node': node.uuid,
                                           'exc': six.text_type(e)}
            LOG.exception(error)
            deploy_utils.set_failed_state(task, error, collect_logs=False)

        else:
            self.reboot_to_instance(task)
            return states.DEPLOYDONE

    @METRICS.timer('AnsibleDeploy.tear_down')
    @task_manager.require_exclusive_lock
    def tear_down(self, task):
        """Tear down a previous deployment on the task's node."""
        manager_utils.node_power_action(task, states.POWER_OFF)
        task.driver.network.unconfigure_tenant_networks(task)
        return states.DELETED

    @METRICS.timer('AnsibleDeploy.prepare')
    def prepare(self, task):
        """Prepare the deployment environment for this node."""
        node = task.node
        # TODO(pas-ha) investigate takeover scenario
        if node.provision_state == states.DEPLOYING:
            # adding network-driver dependent provisioning ports
            manager_utils.node_power_action(task, states.POWER_OFF)
            task.driver.network.add_provisioning_network(task)
        if node.provision_state not in [states.ACTIVE, states.ADOPTING]:
            node.instance_info = deploy_utils.build_instance_info_for_deploy(
                task)
            node.save()
            boot_opt = deploy_utils.build_agent_options(node)
            task.driver.boot.prepare_ramdisk(task, boot_opt)

    @METRICS.timer('AnsibleDeploy.clean_up')
    def clean_up(self, task):
        """Clean up the deployment environment for this node."""
        task.driver.boot.clean_up_ramdisk(task)
        provider = dhcp_factory.DHCPFactory()
        provider.clean_dhcp(task)
        irlib_utils.unlink_without_raise(
            _get_configdrive_path(task.node.uuid))

    def take_over(self, task):
        LOG.error("Ansible deploy does not support take over. "
                  "You must redeploy the node %s explicitly.",
                  task.node.uuid)

    def get_clean_steps(self, task):
        """Get the list of clean steps from the file.

        :param task: a TaskManager object containing the node
        :returns: A list of clean step dictionaries
        """
        new_priorities = {
            'erase_devices': CONF.deploy.erase_devices_priority,
            'erase_devices_metadata':
                CONF.deploy.erase_devices_metadata_priority
        }
        return _get_clean_steps(task.node, interface='deploy',
                                override_priorities=new_priorities)

    @METRICS.timer('AnsibleDeploy.execute_clean_step')
    def execute_clean_step(self, task, step):
        """Execute a clean step.

        :param task: a TaskManager object containing the node
        :param step: a clean step dictionary to execute
        :returns: None
        """
        node = task.node
        playbook, user, key = _parse_ansible_driver_info(
            task.node, action='clean')
        stepname = step['step']

        if (not CONF.ansible.use_ramdisk_callback and
                'ansible_cleaning_ip' in node.driver_internal_info):
            node_address = node.driver_internal_info['ansible_cleaning_ip']
        else:
            node_address = _get_node_ip(task)

        node_list = [(node.uuid, node_address, user, node.extra)]
        extra_vars = _prepare_extra_vars(node_list)

        LOG.debug('Starting cleaning step %(step)s on node %(node)s',
                  {'node': node.uuid, 'step': stepname})
        step_tags = step['args'].get('tags', [])
        try:
            _run_playbook(playbook, extra_vars, key,
                          tags=step_tags)
        except exception.InstanceDeployFailure as e:
            LOG.error("Ansible failed cleaning step %(step)s "
                      "on node %(node)s.",
                      {'node': node.uuid, 'step': stepname})
            manager_utils.cleaning_error_handler(task, six.text_type(e))
        else:
            LOG.info('Ansible completed cleaning step %(step)s '
                     'on node %(node)s.',
                     {'node': node.uuid, 'step': stepname})

    @METRICS.timer('AnsibleDeploy.prepare_cleaning')
    def prepare_cleaning(self, task):
        """Boot into the ramdisk to prepare for cleaning.

        :param task: a TaskManager object containing the node
        :raises NodeCleaningFailure: if the previous cleaning ports cannot
            be removed or if new cleaning ports cannot be created
        :returns: None or states.CLEANWAIT for async prepare.
        """
        node = task.node
        use_callback = CONF.ansible.use_ramdisk_callback
        if use_callback:
            manager_utils.set_node_cleaning_steps(task)
            if not node.driver_internal_info['clean_steps']:
                # no clean steps configured, nothing to do.
                return
        task.driver.network.add_cleaning_network(task)
        boot_opt = deploy_utils.build_agent_options(node)
        task.driver.boot.prepare_ramdisk(task, boot_opt)
        manager_utils.node_power_action(task, states.REBOOT)
        if use_callback:
            return states.CLEANWAIT

        ip_addr = _get_node_ip_dhcp(task)
        LOG.debug('IP of node %(node)s is %(ip)s',
                  {'node': node.uuid, 'ip': ip_addr})
        driver_internal_info = node.driver_internal_info
        driver_internal_info['ansible_cleaning_ip'] = ip_addr
        node.driver_internal_info = driver_internal_info
        node.save()
        playbook, user, key = _parse_ansible_driver_info(
            task.node, action='clean')
        node_list = [(node.uuid, ip_addr, user, node.extra)]
        extra_vars = _prepare_extra_vars(node_list)

        LOG.debug('Waiting for ramdisk on node %s for cleaning', node.uuid)
        _run_playbook(playbook, extra_vars, key, tags=['wait'])
        LOG.info('Node %s is ready for cleaning', node.uuid)

    @METRICS.timer('AnsibleDeploy.tear_down_cleaning')
    def tear_down_cleaning(self, task):
        """Clean up the PXE and DHCP files after cleaning.

        :param task: a TaskManager object containing the node
        :raises NodeCleaningFailure: if the cleaning ports cannot be
            removed
        """
        node = task.node
        driver_internal_info = node.driver_internal_info
        driver_internal_info.pop('ansible_cleaning_ip', None)
        node.driver_internal_info = driver_internal_info
        node.save()
        manager_utils.node_power_action(task, states.POWER_OFF)
        task.driver.boot.clean_up_ramdisk(task)
        task.driver.network.remove_cleaning_network(task)

    @METRICS.timer('AnsibleDeploy.continue_deploy')
    def continue_deploy(self, task):
        # NOTE(pas-ha) the lock should be already upgraded in heartbeat,
        # just setting its purpose for better logging
        task.upgrade_lock(purpose='deploy')
        task.process_event('resume')
        # NOTE(pas-ha) this method is called from heartbeat processing only,
        # so we are sure we need this particular method, not the general one
        node_address = _get_node_ip_heartbeat(task)
        self._ansible_deploy(task, node_address)
        self.reboot_to_instance(task)

    @METRICS.timer('AnsibleDeploy.reboot_to_instance')
    def reboot_to_instance(self, task):
        node = task.node
        LOG.info('Ansible completed deploy on node %s', node.uuid)

        LOG.debug('Rebooting node %s to instance', node.uuid)
        manager_utils.node_set_boot_device(task, 'disk', persistent=True)
        self.reboot_and_finish_deploy(task)
        task.driver.boot.clean_up_ramdisk(task)

    @METRICS.timer('AnsibleDeploy.reboot_and_finish_deploy')
    def reboot_and_finish_deploy(self, task):
        wait = CONF.ansible.post_deploy_get_power_state_retry_interval * 1000
        attempts = CONF.ansible.post_deploy_get_power_state_retries + 1

        @retrying.retry(
            stop_max_attempt_number=attempts,
            retry_on_result=lambda state: state != states.POWER_OFF,
            wait_fixed=wait
        )
        def _wait_until_powered_off(task):
            return task.driver.power.get_power_state(task)

        node = task.node
        oob_power_off = strutils.bool_from_string(
            node.driver_info.get('deploy_forces_oob_reboot', False))
        try:
            if not oob_power_off:
                try:
                    node_address = _get_node_ip(task)
                    playbook, user, key = _parse_ansible_driver_info(
                        node, action='shutdown')
                    node_list = [(node.uuid, node_address, user, node.extra)]
                    extra_vars = _prepare_extra_vars(node_list)
                    _run_playbook(playbook, extra_vars, key)
                    _wait_until_powered_off(task)
                except Exception as e:
                    LOG.warning('Failed to soft power off node %(node_uuid)s '
                                'in at least %(timeout)d seconds. '
                                'Error: %(error)s',
                                {'node_uuid': node.uuid,
                                 'timeout': (wait * (attempts - 1)) / 1000,
                                 'error': e})
                    # NOTE(pas-ha) flush is a part of deploy playbook
                    # so if it finished successfully we can safely
                    # power off the node out-of-band
                    manager_utils.node_power_action(task, states.POWER_OFF)
            else:
                manager_utils.node_power_action(task, states.POWER_OFF)
            task.driver.network.remove_provisioning_network(task)
            task.driver.network.configure_tenant_networks(task)
            manager_utils.node_power_action(task, states.POWER_ON)
        except Exception as e:
            msg = (_('Error rebooting node %(node)s after deploy. '
                     'Error: %(error)s') %
                   {'node': node.uuid, 'error': e})
            agent_base.log_and_raise_deployment_error(task, msg)

        task.process_event('done')
        LOG.info('Deployment to node %s done', task.node.uuid)
@ -1,11 +0,0 @@
- hosts: conductor
  gather_facts: no
  tasks:
    - add_host:
        group: ironic
        hostname: "{{ item.name }}"
        ansible_host: "{{ item.ip }}"
        ansible_user: "{{ item.user }}"
        ironic_extra: "{{ item.extra | default({}) }}"
      with_items: "{{ ironic.nodes }}"
      tags: always
@ -1,35 +0,0 @@
[defaults]
# retries through the ansible-deploy driver are not supported
retry_files_enabled = False

# this is using supplied callback_plugin to interleave ansible event logs
# into Ironic-conductor log as set in ironic configuration file,
# see callback_plugin/ironic_log.ini for some options to set
# (DevStack _needs_ some tweaks)
callback_whitelist = ironic_log

# For better security, bake SSH host keys into bootstrap image,
# add those to ~/.ssh/known_hosts for user running ironic-conductor service
# on all nodes where ironic-conductor and ansible-deploy driver are installed,
# and set the host_key_checking to True (or comment it out, it is the default)
host_key_checking = False

# uncomment if you have problems with ramdisk locale on ansible >= 2.1
#module_set_locale=False

# This sets the interval (in seconds) of Ansible internal processes polling
# each other. Lower values improve performance with large playbooks at
# the expense of extra CPU load. Higher values are more suitable for Ansible
# usage in automation scenarios, when UI responsiveness is not required but
# CPU usage might be a concern.
# Default corresponds to the value hardcoded in Ansible ≤ 2.1:
#internal_poll_interval = 0.001

[ssh_connection]
# pipelining greatly increases speed of deployment, disable it only when
# the ssh client on the ironic node or the server in the bootstrap image
# does not support it, or if you cannot disable "requiretty" for the
# passwordless sudoer user in the bootstrap image.
# See Ansible documentation for more info:
# http://docs.ansible.com/ansible/intro_configuration.html#pipelining
pipelining = True
@ -1,15 +0,0 @@
[ironic]
# If Ironic's config is not in one of the default oslo_config locations,
# specify the path to it here
#config_file =

# Force usage of journald
#use_journal = False

# Force usage of syslog
#use_syslog = False

# Force usage of a given file to log to.
# Useful for a testing system with only stderr logging
# (e.g. DevStack deployed w/o systemd)
#log_file =
@ -1,148 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ConfigParser
import os

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import pbr.version


CONF = cfg.CONF
DOMAIN = 'ironic'
VERSION = pbr.version.VersionInfo(DOMAIN).release_string()


# find and parse callback config file
def parse_callback_config():
    basename = os.path.splitext(__file__)[0]
    config = ConfigParser.ConfigParser()
    callback_config = {'ironic_config': None,
                       'ironic_log_file': None,
                       'use_journal': False,
                       'use_syslog': False}
    try:
        config.readfp(open(basename + ".ini"))
        if config.has_option('ironic', 'config_file'):
            callback_config['ironic_config'] = config.get(
                'ironic', 'config_file')
        if config.has_option('ironic', 'log_file'):
            callback_config['ironic_log_file'] = config.get(
                'ironic', 'log_file')
        if config.has_option('ironic', 'use_journal'):
            callback_config['use_journal'] = strutils.bool_from_string(
                config.get('ironic', 'use_journal'))
        if config.has_option('ironic', 'use_syslog'):
            callback_config['use_syslog'] = strutils.bool_from_string(
                config.get('ironic', 'use_syslog'))
    except Exception:
        pass
    return callback_config


def setup_log():

    logging.register_options(CONF)

    conf_kwargs = dict(args=[], project=DOMAIN, version=VERSION)
    callback_config = parse_callback_config()

    if callback_config['ironic_config']:
        conf_kwargs['default_config_files'] = [
            callback_config['ironic_config']]
    CONF(**conf_kwargs)

    if callback_config['use_journal']:
        CONF.set_override('use_journal', True)
    if callback_config['use_syslog']:
        CONF.set_override('use_syslog', True)
    if callback_config['ironic_log_file']:
        CONF.set_override("log_file", callback_config['ironic_log_file'])

    logging.setup(CONF, DOMAIN)


class CallbackModule(object):

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'ironic_log'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self, display=None):
        setup_log()
        self.log = logging.getLogger(__name__)
        self.node = None
        self.opts = {}

    # NOTE(pas-ha) this method is required for Ansible>=2.4
    # TODO(pas-ha) rewrite to support defining callback plugin options
    # in ansible.cfg after we require Ansible >=2.4
    def set_options(self, options):
        self.opts = options

    def runner_msg_dict(self, result):
        self.node = result._host.get_name()
        name = result._task.get_name()
        res = str(result._result)
        return dict(node=self.node, name=name, res=res)

    def v2_playbook_on_task_start(self, task, is_conditional):
        # NOTE(pas-ha) I do not know (yet) how to obtain a ref to host
        # until first task is processed
        node = self.node or "Node"
        name = task.get_name()
        if name == 'setup':
            self.log.debug("Processing task %(name)s.", dict(name=name))
        else:
            self.log.debug("Processing task %(name)s on node %(node)s.",
                           dict(name=name, node=node))

    def v2_runner_on_failed(self, result, *args, **kwargs):
        self.log.error(
            "Ansible task %(name)s failed on node %(node)s: %(res)s",
            self.runner_msg_dict(result))

    def v2_runner_on_ok(self, result):
        msg_dict = self.runner_msg_dict(result)
        if msg_dict['name'] == 'setup':
            self.log.info("Ansible task 'setup' complete on node %(node)s",
                          msg_dict)
        else:
            self.log.info("Ansible task %(name)s complete on node %(node)s: "
                          "%(res)s", msg_dict)

    def v2_runner_on_unreachable(self, result):
        self.log.error(
            "Node %(node)s was unreachable for Ansible task %(name)s: %(res)s",
            self.runner_msg_dict(result))

    def v2_runner_on_async_poll(self, result):
        self.log.debug("Polled ansible task %(name)s for completion "
                       "on node %(node)s: %(res)s",
                       self.runner_msg_dict(result))

    def v2_runner_on_async_ok(self, result):
        self.log.info("Async Ansible task %(name)s complete on node %(node)s: "
                      "%(res)s", self.runner_msg_dict(result))

    def v2_runner_on_async_failed(self, result):
        self.log.error("Async Ansible task %(name)s failed on node %(node)s: "
                       "%(res)s", self.runner_msg_dict(result))

    def v2_runner_on_skipped(self, result):
        self.log.debug(
            "Ansible task %(name)s skipped on node %(node)s: %(res)s",
            self.runner_msg_dict(result))
@ -1,12 +0,0 @@
---
- include: add-ironic-nodes.yaml

- hosts: ironic
  gather_facts: no
  roles:
    - role: wait
      tags: wait

- hosts: ironic
  roles:
    - clean
@ -1,19 +0,0 @@
- name: erase_devices_metadata
  priority: 99
  interface: deploy
  args:
    tags:
      required: true
      description: list of playbook tags used to erase partition table on disk devices
      value:
        - zap

- name: erase_devices
  priority: 10
  interface: deploy
  args:
    tags:
      required: true
      description: list of playbook tags used to erase disk devices
      value:
        - shred
@ -1,18 +0,0 @@
---
- include: add-ironic-nodes.yaml

- hosts: ironic
  gather_facts: no
  roles:
    - role: wait
      tags: wait

- hosts: ironic
  roles:
    - discover
    - prepare
    - deploy
    - configure
  post_tasks:
    - name: flush disk state
      command: sync
@ -1 +0,0 @@
conductor ansible_connection=local
@ -1,66 +0,0 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

COLLECT_INFO = (('wwn', 'WWN'), ('serial', 'SERIAL_SHORT'),
                ('wwn_with_extension', 'WWN_WITH_EXTENSION'),
                ('wwn_vendor_extension', 'WWN_VENDOR_EXTENSION'))


# TODO(pas-ha) replace module.log with module.warn
# after we require Ansible >= 2.3
def get_devices_wwn(devices, module):
    try:
        import pyudev
        # NOTE(pas-ha) creating context might fail if udev is missing
        context = pyudev.Context()
    except ImportError:
        module.log('Can not collect "wwn", "wwn_with_extension", '
                   '"wwn_vendor_extension" and "serial" when using '
                   'root device hints because there are no pyudev '
                   'Python bindings installed')
        return {}

    dev_dict = {}
    for device in devices:
        name = '/dev/' + device
        try:
            udev = pyudev.Device.from_device_file(context, name)
        except (ValueError, EnvironmentError, pyudev.DeviceNotFoundError) as e:
            # NOTE: module.log does not interpolate, format the message first
            module.log('Device %(dev)s is inaccessible, skipping... '
                       'Error: %(error)s' % {'dev': name, 'error': e})
            continue

        dev_dict[device] = {}
        for key, udev_key in COLLECT_INFO:
            dev_dict[device][key] = udev.get('ID_%s' % udev_key)

    return {"ansible_facts": {"devices_wwn": dev_dict}}


def main():
    module = AnsibleModule(
        argument_spec=dict(
            devices=dict(required=True, type='list'),
        ),
        supports_check_mode=True,
    )

    devices = module.params['devices']
    data = get_devices_wwn(devices, module)
    module.exit_json(**data)


from ansible.module_utils.basic import *  # noqa
if __name__ == '__main__':
    main()
@ -1,335 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(pas-ha) might not need it when Ansible PullRequest#2971 is accepted

import itertools
try:
    import json
except ImportError:
    import simplejson as json

PARTITION_TYPES = ('primary', 'logical', 'extended')
SUPPORTED_UNITS = {'%', 'MiB'}
SUPPORTED_ALIGN = {'optimal', 'minimal', 'cylinder', 'none'}

DOCUMENTATION = """
---
module: ironic_parted
short_description: Create disk partition tables and partitions
description: uses GNU parted utility
author: Pavlo Shchelokovskyy @pshchelo
version_added: null
notes:
    - IS NOT IDEMPOTENT! partitions and table (if requested) are created anyway
    - does not support all the partition labels parted supports, only msdos and gpt
    - does not support units other than % and MiB
    - check mode is supported by returning an emulated list of created block devices
    - does not validate whether the given partitions will actually fit the device
    - makes some extra validations for appropriate partition types for msdos label
requirements:
    - Python >= 2.4 (itertools.groupby available) on the managed node
    - 'simplejson' for Python < 2.6
    - 'parted' utility installed on the managed node
    - 'lsblk' available on managed node
    - 'udevadm' available on managed node
options:
    device:
        description: device to pass to parted
        required: true
        default: null
        choices: []
        aliases: []
        version_added: null
    label:
        description: |
            type of a partition table to create;
            to use an existing partition table, omit it or pass null YAML value
        required: false
        default: none
        choices: [null, msdos, gpt]
        aliases: []
        version_added: null
    dry_run:
        description: |
            if set, changes are not actually written to disk and
            simulated partitions will be reported instead.
        required: false
        default: no
        choices: [yes, no]
        aliases: []
        version_added: null
    partitions:
        description: |
            list of partitions. each entry is a dictionary in the form
            - size: <int>, required, must be positive
              type: [primary, extended, logical], default is primary
              format: a format to pass to parted;
                      does not actually create filesystems, only sets
                      partition ID
              name: <str> (optional) name of the partition;
                    only supported for gpt partitions;
                    if not set will be reported as 'partN'
              unit: 'MiB' or '%' are currently supported,
                    must be the same for all partitions. default is '%'
              align: one of 'optimal', 'cylinder', 'minimal' or 'none';
                     the default is 'optimal'
              flags: <dict> of <flag>: <bool> to (un)set partition flags
        required: false
        default: null
        choices: []
        aliases: []
        version_added: null
"""

EXAMPLES = """
---
"""

RETURNS = """
---
{"created": {
    "<name-as-provided-to-module>": "<device-handle-without-leading-dev>"
    }
}
"""


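# Example (hypothetical, for illustration; the module itself ships with an
# empty EXAMPLES string):
#
#   - name: partition the disk
#     ironic_parted:
#       device: /dev/sda
#       label: msdos
#       partitions:
#         - size: 512
#           unit: MiB
#           format: linux-swap
#         - size: 4096
#           unit: MiB
#           flags:
#             boot: yes

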
def parse_sizes(module, partitions):
    start = 0 if partitions[0]['unit'] == '%' else 1
    sizes = {}
    for p in partitions:
        size = p.get('size')
        if not size:
            module.fail_json(msg="Partition size must be provided")
        try:
            p['size'] = int(size)
        except ValueError:
            module.fail_json(msg="Can not cast partition size to INT.")
        if p['size'] <= 0:
            module.fail_json(msg="Partition size must be positive.")
        end = start + p['size']
        sizes[p['name']] = (start, end)
        start = end
    return sizes


def create_part_args(partition, label, sizes):

    parted_args = ['-a', partition['align'],
                   '--', 'unit', partition['unit'],
                   'mkpart']
    if label == 'msdos':
        parted_args.append(partition['type'])
    else:
        parted_args.append(partition['name'])

    if partition['format']:
        parted_args.append(partition['format'])
    parted_args.extend(["%i" % sizes[partition['name']][0],
                        "%i" % sizes[partition['name']][1]])
    return parted_args


def change_part_args(part_number, partition):
    parted_args = []
    for flag, state in partition['flags'].items():
        parted_args.extend(['set', part_number, flag, state])
    return parted_args


def parse_lsblk_json(output):

    def get_names(devices):
        names = []
        for d in devices:
            names.append(d['name'])
            names.extend(get_names(d.get('children', [])))
        return names

    return set(get_names(json.loads(output)['blockdevices']))


def parse_parted_output(output):
    partitions = set()
    for line in output.splitlines():
        out_line = line.strip().split()
        if out_line:
            try:
                int(out_line[0])
            except ValueError:
                continue
            else:
                partitions.add(out_line[0])
    return partitions


def parse_partitions(module, partitions):

    for ind, partition in enumerate(partitions):
        # partition name might be an empty string
        partition.setdefault('unit', '%')
        partition.setdefault('align', 'optimal')
        partition['name'] = partition.get('name') or 'part%i' % (ind + 1)
        partition.setdefault('type', 'primary')
        if partition['type'] not in PARTITION_TYPES:
            module.fail_json(msg="Partition type must be one of "
                                 "%s." % PARTITION_TYPES)
        if partition['align'] not in SUPPORTED_ALIGN:
            module.fail_json(msg="Unsupported partition alignment option. "
                                 "Supported are %s" % list(SUPPORTED_ALIGN))
        partition['format'] = partition.get('format', None)
        # validate and convert partition flags
        partition['flags'] = {
            k: 'on' if module.boolean(v) else 'off'
            for k, v in partition.get('flags', {}).items()
        }
    # validate name uniqueness
    names = [p['name'] for p in partitions]
    if len(names) != len(set(names)):
        module.fail_json(msg="Partition names must be unique.")


def validate_units(module, partitions):
    has_units = set(p['unit'] for p in partitions)
    if not has_units.issubset(SUPPORTED_UNITS):
        module.fail_json(msg="Unsupported partition size unit. Supported "
                             "units are %s" % list(SUPPORTED_UNITS))

    if len(has_units) > 1:
        module.fail_json(msg="All partitions must have the same size unit. "
                             "Requested units are %s" % list(has_units))


def validate_msdos(module, partitions):
    """Validate limitations of MSDOS partition table"""
    p_types = [p['type'] for p in partitions]
    # NOTE(pas-ha) no more than 4 primary
    if p_types.count('primary') > 4:
        module.fail_json(msg="Can not create more than 4 primary partitions "
                             "on a MSDOS partition table.")
    if 'extended' in p_types:
        # NOTE(pas-ha) only single extended
        if p_types.count('extended') > 1:
            module.fail_json(msg="Can not create more than a single extended "
                                 "partition on a MSDOS partition table.")
        allowed = ['primary', 'extended']
        if 'logical' in p_types:
            allowed.append('logical')

        # NOTE(pas-ha) this produces list with subsequent duplicates
        # removed
        if [k for k, g in itertools.groupby(p_types)] != allowed:
            module.fail_json(msg="Incorrect partitions order: for MSDOS, "
                                 "all primary, single extended, all logical")
    elif 'logical' in p_types:
        # NOTE(pas-ha) logical has sense only with extended
        module.fail_json(msg="Logical partition w/o extended one on MSDOS "
                             "partition table")


# TODO(pas-ha) add more validation, e.g.
# - add idempotency: first check the already existing partitions
#   and do not run anything unless really needed, and only what's needed
#   - if only change tags - use specific command
# - allow fuzziness in partition sizes when alignment is 'optimal'
# - estimate and validate available space
# - support more units
# - support negative units?
def main():
    module = AnsibleModule(
        argument_spec=dict(
            device=dict(required=True, type='str'),
            label=dict(required=False, default=None, choices=[None,
                                                              "gpt",
                                                              "msdos"]),
            dry_run=dict(required=False, type='bool', default=False),
            partitions=dict(required=False, type='list')
        ),
        supports_check_mode=True
    )

    device = module.params['device']
    label = module.params['label']
    partitions = module.params['partitions'] or []
    dry_run = module.params['dry_run']

    if partitions:
        parse_partitions(module, partitions)
        if label == 'msdos':
            validate_msdos(module, partitions)
        validate_units(module, partitions)
        sizes = parse_sizes(module, partitions)
    else:
        sizes = {}

    if module.check_mode or dry_run:
        short_dev = device.split('/')[-1]
        created_partitions = {}
        for i, p in enumerate(partitions):
            created_partitions[p['name']] = '%s%s' % (short_dev, i + 1)
        module.exit_json(changed=dry_run, created=created_partitions)

    parted_bin = module.get_bin_path('parted', required=True)
    lsblk_bin = module.get_bin_path('lsblk', required=True)
    udevadm_bin = module.get_bin_path('udevadm', required=True)
    parted = [parted_bin, '-s', device]
    lsblk = [lsblk_bin, '-J', device]
    if label:
        module.run_command(parted + ['mklabel', label], check_rc=True)
    rc, part_output, err = module.run_command(parted + ['print'],
                                              check_rc=True)
    rc, lsblk_output, err = module.run_command(lsblk,
                                               check_rc=True)
    part_cache = parse_parted_output(part_output)
    dev_cache = parse_lsblk_json(lsblk_output)

    created_partitions = {}

    for partition in partitions:
        # create partition
        parted_args = create_part_args(partition, label, sizes)
        module.run_command(parted + parted_args, check_rc=True)
        rc, part_output, err = module.run_command(parted + ['print'],
                                                  check_rc=True)
        # get created partition number
        part_current = parse_parted_output(part_output)
        part_created = part_current - part_cache
        part_cache = part_current
        # set partition flags
        parted_args = change_part_args(part_created.pop(),
                                       partition)
        if parted_args:
            module.run_command(parted + parted_args, check_rc=True)

        # get created block device name
        rc, lsblk_output, err = module.run_command(lsblk, check_rc=True)
        dev_current = parse_lsblk_json(lsblk_output)
        dev_created = dev_current - dev_cache
        dev_cache = dev_current
        created_partitions[partition['name']] = dev_created.pop()

    # NOTE(pas-ha) wait for all partitions to become available for write
    for dev_name in created_partitions.values():
        module.run_command([udevadm_bin,
                            'settle',
                            '--exit-if-exists=/dev/%s' % dev_name])

    module.exit_json(changed=True, created=created_partitions)


from ansible.module_utils.basic import *  # noqa
if __name__ == '__main__':
    main()
@ -1,97 +0,0 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

GIB = 1 << 30

EXTRA_PARAMS = set(['wwn', 'serial', 'wwn_with_extension',
                    'wwn_vendor_extension'])


# NOTE: ansible calculates device size as a float with 2-digit precision,
# while Ironic requires size in GiB; if we used the ansible size parameter,
# a bug would be possible for devices > 1 TB
def size_gib(device_info):
    sectors = device_info.get('sectors')
    sectorsize = device_info.get('sectorsize')
    if sectors is None or sectorsize is None:
        return '0'

    return str((int(sectors) * int(sectorsize)) // GIB)


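# Example: for a disk with 2344225968 sectors of 512 bytes each,
# size_gib({'sectors': '2344225968', 'sectorsize': '512'}) returns
# '1117' (2344225968 * 512 // 2**30).

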
def merge_devices_info(devices, devices_wwn):
    merged_info = devices.copy()
    for device in merged_info:
        if device in devices_wwn:
            merged_info[device].update(devices_wwn[device])

        # replace size
        merged_info[device]['size'] = size_gib(merged_info[device])

    return merged_info


def root_hint(hints, devices):
    hint = None
    name = hints.pop('name', None)
    for device in devices:
        for key in hints:
            if hints[key] != devices[device].get(key):
                break
        else:
            # If multiple hints are specified, a device must satisfy all
            # the hints
            dev_name = '/dev/' + device
            if name is None or name == dev_name:
                hint = dev_name
                break

    return hint


def main():
    module = AnsibleModule(
        argument_spec=dict(
            root_device_hints=dict(required=True, type='dict'),
            ansible_devices=dict(required=True, type='dict'),
            ansible_devices_wwn=dict(required=True, type='dict')
        ),
        supports_check_mode=True)

    hints = module.params['root_device_hints']
    devices = module.params['ansible_devices']
    devices_wwn = module.params['ansible_devices_wwn']

    if not devices_wwn:
        extra = set(hints) & EXTRA_PARAMS
        if extra:
            module.fail_json(msg='Extra hints (supported by an additional '
                                 'ansible module) are set but this '
                                 'information cannot be collected. '
                                 'Extra hints: %s' % ', '.join(extra))

    devices_info = merge_devices_info(devices, devices_wwn or {})
    hint = root_hint(hints, devices_info)

    if hint is None:
        module.fail_json(msg='Root device hints are set, but none of the '
                             'devices satisfy them. Collected devices info: %s'
                         % devices_info)

    ret_data = {'ansible_facts': {'ironic_root_device': hint}}
    module.exit_json(**ret_data)


from ansible.module_utils.basic import *  # noqa
if __name__ == '__main__':
    main()
@ -1,118 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import hashlib
import string

import requests

# adapted from IPA
DEFAULT_CHUNK_SIZE = 1024 * 1024  # 1MB


class StreamingDownloader(object):

    def __init__(self, url, chunksize, hash_algo=None, verify=True,
                 certs=None):
        if hash_algo is not None:
            self.hasher = hashlib.new(hash_algo)
        else:
            self.hasher = None
        self.chunksize = chunksize
        # NOTE: requests takes the client certificate via the "cert" argument
        resp = requests.get(url, stream=True, verify=verify, cert=certs)
        if resp.status_code != 200:
            raise Exception('Invalid response code: %s' % resp.status_code)

        self._request = resp

    def __iter__(self):
        for chunk in self._request.iter_content(chunk_size=self.chunksize):
            if self.hasher is not None:
                self.hasher.update(chunk)
            yield chunk

    def checksum(self):
        if self.hasher is not None:
            return self.hasher.hexdigest()


def stream_to_dest(url, dest, chunksize, hash_algo, verify=True, certs=None):
    downloader = StreamingDownloader(url, chunksize, hash_algo,
                                     verify=verify, certs=certs)

    with open(dest, 'wb+') as f:
        for chunk in downloader:
            f.write(chunk)

    return downloader.checksum()


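# Example (hypothetical URL): downloading an image and verifying its
# MD5 checksum while streaming:
#
#   digest = stream_to_dest('http://example.com/cirros.img',
#                           '/tmp/cirros.img', DEFAULT_CHUNK_SIZE, 'md5')
#   # digest now holds the hex digest computed over the streamed bytes

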
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
url=dict(required=True, type='str'),
|
||||
dest=dict(required=True, type='str'),
|
||||
checksum=dict(required=False, type='str', default=''),
|
||||
chunksize=dict(required=False, type='int',
|
||||
default=DEFAULT_CHUNK_SIZE),
|
||||
validate_certs=dict(required=False, type='bool', default=True),
|
||||
client_cert=dict(required=False, type='str', default=''),
|
||||
client_key=dict(required=False, type='str', default='')
|
||||
|
||||
))
|
||||
|
||||
url = module.params['url']
|
||||
dest = module.params['dest']
|
||||
checksum = module.params['checksum']
|
||||
chunksize = module.params['chunksize']
|
||||
validate = module.params['validate_certs']
|
||||
client_cert = module.params['client_cert']
|
||||
client_key = module.params['client_key']
|
||||
if client_cert:
|
||||
certs = (client_cert, client_key) if client_key else client_cert
|
||||
else:
|
||||
certs = None
|
||||
|
||||
if checksum == '':
|
||||
hash_algo, checksum = None, None
|
||||
else:
|
||||
try:
|
||||
hash_algo, checksum = checksum.rsplit(':', 1)
|
||||
except ValueError:
|
||||
module.fail_json(msg='The checksum parameter has to be in format '
|
||||
'"<algorithm>:<checksum>"')
|
||||
checksum = checksum.lower()
|
||||
if not all(c in string.hexdigits for c in checksum):
|
||||
module.fail_json(msg='The checksum must be valid HEX number')
|
||||
|
||||
if hash_algo not in hashlib.algorithms_available:
|
||||
module.fail_json(msg="%s checksums are not supported" % hash_algo)
|
||||
|
||||
try:
|
||||
actual_checksum = stream_to_dest(
|
||||
url, dest, chunksize, hash_algo, verify=validate, certs=certs)
|
||||
except Exception as e:
|
||||
module.fail_json(msg=str(e))
|
||||
else:
|
||||
if hash_algo and actual_checksum != checksum:
|
||||
module.fail_json(msg='Invalid dest checksum')
|
||||
else:
|
||||
module.exit_json(changed=True)
|
||||
|
||||
|
||||
# NOTE(pas-ha) Ansible's module_utils.basic is licensed under BSD (2 clause)
|
||||
from ansible.module_utils.basic import * # noqa
|
||||
if __name__ == '__main__':
|
||||
main()
|
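For reference, the checksum contract the module above enforces is "<algorithm>:<hexdigest>". A minimal standalone sketch of that parsing and validation (not part of the module; the helper name is invented for illustration):

import hashlib
import string

def parse_checksum(value):
    # Split "md5:d41d8cd9..." into algorithm and lower-cased hex digest,
    # mirroring the rsplit(':', 1) logic in the module's main().
    algo, _, digest = value.rpartition(':')
    digest = digest.lower()
    assert algo, 'expected "<algorithm>:<checksum>"'
    assert all(c in string.hexdigits for c in digest), 'digest must be hex'
    assert algo in hashlib.algorithms_available, 'unsupported algorithm'
    return algo, digest

print(parse_checksum('sha256:' + hashlib.sha256(b'').hexdigest()))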
@ -1 +0,0 @@
sectors_to_wipe: 1024
@ -1,6 +0,0 @@
- include: zap.yaml
  tags:
    - zap
- include: shred.yaml
  tags:
    - shred
@ -1,8 +0,0 @@
- name: clean block devices
  become: yes
  command: shred -f -z /dev/{{ item.key }}
  async: 3600
  poll: 30
  with_dict: "{{ ansible_devices }}"
  when:
    - item.value.host
@ -1,24 +0,0 @@
- name: store start and end of disk
  set_fact:
    start_sectors:
      - 0
    end_sectors:
      - "{{ (device.value.sectors | int) - sectors_to_wipe }}"
  when:
    - device.value.host

- name: add start and end sectors of each partition
  set_fact:
    start_sectors: "{{ start_sectors + [item.value.start | int ] }}"
    end_sectors: "{{ end_sectors + [ (item.value.start | int) + ( item.value.sectors | int) - sectors_to_wipe ] }}"
  with_dict: "{{ device.value.partitions }}"
  when:
    - device.value.host

- name: wipe starts and ends of disks and partitions
  command: dd if=/dev/zero of=/dev/{{ device.key }} ibs={{ device.value.sectorsize }} obs={{ device.value.sectorsize }} count={{ sectors_to_wipe }} seek={{ item }}
  with_flattened:
    - "{{ start_sectors | map('int') | list | sort (reverse=True) }}"
    - "{{ end_sectors | map('int') | list | sort (reverse=True) }}"
  when:
    - device.value.host
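The Jinja arithmetic above is easier to follow in plain code. A minimal sketch with a hypothetical disk layout (device name and all sizes invented for illustration):

# Hypothetical disk: 1000000 sectors total, one partition starting at
# sector 2048 spanning 500000 sectors; wipe window is 1024 sectors.
sectors_to_wipe = 1024
disk_sectors = 1000000
partitions = [{'start': 2048, 'sectors': 500000}]

start_sectors = [0]
end_sectors = [disk_sectors - sectors_to_wipe]
for part in partitions:
    start_sectors.append(part['start'])
    end_sectors.append(part['start'] + part['sectors'] - sectors_to_wipe)

# dd then zeroes sectors_to_wipe sectors at each offset, highest first
for seek in sorted(start_sectors + end_sectors, reverse=True):
    print('dd if=/dev/zero of=/dev/sdX count=%d seek=%d'
          % (sectors_to_wipe, seek))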
@ -1,16 +0,0 @@
# NOTE(pas-ha) this is to ensure that partition metadata that might be stored
# in the start or end of the partition itself also becomes unusable
# and does not interfere with a future partition scheme if new partitions
# happen to fall on the same boundaries where old partitions were.
# NOTE(pas-ha) loop_control works with Ansible >= 2.1
- include: wipe.yaml
  with_dict: "{{ ansible_devices }}"
  loop_control:
    loop_var: device

- name: wipe general partition table metadata
  become: yes
  command: sgdisk -Z /dev/{{ item.key }}
  with_dict: "{{ ansible_devices }}"
  when:
    - item.value.host
@ -1 +0,0 @@
tmp_rootfs_mount: /tmp/rootfs
@ -1,79 +0,0 @@
- name: discover grub-install command
  find:
    paths:
      - "{{ tmp_rootfs_mount }}/usr/sbin"
    pattern: "grub*-install"
  register: grub_install_found

- name: discover grub-mkconfig command
  find:
    paths:
      - "{{ tmp_rootfs_mount }}/usr/sbin"
    pattern: "grub*-mkconfig"
  register: grub_config_found

- name: find grub config file
  find:
    paths:
      - "{{ tmp_rootfs_mount }}/boot"
    pattern: "grub*.cfg"
    recurse: yes
  register: grub_file_found

- name: test if all needed grub files were found
  assert:
    that:
      - "{{ grub_install_found.matched > 0 }}"
      - "{{ grub_config_found.matched > 0 }}"
      - "{{ grub_file_found.matched > 0 }}"

- name: set paths to grub commands
  set_fact:
    grub_install_cmd: "{{ grub_install_found.files[0].path | replace(tmp_rootfs_mount,'') }}"
    grub_config_cmd: "{{ grub_config_found.files[0].path | replace(tmp_rootfs_mount,'') }}"
    grub_config_file: "{{ grub_file_found.files[0].path | replace(tmp_rootfs_mount,'') }}"

- name: make dirs for chroot
  become: yes
  file:
    state: directory
    path: "{{ tmp_rootfs_mount }}/{{ item }}"
  with_items:
    - dev
    - sys
    - proc

- name: mount dirs for chroot
  become: yes
  command: mount -o bind /{{ item }} {{ tmp_rootfs_mount }}/{{ item }}
  with_items:
    - dev
    - sys
    - proc

- block:
    - name: get grub version string
      become: yes
      command: chroot {{ tmp_rootfs_mount }} /bin/sh -c '{{ grub_install_cmd }} --version'
      register: grub_version_string
    - name: install grub to disk
      become: yes
      command: chroot {{ tmp_rootfs_mount }} /bin/sh -c '{{ grub_install_cmd }} {{ ironic_root_device }}'
    - name: preload lvm modules for grub2
      become: yes
      lineinfile:
        dest: "{{ tmp_rootfs_mount }}/etc/default/grub"
        state: present
        line: GRUB_PRELOAD_MODULES=lvm
      when: "{{ grub_version_string.stdout.split() | last | first == '2' }}"
    - name: create grub config
      become: yes
      command: chroot {{ tmp_rootfs_mount }} /bin/sh -c '{{ grub_config_cmd }} -o {{ grub_config_file }}'
  always:
    - name: unmount dirs for chroot
      become: yes
      command: umount {{ tmp_rootfs_mount }}/{{ item }}
      with_items:
        - dev
        - sys
        - proc
@ -1,4 +0,0 @@
- include: mounts.yaml
  when: "{{ ironic.image.type | default('whole-disk-image') == 'partition' }}"
- include: grub.yaml
  when: "{{ ironic.image.type | default('whole-disk-image') == 'partition' }}"
@ -1,8 +0,0 @@
- name: create tmp mount point for root
  file:
    state: directory
    path: "{{ tmp_rootfs_mount }}"

- name: mount user image root
  become: yes
  command: mount {{ ironic_image_target }} {{ tmp_rootfs_mount }}
@ -1,110 +0,0 @@
#!/bin/sh

# Copyright 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(pas-ha) this is mostly copied over from Ironic Python Agent
# compared to the original file in IPA,

# TODO(pas-ha) rewrite this shell script to be a proper Ansible module

# This should work with almost any image that uses MBR partitioning and
# doesn't already have 3 or more partitions -- or else you'll no longer
# be able to create extended partitions on the disk.

# Takes one argument - block device

log() {
    echo "`basename $0`: $@"
}

fail() {
    log "Error: $@"
    exit 1
}

MAX_DISK_PARTITIONS=128
MAX_MBR_SIZE_MB=2097152

DEVICE="$1"

[ -b $DEVICE ] || fail "(DEVICE) $DEVICE is not a block device"

# We need to run partx -u to ensure all partitions are visible so the
# following blkid command returns partitions just imaged to the device
partx -u $DEVICE || fail "running partx -u $DEVICE"

# todo(jayf): partx -u doesn't work in all cases, but partprobe fails in
# devstack. We run both commands now as a temporary workaround for bug 1433812
# long term, this should all be refactored into python and share code with
# the other partition-modifying code in the agent.
partprobe $DEVICE || true

# Check for preexisting partition for configdrive
EXISTING_PARTITION=`/sbin/blkid -l -o device $DEVICE -t LABEL=config-2`
if [ -z $EXISTING_PARTITION ]; then
    # Check if it is GPT partition and needs to be re-sized
    if [ `partprobe $DEVICE print 2>&1 | grep "fix the GPT to use all of the space"` ]; then
        log "Fixing GPT to use all of the space on device $DEVICE"
        sgdisk -e $DEVICE || fail "move backup GPT data structures to the end of ${DEVICE}"

        # Need to create new partition for config drive.
        # Not all images have partition numbers in sequential order;
        # there can be holes, and these holes get filled up when a new
        # partition is created.
        TEMP_DIR="$(mktemp -d)"
        EXISTING_PARTITION_LIST=$TEMP_DIR/existing_partitions
        UPDATED_PARTITION_LIST=$TEMP_DIR/updated_partitions

        gdisk -l $DEVICE | grep -A$MAX_DISK_PARTITIONS "Number Start" | grep -v "Number Start" > $EXISTING_PARTITION_LIST

        # Create small partition at the end of the device
        log "Adding configdrive partition to $DEVICE"
        sgdisk -n 0:-64MB:0 $DEVICE || fail "creating configdrive on ${DEVICE}"

        gdisk -l $DEVICE | grep -A$MAX_DISK_PARTITIONS "Number Start" | grep -v "Number Start" > $UPDATED_PARTITION_LIST

        CONFIG_PARTITION_ID=`diff $EXISTING_PARTITION_LIST $UPDATED_PARTITION_LIST | tail -n1 | awk '{print $2}'`
        ISO_PARTITION="${DEVICE}${CONFIG_PARTITION_ID}"
    else
        log "Working on MBR only device $DEVICE"

        # get total disk size, to detect if that exceeds 2TB msdos limit
        disksize_bytes=$(blockdev --getsize64 $DEVICE)
        disksize_mb=$(( ${disksize_bytes%% *} / 1024 / 1024))

        startlimit=-64MiB
        endlimit=-0
        if [ "$disksize_mb" -gt "$MAX_MBR_SIZE_MB" ]; then
            # Create small partition at 2TB limit
            startlimit=$(($MAX_MBR_SIZE_MB - 65))
            endlimit=$(($MAX_MBR_SIZE_MB - 1))
        fi

        log "Adding configdrive partition to $DEVICE"
        parted -a optimal -s -- $DEVICE mkpart primary fat32 $startlimit $endlimit || fail "creating configdrive on ${DEVICE}"

        # Find partition we just created
        # Dump all partitions, ignore empty ones, then get the last partition ID
        ISO_PARTITION=`sfdisk --dump $DEVICE | grep -v ' 0,' | tail -n1 | awk -F ':' '{print $1}' | sed -e 's/\s*$//'` || fail "finding ISO partition created on ${DEVICE}"

        # Wait for udev to pick up the partition
        udevadm settle --exit-if-exists=$ISO_PARTITION
    fi
else
    log "Existing configdrive found on ${DEVICE} at ${EXISTING_PARTITION}"
    ISO_PARTITION=$EXISTING_PARTITION
fi

# Output the created/discovered partition for configdrive
echo "configdrive $ISO_PARTITION"
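For reference, the MSDOS-label branch above places the 64 MiB configdrive partition just below the 2 TiB boundary when the disk exceeds what an MBR table can address. A minimal sketch of that arithmetic (values in MiB, not the script itself):

MAX_MBR_SIZE_MB = 2097152  # 2 TiB, the MSDOS partition-table limit

def configdrive_window(disksize_mb):
    # 64 MiB window at the end of the disk, or just below the 2 TiB
    # boundary when the disk is larger than MSDOS can address.
    if disksize_mb > MAX_MBR_SIZE_MB:
        return MAX_MBR_SIZE_MB - 65, MAX_MBR_SIZE_MB - 1
    return '-64MiB', '-0'  # parted-style "relative to end" limits

print(configdrive_window(500 * 1024))       # small disk: ('-64MiB', '-0')
print(configdrive_window(3 * 1024 * 1024))  # >2TiB: (2097087, 2097151)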
@ -1,44 +0,0 @@
- name: download configdrive data
  get_url:
    url: "{{ ironic.configdrive.location }}"
    dest: /tmp/{{ inventory_hostname }}.gz.base64
    validate_certs: "{{ ironic.image.validate_certs|default(omit) }}"
  async: 600
  poll: 15
  when: "{{ ironic.configdrive.type|default('') == 'url' }}"

- block:
    - name: copy configdrive file to node
      copy:
        src: "{{ ironic.configdrive.location }}"
        dest: /tmp/{{ inventory_hostname }}.gz.base64
    - name: remove configdrive from conductor
      delegate_to: conductor
      file:
        path: "{{ ironic.configdrive.location }}"
        state: absent
  when: "{{ ironic.configdrive.type|default('') == 'file' }}"

- name: unpack configdrive
  shell: cat /tmp/{{ inventory_hostname }}.gz.base64 | base64 --decode | gunzip > /tmp/{{ inventory_hostname }}.cndrive

- block:
    - name: prepare config drive partition
      become: yes
      script: partition_configdrive.sh {{ ironic_root_device }}
      register: configdrive_partition_output

    - name: test the output of configdrive partitioner
      assert:
        that:
          - "{{ (configdrive_partition_output.stdout_lines | last).split() | length == 2 }}"
          - "{{ (configdrive_partition_output.stdout_lines | last).split() | first == 'configdrive' }}"

    - name: store configdrive partition
      set_fact:
        ironic_configdrive_target: "{{ (configdrive_partition_output.stdout_lines | last).split() | last }}"
  when: "{{ ironic_configdrive_target is undefined }}"

- name: write configdrive
  become: yes
  command: dd if=/tmp/{{ inventory_hostname }}.cndrive of={{ ironic_configdrive_target }} bs=64K oflag=direct
@ -1,13 +0,0 @@
- name: check that downloaded image will fit into memory
  assert:
    that: "{{ ansible_memfree_mb }} >= {{ ironic.image.mem_req }}"
    msg: "The image size is too big, no free memory available"

- name: download image with checksum validation
  get_url:
    url: "{{ ironic.image.url }}"
    dest: /tmp/{{ inventory_hostname }}.img
    checksum: "{{ ironic.image.checksum|default(omit) }}"
    validate_certs: "{{ ironic.image.validate_certs|default(omit) }}"
  async: 600
  poll: 15
@ -1,7 +0,0 @@
- include: download.yaml
  when: "{{ ironic.image.disk_format != 'raw' }}"

- include: write.yaml

- include: configdrive.yaml
  when: "{{ ironic.configdrive is defined }}"
@ -1,20 +0,0 @@
- name: convert and write
  become: yes
  command: qemu-img convert -t directsync -O host_device /tmp/{{ inventory_hostname }}.img {{ ironic_image_target }}
  async: 400
  poll: 10
  when: "{{ ironic.image.disk_format != 'raw' }}"

- name: stream to target
  become: yes
  stream_url:
    url: "{{ ironic.image.url }}"
    dest: "{{ ironic_image_target }}"
    checksum: "{{ ironic.image.checksum|default(omit) }}"
    validate_certs: "{{ ironic.image.validate_certs|default(omit) }}"
  async: 600
  poll: 15
  when: "{{ ironic.image.disk_format == 'raw' }}"

- name: flush
  command: sync
@ -1,13 +0,0 @@
- include: roothints.yaml
  when: ironic.root_device_hints is defined

- set_fact:
    ironic_root_device: /dev/{{ item.key }}
  with_dict: "{{ ansible_devices }}"
  when:
    - ironic_root_device is undefined
    - item.value.host

- set_fact:
    ironic_image_target: "{{ ironic_root_device }}"
  when: ironic_image_target is undefined
@ -1,9 +0,0 @@
- name: get devices wwn facts
  facts_wwn:
    devices: "{{ ansible_devices.keys() }}"

- name: calculate root hint
  root_hints:
    root_device_hints: "{{ ironic.root_device_hints }}"
    ansible_devices: "{{ ansible_devices }}"
    ansible_devices_wwn: "{{ devices_wwn | default({}) }}"
@ -1,2 +0,0 @@
- include: parted.yaml
  when: "{{ ironic.image.type | default('whole-disk-image') == 'partition' }}"
@ -1,36 +0,0 @@
- name: erase partition table
  become: yes
  command: dd if=/dev/zero of={{ ironic_root_device }} bs=512 count=36
  when: "{{ not ironic.partition_info.preserve_ephemeral|default('no')|bool }}"

- name: run parted
  become: yes
  ironic_parted:
    device: "{{ ironic_root_device }}"
    label: "{{ ironic.partition_info.label }}"
    dry_run: "{{ ironic.partition_info.preserve_ephemeral|default('no')|bool }}"
    partitions: "{{ ironic.partition_info.partitions }}"
  register: parts

- name: reset image target to root partition
  set_fact:
    ironic_image_target: "/dev/{{ parts.created.root }}"

- name: make swap
  become: yes
  command: mkswap -L swap1 /dev/{{ parts.created.swap }}
  when: "{{ parts.created.swap is defined }}"

- name: format ephemeral partition
  become: yes
  filesystem:
    dev: "/dev/{{ parts.created.ephemeral }}"
    fstype: "{{ ironic.partition_info.ephemeral_format }}"
    force: yes
    opts: "-L ephemeral0"
  when: "{{ parts.created.ephemeral is defined and not ironic.partition_info.preserve_ephemeral|default('no')|bool }}"

- name: save block device for configdrive if partition was created
  set_fact:
    ironic_configdrive_target: "/dev/{{ parts.created.configdrive }}"
  when: "{{ parts.created.configdrive is defined }}"
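The ironic.partition_info structure consumed above is built by the driver's _parse_partitioning_info; a representative value, taken from the unit-test expectations further down in this diff (sizes come from the test's instance_info):

# Representative ironic.partition_info payload for a 5 GiB root on an
# msdos-labeled disk, as asserted in the unit tests below.
partition_info = {
    'label': 'msdos',
    'partitions': [
        {'unit': 'MiB',
         'size': 5120,
         'name': 'root',
         'flags': {'boot': 'yes'}},
    ],
}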
@ -1,6 +0,0 @@
- name: soft power off
  become: yes
  shell: sleep 5 && poweroff
  async: 1
  poll: 0
  ignore_errors: true
@ -1,10 +0,0 @@
- name: waiting for node
  become: false
  delegate_to: conductor
  wait_for:
    host: "{{ ansible_ssh_host }}"
    port: 22
    search_regex: OpenSSH
    delay: 10
    timeout: 400
    connect_timeout: 15
@ -1,6 +0,0 @@
---
- include: add-ironic-nodes.yaml

- hosts: ironic
  roles:
    - shutdown
@ -1 +0,0 @@
ansible>=2.1,!=2.2.1.0,!=2.1.4.0
@ -18,7 +18,6 @@ from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from oslo_log import log as logging

from ironic_staging_drivers.ansible import deploy as ansible_deploy
from ironic_staging_drivers.libvirt import power

LOG = logging.getLogger(__name__)
@ -82,15 +81,8 @@ class LibvirtHardware(generic.GenericHardware):
    """Libvirt hardware type.

    Uses Libvirt for power and management.
    Also supports ansible-deploy.
    """

    @property
    def supported_deploy_interfaces(self):
        """List of supported deploy interfaces."""
        return (super(LibvirtHardware, self).supported_deploy_interfaces +
                [ansible_deploy.AnsibleDeploy])

    @property
    def supported_management_interfaces(self):
        """List of supported management interfaces."""
@ -1,996 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import states
from ironic.common import utils as com_utils
from ironic.conductor import task_manager
from ironic.conductor import utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import pxe
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as object_utils
from ironic_lib import utils as irlib_utils
import mock
from oslo_concurrency import processutils
import six

from ironic_staging_drivers.ansible import deploy as ansible_deploy


INSTANCE_INFO = {
    'image_source': 'fake-image',
    'image_url': 'http://image',
    'image_checksum': 'checksum',
    'image_disk_format': 'qcow2',
    'root_mb': 5120,
    'swap_mb': 0,
    'ephemeral_mb': 0
}

DRIVER_INFO = {
    'deploy_kernel': 'glance://deploy_kernel_uuid',
    'deploy_ramdisk': 'glance://deploy_ramdisk_uuid',
    'ansible_deploy_username': 'test',
    'ansible_deploy_key_file': '/path/key',
}
DRIVER_INTERNAL_INFO = {
    'ansible_cleaning_ip': '127.0.0.1',
    'is_whole_disk_image': True,
    'clean_steps': []
}


class TestAnsibleMethods(db_base.DbTestCase):
    def setUp(self):
        super(TestAnsibleMethods, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake_ansible')
        node = {
            'driver': 'fake_ansible',
            'instance_info': INSTANCE_INFO,
            'driver_info': DRIVER_INFO,
            'driver_internal_info': DRIVER_INTERNAL_INFO,
        }
        self.node = object_utils.create_test_node(self.context, **node)

    def test__parse_ansible_driver_info(self):
        playbook, user, key = ansible_deploy._parse_ansible_driver_info(
            self.node, 'deploy')
        self.assertEqual(ansible_deploy.DEFAULT_PLAYBOOKS['deploy'], playbook)
        self.assertEqual('test', user)
        self.assertEqual('/path/key', key)

    def test__parse_ansible_driver_info_no_playbook(self):
        self.assertRaises(exception.IronicException,
                          ansible_deploy._parse_ansible_driver_info,
                          self.node, 'test')

    @mock.patch.object(dhcp_factory.DHCPFactory, '_dhcp_provider',
                       autospec=True)
    def test__get_node_ip_dhcp(self, dhcp_mock):
        dhcp_mock.get_ip_addresses.return_value = ['ip']
        with task_manager.acquire(self.context, self.node.uuid) as task:
            ansible_deploy._get_node_ip_dhcp(task)
            dhcp_mock.get_ip_addresses.assert_called_once_with(task)

    @mock.patch.object(dhcp_factory.DHCPFactory, '_dhcp_provider',
                       autospec=True)
    def test__get_node_ip_dhcp_no_ip(self, dhcp_mock):
        dhcp_mock.get_ip_addresses.return_value = []
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.FailedToGetIPAddressOnPort,
                              ansible_deploy._get_node_ip_dhcp, task)

    @mock.patch.object(dhcp_factory.DHCPFactory, '_dhcp_provider',
                       autospec=True)
    def test__get_node_ip_dhcp_multiple_ip(self, dhcp_mock):
        dhcp_mock.get_ip_addresses.return_value = ['ip1', 'ip2']
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.InstanceDeployFailure,
                              ansible_deploy._get_node_ip_dhcp, task)

    def test__get_node_ip_heartbeat(self):
        di_info = self.node.driver_internal_info
        di_info['agent_url'] = 'http://1.2.3.4:5678'
        self.node.driver_internal_info = di_info
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual('1.2.3.4',
                             ansible_deploy._get_node_ip_heartbeat(task))

    @mock.patch.object(ansible_deploy, '_get_node_ip_heartbeat',
                       return_value='127.0.0.1', autospec=True)
    @mock.patch.object(ansible_deploy, '_get_node_ip_dhcp',
                       return_value='127.0.0.1', autospec=True)
    def test__get_node_ip_callback(self, ip_dhcp_mock, ip_agent_mock):
        self.config(group='ansible', use_ramdisk_callback=True)
        with task_manager.acquire(self.context, self.node.uuid) as task:
            res = ansible_deploy._get_node_ip(task)
            self.assertEqual(0, ip_dhcp_mock.call_count)
            ip_agent_mock.assert_called_once_with(task)
            self.assertEqual('127.0.0.1', res)

    @mock.patch.object(ansible_deploy, '_get_node_ip_heartbeat',
                       return_value='127.0.0.1', autospec=True)
    @mock.patch.object(ansible_deploy, '_get_node_ip_dhcp',
                       return_value='127.0.0.1', autospec=True)
    def test__get_node_ip_no_callback(self, ip_dhcp_mock, ip_agent_mock):
        self.config(group='ansible', use_ramdisk_callback=False)
        with task_manager.acquire(self.context, self.node.uuid) as task:
            res = ansible_deploy._get_node_ip(task)
            self.assertEqual(0, ip_agent_mock.call_count)
            ip_dhcp_mock.assert_called_once_with(task)
            self.assertEqual('127.0.0.1', res)

    @mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
                       autospec=True)
    def test__run_playbook(self, execute_mock):
        self.config(group='ansible', playbooks_path='/path/to/playbooks')
        self.config(group='ansible', config_file_path='/path/to/config')
        self.config(group='ansible', verbosity=3)
        self.config(group='ansible', ansible_extra_args='--timeout=100')
        extra_vars = {'foo': 'bar'}

        ansible_deploy._run_playbook('deploy', extra_vars, '/path/to/key',
                                     tags=['spam'], notags=['ham'])

        execute_mock.assert_called_once_with(
            'env', 'ANSIBLE_CONFIG=/path/to/config',
            'ansible-playbook', '/path/to/playbooks/deploy', '-i',
            ansible_deploy.INVENTORY_FILE, '-e', '{"ironic": {"foo": "bar"}}',
            '--tags=spam', '--skip-tags=ham',
            '--private-key=/path/to/key', '-vvv', '--timeout=100')
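The command line asserted above can be sketched as plain string assembly; this illustration mirrors the argument order in the test, with the config paths taken from the test fixtures (it is not the driver's own code):

import json

def build_playbook_cmd(playbook, extra_vars, key, tags=None, notags=None,
                       config='/path/to/config',
                       playbooks_path='/path/to/playbooks',
                       inventory='inventory', verbosity=None,
                       extra_args=None):
    # Mirrors the argument order asserted in test__run_playbook.
    cmd = ['env', 'ANSIBLE_CONFIG=%s' % config,
           'ansible-playbook', '%s/%s' % (playbooks_path, playbook),
           '-i', inventory,
           '-e', json.dumps({'ironic': extra_vars})]
    if tags:
        cmd.append('--tags=%s' % ','.join(tags))
    if notags:
        cmd.append('--skip-tags=%s' % ','.join(notags))
    cmd.append('--private-key=%s' % key)
    if verbosity:
        cmd.append('-' + 'v' * verbosity)
    if extra_args:
        cmd.append(extra_args)
    return cmd

print(build_playbook_cmd('deploy', {'foo': 'bar'}, '/path/to/key',
                         tags=['spam'], notags=['ham'], verbosity=3,
                         extra_args='--timeout=100'))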
    @mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
                       autospec=True)
    def test__run_playbook_default_verbosity_nodebug(self, execute_mock):
        self.config(group='ansible', playbooks_path='/path/to/playbooks')
        self.config(group='ansible', config_file_path='/path/to/config')
        self.config(debug=False)
        extra_vars = {'foo': 'bar'}

        ansible_deploy._run_playbook('deploy', extra_vars, '/path/to/key')

        execute_mock.assert_called_once_with(
            'env', 'ANSIBLE_CONFIG=/path/to/config',
            'ansible-playbook', '/path/to/playbooks/deploy', '-i',
            ansible_deploy.INVENTORY_FILE, '-e', '{"ironic": {"foo": "bar"}}',
            '--private-key=/path/to/key')

    @mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
                       autospec=True)
    def test__run_playbook_default_verbosity_debug(self, execute_mock):
        self.config(group='ansible', playbooks_path='/path/to/playbooks')
        self.config(group='ansible', config_file_path='/path/to/config')
        self.config(debug=True)
        extra_vars = {'foo': 'bar'}

        ansible_deploy._run_playbook('deploy', extra_vars, '/path/to/key')

        execute_mock.assert_called_once_with(
            'env', 'ANSIBLE_CONFIG=/path/to/config',
            'ansible-playbook', '/path/to/playbooks/deploy', '-i',
            ansible_deploy.INVENTORY_FILE, '-e', '{"ironic": {"foo": "bar"}}',
            '--private-key=/path/to/key', '-vvvv')

    @mock.patch.object(com_utils, 'execute',
                       side_effect=processutils.ProcessExecutionError(
                           description='VIKINGS!'),
                       autospec=True)
    def test__run_playbook_fail(self, execute_mock):
        self.config(group='ansible', playbooks_path='/path/to/playbooks')
        self.config(group='ansible', config_file_path='/path/to/config')
        self.config(debug=False)
        extra_vars = {'foo': 'bar'}

        exc = self.assertRaises(exception.InstanceDeployFailure,
                                ansible_deploy._run_playbook,
                                'deploy', extra_vars, '/path/to/key')
        self.assertIn('VIKINGS!', six.text_type(exc))
        execute_mock.assert_called_once_with(
            'env', 'ANSIBLE_CONFIG=/path/to/config',
            'ansible-playbook', '/path/to/playbooks/deploy', '-i',
            ansible_deploy.INVENTORY_FILE, '-e', '{"ironic": {"foo": "bar"}}',
            '--private-key=/path/to/key')

    def test__parse_partitioning_info_root_msdos(self):
        expected_info = {
            'partition_info': {
                'label': 'msdos',
                'partitions': [
                    {'unit': 'MiB',
                     'size': INSTANCE_INFO['root_mb'],
                     'name': 'root',
                     'flags': {'boot': 'yes'}}
                ]}}

        i_info = ansible_deploy._parse_partitioning_info(self.node)

        self.assertEqual(expected_info, i_info)

    def test__parse_partitioning_info_all_gpt(self):
        in_info = dict(INSTANCE_INFO)
        in_info['swap_mb'] = 128
        in_info['ephemeral_mb'] = 256
        in_info['ephemeral_format'] = 'ext4'
        in_info['preserve_ephemeral'] = True
        in_info['configdrive'] = 'some-fake-user-data'
        in_info['capabilities'] = {'disk_label': 'gpt'}
        self.node.instance_info = in_info
        self.node.save()

        expected_info = {
            'partition_info': {
                'label': 'gpt',
                'ephemeral_format': 'ext4',
                'preserve_ephemeral': 'yes',
                'partitions': [
                    {'unit': 'MiB',
                     'size': 1,
                     'name': 'bios',
                     'flags': {'bios_grub': 'yes'}},
                    {'unit': 'MiB',
                     'size': 256,
                     'name': 'ephemeral',
                     'format': 'ext4'},
                    {'unit': 'MiB',
                     'size': 128,
                     'name': 'swap',
                     'format': 'linux-swap'},
                    {'unit': 'MiB',
                     'size': 64,
                     'name': 'configdrive',
                     'format': 'fat32'},
                    {'unit': 'MiB',
                     'size': INSTANCE_INFO['root_mb'],
                     'name': 'root'}
                ]}}

        i_info = ansible_deploy._parse_partitioning_info(self.node)

        self.assertEqual(expected_info, i_info)

    @mock.patch.object(ansible_deploy.images, 'download_size', autospec=True)
    def test__calculate_memory_req(self, image_mock):
        self.config(group='ansible', extra_memory=1)
        image_mock.return_value = 2000000  # < 2MiB

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(2, ansible_deploy._calculate_memory_req(task))
            image_mock.assert_called_once_with(task.context, 'fake-image')

    def test__get_configdrive_path(self):
        self.config(tempdir='/path/to/tmpdir')
        self.assertEqual('/path/to/tmpdir/spam.cndrive',
                         ansible_deploy._get_configdrive_path('spam'))

    def test__prepare_extra_vars(self):
        host_list = [('fake-uuid', '1.2.3.4', 'spam', 'ham'),
                     ('other-uuid', '5.6.7.8', 'eggs', 'vikings')]
        ansible_vars = {"foo": "bar"}
        self.assertEqual(
            {"nodes": [
                {"name": "fake-uuid", "ip": '1.2.3.4',
                 "user": "spam", "extra": "ham"},
                {"name": "other-uuid", "ip": '5.6.7.8',
                 "user": "eggs", "extra": "vikings"}],
                "foo": "bar"},
            ansible_deploy._prepare_extra_vars(host_list, ansible_vars))
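The host-list-to-extra-vars mapping asserted above is simple enough to sketch directly; a hypothetical re-implementation matching the expected structure (illustration only, not the driver's code):

def prepare_extra_vars(host_list, variables=None):
    # Each host tuple is (uuid, ip, user, extra), flattened into the
    # "nodes" list the playbooks consume; extra variables are merged in.
    nodes = [{'name': uuid, 'ip': ip, 'user': user, 'extra': extra}
             for uuid, ip, user, extra in host_list]
    extra = {'nodes': nodes}
    extra.update(variables or {})
    return extra

print(prepare_extra_vars([('fake-uuid', '1.2.3.4', 'spam', 'ham')],
                         {'foo': 'bar'}))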
    def test__parse_root_device_hints(self):
        hints = {"wwn": "fake wwn", "size": "12345", "rotational": True}
        expected = {"wwn": "fake wwn", "size": 12345, "rotational": True}
        props = self.node.properties
        props['root_device'] = hints
        self.node.properties = props
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(
                expected, ansible_deploy._parse_root_device_hints(task.node))

    def test__parse_root_device_hints_fail_advanced(self):
        hints = {"wwn": "s!= fake wwn",
                 "size": ">= 12345",
                 "name": "<or> spam <or> ham",
                 "rotational": True}
        expected = {"wwn": "s!= fake%20wwn",
                    "name": "<or> spam <or> ham",
                    "size": ">= 12345"}
        props = self.node.properties
        props['root_device'] = hints
        self.node.properties = props
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            exc = self.assertRaises(
                exception.InvalidParameterValue,
                ansible_deploy._parse_root_device_hints, task.node)
            for key, value in expected.items():
                self.assertIn(six.text_type(key), six.text_type(exc))
                self.assertIn(six.text_type(value), six.text_type(exc))

    @mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
                       return_value=2000)
    def test__prepare_variables(self, mem_req_mock):
        expected = {"image": {"url": "http://image",
                              "validate_certs": "yes",
                              "source": "fake-image",
                              "mem_req": 2000,
                              "disk_format": "qcow2",
                              "checksum": "md5:checksum"}}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(expected,
                             ansible_deploy._prepare_variables(task))

    @mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
                       return_value=2000)
    def test__prepare_variables_root_device_hints(self, mem_req_mock):
        props = self.node.properties
        props['root_device'] = {"wwn": "fake-wwn"}
        self.node.properties = props
        self.node.save()
        expected = {"image": {"url": "http://image",
                              "validate_certs": "yes",
                              "source": "fake-image",
                              "mem_req": 2000,
                              "disk_format": "qcow2",
                              "checksum": "md5:checksum"},
                    "root_device_hints": {"wwn": "fake-wwn"}}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(expected,
                             ansible_deploy._prepare_variables(task))

    @mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
                       return_value=2000)
    def test__prepare_variables_noglance(self, mem_req_mock):
        self.config(image_store_insecure=True, group='ansible')
        i_info = self.node.instance_info
        i_info['image_checksum'] = 'sha256:checksum'
        self.node.instance_info = i_info
        self.node.save()
        expected = {"image": {"url": "http://image",
                              "validate_certs": "no",
                              "source": "fake-image",
                              "mem_req": 2000,
                              "disk_format": "qcow2",
                              "checksum": "sha256:checksum"}}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(expected,
                             ansible_deploy._prepare_variables(task))

    @mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
                       return_value=2000)
    def test__prepare_variables_configdrive_url(self, mem_req_mock):
        i_info = self.node.instance_info
        i_info['configdrive'] = 'http://configdrive_url'
        self.node.instance_info = i_info
        self.node.save()
        expected = {"image": {"url": "http://image",
                              "validate_certs": "yes",
                              "source": "fake-image",
                              "mem_req": 2000,
                              "disk_format": "qcow2",
                              "checksum": "md5:checksum"},
                    'configdrive': {'type': 'url',
                                    'location': 'http://configdrive_url'}}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(expected,
                             ansible_deploy._prepare_variables(task))

    @mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
                       return_value=2000)
    def test__prepare_variables_configdrive_file(self, mem_req_mock):
        i_info = self.node.instance_info
        i_info['configdrive'] = 'fake-content'
        self.node.instance_info = i_info
        self.node.save()
        self.config(tempdir='/path/to/tmpfiles')
        expected = {"image": {"url": "http://image",
                              "validate_certs": "yes",
                              "source": "fake-image",
                              "mem_req": 2000,
                              "disk_format": "qcow2",
                              "checksum": "md5:checksum"},
                    'configdrive': {'type': 'file',
                                    'location': '/path/to/tmpfiles/%s.cndrive'
                                                % self.node.uuid}}
        with mock.patch.object(ansible_deploy, 'open', mock.mock_open(),
                               create=True) as open_mock:
            with task_manager.acquire(self.context, self.node.uuid) as task:
                self.assertEqual(expected,
                                 ansible_deploy._prepare_variables(task))
            open_mock.assert_has_calls((
                mock.call('/path/to/tmpfiles/%s.cndrive' % self.node.uuid,
                          'w'),
                mock.call().__enter__(),
                mock.call().write('fake-content'),
                mock.call().__exit__(None, None, None)))

    def test__validate_clean_steps(self):
        steps = [{"interface": "deploy",
                  "name": "foo",
                  "args": {"spam": {"required": True, "value": "ham"}}},
                 {"name": "bar",
                  "interface": "deploy"}]
        self.assertIsNone(ansible_deploy._validate_clean_steps(
            steps, self.node.uuid))

    def test__validate_clean_steps_missing(self):
        steps = [{"name": "foo",
                  "interface": "deploy",
                  "args": {"spam": {"value": "ham"},
                           "ham": {"required": True}}},
                 {"name": "bar"},
                 {"interface": "deploy"}]
        exc = self.assertRaises(exception.NodeCleaningFailure,
                                ansible_deploy._validate_clean_steps,
                                steps, self.node.uuid)
        self.assertIn("name foo, field ham.value", six.text_type(exc))
        self.assertIn("name bar, field interface", six.text_type(exc))
        self.assertIn("name undefined, field name", six.text_type(exc))

    def test__validate_clean_steps_names_not_unique(self):
        steps = [{"name": "foo",
                  "interface": "deploy"},
                 {"name": "foo",
                  "interface": "deploy"}]
        exc = self.assertRaises(exception.NodeCleaningFailure,
                                ansible_deploy._validate_clean_steps,
                                steps, self.node.uuid)
        self.assertIn("unique names", six.text_type(exc))

    @mock.patch.object(ansible_deploy.yaml, 'safe_load', autospec=True)
    def test__get_clean_steps(self, load_mock):
        steps = [{"interface": "deploy",
                  "name": "foo",
                  "args": {"spam": {"required": True, "value": "ham"}}},
                 {"name": "bar",
                  "interface": "deploy",
                  "priority": 100}]
        load_mock.return_value = steps
        expected = [{"interface": "deploy",
                     "step": "foo",
                     "priority": 10,
                     "abortable": False,
                     "argsinfo": {"spam": {"required": True}},
                     "args": {"spam": "ham"}},
                    {"interface": "deploy",
                     "step": "bar",
                     "priority": 100,
                     "abortable": False,
                     "argsinfo": {},
                     "args": {}}]
        d_info = self.node.driver_info
        d_info['ansible_clean_steps_config'] = 'custom_clean'
        self.node.driver_info = d_info
        self.node.save()
        self.config(group='ansible', playbooks_path='/path/to/playbooks')

        with mock.patch.object(ansible_deploy, 'open', mock.mock_open(),
                               create=True) as open_mock:
            self.assertEqual(
                expected,
                ansible_deploy._get_clean_steps(
                    self.node, interface="deploy",
                    override_priorities={"foo": 10}))
            open_mock.assert_has_calls((
                mock.call('/path/to/playbooks/custom_clean'),))
            load_mock.assert_called_once_with(
                open_mock().__enter__.return_value)
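The YAML-entry-to-internal-step conversion asserted in test__get_clean_steps can be sketched as follows; this is a hypothetical re-implementation inferred from the test's expected values, not the driver's actual code:

# Hypothetical clean-steps config entry and the step record the loader
# is expected to produce (priority overridden to 10 for "foo").
step_yaml = {'interface': 'deploy', 'name': 'foo',
             'args': {'spam': {'required': True, 'value': 'ham'}}}

def to_step(entry, override=None):
    args = entry.get('args', {})
    return {'interface': entry['interface'],
            'step': entry['name'],
            'priority': (override or {}).get(entry['name'],
                                             entry.get('priority', 0)),
            'abortable': False,
            # argsinfo keeps everything but the literal 'value'
            'argsinfo': {k: {kk: vv for kk, vv in v.items() if kk != 'value'}
                         for k, v in args.items()},
            'args': {k: v['value'] for k, v in args.items() if 'value' in v}}

print(to_step(step_yaml, override={'foo': 10}))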
class TestAnsibleDeploy(db_base.DbTestCase):
    def setUp(self):
        super(TestAnsibleDeploy, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake_ansible')
        self.driver = ansible_deploy.AnsibleDeploy()
        node = {
            'driver': 'fake_ansible',
            'instance_info': INSTANCE_INFO,
            'driver_info': DRIVER_INFO,
            'driver_internal_info': DRIVER_INTERNAL_INFO,
        }
        self.node = object_utils.create_test_node(self.context, **node)

    def test_get_properties(self):
        self.assertEqual(
            set(list(ansible_deploy.COMMON_PROPERTIES) +
                ['deploy_forces_oob_reboot']),
            set(self.driver.get_properties()))

    @mock.patch.object(deploy_utils, 'check_for_missing_params',
                       autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
    def test_validate(self, pxe_boot_validate_mock, check_params_mock):
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            self.driver.validate(task)
            pxe_boot_validate_mock.assert_called_once_with(
                task.driver.boot, task)
            check_params_mock.assert_called_once_with(
                {'instance_info.image_source': INSTANCE_INFO['image_source']},
                mock.ANY)

    @mock.patch.object(deploy_utils, 'get_boot_option',
                       return_value='netboot', autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
    def test_validate_not_iwdi_netboot(self, pxe_boot_validate_mock,
                                       get_boot_mock):
        driver_internal_info = dict(DRIVER_INTERNAL_INFO)
        driver_internal_info['is_whole_disk_image'] = False
        self.node.driver_internal_info = driver_internal_info
        self.node.save()

        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.driver.validate, task)
            pxe_boot_validate_mock.assert_called_once_with(
                task.driver.boot, task)
            get_boot_mock.assert_called_once_with(task.node)

    @mock.patch.object(utils, 'node_power_action', autospec=True)
    def test_deploy_wait(self, power_mock):
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            driver_return = self.driver.deploy(task)
            self.assertEqual(driver_return, states.DEPLOYWAIT)
            power_mock.assert_called_once_with(task, states.REBOOT)

    @mock.patch.object(ansible_deploy, '_get_node_ip_dhcp',
                       return_value='127.0.0.1', autospec=True)
    @mock.patch.object(utils, 'node_power_action', autospec=True)
    def test_deploy_no_callback(self, power_mock, get_ip_mock):
        self.config(group='ansible', use_ramdisk_callback=False)
        with mock.patch.multiple(self.driver,
                                 _ansible_deploy=mock.DEFAULT,
                                 reboot_to_instance=mock.DEFAULT) as moks:
            with task_manager.acquire(
                    self.context, self.node['uuid'], shared=False) as task:
                driver_return = self.driver.deploy(task)
                self.assertEqual(driver_return, states.DEPLOYDONE)
                power_mock.assert_called_once_with(task, states.REBOOT)
                get_ip_mock.assert_called_once_with(task)
                moks['_ansible_deploy'].assert_called_once_with(task,
                                                                '127.0.0.1')
                moks['reboot_to_instance'].assert_called_once_with(task)

    @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
    @mock.patch.object(ansible_deploy, '_get_node_ip_dhcp',
                       return_value='127.0.0.1', autospec=True)
    @mock.patch.object(utils, 'node_power_action', autospec=True)
    def test_deploy_no_callback_fail(self, power_mock, get_ip_mock, fail_mock):
        self.config(group='ansible', use_ramdisk_callback=False)
        with mock.patch.object(self.driver, '_ansible_deploy',
                               side_effect=ansible_deploy.PlaybookNotFound(
                                   'deploy')):
            with task_manager.acquire(
                    self.context, self.node.uuid, shared=False) as task:
                self.driver.deploy(task)
                self.driver._ansible_deploy.assert_called_once_with(
                    task, '127.0.0.1')
                fail_mock.assert_called_once_with(task, mock.ANY,
                                                  collect_logs=False)

    @mock.patch.object(utils, 'node_power_action', autospec=True)
    def test_tear_down(self, power_mock):
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            driver_return = self.driver.tear_down(task)
            power_mock.assert_called_once_with(task, states.POWER_OFF)
            self.assertEqual(driver_return, states.DELETED)

    @mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
                return_value={'op1': 'test1'}, autospec=True)
    @mock.patch('ironic.drivers.modules.deploy_utils.'
                'build_instance_info_for_deploy',
                return_value={'test': 'test'}, autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
    def test_prepare(self, pxe_prepare_ramdisk_mock,
                     build_instance_info_mock, build_options_mock):
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            task.node.provision_state = states.DEPLOYING

            self.driver.prepare(task)

            build_instance_info_mock.assert_called_once_with(task)
            build_options_mock.assert_called_once_with(task.node)
            pxe_prepare_ramdisk_mock.assert_called_once_with(
                task, {'op1': 'test1'})

        self.node.refresh()
        self.assertEqual('test', self.node.instance_info['test'])

    @mock.patch.object(ansible_deploy, '_get_configdrive_path',
                       return_value='/path/test', autospec=True)
    @mock.patch.object(irlib_utils, 'unlink_without_raise', autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
    def test_clean_up(self, pxe_clean_up_mock, unlink_mock,
                      get_cfdrive_path_mock):
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            self.driver.clean_up(task)
            pxe_clean_up_mock.assert_called_once_with(task)
            get_cfdrive_path_mock.assert_called_once_with(self.node['uuid'])
            unlink_mock.assert_called_once_with('/path/test')

    @mock.patch.object(ansible_deploy, '_get_clean_steps', autospec=True)
    def test_get_clean_steps(self, get_clean_steps_mock):
        mock_steps = [{'priority': 10, 'interface': 'deploy',
                       'step': 'erase_devices'},
                      {'priority': 99, 'interface': 'deploy',
                       'step': 'erase_devices_metadata'},
                      ]
        get_clean_steps_mock.return_value = mock_steps
        with task_manager.acquire(self.context, self.node.uuid) as task:
            steps = self.driver.get_clean_steps(task)
            get_clean_steps_mock.assert_called_once_with(
                task.node, interface='deploy',
                override_priorities={
                    'erase_devices': None,
                    'erase_devices_metadata': None})
        self.assertEqual(mock_steps, steps)

    @mock.patch.object(ansible_deploy, '_get_clean_steps', autospec=True)
    def test_get_clean_steps_priority(self, mock_get_clean_steps):
        self.config(erase_devices_priority=9, group='deploy')
        self.config(erase_devices_metadata_priority=98, group='deploy')
        mock_steps = [{'priority': 9, 'interface': 'deploy',
                       'step': 'erase_devices'},
                      {'priority': 98, 'interface': 'deploy',
                       'step': 'erase_devices_metadata'},
                      ]
        mock_get_clean_steps.return_value = mock_steps

        with task_manager.acquire(self.context, self.node.uuid) as task:
            steps = self.driver.get_clean_steps(task)
            mock_get_clean_steps.assert_called_once_with(
                task.node, interface='deploy',
                override_priorities={'erase_devices': 9,
                                     'erase_devices_metadata': 98})
        self.assertEqual(mock_steps, steps)

    @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
    @mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
    @mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
                       return_value=('test_pl', 'test_u', 'test_k'),
                       autospec=True)
    def test_execute_clean_step(self, parse_driver_info_mock,
                                prepare_extra_mock, run_playbook_mock):

        step = {'priority': 10, 'interface': 'deploy',
                'step': 'erase_devices', 'args': {'tags': ['clean']}}
        ironic_nodes = {
            'ironic_nodes': [(self.node['uuid'],
                              DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
                              'test_u', {})]}
        prepare_extra_mock.return_value = ironic_nodes
        di_info = self.node.driver_internal_info
        di_info['agent_url'] = 'http://127.0.0.1'
        self.node.driver_internal_info = di_info
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.driver.execute_clean_step(task, step)

            parse_driver_info_mock.assert_called_once_with(
                task.node, action='clean')
            prepare_extra_mock.assert_called_once_with(
                ironic_nodes['ironic_nodes'])
            run_playbook_mock.assert_called_once_with(
                'test_pl', ironic_nodes, 'test_k', tags=['clean'])

    @mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
                       return_value=('test_pl', 'test_u', 'test_k'),
                       autospec=True)
    @mock.patch.object(utils, 'cleaning_error_handler', autospec=True)
    @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
    @mock.patch.object(ansible_deploy, 'LOG', autospec=True)
    def test_execute_clean_step_no_success_log(
            self, log_mock, run_mock, utils_mock, parse_driver_info_mock):

        run_mock.side_effect = exception.InstanceDeployFailure('Boom')
        step = {'priority': 10, 'interface': 'deploy',
                'step': 'erase_devices', 'args': {'tags': ['clean']}}
        di_info = self.node.driver_internal_info
        di_info['agent_url'] = 'http://127.0.0.1'
        self.node.driver_internal_info = di_info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.driver.execute_clean_step(task, step)
            log_mock.error.assert_called_once_with(
                mock.ANY, {'node': task.node['uuid'],
                           'step': 'erase_devices'})
            utils_mock.assert_called_once_with(task, 'Boom')
            self.assertFalse(log_mock.info.called)

    @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
    @mock.patch.object(utils, 'set_node_cleaning_steps', autospec=True)
    @mock.patch.object(utils, 'node_power_action', autospec=True)
    @mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
                return_value={'op1': 'test1'}, autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
    def test_prepare_cleaning_callback(
            self, prepare_ramdisk_mock, buid_options_mock, power_action_mock,
            set_node_cleaning_steps, run_playbook_mock):
        step = {'priority': 10, 'interface': 'deploy',
                'step': 'erase_devices', 'tags': ['clean']}
        driver_internal_info = dict(DRIVER_INTERNAL_INFO)
        driver_internal_info['clean_steps'] = [step]
        self.node.driver_internal_info = driver_internal_info
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.network.add_cleaning_network = mock.Mock()

            state = self.driver.prepare_cleaning(task)

            set_node_cleaning_steps.assert_called_once_with(task)
            task.driver.network.add_cleaning_network.assert_called_once_with(
                task)
            buid_options_mock.assert_called_once_with(task.node)
            prepare_ramdisk_mock.assert_called_once_with(
                task, {'op1': 'test1'})
            power_action_mock.assert_called_once_with(task, states.REBOOT)
            self.assertFalse(run_playbook_mock.called)
            self.assertEqual(states.CLEANWAIT, state)

    @mock.patch.object(utils, 'set_node_cleaning_steps', autospec=True)
    def test_prepare_cleaning_callback_no_steps(self,
                                                set_node_cleaning_steps):
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.network.add_cleaning_network = mock.Mock()

            self.driver.prepare_cleaning(task)

            set_node_cleaning_steps.assert_called_once_with(task)
            self.assertFalse(task.driver.network.add_cleaning_network.called)

    @mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
    @mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
                       return_value=('test_pl', 'test_u', 'test_k'),
                       autospec=True)
    @mock.patch.object(ansible_deploy, '_get_node_ip_dhcp',
                       return_value='127.0.0.1', autospec=True)
    @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
    @mock.patch.object(utils, 'node_power_action', autospec=True)
    @mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
                return_value={'op1': 'test1'}, autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
    def test_prepare_cleaning(self, prepare_ramdisk_mock, buid_options_mock,
                              power_action_mock, run_playbook_mock,
                              get_ip_mock, parse_driver_info_mock,
                              prepare_extra_mock):
        self.config(group='ansible', use_ramdisk_callback=False)
        ironic_nodes = {
            'ironic_nodes': [(self.node['uuid'],
                              '127.0.0.1',
                              'test_u', {})]}
        prepare_extra_mock.return_value = ironic_nodes

        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.network.add_cleaning_network = mock.Mock()

            state = self.driver.prepare_cleaning(task)

            task.driver.network.add_cleaning_network.assert_called_once_with(
                task)
            buid_options_mock.assert_called_once_with(task.node)
            prepare_ramdisk_mock.assert_called_once_with(
                task, {'op1': 'test1'})
            power_action_mock.assert_called_once_with(task, states.REBOOT)
            get_ip_mock.assert_called_once_with(task)
            parse_driver_info_mock.assert_called_once_with(
                task.node, action='clean')
            prepare_extra_mock.assert_called_once_with(
                ironic_nodes['ironic_nodes'])
            run_playbook_mock.assert_called_once_with(
                'test_pl', ironic_nodes, 'test_k', tags=['wait'])
            self.assertIsNone(state)

    @mock.patch.object(utils, 'node_power_action', autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
    def test_tear_down_cleaning(self, clean_ramdisk_mock, power_action_mock):
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.network.remove_cleaning_network = mock.Mock()

            self.driver.tear_down_cleaning(task)

            power_action_mock.assert_called_once_with(task, states.POWER_OFF)
            clean_ramdisk_mock.assert_called_once_with(task)
            (task.driver.network.remove_cleaning_network
                .assert_called_once_with(task))

    @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
    @mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
    @mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
                       return_value=('test_pl', 'test_u', 'test_k'),
                       autospec=True)
    @mock.patch.object(ansible_deploy, '_parse_partitioning_info',
                       autospec=True)
    @mock.patch.object(ansible_deploy, '_prepare_variables', autospec=True)
    def test__ansible_deploy(self, prepare_vars_mock, parse_part_info_mock,
                             parse_dr_info_mock, prepare_extra_mock,
                             run_playbook_mock):
        ironic_nodes = {
            'ironic_nodes': [(self.node['uuid'],
                              DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
                              'test_u')]}
        prepare_extra_mock.return_value = ironic_nodes
        _vars = {
            'url': 'image_url',
            'checksum': 'aa'}
        prepare_vars_mock.return_value = _vars

        driver_internal_info = dict(DRIVER_INTERNAL_INFO)
        driver_internal_info['is_whole_disk_image'] = False
        self.node.driver_internal_info = driver_internal_info
        self.node.extra = {'ham': 'spam'}
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.driver._ansible_deploy(task, '127.0.0.1')

            prepare_vars_mock.assert_called_once_with(task)
            parse_part_info_mock.assert_called_once_with(task.node)
            parse_dr_info_mock.assert_called_once_with(task.node)
            prepare_extra_mock.assert_called_once_with(
                [(self.node['uuid'], '127.0.0.1', 'test_u', {'ham': 'spam'})],
                variables=_vars)
            run_playbook_mock.assert_called_once_with(
                'test_pl', {'ironic_nodes': [
                    (self.node['uuid'],
                     DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
                     'test_u')]}, 'test_k',
                notags=['wait'])

    @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
    @mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
    @mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
                       return_value=('test_pl', 'test_u', 'test_k'),
                       autospec=True)
    @mock.patch.object(ansible_deploy, '_parse_partitioning_info',
                       autospec=True)
    @mock.patch.object(ansible_deploy, '_prepare_variables', autospec=True)
    def test__ansible_deploy_iwdi(self, prepare_vars_mock,
                                  parse_part_info_mock, parse_dr_info_mock,
                                  prepare_extra_mock, run_playbook_mock):
        ironic_nodes = {
            'ironic_nodes': [(self.node['uuid'],
                              DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
                              'test_u')]}
        prepare_extra_mock.return_value = ironic_nodes
        _vars = {
            'url': 'image_url',
            'checksum': 'aa'}
        prepare_vars_mock.return_value = _vars
        driver_internal_info = self.node.driver_internal_info
        driver_internal_info['is_whole_disk_image'] = True
        self.node.driver_internal_info = driver_internal_info
        self.node.extra = {'ham': 'spam'}
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.driver._ansible_deploy(task, '127.0.0.1')

            prepare_vars_mock.assert_called_once_with(task)
            self.assertFalse(parse_part_info_mock.called)
            parse_dr_info_mock.assert_called_once_with(task.node)
            prepare_extra_mock.assert_called_once_with(
                [(self.node['uuid'], '127.0.0.1', 'test_u', {'ham': 'spam'})],
                variables=_vars)
            run_playbook_mock.assert_called_once_with(
                'test_pl', {'ironic_nodes': [
                    (self.node['uuid'],
                     DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
                     'test_u')]}, 'test_k',
                notags=['wait'])

    @mock.patch.object(fake.FakePower, 'get_power_state',
                       return_value=states.POWER_OFF)
    @mock.patch.object(utils, 'node_power_action', autospec=True)
    def test_reboot_and_finish_deploy_force_reboot(self, power_action_mock,
                                                   get_pow_state_mock):
        d_info = self.node.driver_info
        d_info['deploy_forces_oob_reboot'] = True
        self.node.driver_info = d_info
        self.node.save()
        self.config(group='ansible',
                    post_deploy_get_power_state_retry_interval=0)
        self.node.provision_state = states.DEPLOYING
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid) as task:
            with mock.patch.object(task.driver, 'network') as net_mock:
                self.driver.reboot_and_finish_deploy(task)
                net_mock.remove_provisioning_network.assert_called_once_with(
                    task)
                net_mock.configure_tenant_networks.assert_called_once_with(
                    task)
            expected_power_calls = [((task, states.POWER_OFF),),
                                    ((task, states.POWER_ON),)]
            self.assertEqual(expected_power_calls,
                             power_action_mock.call_args_list)
        get_pow_state_mock.assert_not_called()

    @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
    @mock.patch.object(utils, 'node_power_action', autospec=True)
    @mock.patch.object(fake.FakePower, 'get_power_state',
                       return_value=states.POWER_ON)
    def test_reboot_and_finish_deploy_soft_poweroff_retry(self,
                                                          get_pow_state_mock,
                                                          power_action_mock,
                                                          ansible_mock):
        self.config(group='ansible',
                    post_deploy_get_power_state_retry_interval=0)
        self.config(group='ansible',
                    post_deploy_get_power_state_retries=1)
        self.node.provision_state = states.DEPLOYING
        di_info = self.node.driver_internal_info
        di_info['agent_url'] = 'http://127.0.0.1'
        self.node.driver_internal_info = di_info
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid) as task:
            with mock.patch.object(task.driver, 'network') as net_mock:
                self.driver.reboot_and_finish_deploy(task)
                net_mock.remove_provisioning_network.assert_called_once_with(
                    task)
                net_mock.configure_tenant_networks.assert_called_once_with(
                    task)
            power_action_mock.assert_has_calls(
                [mock.call(task, states.POWER_OFF),
                 mock.call(task, states.POWER_ON)])
            get_pow_state_mock.assert_called_with(task)
            self.assertEqual(2, len(get_pow_state_mock.mock_calls))
            expected_power_calls = [((task, states.POWER_OFF),),
                                    ((task, states.POWER_ON),)]
            self.assertEqual(expected_power_calls,
                             power_action_mock.call_args_list)
        ansible_mock.assert_called_once_with('shutdown.yaml',
                                             mock.ANY, mock.ANY)

    @mock.patch.object(ansible_deploy, '_get_node_ip_heartbeat', autospec=True,
                       return_value='1.2.3.4')
    def test_continue_deploy(self, getip_mock):
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            with mock.patch.multiple(self.driver, autospec=True,
                                     _ansible_deploy=mock.DEFAULT,
                                     reboot_to_instance=mock.DEFAULT):
                self.driver.continue_deploy(task)
                getip_mock.assert_called_once_with(task)
                self.driver._ansible_deploy.assert_called_once_with(
task, '1.2.3.4')
|
||||
self.driver.reboot_to_instance.assert_called_once_with(task)
|
||||
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
|
||||
self.assertEqual(states.DEPLOYING, task.node.provision_state)
|
||||
|
||||
@mock.patch.object(utils, 'node_set_boot_device', autospec=True)
|
||||
def test_reboot_to_instance(self, bootdev_mock):
|
||||
with task_manager.acquire(self.context, self.node.uuid) as task:
|
||||
with mock.patch.object(self.driver, 'reboot_and_finish_deploy',
|
||||
autospec=True):
|
||||
task.driver.boot = mock.Mock()
|
||||
self.driver.reboot_to_instance(task)
|
||||
bootdev_mock.assert_called_once_with(task, 'disk',
|
||||
persistent=True)
|
||||
self.driver.reboot_and_finish_deploy.assert_called_once_with(
|
||||
task)
|
||||
task.driver.boot.clean_up_ramdisk.assert_called_once_with(
|
||||
task)
|
@ -1,15 +0,0 @@
- hosts: primary
  tasks:

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs
@ -1,176 +0,0 @@
- hosts: all
  name: Autoconverted job legacy-tempest-dsvm-ironic-staging-drivers-ansible-wholedisk
    from old job gate-tempest-dsvm-ironic-staging-drivers-ansible-wholedisk-ubuntu-xenial-nv
  tasks:

    - name: Ensure legacy workspace directory
      file:
        path: '{{ ansible_user_dir }}/workspace'
        state: directory

    - shell:
        cmd: |
          set -e
          set -x
          cat > clonemap.yaml << EOF
          clonemap:
            - name: openstack-infra/devstack-gate
              dest: devstack-gate
          EOF
          /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
              git://git.openstack.org \
              openstack-infra/devstack-gate
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-extra-vars
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True"
          # Standardize VM size for each supported ramdisk
          case "tinyipa" in
          'tinyipa')
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=384"
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=tinyipa"
              ;;
          'tinyipa256')
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=256"
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=tinyipa"
              ;;
          'coreos')
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=1280"
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=coreos"
              ;;
          # if using a ramdisk without a known good value, use the devstack
          # default by not exporting any value for IRONIC_VM_SPECS_RAM
          esac

          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-extra-vars
          export PROJECTS="openstack/ironic-staging-drivers $PROJECTS"
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic-staging-drivers git://git.openstack.org/openstack/ironic-staging-drivers"

          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-extra-vars
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_STAGING_DRIVER=pxe_ipmitool_ansible"

          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-extra-vars
          export DEVSTACK_GATE_TEMPEST_REGEX="ironic"

          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-vars-early
          # use tempest plugin
          if [[ "$ZUUL_BRANCH" != "master" ]] ; then
              # NOTE(jroll) if this is not a patch against master, then
              # fetch master to install the plugin
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"TEMPEST_PLUGINS+=' git+git://git.openstack.org/openstack/ironic'"
          else
              # on master, use the local change, so we can pick up any changes to the plugin
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"TEMPEST_PLUGINS+=' /opt/stack/new/ironic'"
          fi
          export TEMPEST_CONCURRENCY=1

          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          set -e
          set -x
          export PROJECTS="openstack/ironic $PROJECTS"
          export PROJECTS="openstack/ironic-lib $PROJECTS"
          export PROJECTS="openstack/ironic-python-agent $PROJECTS"
          export PROJECTS="openstack/python-ironicclient $PROJECTS"
          export PROJECTS="openstack/pyghmi $PROJECTS"
          export PROJECTS="openstack/virtualbmc $PROJECTS"
          export PYTHONUNBUFFERED=true
          export DEVSTACK_GATE_TEMPEST=1
          export DEVSTACK_GATE_IRONIC=1
          export DEVSTACK_GATE_NEUTRON=1
          export DEVSTACK_GATE_VIRT_DRIVER=ironic
          export DEVSTACK_GATE_CONFIGDRIVE=1
          export DEVSTACK_GATE_IRONIC_DRIVER=agent_ipmitool
          export BRANCH_OVERRIDE=default
          if [ "$BRANCH_OVERRIDE" != "default" ] ; then
              export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
          fi

          if [[ ! "stable/newton stable/ocata stable/pike" =~ $ZUUL_BRANCH ]] ; then
              export DEVSTACK_GATE_TLSPROXY=1
          fi

          if [ "agent_ipmitool" == "pxe_snmp" ] ; then
              # explicitly enable pxe_snmp driver
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_ENABLED_DRIVERS=fake,pxe_snmp"
          fi

          if [ "agent_ipmitool" == "redfish" ] ; then
              # When deploying with redfish we need to enable the "redfish"
              # hardware type
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_ENABLED_HARDWARE_TYPES=redfish"
          fi

          if [ "wholedisk" == "wholedisk" ] ; then
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_TEMPEST_WHOLE_DISK_IMAGE=True"
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=0"
          else
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_TEMPEST_WHOLE_DISK_IMAGE=False"
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=1"
          fi

          if [ -n "" ] ; then
              export DEVSTACK_GATE_IRONIC_BUILD_RAMDISK=1
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_BUILD_RAMDISK=True"
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"USE_SUBNETPOOL=False"
          else
              export DEVSTACK_GATE_IRONIC_BUILD_RAMDISK=0
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_BUILD_RAMDISK=False"
          fi

          if [ "bios" == "uefi" ] ; then
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BOOT_MODE=uefi"
          fi

          export DEVSTACK_PROJECT_FROM_GIT=""
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_COUNT=1"

          # Ensure the ironic-vars-EARLY file exists
          touch ironic-vars-early
          # Pull in the EARLY variables injected by the optional builders
          source ironic-vars-early

          export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic git://git.openstack.org/openstack/ironic"

          # Ensure the ironic-EXTRA-vars file exists
          touch ironic-extra-vars
          # Pull in the EXTRA variables injected by the optional builders
          source ironic-extra-vars

          cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
          ./safe-devstack-vm-gate-wrap.sh
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'
@ -0,0 +1,27 @@
---
upgrade:
  - |
    Ansible-deploy interface and drivers using it were removed from the
    ironic-staging-drivers project. The ``ansible`` deploy interface is now
    part of the ironic project itself, please use the implementation defined
    there.

    The ``ansible`` deploy interface must be explicitly enabled in ironic's
    configuration file and can only be used with hardware types: any
    hardware type defined in either the ironic (except for ``oneview``) or
    ironic-staging-drivers (except for ``staging-amt``) projects.
    The correspondence matrix is:

    - ``staging-ansible`` deploy interface ⟶ ``ansible`` deploy interface
    - ``staging-ansible-ipmi`` hardware type ⟶ ``ipmi`` hardware type
      with ``ansible`` deploy interface
    - ``pxe-ipmitool-ansible`` classic driver ⟶ ``ipmi`` hardware type
      with ``ansible`` deploy interface
    - ``pxe-libvirt-ansible`` classic driver ⟶ ``staging-libvirt``
      hardware type with ``ansible`` deploy interface

    Note that the version in ironic does not support heartbeat-less
    operation, so if you were using the ansible interface with the
    ``[ansible]/use_ramdisk_callback = False`` option in ironic's
    configuration file, you must rebuild your ramdisk to include a client
    for the ``lookup`` and ``heartbeat`` endpoints of the ironic API
    (for example ``ironic-python-agent``).
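For illustration, a minimal migration sketch (not part of this change; the
node name ``node-0`` is a placeholder, and the interface lists should be
adjusted to whatever else your deployment enables): enable the in-tree
interface in ironic's configuration, then switch each node over.

# ironic.conf (excerpt)
[DEFAULT]
enabled_hardware_types = ipmi
enabled_deploy_interfaces = direct,ansible

# then, per node:
# openstack baremetal node set node-0 --driver ipmi --deploy-interface ansible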
@ -38,12 +38,8 @@ ironic.drivers =
    fake_iboot_fake = ironic_staging_drivers.iboot:FakeIBootFakeDriver
    pxe_iboot_iscsi = ironic_staging_drivers.iboot:PXEIBootISCSIDriver
    pxe_iboot_agent = ironic_staging_drivers.iboot:PXEIBootAgentDriver
    fake_ansible = ironic_staging_drivers.ansible:FakeAnsibleDriver
    pxe_ipmitool_ansible = ironic_staging_drivers.ansible:AnsibleAndIPMIToolDriver
    pxe_libvirt_ansible = ironic_staging_drivers.ansible:AnsibleAndLibvirtDriver

ironic.hardware.interfaces.deploy =
    staging-ansible = ironic_staging_drivers.ansible.deploy:AnsibleDeploy
    staging-amt = ironic_staging_drivers.amt.deploy:AMTISCSIDeploy

ironic.hardware.interfaces.management =
@ -63,7 +59,6 @@ ironic.hardware.interfaces.vendor =

ironic.hardware.types =
    staging-amt = ironic_staging_drivers.amt.drivers:AMTHardware
    staging-ansible-ipmi = ironic_staging_drivers.ansible:AnsibleDeployIPMI
    staging-iboot = ironic_staging_drivers.iboot:IBootHardware
    staging-nm = ironic_staging_drivers.intel_nm:IntelNMHardware
    staging-libvirt = ironic_staging_drivers.libvirt:LibvirtHardware
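To confirm which deploy interfaces remain registered after this change, the
entry points can be inspected directly. The following is an illustrative
sketch (not part of this commit) using ``pkg_resources``, which enumerates
entry points without importing the plugin modules:

import pkg_resources

# List deploy interfaces registered under ironic's entry-point namespace;
# after this change 'staging-ansible' should no longer appear here.
names = sorted(ep.name for ep in pkg_resources.iter_entry_points(
    'ironic.hardware.interfaces.deploy'))
print(names)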
@ -1,34 +0,0 @@
- job:
    name: legacy-ironic-staging-drivers-dsvm-base
    parent: legacy-dsvm-base
    irrelevant-files:
      - ^test-requirements.txt$
      - ^setup.cfg$
      - ^doc/.*$
      - ^releasenotes/.*$
      - ^ironic-staging-drivers/tests/.*$
    required-projects:
      - openstack-infra/devstack-gate
      - openstack/ironic
      - openstack/ironic-staging-drivers

- job:
    name: ironic-staging-drivers-dsvm-all-drivers
    parent: legacy-ironic-staging-drivers-dsvm-base
    run: playbooks/legacy/ironic-staging-drivers-dsvm-all-drivers/run.yaml
    post-run: playbooks/legacy/ironic-staging-drivers-dsvm-all-drivers/post.yaml
    timeout: 4800

- job:
    name: ironic-staging-drivers-tempest-dsvm-ansible-wholedisk
    parent: legacy-ironic-staging-drivers-dsvm-base
    run: playbooks/legacy/tempest-dsvm-ironic-staging-drivers-ansible-wholedisk/run.yaml
    post-run: playbooks/legacy/tempest-dsvm-ironic-staging-drivers-ansible-wholedisk/post.yaml
    timeout: 4800
    required-projects:
      - openstack/ironic-lib
      - openstack/ironic-python-agent
      - openstack/pyghmi
      - openstack/python-ironicclient
      - openstack/tempest
      - openstack/virtualbmc
@ -1,10 +0,0 @@
- project:
    name: openstack/ironic-staging-drivers
    check:
      jobs:
        - ironic-staging-drivers-dsvm-all-drivers
        - ironic-staging-drivers-tempest-dsvm-ansible-wholedisk:
            voting: false
    gate:
      jobs:
        - ironic-staging-drivers-dsvm-all-drivers