Remove Ansible-deploy interface

It is now part of ironic itself, and the version in ironic-staging-drivers
conflicts with the one in ironic, blocking ironic-conductor from
starting when ironic-staging-drivers is installed.

Change-Id: I917b7399b3249143ba6cf75f61d96a1c64f94e7d
Pavlo Shchelokovskyy 2017-12-15 19:39:56 +02:00
parent e3cb506e2e
commit 725b85484f
65 changed files with 58 additions and 4576 deletions

.zuul.yaml

@ -0,0 +1,25 @@
- job:
name: ironic-staging-drivers-dsvm-all-drivers
parent: legacy-dsvm-base
run: playbooks/legacy/ironic-staging-drivers-dsvm-all-drivers/run.yaml
post-run: playbooks/legacy/ironic-staging-drivers-dsvm-all-drivers/post.yaml
timeout: 4800
irrelevant-files:
- ^test-requirements.txt$
- ^setup.cfg$
- ^doc/.*$
- ^releasenotes/.*$
- ^ironic-staging-drivers/tests/.*$
required-projects:
- openstack-infra/devstack-gate
- openstack/ironic
- openstack/ironic-staging-drivers
- project:
name: openstack/ironic-staging-drivers
check:
jobs:
- ironic-staging-drivers-dsvm-all-drivers
gate:
jobs:
- ironic-staging-drivers-dsvm-all-drivers


@ -3,7 +3,9 @@
IRONIC_STAGING_DRIVERS_DIR=$DEST/ironic-staging-drivers
IRONIC_DRIVERS_EXCLUDED_DIRS='tests common'
IRONIC_STAGING_DRIVER=${IRONIC_STAGING_DRIVER:-}
# NOTE(pas-ha) change this back when there is any driver other than the former
# ansible-deploy driver that can be set up by this devstack plugin
IRONIC_STAGING_DRIVER=""
# NOTE(pas-ha) skip iboot drivers by default as they require package not available on PyPI
IRONIC_STAGING_DRIVERS_SKIPS=${IRONIC_STAGING_DRIVERS_SKIPS:-"iboot"}
IRONIC_STAGING_DRIVERS_FILTERS=${IRONIC_STAGING_DRIVERS_FILTERS:-}
@ -15,6 +17,7 @@ if [[ -n "$IRONIC_STAGING_DRIVERS_FILTERS" ]]; then
IRONIC_STAGING_LIST_EP_CMD+=" -f $IRONIC_STAGING_DRIVERS_FILTERS"
fi
function setup_ironic_enabled_interfaces_for {
local iface=$1
@ -91,88 +94,11 @@ function install_drivers_dependencies {
}
function configure_ironic_testing_driver {
if [[ "$IRONIC_STAGING_DRIVER" =~ "ansible" && \
"$IRONIC_STAGING_DRIVER" =~ "ipmi" ]]; then
echo_summary "Configuring ansible deploy driver interface"
configure_ansible_deploy_driver
else
die $LINENO "Failed to configure ${IRONIC_STAGING_DRIVER} driver/hw type: not supported by devstack plugin or other pre-conditions not met"
fi
}
function configure_ansible_deploy_driver {
# NOTE(pas-ha) DevStack now defaults to tls-proxy being enabled.
# Using custom CA bundle is not that easy with TinyCore,
# requiring extra rebuild steps and resulting in bigger image,
# so just disable validating SSL certs for now in DevStack
# similar to what ironic does for IPA by default in DevStack
iniset $IRONIC_CONF_FILE ansible image_store_insecure True
# set logging for ansible-deploy
# NOTE(pas-ha) w/o systemd or syslog, there will be no output
# of single ansible tasks to ironic log,
# only in the stdout returned by processutils
if [[ "$USE_SYSTEMD" == "True" ]]; then
iniset $IRONIC_STAGING_DRIVERS_DIR/ironic_staging_drivers/ansible/playbooks/callback_plugins/ironic_log.ini ironic use_journal "True"
elif [[ "$SYSLOG" == "True" ]]; then
iniset $IRONIC_STAGING_DRIVERS_DIR/ironic_staging_drivers/ansible/playbooks/callback_plugins/ironic_log.ini ironic use_syslog "True"
fi
die $LINENO "Failed to configure ${IRONIC_STAGING_DRIVER} driver/hw type: not supported by devstack plugin or other pre-conditions not met"
}
function set_ironic_testing_driver {
if [[ "$IRONIC_STAGING_DRIVER" =~ "ansible" && \
"$IRONIC_STAGING_DRIVER" =~ "ipmi" && \
"$IRONIC_DEPLOY_DRIVER" == "agent_ipmitool" && \
"$IRONIC_RAMDISK_TYPE" == "tinyipa" ]]; then
echo_summary "Setting nodes to use 'staging-ansible-ipmi' hardware type with 'staging-ansible' deploy interface"
set_ansible_deploy_driver
else
die $LINENO "Failed to configure ironic to use ${IRONIC_STAGING_DRIVER} driver/hw type: not supported by devstack plugin or other pre-conditions not met"
fi
}
function set_ansible_deploy_driver {
local tinyipa_ramdisk_name
local ansible_key_file
local ansible_ramdisk_id
# ensure the tinyipa ramdisk is present in Glance
tinyipa_ramdisk_name=$(openstack --os-cloud devstack-admin image show ${IRONIC_DEPLOY_RAMDISK_ID} -f value -c name)
if [ -z $tinyipa_ramdisk_name ]; then
die $LINENO "Failed to find ironic deploy ramdisk ${IRONIC_DEPLOY_RAMDISK_ID}"
fi
cd $IRONIC_STAGING_DRIVERS_DIR/imagebuild/tinyipa-ansible
# download original tinyipa ramdisk from Glance
openstack --os-cloud devstack-admin image save ${IRONIC_DEPLOY_RAMDISK_ID} --file ${tinyipa_ramdisk_name}
export TINYIPA_RAMDISK_FILE="${PWD}/${tinyipa_ramdisk_name}"
# generate SSH keys for deploy ramdisk and ansible driver
mkdir -p ${IRONIC_DATA_DIR}/ssh_keys
ansible_key_file="${IRONIC_DATA_DIR}/ssh_keys/ansible_key"
ssh-keygen -q -t rsa -N "" -f ${ansible_key_file}
export SSH_PUBLIC_KEY=${ansible_key_file}.pub
# rebuild ramdisk, produces ansible-${tinyipa_ramdisk_name} file
make
# upload rebuilt ramdisk to Glance
ansible_ramdisk_id=$(openstack --os-cloud devstack-admin image create "ansible-${tinyipa_ramdisk_name}" \
--file "${PWD}/ansible-${tinyipa_ramdisk_name}" \
--disk-format ari --container-format ari \
--public \
-f value -c id)
for node in $(openstack --os-cloud devstack baremetal node list -f value -c UUID); do
# switch driver to ansible-enabled hardware type, use minimal API version that supports setting driver interfaces,
# set nodes to use the uploaded ramdisk and appropriate SSH creds.
# TODO(pas-ha) remove API version when OSC defaults to 'latest'
# TODO(pas-ha) change the job definition in project-config to set the HW type
# when stable/pike is no longer supported
openstack --os-cloud devstack-admin --os-baremetal-api-version 1.31 baremetal node set $node \
--driver staging-ansible-ipmi \
--deploy-interface staging-ansible \
--driver-info deploy_ramdisk=$ansible_ramdisk_id \
--driver-info ansible_deploy_username=tc \
--driver-info ansible_deploy_key_file=$ansible_key_file
done
die $LINENO "Failed to configure ironic to use ${IRONIC_STAGING_DRIVER} driver/hw type: not supported by devstack plugin or other pre-conditions not met"
}
echo_summary "ironic-staging-drivers plugin.sh was called..."


@ -12,4 +12,3 @@ Available drivers
drivers/iboot
drivers/libvirt
drivers/intel_nm
drivers/ansible


@ -1,546 +0,0 @@
.. _ansible:
#####################
Ansible-deploy driver
#####################
Ansible is an already mature and popular automation tool, written in Python
and requiring no agents running on the node being configured.
All communications with the node are by default performed over secure SSH
transport.
The Ansible-deploy driver uses Ansible playbooks to define the
deployment logic. It is not based on `Ironic Python Agent`_ (IPA)
and generally does not need IPA to be running in the deploy ramdisk.
.. note::
The "playbook API", that is the set and structure of variables passed
into playbooks from the driver, is not stable yet and will most probably
change in next versions.
Overview
========
The main advantage of this driver is extended flexibility with regard to
changing and adapting node deployment logic to the particular use case,
using tooling already familiar to operators.
It also shortens the usual feature development cycle of
* implementing logic in ironic,
* implementing logic in IPA,
* rebuilding deploy ramdisk,
* uploading it to Glance/HTTP storage,
* reassigning deploy ramdisk to nodes,
* restarting ironic service and
* running a test deployment
by using a more "stable" deploy ramdisk and not requiring
ironic-conductor restarts (see `Extending playbooks`_).
The main disadvantage is a synchronous manner of performing
deployment/cleaning tasks, as Ansible is invoked as ``ansible-playbook``
CLI command via Python's ``subprocess`` library.
Each action (deploy, clean) is described by a single playbook with roles,
which is run as a whole during deployment, or tag-wise during cleaning.
Cleaning steps are controlled through tags and an auxiliary clean steps file.
The playbooks for actions can be set per-node, as can the cleaning steps
file.
Features
--------
Supports two modes for continuing deployment (configured in driver
options, see `Configuration file`_):
- having the deploy ramdisk calling back to ironic API's
``heartbeat`` endpoint (default)
- polling the node until the ssh port is open as part of a playbook
User images
~~~~~~~~~~~
Supports whole-disk images and partition images:
- compressed images are downloaded to RAM and converted to disk device;
- raw images are streamed to disk directly.
For partition images the driver will create the root partition and,
if requested, ephemeral and swap partitions as set in the node's
``instance_info`` by nova or the operator.
The partition table created will be of ``msdos`` type by default;
the node's ``disk_label`` capability is honored if it is set in the node's
``instance_info``.
Configdrive partition
~~~~~~~~~~~~~~~~~~~~~
Creating a configdrive partition is supported for both whole disk
and partition images, on both ``msdos`` and ``GPT`` labeled disks.
Root device hints
~~~~~~~~~~~~~~~~~
Root device hints are currently supported in their basic form only
(with exact matches, without oslo.utils operators).
If no root device hint is provided for the node, the first device returned
in the ``ansible_devices`` fact is used as the root device to create
partitions on or to write the whole-disk image to.
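
For example, an exact-match root device hint can be set as a node property
(a sketch only; the device name is illustrative):

.. code-block:: shell

   openstack baremetal node set <node> \
       --property root_device='{"name": "/dev/sda"}'
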
Node cleaning
~~~~~~~~~~~~~
Cleaning is supported, both automated and manual.
Currently the driver has two default clean steps:
- wiping device metadata
- disk shredding
Their priority can be overridden via options in ironic configuration file's
``[deploy]`` section the same as for IPA-based drivers.
As all cleaning steps for this driver are known to the conductor,
booting the deploy ramdisk is skipped entirely when
there are no cleaning steps to perform.
Aborting cleaning tasks is not supported.
Logging
~~~~~~~
Logging is implemented as a custom Ansible callback module
that makes use of the ``oslo.log`` and ``oslo.config`` libraries
and can re-use the logging configuration defined in the main ironic
configuration file (``/etc/ironic/ironic.conf`` by default) to set up logging
for Ansible events, or use a separate file for this purpose.
.. note::
Currently this has some quirks in DevStack - due to the default
logging system there, the ``log_file`` must be set explicitly in
``$playbooks_path/callback_plugins/ironic_log.ini`` when running
DevStack in 'developer' mode using ``screen``.
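
A sketch of such an ``ironic_log.ini`` override (the ``[ironic]`` section name
and the ``use_journal``/``use_syslog`` options follow what this repository's
DevStack plugin sets; the ``log_file`` value is illustrative):

.. code-block:: ini

   [ironic]
   log_file = /var/log/ironic/ansible-deploy.log
   use_journal = False
   use_syslog = False
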
Requirements
============
ironic
Requires ironic version >= 8.0.0 (Pike release or newer).
Ansible
Tested with and targets Ansible ≥ 2.1
Bootstrap image requirements
----------------------------
- password-less sudo permissions for the user used by Ansible
- python 2.7.x
- openssh-server
- GNU coreutils
- util-linux
- parted
- gdisk
- qemu-utils
- python-requests (for ironic callback and streaming image download)
- python-netifaces (for ironic callback)
Set of scripts to build a suitable deploy ramdisk based on TinyCore Linux,
and an element for ``diskimage-builder`` is provided.
Setting up your environment
===========================
#. Install ironic (either as part of OpenStack/DevStack or standalone)
#. Install Ansible (``pip install ansible`` should suffice).
#. Install ``ironic-staging-drivers``
#. Edit ironic configuration file
A. Add one of the Ansible-enabled drivers to ``enabled_drivers`` option.
(see `Available drivers and options`_).
B. Add ``[ansible]`` config section and configure it if needed
(see `Configuration file`_).
#. (Re)start ironic-conductor service
#. Build suitable deploy kernel and ramdisk images
#. Upload them to Glance or put in your HTTP storage
#. Create new or update existing nodes to use the enabled driver
of your choice and populate `Driver properties for the Node`_ when
different from defaults (an example enrollment command follows this list).
#. Deploy the node as usual.
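
For illustration, enrolling a node with the ``pxe_ipmitool_ansible`` driver
described in the next section might look like the following sketch; the
addresses, credentials and image UUIDs are placeholders for your environment:

.. code-block:: shell

   openstack baremetal node create --driver pxe_ipmitool_ansible \
       --driver-info ipmi_address=192.0.2.10 \
       --driver-info ipmi_username=admin \
       --driver-info ipmi_password=secret \
       --driver-info deploy_kernel=<DEPLOY_KERNEL_UUID> \
       --driver-info deploy_ramdisk=<DEPLOY_RAMDISK_UUID> \
       --driver-info ansible_deploy_username=tc \
       --driver-info ansible_deploy_key_file=/etc/ironic/ansible_key
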
Available drivers and options
=============================
Three drivers are provided:
pxe_ipmitool_ansible
Uses PXE/iPXE to boot the nodes, and ``ipmitool`` for Power/Management.
This is the driver to use with real hardware nodes.
pxe_ssh_ansible
Uses PXE/iPXE to boot the nodes, and ironic's SSH driver for
Power/Management. Used only in testing environments.
pxe_libvirt_ansible
Alternative to ``pxe_ssh_ansible``, uses LibVirt-based driver for
Power/Management (part of ``ironic-staging-drivers``).
Can be used for bigger CI environments, where it has better
performance than ironic's SSH driver.
Ansible-deploy options
----------------------
Configuration file
~~~~~~~~~~~~~~~~~~~
Driver options are configured in the ``[ansible]`` section of the ironic
configuration file; an example snippet is given at the end of this section.
use_ramdisk_callback
Whether to expect a callback from the deploy ramdisk when it is
ready to accept commands, or to use passive polling for a running SSH daemon
on the node as part of running playbooks.
Note that setting it to False *requires* Neutron to resolve the IP
of the node for Ansible to connect to, and thus is not
suitable for standalone deployments.
Default is True.
verbosity
None, 0-4. Corresponds to number of 'v's passed to ``ansible-playbook``.
Default (None) will pass 'vvvv' when global debug is enabled in ironic,
and nothing otherwise.
ansible_playbook_script
Full path to the ``ansible-playbook`` script. Useful mostly for
testing environments when you e.g. run Ansible from source instead
of installing it.
Default (None) will search in ``$PATH`` of the user running
ironic-conductor service.
playbooks_path
Path to folder that contains all the Ansible-related files
(Ansible inventory, deployment/cleaning playbooks, roles etc).
Default is to use the playbooks provided with ``ironic-staging-drivers``
from where it is installed.
config_file_path
Path to Ansible's config file. When set to None will use global system
default (usually ``/etc/ansible/ansible.cfg``).
Default is ``playbooks_path``/ansible.cfg
ansible_extra_args
Extra arguments to pass to ``ansible-playbook`` on each invocation.
Default is None.
extra_memory
Memory overhead (in MiB) for the Ansible-related processes
in the deploy ramdisk.
Affects the decision whether the downloaded user image will fit into
the RAM of the node.
Default is 10.
post_deploy_get_power_state_retries
Number of times to retry getting power state to check if
bare metal node has been powered off after a soft poweroff.
Default is 6.
post_deploy_get_power_state_retry_interval
Amount of time (in seconds) to wait between polling power state
after triggering soft poweroff.
Default is 5.
image_store_insecure
Boolean to disable validation of server SSL certificate of
the image store when downloading image and configdrive.
Default is False.
image_store_cafile
Path to custom PEM CA bundle to use for validation of server SSL
certificate of the image store when downloading the image or configdrive.
Is not currently used by default playbooks included with the driver.
image_store_certfile
Path to client certificate file to use for client SSL authentication
to the image store when downloading the image or configdrive.
Is not currently used by default playbooks included with the driver.
image_store_keyfile
Path to private key file to use for client SSL authentication
to the image store when downloading the image or configdrive.
Is not currently used by default playbooks included with the driver.
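
A minimal example of such an ``[ansible]`` section (all values shown are
illustrative, not recommendations; any option can be omitted to keep its
default):

.. code-block:: ini

   [ansible]
   use_ramdisk_callback = True
   verbosity = 1
   extra_memory = 10
   image_store_insecure = False
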
Driver properties for the Node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Set them per-node via:
.. code-block:: shell
ironic node-update <node> <op> driver_info/<key>=<value>
or:
.. code-block:: shell
openstack baremetal node set <node> --driver-info <key>=<value>
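
For example (illustrative values; the available keys are described below):

.. code-block:: shell

   openstack baremetal node set <node> \
       --driver-info ansible_deploy_username=ansible \
       --driver-info ansible_deploy_key_file=/etc/ironic/ansible_key
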
ansible_deploy_username
User name to use for Ansible to access the node (default is ``ansible``).
ansible_deploy_key_file
Private SSH key used to access the node. If none is provided (default),
Ansible will use the default SSH keys configured for the user running
ironic-conductor service.
Also note that private keys protected with a password must be pre-loaded
into ``ssh-agent``.
ansible_deploy_playbook
Name of the playbook file inside the ``playbooks_path`` folder
to use when deploying this node.
Default is ``deploy.yaml``.
ansible_shutdown_playbook
Name of the playbook file inside the ``playbooks_path`` folder
to use to gracefully shutdown the node in-band.
Default is ``shutdown.yaml``.
ansible_clean_playbook
Name of the playbook file inside the ``playbooks_path`` folder
to use when cleaning the node.
Default is ``clean.yaml``.
ansible_clean_steps_config
Name of the YAML file inside the ``playbooks_path`` folder
that holds description of cleaning steps used by this node,
and defines playbook tags in ``ansible_clean_playbook`` file
corresponding to each cleaning step.
Default is ``clean_steps.yaml``.
Customizing the deployment logic
================================
Expected playbooks directory layout
-----------------------------------
The ``playbooks_path`` configured in the ironic config is expected
to have a standard layout for an Ansible project with some additions::
<playbooks_path>
|
\_ inventory
\_ add-ironic-nodes.yaml
\_ roles
\_ role1
\_ role2
\_ ...
|
\_callback_plugins
\_ ...
|
\_ library
\_ ...
The extra files relied upon by this driver are:
inventory
Ansible inventory file containing a single entry of
``conductor ansible_connection=local``.
This basically defines an alias to ``localhost``.
Its purpose is to make logging of tasks performed by Ansible locally,
and referencing localhost in playbooks, more intuitive.
This also suppresses warnings produced by Ansible about ``hosts`` file
being empty.
add-ironic-nodes.yaml
This file contains an Ansible play that populates in-memory Ansible
inventory with access info received from the ansible-deploy driver,
as well as some per-node variables.
Include it in all your custom playbooks as the first play.
The default ``deploy.yaml`` playbook uses several smaller roles that
correspond to particular stages of the deployment process:
- ``discover`` - e.g. set root device and image target
- ``prepare`` - if needed, prepare system, for example create partitions
- ``deploy`` - download/convert/write user image and configdrive
- ``configure`` - post-deployment steps, e.g. installing the bootloader
Some more included roles are:
- ``wait`` - used when the driver is configured to not use callback from
node to start the deployment. This role waits for OpenSSH server to
become available on the node to connect to.
- ``shutdown`` - used to gracefully power the node off in-band
- ``clean`` - defines cleaning procedure, with each clean step defined
as separate playbook tag.
Extending playbooks
-------------------
You would most probably start experimenting like this (a minimal sketch of
such a custom playbook follows this list):
#. Create a copy of ``deploy.yaml`` playbook, name it distinctively.
#. Create Ansible roles with your customized logic in ``roles`` folder.
A. In your custom deploy playbook, replace the ``prepare`` role
with your own one that defines steps to be run
*before* image download/writing.
This is a good place to set facts overriding those provided/omitted
by the driver, like ``ironic_partitions`` or ``ironic_root_device``,
and create custom partitions or (software) RAIDs.
B. In your custom deploy playbook, replace the ``configure`` role
with your own one that defines steps to be run
*after* image is written to disk.
This is a good place for example to configure the bootloader and
add kernel options to avoid additional reboots.
#. Assign the custom deploy playbook you've created to the node's
``driver_info/ansible_deploy_playbook`` field.
#. Run deployment.
A. No ironic-conductor restart is necessary.
B. A new deploy ramdisk must be built and assigned to nodes only when
you want to use a command/script/package not present in the current
deploy ramdisk and you can not or do not want
to install those at runtime.
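
A minimal sketch of such a custom deploy playbook follows; the ``my_prepare``
and ``my_configure`` roles are hypothetical, and the host group name and the
way the first play is pulled in are assumed to match the stock ``deploy.yaml``:

.. code-block:: yaml

   ---
   # first play: populate the in-memory inventory (shipped with the driver)
   - include: add-ironic-nodes.yaml

   - hosts: ironic
     roles:
       - discover
       - my_prepare    # custom steps run *before* the image is written
       - deploy
       - my_configure  # custom steps run *after* the image is written
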
Variables you have access to
----------------------------
This driver passes a single JSON-ified extra-vars argument to
Ansible (as ``ansible-playbook -e ..``).
These values are then accessible in your plays as well
(some of them are optional and might not be defined):
.. code-block:: yaml
ironic:
nodes:
- ip: <IPADDRESS>
name: <NODE_UUID>
user: <USER ANSIBLE WILL USE>
extra: <COPY OF NODE's EXTRA FIELD>
image:
url: <URL TO FETCH THE USER IMAGE FROM>
disk_format: <qcow2|raw|...>
container_format: <bare|...>
checksum: <hash-algo:hashstring>
mem_req: <REQUIRED FREE MEMORY TO DOWNLOAD IMAGE TO RAM>
tags: <LIST OF IMAGE TAGS AS DEFINED IN GLANCE>
properties: <DICT OF IMAGE PROPERTIES AS DEFINED IN GLANCE>
configdrive:
type: <url|file>
location: <URL OR PATH ON CONDUCTOR>
partition_info:
label: <msdos|gpt>
preserve_ephemeral: <bool>
ephemeral_format: <FILESYSTEM TO CREATE ON EPHEMERAL PARTITION>
partitions: <LIST OF PARTITIONS IN FORMAT EXPECTED BY PARTED MODULE>
Some more explanations:
``ironic.nodes``
List of dictionaries (currently of only one element) that will be used by
``add-ironic-nodes.yaml`` play to populate in-memory inventory.
It also contains a copy of the node's ``extra`` field so you can access it in
the playbooks. The Ansible host is set to the node's UUID.
``ironic.image``
All fields of node's ``instance_info`` that start with ``image_`` are
passed inside this variable. Some extra notes and fields:
- ``mem_req`` is calculated from image size (if available) and config
option ``[ansible]extra_memory``.
- if ``checksum`` initially does not start with ``hash-algo:``, hashing
algorithm is assumed to be ``md5`` (default in Glance).
- ``validate_certs`` - boolean (``yes/no``) flag that turns validating
image store SSL certificate on or off (default is 'yes').
Governed by ``[ansible]image_store_insecure`` option
in ironic configuration file.
- ``cafile`` - custom CA bundle to use for validating image store
SSL certificate.
Takes value of ``[ansible]image_store_cafile`` if that is defined.
It is currently not used by the default playbooks, as Ansible has no way to
specify a custom CA bundle for single HTTPS actions;
however, you can use this value in your custom playbooks, for example to
upload and register this CA in the ramdisk at deploy time.
- ``client_cert`` - cert file for client-side SSL authentication.
Takes value of ``[ansible]image_store_certfile`` option if defined.
It is currently not used by the default playbooks, as it is only
available since Ansible 2.4;
however, you can use this value in your custom playbooks.
- ``client_key`` - private key file for client-side SSL authentication.
Takes value of ``[ansible]image_store_keyfile`` option if defined.
It is currently not used by the default playbooks, as it is only
available since Ansible 2.4;
however, you can use this value in your custom playbooks.
``ironic.partition_info.partitions``
Optional. List of dictionaries defining partitions to create on the node
in the form:
.. code-block:: yaml
partitions:
- name: <NAME OF PARTITION>
unit: <UNITS FOR SIZE>
size: <SIZE OF THE PARTITION>
type: <primary|extended|logical>
align: <ONE OF PARTED_SUPPORTED OPTIONS>
format: <PARTITION TYPE TO SET>
flags:
flag_name: <bool>
The driver will populate this list from ``root_gb``, ``swap_mb`` and
``ephemeral_gb`` fields of ``instance_info``.
The driver will also prepend the ``bios_grub``-labeled partition
when deploying on a GPT-labeled disk,
and pre-create a 64MiB partition for the configdrive if it is set in
``instance_info``.
Please read the documentation included in the ``ironic_parted`` module's
source for more info on the module and its arguments.
``ironic.partition_info.ephemeral_format``
Optional. Taken from ``instance_info``, it defines the file system to be
created on the ephemeral partition.
Defaults to the value of ``[pxe]default_ephemeral_format`` option
in ironic configuration file.
``ironic.partition_info.preserve_ephemeral``
Optional. Taken from ``instance_info``, it specifies whether the ephemeral
partition must be preserved or rebuilt. Defaults to ``no``.
As usual for Ansible playbooks, you also have access to standard
Ansible facts discovered by ``setup`` module.
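
For illustration, a task in a custom role could reference these variables as
in the following sketch (the message content is arbitrary):

.. code-block:: yaml

   - name: show what is about to be deployed
     debug:
       msg: >
         Deploying {{ ironic.image.url }} ({{ ironic.image.disk_format }})
         to {{ ironic_root_device | default('first suitable device') }}
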
Included custom Ansible modules
-------------------------------
The provided ``playbooks_path/library`` folder includes several custom
Ansible modules used by default implementation of ``deploy`` and
``prepare`` roles.
You can use these modules in your playbooks as well;
a usage sketch follows at the end of this section.
``stream_url``
Streams a download from an HTTP(S) source directly to the disk device;
it tries to be compatible with Ansible's ``get_url`` module in terms of
module arguments.
Due to the low-level nature of this operation it is not idempotent.
``ironic_parted``
Creates partition tables and partitions with the ``parted`` utility.
Due to the low-level nature of this operation it is not idempotent.
Please read the documentation included in the module's source
for more information about this module and its arguments.
The name is chosen so that the ``parted`` module included in Ansible 2.3
is not shadowed.
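
A hedged sketch of calling these modules from a custom playbook; the argument
names are assumptions based on the descriptions above (``url``/``dest``
following the ``get_url`` convention, ``device``/``label``/``partitions`` for
``ironic_parted``), so consult the module sources in ``playbooks_path/library``
for the exact interface:

.. code-block:: yaml

   - name: stream the whole-disk image directly to the root device
     stream_url:
       url: "{{ ironic.image.url }}"
       dest: "{{ ironic_root_device }}"

   - name: create the partition table and partitions
     ironic_parted:
       device: "{{ ironic_root_device }}"
       label: "{{ ironic.partition_info.label | default('msdos') }}"
       partitions: "{{ ironic.partition_info.partitions }}"
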
.. _Ironic Python Agent: http://docs.openstack.org/developer/ironic-python-agent


@ -1,22 +0,0 @@
==============
ironic-ansible
==============
Builds a ramdisk for Ironic Ansible deploy driver.
This element is based on the following elements:
- ``devuser`` to create and configure a user for Ansible to access the node
- ``ironic-agent`` to provide Ironic API lookup and heartbeats via IPA
Consult docs for those elements for available options.
Additionally this element:
- ensures OpenSSH is installed and configured properly
- correctly sets hostname to avoid some Ansible problems with elevation
Note: compared to ``devuser`` element, this element **always** gives
the configured user password-less sudo permissions (*unconfigurable*).
Requires Ironic API >= 1.22.
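
A sketch of building a deploy kernel/ramdisk pair with this element using
``diskimage-builder``; the base distribution element and the ``devuser``
variables shown are assumptions to adjust to your setup::

    export DIB_DEV_USER_USERNAME=ansible
    export DIB_DEV_USER_AUTHORIZED_KEYS=$HOME/.ssh/ansible_key.pub
    disk-image-create -o ironic-ansible-deploy fedora ironic-ansible
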


@ -1,3 +0,0 @@
ironic-agent
devuser
openssh-server


@ -1 +0,0 @@
ironic-ansible-deploy


@ -1 +0,0 @@
export DIB_DEV_USER_PWDLESS_SUDO="yes"


@ -1,23 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
ANSIBLE_DEPLOY_HOSTNAME="ironic-ansible-deploy"
echo $ANSIBLE_DEPLOY_HOSTNAME > /etc/hostname
# not having a hostname in hosts produces an extra output
# on every "sudo" command like the following:
#
# sudo: unable to resolve host <HOSTNAME>\r\n
#
# which as of Ansible 2.0.1.0 fails JSON parsing
# in case of tasks using become+async.
# Ansible issues #13965 (fixed in 2.0.1.0), #14568, #14714
# ensure /etc/hosts has hostname in it
echo "127.0.0.1 $ANSIBLE_DEPLOY_HOSTAME" >> /etc/hosts


@ -1,2 +0,0 @@
# Pin to this mirror because the roundrobin is fairly unreliable
export DIB_DISTRIBUTION_MIRROR=http://dl.fedoraproject.org/pub/fedora/linux


@ -1,5 +0,0 @@
build_files/cache
rebuild/
*.gz
*.initramfs
*.vmlinuz


@ -1,13 +0,0 @@
.PHONY: all dependencies rebuild clean
all: dependencies rebuild
dependencies:
./install-deps.sh
rebuild:
./rebuild-tinyipa.sh
clean:
sudo -v
sudo rm -rf rebuild
rm -f *.initramfs
rm -f *.gz
rm -rf build_files/cache/*


@ -1,87 +0,0 @@
###################################################
TinyIPA image compatible with Ansible-deploy driver
###################################################
It is possible to rebuild the pre-built tinyipa ramdisk available from
http://tarballs.openstack.org/ironic-python-agent/tinyipa
to make it usable with Ansible-deploy driver.
Rebuilding TinyIPA
==================
#. Run the provided ``rebuild-tinyipa.sh`` script,
set environment variables as explained in `Build options`_.
#. Running this script will create a rebuilt ramdisk as
``ansible-<original-tinyipa-ramdisk-name>``.
That file must be uploaded to Glance as ARI image.
* If tinyipa kernel is not in Glance yet, an appropriate version can be
downloaded from tarballs.openstack.org and
uploaded to Glance as AKI image.
#. Update nodes that use ``*_ansible`` driver:
* Assign ramdisk uploaded in the previous step as
``driver_info/deploy_ramdisk``.
* The kernel image created during TinyIPA build
(``tinyipa[-branch_name].vmlinuz``) should be used as
``driver_info/deploy_kernel`` if not set yet.
* Set ``tc`` as ``driver_info/ansible_deploy_username``.
+ If you have used a custom ``SSH_PUBLIC_KEY``, specify the corresponding
private key as ``driver_info/ansible_deploy_key_file``
* Ensure that the private SSH key file has correct ``600`` or ``400``
exclusive permissions for the user running the ironic-conductor process.
#. You can also assign the ramdisk created to other nodes that use
``IPA``-based ramdisks as ``driver_info/deploy_ramdisk`` to have a
unified deploy image for all nodes.
It should work for them the same as original tinyipa ramdisk.
Build options
-------------
#. If rebuilding an existing tinyipa ramdisk file, set the
``TINYIPA_RAMDISK_FILE`` environment variable to absolute path to
this file before running this script::
export TINYIPA_RAMDISK_FILE=</full/path/to/tinyipa-ramdisk-file>
#. When not provided with existing file, this script will rebuild the
tinyipa master branch build.
To use a stable branch, set ``BRANCH_PATH`` environment variable
(``master`` by default) before running the rebuild script accordingly.
Branch names for stable releases must be in the form ``stable-<release>``,
for example::
export BRANCH_PATH=stable-newton
Consult https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/
for currently available versions.
#. By default, the script will bake ``id_rsa`` or ``id_dsa`` public SSH keys
of the user running the build into the ramdisk as authorized_keys for
``tc`` user.
To provide a custom key, set absolute path to it as ``SSH_PUBLIC_KEY``
environment variable before running this script::
export SSH_PUBLIC_KEY=<path-to-public-ssh-key>
Using Makefile
--------------
For simplified configuration, a Makefile is provided to use ``make`` for
some standard operations.
make
will install required dependencies and run the ``rebuild-tinyipa`` script
without arguments, downloading and rebuilding the image available at
https://tarballs.openstack.org
All customizations through environment variables still apply.
make clean
will cleanup temporary files and images created during build
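
For example, a full rebuild against a stable branch with a custom public key
might look like this (the branch and key path are illustrative)::

    export BRANCH_PATH=stable-pike
    export SSH_PUBLIC_KEY=/home/stack/.ssh/ansible_key.pub
    make

This produces an ``ansible-tinyipa-stable-pike.gz`` ramdisk next to the
Makefile, named after the downloaded tinyipa file.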


@ -1,104 +0,0 @@
#!/bin/sh
S="Linux"
N="box"
R="4.2.9-tinycore64"
P="unknown"
V="#777 SMP (2016-02-29)"
M="x86_64"
I="unknown"
O="GNU/Linux"
OPT_A=false
OPT_S=false
OPT_N=false
OPT_R=false
OPT_P=false
OPT_V=false
OPT_M=false
OPT_I=false
OPT_O=false
if [ -z "$1" ]; then
echo "-ASNRPVMIO"
exit 1
fi
while :; do
case $1 in
-a)
OPT_A=true
shift
;;
-s)
OPT_S=true
shift
;;
-n)
OPT_N=true
shift
;;
-r)
OPT_R=true
shift
;;
-p)
OPT_P=true
shift
;;
-v)
OPT_V=true
shift
;;
-m)
OPT_M=true
shift
;;
-i)
OPT_I=true
shift
;;
-o)
OPT_O=true
shift
;;
*)
if [ ! -z "$1" ]; then
echo "uname -asnrpvmio"
exit 1
fi
break
;;
esac
done
if $OPT_A; then
echo "$S $N $R $V $M $O"
exit 0
fi
string=''
if $OPT_S; then
string="$string $S"
fi
if $OPT_N; then
string="$string $N"
fi
if $OPT_R; then
string="$string $R"
fi
if $OPT_P; then
string="$string $P"
fi
if $OPT_V; then
string="$string $V"
fi
if $OPT_M; then
string="$string $M"
fi
if $OPT_I; then
string="$string $I"
fi
if $OPT_O; then
string="$string $O"
fi
echo $string


@ -1,53 +0,0 @@
#NOTE(pas-ha)
# The first URL is the official TC repo,
# the rest of the list is taken from
# http://wiki.tinycorelinux.net/wiki:mirrors
# as of time of this writing.
# Only HTTP mirrors were considered with the following ordering
# - those that were unavailable are moved to the bottom of the list
# - those that already responded with 404 are moved to the very bottom
# List generated on 12-Dec-2016
TC_MIRRORS="http://repo.tinycorelinux.net
http://distro.ibiblio.org/tinycorelinux
http://mirror.cedia.org.ec/tinycorelinux
http://mirror.epn.edu.ec/tinycorelinux
http://mirrors.163.com/tinycorelinux
http://kambing.ui.ac.id/tinycorelinux
http://ftp.nluug.nl/os/Linux/distr/tinycorelinux
http://ftp.vim.org/os/Linux/distr/tinycorelinux
http://www.gtlib.gatech.edu/pub/tinycore
http://tinycore.mirror.uber.com.au
http://l4u-00.jinr.ru/LinuxArchive/Ftp/tinycorelinux"
function probe_url {
wget -q --spider --tries 1 --timeout 10 "$1" 2>&1
}
function choose_tc_mirror {
if [ -z ${TINYCORE_MIRROR_URL} ]; then
for url in ${TC_MIRRORS}; do
echo "Checking Tiny Core Linux mirror ${url}"
if probe_url ${url} ; then
echo "Check succeeded: ${url} is responding."
TINYCORE_MIRROR_URL=${url}
break
else
echo "Check failed: ${url} is not responding"
fi
done
if [ -z ${TINYCORE_MIRROR_URL} ]; then
echo "Failed to find working Tiny Core Linux mirror"
exit 1
fi
else
echo "Probing provided Tiny Core Linux mirror ${TINYCORE_MIRROR_URL}"
if probe_url ${TINYCORE_MIRROR_URL} ; then
echo "Check succeeded: ${TINYCORE_MIRROR_URL} is responding."
else
echo "Check failed: ${TINYCORE_MIRROR_URL} is not responding"
exit 1
fi
fi
}


@ -1,17 +0,0 @@
#!/bin/bash
PACKAGES="wget unzip sudo"
echo "Installing dependencies:"
if [ -x "/usr/bin/apt-get" ]; then
sudo -E apt-get update
sudo -E apt-get install -y $PACKAGES
elif [ -x "/usr/bin/dnf" ]; then
sudo -E dnf install -y $PACKAGES
elif [ -x "/usr/bin/yum" ]; then
sudo -E yum install -y $PACKAGES
else
echo "No supported package manager installed on system. Supported: apt, yum, dnf"
exit 1
fi


@ -1,223 +0,0 @@
#!/bin/bash
# Rebuild the upstream pre-built tinyipa ramdisk to be usable with ansible-deploy.
#
# Downloads the pre-built tinyipa ramdisk from tarballs.openstack.org or
# rebuilds a ramdisk under path provided as first script argument
# During rebuild this script installs and configures OpenSSH server and
# makes required changes for Ansible + Python to work in compiled/optimized
# Python environment.
#
# By default, id_rsa or id_dsa keys of the user performing the build
# are baked into the image as authorized_keys for 'tc' user.
# To supply a different public ssh key, before running this script set
# SSH_PUBLIC_KEY environment variable to point to absolute path to the key.
#
# This script produces an "ansible-<tinyipa-ramdisk-name>" ramdisk that can serve
# as a ramdisk for both the ansible-deploy driver and agent-based Ironic drivers.
set -ex
WORKDIR=$(readlink -f $0 | xargs dirname)
SSH_PUBLIC_KEY=${SSH_PUBLIC_KEY:-}
source ${WORKDIR}/build_files/tc-mirror.sh
TINYCORE_MIRROR_URL=${TINYCORE_MIRROR_URL:-}
BRANCH_PATH=${BRANCH_PATH:-master}
TINYIPA_RAMDISK_FILE=${TINYIPA_RAMDISK_FILE:-}
TC=1001
STAFF=50
REBUILDDIR="$WORKDIR/rebuild"
CHROOT_PATH="/tmp/overides:/usr/local/sbin:/usr/local/bin:/apps/bin:/usr/sbin:/usr/bin:/sbin:/bin"
CHROOT_CMD="sudo chroot $REBUILDDIR /usr/bin/env -i PATH=$CHROOT_PATH http_proxy=$http_proxy https_proxy=$https_proxy no_proxy=$no_proxy"
TC_CHROOT_CMD="sudo chroot --userspec=$TC:$STAFF $REBUILDDIR /usr/bin/env -i PATH=$CHROOT_PATH http_proxy=$http_proxy https_proxy=$https_proxy no_proxy=$no_proxy"
function validate_params {
echo "Validating location of public SSH key"
if [ -n "$SSH_PUBLIC_KEY" ]; then
if [ -r "$SSH_PUBLIC_KEY" ]; then
_found_ssh_key="$SSH_PUBLIC_KEY"
fi
else
for fmt in rsa dsa; do
if [ -r "$HOME/.ssh/id_$fmt.pub" ]; then
_found_ssh_key="$HOME/.ssh/id_$fmt.pub"
break
fi
done
fi
if [ -z $_found_ssh_key ]; then
echo "Failed to find neither provided nor default SSH key"
exit 1
fi
choose_tc_mirror
}
function get_tinyipa {
if [ -z $TINYIPA_RAMDISK_FILE ]; then
mkdir -p $WORKDIR/build_files/cache
cd $WORKDIR/build_files/cache
wget -N https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/tinyipa-${BRANCH_PATH}.gz
TINYIPA_RAMDISK_FILE="$WORKDIR/build_files/cache/tinyipa-${BRANCH_PATH}.gz"
fi
}
function unpack_ramdisk {
if [ -d "$REBUILDDIR" ]; then
sudo rm -rf "$REBUILDDIR"
fi
mkdir -p "$REBUILDDIR"
# Extract rootfs from .gz file
( cd "$REBUILDDIR" && zcat "$TINYIPA_RAMDISK_FILE" | sudo cpio -i -H newc -d )
}
function prepare_chroot {
sudo cp $REBUILDDIR/etc/resolv.conf $REBUILDDIR/etc/resolv.conf.old
sudo cp /etc/resolv.conf $REBUILDDIR/etc/resolv.conf
sudo cp -a $REBUILDDIR/opt/tcemirror $REBUILDDIR/opt/tcemirror.old
sudo sh -c "echo $TINYCORE_MIRROR_URL > $REBUILDDIR/opt/tcemirror"
mkdir -p $REBUILDDIR/tmp/builtin/optional
$CHROOT_CMD chown -R tc.staff /tmp/builtin
$CHROOT_CMD chmod -R a+w /tmp/builtin
$CHROOT_CMD ln -sf /tmp/builtin /etc/sysconfig/tcedir
echo "tc" | $CHROOT_CMD tee -a /etc/sysconfig/tcuser
$CHROOT_CMD mkdir -p /usr/local/tce.installed
$CHROOT_CMD chmod 777 /usr/local/tce.installed
mkdir -p $REBUILDDIR/tmp/overides
sudo cp -f $WORKDIR/build_files/fakeuname $REBUILDDIR/tmp/overides/uname
trap "sudo umount $REBUILDDIR/proc" EXIT
# Mount /proc for chroot commands
sudo mount --bind /proc "$REBUILDDIR/proc"
}
function clean_up_chroot {
# Unmount /proc and clean up everything
sudo umount $REBUILDDIR/proc
# all went well, remove the trap
trap - EXIT
sudo rm $REBUILDDIR/etc/sysconfig/tcuser
sudo rm $REBUILDDIR/etc/sysconfig/tcedir
sudo rm -rf $REBUILDDIR/usr/local/tce.installed
sudo rm -rf $REBUILDDIR/tmp/builtin
sudo rm -rf $REBUILDDIR/tmp/tcloop
sudo rm -rf $REBUILDDIR/tmp/overides
sudo mv $REBUILDDIR/opt/tcemirror.old $REBUILDDIR/opt/tcemirror
sudo mv $REBUILDDIR/etc/resolv.conf.old $REBUILDDIR/etc/resolv.conf
}
function install_ssh {
if [ ! -f "$REBUILDDIR/usr/local/etc/ssh/sshd_config" ]; then
# tinyipa was built without SSH server installed
# Install and configure bare minimum for SSH access
$TC_CHROOT_CMD tce-load -wic openssh
# Configure OpenSSH
$CHROOT_CMD cp /usr/local/etc/ssh/sshd_config.orig /usr/local/etc/ssh/sshd_config
echo "PasswordAuthentication no" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
# Generate and configure host keys - RSA, DSA, Ed25519
# NOTE(pas-ha) ECDSA host key will still be re-generated fresh on every image boot
$CHROOT_CMD ssh-keygen -q -t rsa -N "" -f /usr/local/etc/ssh/ssh_host_rsa_key
$CHROOT_CMD ssh-keygen -q -t dsa -N "" -f /usr/local/etc/ssh/ssh_host_dsa_key
$CHROOT_CMD ssh-keygen -q -t ed25519 -N "" -f /usr/local/etc/ssh/ssh_host_ed25519_key
echo "HostKey /usr/local/etc/ssh/ssh_host_rsa_key" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
echo "HostKey /usr/local/etc/ssh/ssh_host_dsa_key" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
echo "HostKey /usr/local/etc/ssh/ssh_host_ed25519_key" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
fi
# setup new user SSH keys anyway
$CHROOT_CMD mkdir -p /home/tc
$CHROOT_CMD chown -R tc.staff /home/tc
$TC_CHROOT_CMD mkdir -p /home/tc/.ssh
cat $_found_ssh_key | $TC_CHROOT_CMD tee /home/tc/.ssh/authorized_keys
$CHROOT_CMD chown tc.staff /home/tc/.ssh/authorized_keys
$TC_CHROOT_CMD chmod 600 /home/tc/.ssh/authorized_keys
}
function install_packages {
if [ -f "$WORKDIR/build_files/rebuildreqs.lst" ]; then
while read line; do
$TC_CHROOT_CMD tce-load -wic $line
done < $WORKDIR/build_files/rebuildreqs.lst
fi
}
function fix_python_optimize {
if grep -q "PYTHONOPTIMIZE=1" "$REBUILDDIR/opt/bootlocal.sh"; then
# tinyipa was built with optimized Python environment, apply fixes
echo "PYTHONOPTIMIZE=1" | $TC_CHROOT_CMD tee -a /home/tc/.ssh/environment
echo "PermitUserEnvironment yes" | $CHROOT_CMD tee -a /usr/local/etc/ssh/sshd_config
echo 'Defaults env_keep += "PYTHONOPTIMIZE"' | $CHROOT_CMD tee -a /etc/sudoers
fi
}
function make_symlinks {
set +x
echo "Symlink all from /usr/local/sbin to /usr/sbin"
cd "$REBUILDDIR/usr/local/sbin"
for target in *
do
if [ ! -f "$REBUILDDIR/usr/sbin/$target" ]
then
$CHROOT_CMD ln -s "/usr/local/sbin/$target" "/usr/sbin/$target"
fi
done
echo "Symlink all from /usr/local/bin to /usr/bin"
# this also includes symlinking Python to the place expected by Ansible
cd "$REBUILDDIR/usr/local/bin"
for target in *
do
if [ ! -f "$REBUILDDIR/usr/bin/$target" ]
then
$CHROOT_CMD ln -s "/usr/local/bin/$target" "/usr/bin/$target"
fi
done
set -x
}
function rebuild_ramdisk {
# Rebuild build directory into gz file
ansible_basename="ansible-$(basename $TINYIPA_RAMDISK_FILE)"
( cd "$REBUILDDIR" && sudo find | sudo cpio -o -H newc | gzip -9 > "$WORKDIR/${ansible_basename}" )
# Output file created by this script and its size
cd "$WORKDIR"
echo "Produced files:"
du -h "${ansible_basename}"
}
sudo -v
validate_params
get_tinyipa
unpack_ramdisk
prepare_chroot
# NOTE (pas-ha) default tinyipa is built without SSH access, enable it here
install_ssh
# NOTE (pas-ha) allow installing some extra pkgs by placing 'rebuildreqs.lst'
# file in the 'build_files' folder
install_packages
# NOTE(pas-ha) default tinyipa is built with PYOPTIMIZE_TINYIPA=true and
# for Ansible+python to work we need to ensure that PYTHONOPTIMIZE=1 is
# set for all sessions from 'tc' user including those that are escalated
# with 'sudo' afterwards
fix_python_optimize
# NOTE(pas-ha) Apparently on TinyCore Ansible's 'command' module is
# not searching for executables in the '/usr/local/(s)bin' paths.
# Thus we need to have everything from there symlinked to '/usr/(s)bin'
# which is being searched, so that 'command' module picks full utilities
# installed by 'util-linux' instead of built-in simplified BusyBox ones.
make_symlinks
clean_up_chroot
rebuild_ramdisk


@ -1,74 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ironic.drivers import base
from ironic.drivers import ipmi
from ironic.drivers.modules import fake
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules import pxe
from oslo_log import log as logging
from ironic_staging_drivers.ansible import deploy as ansible_deploy
from ironic_staging_drivers.libvirt import power as libvirt_power
LOG = logging.getLogger(__name__)
class AnsibleAndIPMIToolDriver(base.BaseDriver):
    """Ansible + Ipmitool driver."""

    def __init__(self):
        LOG.warning("This driver is deprecated and will be removed "
                    "in the Rocky release. "
                    "Use 'staging-ansible-ipmi' hardware type instead.")
        self.power = ipmitool.IPMIPower()
        self.boot = pxe.PXEBoot()
        self.deploy = ansible_deploy.AnsibleDeploy()
        self.management = ipmitool.IPMIManagement()
        self.vendor = ipmitool.VendorPassthru()


class FakeAnsibleDriver(base.BaseDriver):
    """Ansible + Fake driver"""

    def __init__(self):
        self.power = fake.FakePower()
        self.boot = pxe.PXEBoot()
        self.deploy = ansible_deploy.AnsibleDeploy()
        self.management = fake.FakeManagement()


class AnsibleAndLibvirtDriver(base.BaseDriver):
    """Ansible + Libvirt driver.

    NOTE: This driver is meant only for testing environments.
    """

    def __init__(self):
        LOG.warning("This driver is deprecated and will be removed "
                    "in the Rocky release. "
                    "Use 'staging-libvirt' hardware type instead.")
        self.power = libvirt_power.LibvirtPower()
        self.boot = pxe.PXEBoot()
        self.deploy = ansible_deploy.AnsibleDeploy()
        self.management = libvirt_power.LibvirtManagement()


# NOTE(yuriyz): This class is not a "real" hardware.
# Added to support the ansible deploy interface in 'ipmi' hardware
class AnsibleDeployIPMI(ipmi.IPMIHardware):

    @property
    def supported_deploy_interfaces(self):
        """List of supported deploy interfaces."""
        return (super(AnsibleDeployIPMI, self).supported_deploy_interfaces +
                [ansible_deploy.AnsibleDeploy])


@ -1,776 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ansible deploy driver
"""
import json
import os
import shlex
from ironic_lib import metrics_utils
from ironic_lib import utils as irlib_utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
import retrying
import six
import six.moves.urllib.parse as urlparse
import yaml
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import agent_base_vendor as agent_base
from ironic.drivers.modules import deploy_utils
ansible_opts = [
cfg.StrOpt('ansible_extra_args',
help=_('Extra arguments to pass on every '
'invocation of Ansible.')),
cfg.IntOpt('verbosity',
min=0,
max=4,
help=_('Set ansible verbosity level requested when invoking '
'"ansible-playbook" command. '
'4 includes detailed SSH session logging. '
'Default is 4 when global debug is enabled '
'and 0 otherwise.')),
cfg.StrOpt('ansible_playbook_script',
default='ansible-playbook',
help=_('Path to "ansible-playbook" script. '
'Default will search the $PATH configured for user '
'running ironic-conductor process. '
'Provide the full path when ansible-playbook is not in '
'$PATH or installed in not default location.')),
cfg.StrOpt('playbooks_path',
default=os.path.join(os.path.dirname(__file__), 'playbooks'),
help=_('Path to directory with playbooks, roles and '
'local inventory.')),
cfg.StrOpt('config_file_path',
default=os.path.join(
os.path.dirname(__file__), 'playbooks', 'ansible.cfg'),
help=_('Path to ansible configuration file. If set to empty, '
'system default will be used.')),
cfg.IntOpt('post_deploy_get_power_state_retries',
min=0,
default=6,
help=_('Number of times to retry getting power state to check '
'if bare metal node has been powered off after a soft '
'power off.')),
cfg.IntOpt('post_deploy_get_power_state_retry_interval',
min=0,
default=5,
help=_('Amount of time (in seconds) to wait between polling '
'power state after trigger soft poweroff.')),
cfg.IntOpt('extra_memory',
default=10,
help=_('Extra amount of memory in MiB expected to be consumed '
'by Ansible-related processes on the node. Affects '
'decision whether image will fit into RAM.')),
cfg.BoolOpt('use_ramdisk_callback',
default=True,
help=_('Use callback request from ramdisk for start deploy or '
'cleaning. Disable it when using custom ramdisk '
'without callback script. '
'When callback is disabled, Neutron is mandatory.')),
cfg.BoolOpt('image_store_insecure',
default=False,
help=_('Skip verifying SSL connections to the image store '
'when downloading the image. '
'Setting it to "True" is only recommended for testing '
'environments that use self-signed certificates.')),
cfg.StrOpt('image_store_cafile',
help=_('Specific CA bundle to use for validating '
'SSL connections to the image store. '
'If not specified, CA available in the ramdisk '
'will be used. '
'Is not used by default playbooks included with '
'the driver. '
'Suitable for environments that use self-signed '
'certificates.')),
cfg.StrOpt('image_store_certfile',
help=_('Client cert to use for SSL connections '
'to image store. '
'Is not used by default playbooks included with '
'the driver. '
'Can be used in custom playbooks and Ansible>=2.4.')),
cfg.StrOpt('image_store_keyfile',
help=_('Client key to use for SSL connections '
'to image store. '
'Is not used by default playbooks included with '
'the driver. '
'Can be used in custom playbooks and Ansible>=2.4.')),
]
CONF.register_opts(ansible_opts, group='ansible')
LOG = log.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
DEFAULT_PLAYBOOKS = {
'deploy': 'deploy.yaml',
'shutdown': 'shutdown.yaml',
'clean': 'clean.yaml'
}
DEFAULT_CLEAN_STEPS = 'clean_steps.yaml'
OPTIONAL_PROPERTIES = {
'ansible_deploy_username': _('Deploy ramdisk username for Ansible. '
'This user must have passwordless sudo '
'permissions. Default is "ansible". '
'Optional.'),
'ansible_deploy_key_file': _('Path to private key file. If not specified, '
'default keys for user running '
'ironic-conductor process will be used. '
'Note that for keys with password, those '
'must be pre-loaded into ssh-agent. '
'Optional.'),
'ansible_deploy_playbook': _('Name of the Ansible playbook used for '
'deployment. Default is %s. Optional.'
) % DEFAULT_PLAYBOOKS['deploy'],
'ansible_shutdown_playbook': _('Name of the Ansible playbook used to '
'power off the node in-band. '
'Default is %s. Optional.'
) % DEFAULT_PLAYBOOKS['shutdown'],
'ansible_clean_playbook': _('Name of the Ansible playbook used for '
'cleaning. Default is %s. Optional.'
) % DEFAULT_PLAYBOOKS['clean'],
'ansible_clean_steps_config': _('Name of the file with default cleaning '
'steps configuration. Default is %s. '
'Optional.'
) % DEFAULT_CLEAN_STEPS
}
COMMON_PROPERTIES = OPTIONAL_PROPERTIES
INVENTORY_FILE = os.path.join(CONF.ansible.playbooks_path, 'inventory')
class PlaybookNotFound(exception.IronicException):
    _msg_fmt = _('Failed to set ansible playbook for action %(action)s')


def _parse_ansible_driver_info(node, action='deploy'):
    user = node.driver_info.get('ansible_deploy_username', 'ansible')
    key = node.driver_info.get('ansible_deploy_key_file')
    playbook = node.driver_info.get('ansible_%s_playbook' % action,
                                    DEFAULT_PLAYBOOKS.get(action))
    if not playbook:
        raise PlaybookNotFound(action=action)
    return playbook, user, key


def _get_configdrive_path(basename):
    return os.path.join(CONF.tempdir, basename + '.cndrive')


def _get_node_ip_dhcp(task):
    """Get node IP from DHCP provider."""
    api = dhcp_factory.DHCPFactory().provider
    ip_addrs = api.get_ip_addresses(task)
    if not ip_addrs:
        raise exception.FailedToGetIPAddressOnPort(_(
            "Failed to get IP address for any port on node %s.") %
            task.node.uuid)
    if len(ip_addrs) > 1:
        error = _("Ansible driver does not support multiple IP addresses "
                  "during deploy or cleaning")
        raise exception.InstanceDeployFailure(reason=error)
    return ip_addrs[0]


def _get_node_ip_heartbeat(task):
    callback_url = task.node.driver_internal_info.get('agent_url', '')
    return urlparse.urlparse(callback_url).netloc.split(':')[0]


def _get_node_ip(task):
    if CONF.ansible.use_ramdisk_callback:
        return _get_node_ip_heartbeat(task)
    else:
        return _get_node_ip_dhcp(task)


def _prepare_extra_vars(host_list, variables=None):
    nodes_var = []
    for node_uuid, ip, user, extra in host_list:
        nodes_var.append(dict(name=node_uuid, ip=ip, user=user, extra=extra))
    extra_vars = dict(nodes=nodes_var)