Merge "Add Ansible-deploy driver"

Jenkins 2016-09-27 13:06:32 +00:00 committed by Gerrit Code Review
commit 9337381ca7
32 changed files with 2369 additions and 0 deletions

View File

@@ -9,3 +9,6 @@ fake_libvirt_fake
fake_amt_fake
pxe_amt_iscsi
pxe_amt_agent
pxe_ssh_ansible
pxe_libvirt_ansible
pxe_ipmitool_ansible

View File

@@ -0,0 +1,67 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ironic.drivers import base
from ironic.drivers.modules import fake
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules import pxe
from ironic.drivers.modules import ssh
from ironic_staging_drivers.ansible import deploy as ansible_deploy
from ironic_staging_drivers.libvirt import power as libvirt_power
class AnsibleAndSSHDriver(base.BaseDriver):
"""Ansible + SSH driver.
NOTE: This driver is meant only for testing environments.
"""
def __init__(self):
self.power = ssh.SSHPower()
self.boot = pxe.PXEBoot()
self.deploy = ansible_deploy.AnsibleDeploy()
self.management = ssh.SSHManagement()
class AnsibleAndIPMIToolDriver(base.BaseDriver):
"""Ansible + Ipmitool driver."""
def __init__(self):
self.power = ipmitool.IPMIPower()
self.boot = pxe.PXEBoot()
self.deploy = ansible_deploy.AnsibleDeploy()
self.management = ipmitool.IPMIManagement()
self.vendor = ipmitool.VendorPassthru()
class FakeAnsibleDriver(base.BaseDriver):
"""Ansible + Fake driver"""
def __init__(self):
self.power = fake.FakePower()
self.boot = pxe.PXEBoot()
self.deploy = ansible_deploy.AnsibleDeploy()
self.management = fake.FakeManagement()
class AnsibleAndLibvirtDriver(base.BaseDriver):
"""Ansible + Libvirt driver.
NOTE: This driver is meant only for testing environments.
"""
def __init__(self):
self.power = libvirt_power.LibvirtPower()
self.boot = pxe.PXEBoot()
self.deploy = ansible_deploy.AnsibleDeploy()
self.management = libvirt_power.LibvirtManagement()
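# A minimal sketch (not part of this change; values are assumptions) of
# enabling one of the drivers added above in ironic.conf, using one of the
# driver names listed at the top of this change:
#
#   [DEFAULT]
#   enabled_drivers = pxe_ipmitool_ansible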

View File

@@ -0,0 +1,746 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ansible deploy driver
"""
import json
import os
import shlex
from ironic_lib import utils as irlib_utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
import retrying
import six
import six.moves.urllib.parse as urlparse
import yaml
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import image_service
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.conductor import rpcapi
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
ansible_opts = [
cfg.StrOpt('ansible_extra_args',
help=_('Extra arguments to pass on every '
'invocation of Ansible.')),
cfg.IntOpt('verbosity',
min=0,
max=4,
help=_('Set the Ansible verbosity level requested when invoking '
'the "ansible-playbook" command. '
'4 includes detailed SSH session logging. '
'Default is 4 when global debug is enabled '
'and 0 otherwise.')),
cfg.StrOpt('ansible_playbook_script',
default='ansible-playbook',
help=_('Path to "ansible-playbook" script. '
'Default will search the $PATH configured for the user '
'running the ironic-conductor process. '
'Provide the full path when ansible-playbook is not in '
'$PATH or is installed in a non-default location.')),
cfg.StrOpt('playbooks_path',
default=os.path.join(os.path.dirname(__file__), 'playbooks'),
help=_('Path to directory with playbooks, roles and '
'local inventory.')),
cfg.StrOpt('config_file_path',
default=os.path.join(
os.path.dirname(__file__), 'playbooks', 'ansible.cfg'),
help=_('Path to ansible configuration file. If set to empty, '
'system default will be used.')),
cfg.IntOpt('post_deploy_get_power_state_retries',
min=0,
default=6,
help=_('Number of times to retry getting power state to check '
'if bare metal node has been powered off after a soft '
'power off.')),
cfg.IntOpt('post_deploy_get_power_state_retry_interval',
min=0,
default=5,
help=_('Amount of time (in seconds) to wait between polling '
'the power state after triggering a soft power off.')),
cfg.IntOpt('extra_memory',
default=10,
help=_('Extra amount of memory in MiB expected to be consumed '
'by Ansible-related processes on the node. Affects '
'the decision whether the image will fit into RAM.')),
cfg.BoolOpt('use_ramdisk_callback',
default=True,
help=_('Use a callback request from the ramdisk to start deploy '
'or cleaning. Disable it when using a custom ramdisk '
'without the callback script. '
'When the callback is disabled, Neutron is mandatory.')),
]
CONF.register_opts(ansible_opts, group='ansible')
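# For illustration only (values here are assumptions, not recommendations),
# the options registered above belong to the [ansible] section of the ironic
# configuration file, e.g.:
#
#   [ansible]
#   verbosity = 2
#   playbooks_path = /etc/ironic/ansible/playbooks
#   extra_memory = 20
#   use_ramdisk_callback = true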
LOG = log.getLogger(__name__)
DEFAULT_PLAYBOOKS = {
'deploy': 'deploy.yaml',
'clean': 'clean.yaml'
}
DEFAULT_CLEAN_STEPS = 'clean_steps.yaml'
OPTIONAL_PROPERTIES = {
'ansible_deploy_username': _('Deploy ramdisk username for Ansible. '
'This user must have passwordless sudo '
'permissions. Default is "ansible". '
'Optional.'),
'ansible_deploy_key_file': _('Path to private key file. If not specified, '
'default keys of the user running the '
'ironic-conductor process will be used. '
'Note that password-protected keys must '
'be pre-loaded into ssh-agent. '
'Optional.'),
'ansible_deploy_playbook': _('Name of the Ansible playbook used for '
'deployment. Default is %s. Optional.'
) % DEFAULT_PLAYBOOKS['deploy'],
'ansible_clean_playbook': _('Name of the Ansible playbook used for '
'cleaning. Default is %s. Optional.'
) % DEFAULT_PLAYBOOKS['clean'],
'ansible_clean_steps_config': _('Name of the file with default cleaning '
'steps configuration. Default is %s. '
'Optional.'
) % DEFAULT_CLEAN_STEPS
}
COMMON_PROPERTIES = OPTIONAL_PROPERTIES
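# A hypothetical example of supplying the optional properties above for a
# node (the exact client syntax depends on the python-ironicclient version;
# the key file path is illustrative):
#
#   ironic node-update <node-uuid> add \
#       driver_info/ansible_deploy_username=ansible \
#       driver_info/ansible_deploy_key_file=/etc/ironic/ansible_key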
DISK_LAYOUT_PARAMS = ('root_gb', 'swap_mb', 'ephemeral_gb')
INVENTORY_FILE = os.path.join(CONF.ansible.playbooks_path, 'inventory')
class PlaybookNotFound(exception.IronicException):
_msg_fmt = _('Failed to set ansible playbook for action %(action)s')
def _parse_ansible_driver_info(node, action='deploy'):
user = node.driver_info.get('ansible_deploy_username', 'ansible')
key = node.driver_info.get('ansible_deploy_key_file')
playbook = node.driver_info.get('ansible_%s_playbook' % action,
DEFAULT_PLAYBOOKS.get(action))
if not playbook:
raise PlaybookNotFound(action=action)
return playbook, user, key
def _get_configdrive_path(basename):
return os.path.join(CONF.tempdir, basename + '.cndrive')
# NOTE(yuriyz): this is a copy from agent driver
def build_instance_info_for_deploy(task):
"""Build instance_info necessary for deploying to a node."""
node = task.node
instance_info = node.instance_info
image_source = instance_info['image_source']
if service_utils.is_glance_image(image_source):
glance = image_service.GlanceImageService(version=2,
context=task.context)
image_info = glance.show(image_source)
swift_temp_url = glance.swift_temp_url(image_info)
LOG.debug('Got image info: %(info)s for node %(node)s.',
{'info': image_info, 'node': node.uuid})
instance_info['image_url'] = swift_temp_url
instance_info['image_checksum'] = image_info['checksum']
instance_info['image_disk_format'] = image_info['disk_format']
else:
try:
image_service.HttpImageService().validate_href(image_source)
except exception.ImageRefValidationFailed:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Ansible deploy supports only HTTP(S) URLs as "
"instance_info['image_source']. Either %s "
"is not a valid HTTP(S) URL or "
"is not reachable."), image_source)
instance_info['image_url'] = image_source
return instance_info
def _get_node_ip(task):
api = dhcp_factory.DHCPFactory().provider
ip_addrs = api.get_ip_addresses(task)
if not ip_addrs:
raise exception.FailedToGetIPAddressOnPort(_(
"Failed to get IP address for any port on node %s.") %
task.node.uuid)
if len(ip_addrs) > 1:
error = _("Ansible driver does not support multiple IP addresses "
"during deploy or cleaning")
raise exception.InstanceDeployFailure(reason=error)
return ip_addrs[0]
# some good code from agent
def _reboot_and_finish_deploy(task):
wait = CONF.ansible.post_deploy_get_power_state_retry_interval * 1000
attempts = CONF.ansible.post_deploy_get_power_state_retries + 1
@retrying.retry(
stop_max_attempt_number=attempts,
retry_on_result=lambda state: state != states.POWER_OFF,
wait_fixed=wait
)
def _wait_until_powered_off(task):
return task.driver.power.get_power_state(task)
try:
_wait_until_powered_off(task)
except Exception as e:
LOG.warning(_LW('Failed to soft power off node %(node_uuid)s '
'in at least %(timeout)d seconds. Error: %(error)s'),
{'node_uuid': task.node.uuid,
'timeout': (wait * (attempts - 1)) / 1000,
'error': e})
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.remove_provisioning_network(task)
task.driver.network.configure_tenant_networks(task)
manager_utils.node_power_action(task, states.POWER_ON)
def _prepare_extra_vars(host_list, variables=None):
nodes_var = []
for node_uuid, ip, user, extra in host_list:
nodes_var.append(dict(name=node_uuid, ip=ip, user=user, extra=extra))
extra_vars = dict(ironic_nodes=nodes_var)
if variables:
extra_vars.update(variables)
return extra_vars
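# For example (illustrative values only), a single node passed through
# _prepare_extra_vars yields something like:
#   {'ironic_nodes': [{'name': '<node-uuid>', 'ip': '192.0.2.10',
#                      'user': 'ansible', 'extra': {}}]}
# plus any entries from the optional "variables" mapping merged in.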
def _run_playbook(name, extra_vars, key, tags=None, notags=None):
"""Execute ansible-playbook."""
playbook = os.path.join(CONF.ansible.playbooks_path, name)
args = [CONF.ansible.ansible_playbook_script, playbook,
'-i', INVENTORY_FILE,
'-e', json.dumps(extra_vars),
]
if CONF.ansible.config_file_path:
env = ['env', 'ANSIBLE_CONFIG=%s' % CONF.ansible.config_file_path]
args = env + args
if tags:
args.append('--tags=%s' % ','.join(tags))
if notags:
args.append('--skip-tags=%s' % ','.join(notags))
if key:
args.append('--private-key=%s' % key)
verbosity = CONF.ansible.verbosity
if verbosity is None and CONF.debug:
verbosity = 4
if verbosity:
args.append('-' + 'v' * verbosity)
if CONF.ansible.ansible_extra_args:
args.extend(shlex.split(CONF.ansible.ansible_extra_args))
try:
out, err = utils.execute(*args)
return out, err
except processutils.ProcessExecutionError as e:
raise exception.InstanceDeployFailure(reason=e)
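# As an illustration (paths and key file are assumptions), with the default
# configuration and debug logging enabled the assembled command resembles:
#   env ANSIBLE_CONFIG=<playbooks_path>/ansible.cfg ansible-playbook \
#       <playbooks_path>/deploy.yaml -i <playbooks_path>/inventory \
#       -e '{"ironic_nodes": [...]}' --private-key=<key_file> -vvvv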
def _calculate_memory_req(task):
image_source = task.node.instance_info['image_source']
image_size = images.download_size(task.context, image_source)
return image_size // units.Mi + CONF.ansible.extra_memory
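# e.g. (assumed image size) a 2 GiB image gives 2147483648 // units.Mi = 2048,
# plus the default extra_memory of 10, i.e. a 2058 MiB requirement.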
def _parse_partitioning_info(node):
info = node.instance_info
i_info = {}
i_info['root_gb'] = info.get('root_gb')
error_msg = _("'root_gb' is missing in node's instance_info")
deploy_utils.check_for_missing_params(i_info, error_msg)
i_info['swap_mb'] = info.get('swap_mb', 0)
i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0)
err_msg_invalid = _("Cannot validate parameter for deploy. Invalid "
"parameter %(param)s. Reason: %(reason)s")
for param in DISK_LAYOUT_PARAMS:
try:
i_info[param] = int(i_info[param])
except ValueError:
reason = _("%s is not an integer value") % i_info[param]
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': param,
'reason': reason})
# convert to sizes expected by 'parted' Ansible module
root_mib = 1024 * i_info.pop('root_gb')
swap_mib = i_info.pop('swap_mb')
ephemeral_mib = 1024 * i_info.pop('ephemeral_gb')
partitions = []
root_partition = {'name': 'root',
'size_mib': root_mib,
'boot': 'yes',
'swap': 'no'}
partitions.append(root_partition)
if swap_mib:
swap_partition = {'name': 'swap',
'size_mib': swap_mib,
'boot': 'no',
'swap': 'yes'}
partitions.append(swap_partition)
if ephemeral_mib:
ephemeral_partition = {'name': 'ephemeral',
'size_mib': ephemeral_mib,
'boot': 'no',
'swap': 'no'}
partitions.append(ephemeral_partition)
i_info['ephemeral_format'] = info.get('ephemeral_format')
if not i_info['ephemeral_format']:
i_info['ephemeral_format'] = CONF.pxe.default_ephemeral_format
preserve_ephemeral = info.get('preserve_ephemeral', False)
try:
i_info['preserve_ephemeral'] = (
strutils.bool_from_string(preserve_ephemeral, strict=True))
except ValueError as e:
raise exception.InvalidParameterValue(
err_msg_invalid % {'param': 'preserve_ephemeral', 'reason': e})
i_info['preserve_ephemeral'] = (
'yes' if i_info['preserve_ephemeral'] else 'no')
i_info['ironic_partitions'] = partitions
return i_info
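# Illustrative example (not from a real node): instance_info with
# root_gb=10, swap_mb=512 and no ephemeral partition produces
#   {'ironic_partitions': [
#        {'name': 'root', 'size_mib': 10240, 'boot': 'yes', 'swap': 'no'},
#        {'name': 'swap', 'size_mib': 512, 'boot': 'no', 'swap': 'yes'}],
#    'ephemeral_format': <CONF.pxe.default_ephemeral_format>,
#    'preserve_ephemeral': 'no'}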
def _prepare_variables(task):
node = task.node
i_info = node.instance_info
image = {
'url': i_info['image_url'],
'mem_req': _calculate_memory_req(task),
'disk_format': i_info.get('image_disk_format'),
}
checksum = i_info.get('image_checksum')
if checksum:
# NOTE(pas-ha) checksum can be in <algo>:<checksum> format
# as supported by various Ansible modules, mostly good for
# standalone Ironic case when instance_info is populated manually.
# With no <algo> we take that instance_info is populated from Glance,
# where API reports checksum as MD5 always.
if ':' not in checksum:
checksum = 'md5:%s' % checksum
image['checksum'] = checksum
variables = {'image': image}
configdrive = i_info.get('configdrive')
if configdrive:
if urlparse.urlparse(configdrive).scheme in ('http', 'https'):
cfgdrv_type = 'url'
cfgdrv_location = configdrive
else:
cfgdrv_location = _get_configdrive_path(node.uuid)
with open(cfgdrv_location, 'w') as f:
f.write(configdrive)
cfgdrv_type = 'file'
variables['configdrive'] = {'type': cfgdrv_type,
'location': cfgdrv_location}
return variables
def _validate_clean_steps(steps, node_uuid):
missing = []
for step in steps:
name = step.setdefault('name', 'unnamed')
if 'interface' not in step:
missing.append({'name': name, 'field': 'interface'})
args = step.get('args', {})
for arg_name, arg in args.items():
if arg.get('required', False) and 'value' not in arg:
missing.append({'name': name,
'field': '%s.value' % arg_name})
if missing:
err_string = ', '.join(
'name %(name)s, field %(field)s' % i for i in missing)
msg = _("Malformed clean_steps file: %s") % err_string
LOG.error(msg)
raise exception.NodeCleaningFailure(node=node_uuid,
reason=msg)
def _get_clean_steps(task, interface=None, override_priorities=None):
"""Get cleaning steps."""
clean_steps_file = task.node.driver_info.get('ansible_clean_steps_config',
DEFAULT_CLEAN_STEPS)
path = os.path.join(CONF.ansible.playbooks_path, clean_steps_file)
try:
with open(path) as f:
internal_steps = yaml.safe_load(f)
except Exception as e:
msg = _('Failed to load clean steps from file '
'%(file)s: %(exc)s') % {'file': path, 'exc': e}
raise exception.NodeCleaningFailure(node=task.node.uuid, reason=msg)
_validate_clean_steps(internal_steps, task.node.uuid)
steps = []
override = override_priorities or {}
for params in internal_steps:
name = params['name']
clean_if = params['interface']
if interface is not None and interface != clean_if:
continue
new_priority = override.get(name)
priority = (new_priority if new_priority is not None else
params.get('priority', 0))
args = {}
argsinfo = params.get('args', {})
for arg, arg_info in argsinfo.items():
args[arg] = arg_info.pop('value', None)
step = {
'interface': clean_if,
'step': name,
'priority': priority,
'abortable': False,
'argsinfo': argsinfo,
'args': args
}
steps.append(step)
return steps
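# As an illustration, the erase_devices_metadata entry from the default
# clean_steps.yaml (added later in this change) is returned roughly as:
#   {'interface': 'deploy', 'step': 'erase_devices_metadata',
#    'priority': <override or 99>, 'abortable': False,
#    'args': {'tags': ['zap']},
#    'argsinfo': {'tags': {'required': True, 'description': '...'}}}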
# taken from agent driver
def _notify_conductor_resume_clean(task):
LOG.debug('Sending RPC to conductor to resume cleaning for node %s',
task.node.uuid)
uuid = task.node.uuid
rpc = rpcapi.ConductorAPI()
topic = rpc.get_topic_for(task.node)
# Need to release the lock to let the conductor take it
task.release_resources()
rpc.continue_node_clean(task.context, uuid, topic=topic)
def _deploy(task, node_address):
"""Internal function for deployment to a node."""
notags = ['wait'] if CONF.ansible.use_ramdisk_callback else []
node = task.node
LOG.debug('IP of node %(node)s is %(ip)s',
{'node': node.uuid, 'ip': node_address})
iwdi = node.driver_internal_info.get('is_whole_disk_image')
variables = _prepare_variables(task)
if iwdi:
notags.append('parted')
else:
variables.update(_parse_partitioning_info(task.node))
playbook, user, key = _parse_ansible_driver_info(task.node)
node_list = [(node.uuid, node_address, user, node.extra)]
extra_vars = _prepare_extra_vars(node_list, variables=variables)
LOG.debug('Starting deploy on node %s', node.uuid)
# any caller should manage exceptions raised from here
_run_playbook(playbook, extra_vars, key, notags=notags)
LOG.info(_LI('Ansible completed deploy on node %s'), node.uuid)
LOG.debug('Rebooting node %s to instance', node.uuid)
manager_utils.node_set_boot_device(task, 'disk', persistent=True)
_reboot_and_finish_deploy(task)
task.driver.boot.clean_up_ramdisk(task)
class AnsibleDeploy(base.DeployInterface):
"""Interface for deploy-related actions."""
def get_properties(self):
"""Return the properties of the interface."""
return COMMON_PROPERTIES
def validate(self, task):
"""Validate the driver-specific Node deployment info."""
task.driver.boot.validate(task)
node = task.node
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if not iwdi and deploy_utils.get_boot_option(node) == "netboot":
raise exception.InvalidParameterValue(_(
"Node %(node)s is configured to use the %(driver)s driver "
"which does not support netboot.") % {'node': node.uuid,
'driver': node.driver})
params = {}
image_source = node.instance_info.get('image_source')
params['instance_info.image_source'] = image_source
error_msg = _('Node %s failed to validate deploy image info. Some '
'parameters were missing') % node.uuid
deploy_utils.check_for_missing_params(params, error_msg)
@task_manager.require_exclusive_lock
def deploy(self, task):
"""Perform a deployment to a node."""
manager_utils.node_power_action(task, states.REBOOT)
if CONF.ansible.use_ramdisk_callback:
return states.DEPLOYWAIT
node = task.node
ip_addr = _get_node_ip(task)
try:
_deploy(task, ip_addr)
except Exception as e:
error = _('Deploy failed for node %(node)s: '
'Error: %(exc)s') % {'node': node.uuid,
'exc': six.text_type(e)}
LOG.exception(error)
self._set_failed_state(task, error)
else:
LOG.info(_LI('Deployment to node %s done'), node.uuid)
return states.DEPLOYDONE
@task_manager.require_exclusive_lock
def tear_down(self, task):
"""Tear down a previous deployment on the task's node."""
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.unconfigure_tenant_networks(task)
return states.DELETED
def prepare(self, task):
"""Prepare the deployment environment for this node."""
node = task.node
# TODO(pas-ha) investigate takeover scenario
if node.provision_state == states.DEPLOYING:
# adding network-driver dependent provisioning ports
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.add_provisioning_network(task)
if node.provision_state not in [states.ACTIVE, states.ADOPTING]:
node.instance_info = build_instance_info_for_deploy(task)
node.save()
boot_opt = deploy_utils.build_agent_options(node)
task.driver.boot.prepare_ramdisk(task, boot_opt)
def clean_up(self, task):
"""Clean up the deployment environment for this node."""
task.driver.boot.clean_up_ramdisk(task)
provider = dhcp_factory.DHCPFactory()
provider.clean_dhcp(task)
irlib_utils.unlink_without_raise(
_get_configdrive_path(task.node.uuid))
def take_over(self, task):
LOG.error(_LE("Ansible deploy does not support take over. "
"You must redeploy the node %s explicitly."),
task.node.uuid)
def get_clean_steps(self, task):
"""Get the list of clean steps from the file.
:param task: a TaskManager object containing the node
:returns: A list of clean step dictionaries
"""
new_priorities = {
'erase_devices': CONF.deploy.erase_devices_priority,
'erase_devices_metadata':
CONF.deploy.erase_devices_metadata_priority
}
return _get_clean_steps(task, interface='deploy',
override_priorities=new_priorities)
def execute_clean_step(self, task, step):
"""Execute a clean step.
:param task: a TaskManager object containing the node
:param step: a clean step dictionary to execute
:returns: None
"""
node = task.node
playbook, user, key = _parse_ansible_driver_info(
task.node, action='clean')
stepname = step['step']
try:
ip_addr = node.driver_internal_info['ansible_cleaning_ip']
except KeyError:
raise exception.NodeCleaningFailure(node=node.uuid,
reason='undefined node IP '
'addresses')
node_list = [(node.uuid, ip_addr, user, node.extra)]
extra_vars = _prepare_extra_vars(node_list)
LOG.debug('Starting cleaning step %(step)s on node %(node)s',
{'node': node.uuid, 'step': stepname})
step_tags = step['args'].get('tags', [])
try:
_run_playbook(playbook, extra_vars, key,
tags=step_tags)
except exception.InstanceDeployFailure as e:
LOG.error(_LE("Ansible failed cleaning step %(step)s "
"on node %(node)s."), {
'node': node.uuid, 'step': stepname})
manager_utils.cleaning_error_handler(task, six.text_type(e))
LOG.info(_LI('Ansible completed cleaning step %(step)s '
'on node %(node)s.'),
{'node': node.uuid, 'step': stepname})
def prepare_cleaning(self, task):
"""Boot into the ramdisk to prepare for cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the previous cleaning ports cannot
be removed or if new cleaning ports cannot be created
:returns: None or states.CLEANWAIT for async prepare.
"""
node = task.node
use_callback = CONF.ansible.use_ramdisk_callback
if use_callback:
manager_utils.set_node_cleaning_steps(task)
if not node.driver_internal_info['clean_steps']:
# no clean steps configured, nothing to do.
return
deploy_utils.prepare_cleaning_ports(task)
boot_opt = deploy_utils.build_agent_options(node)
task.driver.boot.prepare_ramdisk(task, boot_opt)
manager_utils.node_power_action(task, states.REBOOT)
if use_callback:
return states.CLEANWAIT
ip_addr = _get_node_ip(task)
LOG.debug('IP of node %(node)s is %(ip)s',
{'node': node.uuid, 'ip': ip_addr})
driver_internal_info = node.driver_internal_info
driver_internal_info['ansible_cleaning_ip'] = ip_addr
node.driver_internal_info = driver_internal_info
node.save()
playbook, user, key = _parse_ansible_driver_info(
task.node, action='clean')
node_list = [(node.uuid, ip_addr, user, node.extra)]
extra_vars = _prepare_extra_vars(node_list)
LOG.debug('Waiting for ramdisk on node %s for cleaning', node.uuid)
_run_playbook(playbook, extra_vars, key, tags=['wait'])
LOG.info(_LI('Node %s is ready for cleaning'), node.uuid)
def tear_down_cleaning(self, task):
"""Clean up the PXE and DHCP files after cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the cleaning ports cannot be
removed
"""
node = task.node
driver_internal_info = node.driver_internal_info
driver_internal_info.pop('ansible_cleaning_ip', None)
node.driver_internal_info = driver_internal_info
node.save()
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.boot.clean_up_ramdisk(task)
deploy_utils.tear_down_cleaning_ports(task)
# FIXME(pas-ha): remove this workaround after nearest Ironic release
# that contains the specified commit (next after 6.1.0)
# and require this Ironic release
def _upgrade_lock(self, task, purpose=None):
try:
task.upgrade_lock(purpose=purpose)
except TypeError:
LOG.warning(_LW("To have better logging please update your "
"Ironic installation to contain commit "
"2a73b50a7fb29c4e73511d2294aa19c37d96c969."))
task.upgrade_lock()
# FIXME(pas-ha): remove this workaround after nearest Ironic release
# that contains the specified commit (next after 6.1.0)
# and require this Ironic release
def _set_failed_state(self, task, error):
try:
deploy_utils.set_failed_state(task, error, collect_logs=False)
except TypeError:
LOG.warning(_LW("To have proper error handling please update "
"your Ironic installation to contain commit "
"bb62f256f7aa55c292ebeae73ca25a4a9f0ec8c0."))
deploy_utils.set_failed_state(task, error)
def heartbeat(self, task, callback_url):
"""Method for ansible ramdisk callback."""
node = task.node
address = urlparse.urlparse(callback_url).netloc.split(':')[0]
if node.maintenance:
# this shouldn't happen often, but skip the rest if it does.
LOG.debug('Heartbeat from node %(node)s in maintenance mode; '
'not taking any action.', {'node': node.uuid})
elif node.provision_state == states.DEPLOYWAIT:
LOG.debug('Heartbeat from %(node)s.', {'node': node.uuid})
self._upgrade_lock(task, purpose='deploy')
node = task.node
task.process_event('resume')
try:
_deploy(task, address)
except Exception as e:
error = _('Deploy failed for node %(node)s: '
'Error: %(exc)s') % {'node': node.uuid,
'exc': six.text_type(e)}
LOG.exception(error)
self._set_failed_state(task, error)
else:
LOG.info(_LI('Deployment to node %s done'), node.uuid)
task.process_event('done')
elif node.provision_state == states.CLEANWAIT:
LOG.debug('Node %s just booted to start cleaning.',
node.uuid)
self._upgrade_lock(task, purpose='clean')
node = task.node
driver_internal_info = node.driver_internal_info
driver_internal_info['ansible_cleaning_ip'] = address
node.driver_internal_info = driver_internal_info
node.save()
try:
_notify_conductor_resume_clean(task)
except Exception as e:
error = _('cleaning failed for node %(node)s: '
'Error: %(exc)s') % {'node': node.uuid,
'exc': six.text_type(e)}
LOG.exception(error)
manager_utils.cleaning_error_handler(task, error)
else:
LOG.warning(_LW('Callback from %(node)s in invalid provision '
'state %(state)s'),
{'node': node.uuid, 'state': node.provision_state})

View File

@@ -0,0 +1,11 @@
- hosts: conductor
gather_facts: no
tasks:
- add_host:
group: ironic
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.ip }}"
ansible_ssh_user: "{{ item.user }}"
ironic_extra: "{{ item.extra | default({}) }}"
with_items: "{{ ironic_nodes }}"
tags: always

View File

@@ -0,0 +1,24 @@
[defaults]
# retries through the ansible-deploy driver are not supported
retry_files_enabled = False
# this uses the supplied callback plugin to interleave Ansible event logs
# into the ironic-conductor log as set in the ironic configuration file,
# see callback_plugin/ironic_log.ini for some options to set
# (DevStack _needs_ some tweaks)
callback_whitelist = ironic_log
# For better security, bake SSH host keys into the bootstrap image,
# add them to ~/.ssh/known_hosts of the user running the ironic-conductor service
# on all nodes where ironic-conductor and the ansible-deploy driver are installed,
# and set host_key_checking to True (or comment it out, as True is the default)
host_key_checking = False
# uncomment if you have problems with the ramdisk locale on Ansible >= 2.1
#module_set_locale=False
[ssh_connection]
# pipelining greatly increases the speed of deployment; disable it only when
# the ssh client on the ironic node or the ssh server in the bootstrap image
# does not support it, or if you cannot disable "requiretty" for the
# passwordless sudoer user in the bootstrap image.
# See Ansible documentation for more info:
# http://docs.ansible.com/ansible/intro_configuration.html#pipelining
pipelining = True

View File

@@ -0,0 +1,8 @@
[ironic]
# If Ironic's config is not in one of default oslo_config locations,
# specify the path to it here
#config_file = None
# If running a testing system with only stderr logging (e.g. DevStack)
# specify an actual file to log into here, for example the ironic-conductor log file.
#log_file = None

View File

@@ -0,0 +1,122 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
from oslo_config import cfg
from oslo_log import log as logging
from ironic.common import i18n
from ironic import version
basename = os.path.splitext(__file__)[0]
config = ConfigParser.ConfigParser()
ironic_config = None
ironic_log_file = None
try:
config.readfp(open(basename + ".ini"))
if config.has_option('ironic', 'config_file'):
ironic_config = config.get('ironic', 'config_file')
if config.has_option('ironic', 'log_file'):
ironic_log_file = config.get('ironic', 'log_file')
except Exception:
pass
CONF = cfg.CONF
DOMAIN = 'ironic'
LOG = logging.getLogger(__name__, project=DOMAIN,
version=version.version_info.release_string())
logging.register_options(CONF)
conf_kwargs = dict(args=[], project=DOMAIN,
version=version.version_info.release_string())
if ironic_config:
conf_kwargs['default_config_files'] = [ironic_config]
CONF(**conf_kwargs)
if ironic_log_file:
CONF.set_override("log_file", ironic_log_file)
CONF.set_override("use_stderr", False)
logging.setup(CONF, DOMAIN)
class CallbackModule(object):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'ironic_log'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
self.node = None
def runner_msg_dict(self, result):
self.node = result._host.get_name()
name = result._task.get_name()
res = str(result._result)
return dict(node=self.node, name=name, res=res)
def v2_playbook_on_task_start(self, task, is_conditional):
# NOTE(pas-ha) I do not know (yet) how to obtain a ref to host
# until first task is processed
node = self.node or "Node"
name = task.get_name()
if name == 'setup':
LOG.debug("Processing task %(name)s.", dict(name=name))
else:
LOG.debug("Processing task %(name)s on node %(node)s.",
dict(name=name, node=node))
def v2_runner_on_failed(self, result, *args, **kwargs):
LOG.error(i18n._LE(
"Ansible task %(name)s failed on node %(node)s: %(res)s"),
self.runner_msg_dict(result))
def v2_runner_on_ok(self, result):
msg_dict = self.runner_msg_dict(result)
if msg_dict['name'] == 'setup':
LOG.info(i18n._LI(
"Ansible task 'setup' complete on node %(node)s"),
msg_dict)
else:
LOG.info(i18n._LI(
"Ansible task %(name)s complete on node %(node)s: %(res)s"),
msg_dict)
def v2_runner_on_unreachable(self, result):
LOG.error(i18n._LE(
"Node %(node)s was unreachable for Ansible task %(name)s: "
"%(res)s"),
self.runner_msg_dict(result))
def v2_runner_on_async_poll(self, result):
LOG.debug("Polled ansible task %(name)s for complete "
"on node %(node)s: %(res)s",
self.runner_msg_dict(result))
def v2_runner_on_async_ok(self, result):
LOG.info(i18n._LI(
"Async Ansible task %(name)s complete on node %(node)s: %(res)s"),
self.runner_msg_dict(result))
def v2_runner_on_async_failed(self, result):
LOG.error(i18n._LE(
"Async Ansible task %(name)s failed on node %(node)s: %(res)s"),
self.runner_msg_dict(result))
def v2_runner_on_skipped(self, result):
LOG.debug("Ansible task %(name)s skipped on node %(node)s: %(res)s",
self.runner_msg_dict(result))

View File

@@ -0,0 +1,12 @@
---
- include: add-ironic-nodes.yaml
- hosts: ironic
gather_facts: no
roles:
- role: wait
tags: wait
- hosts: ironic
roles:
- clean

View File

@@ -0,0 +1,19 @@
- name: erase_devices_metadata
priority: 99
interface: deploy
args:
tags:
required: true
description: list of playbook tags used to erase partition table on disk devices
value:
- zap
- name: erase_devices
priority: 10
interface: deploy
args:
tags:
required: true
description: list of playbook tags used to erase disk devices
value:
- shred

View File

@@ -0,0 +1,13 @@
---
- include: add-ironic-nodes.yaml
- hosts: ironic
gather_facts: no
roles:
- role: wait
tags: wait
- hosts: ironic
roles:
- deploy
- shutdown

View File

@@ -0,0 +1 @@
conductor ansible_connection=local

View File

@@ -0,0 +1,111 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
PARTITION_TYPES = ('primary', 'logical', 'extended')
def construct_parted_args(device):
parted_args = [
'-s', device['device'],
]
if device['label']:
parted_args.extend(['mklabel', device['label']])
partitions = device['partitions']
if partitions:
parted_args.extend(['-a', 'optimal', '--', 'unit', 'MiB'])
start = 1
for ind, partition in enumerate(device['partitions']):
parted_args.extend([
'mkpart', partition['type']])
if partition['swap']:
parted_args.append('linux-swap')
end = start + partition['size_mib']
parted_args.extend(["%i" % start, "%i" % end])
start = end
if partition['boot']:
parted_args.extend([
'set', str(ind + 1), 'boot', 'on'])
return parted_args
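# Example (hypothetical device layout): for
#   {'device': '/dev/sda', 'label': 'msdos',
#    'partitions': [{'type': 'primary', 'size_mib': 10240,
#                    'boot': True, 'swap': False}]}
# the function returns
#   ['-s', '/dev/sda', 'mklabel', 'msdos', '-a', 'optimal', '--',
#    'unit', 'MiB', 'mkpart', 'primary', '1', '10241',
#    'set', '1', 'boot', 'on']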
def validate_partitions(module, partitions):
for ind, partition in enumerate(partitions):
# partition name might be an empty string
partition['name'] = partition.get('name') or str(ind + 1)
size = partition.get('size_mib', None)
if not size:
module.fail_json(msg="Partition size must be provided")
try:
partition['size_mib'] = int(size)
except ValueError:
module.fail_json(msg="Can not cast partition size to INT.")
partition.setdefault('type', 'primary')
if partition['type'] not in PARTITION_TYPES:
module.fail_json(msg="Partition type must be one of "
"%s." % PARTITION_TYPES)
partition['swap'] = module.boolean(partition.get('swap', False))
partition['boot'] = module.boolean(partition.get('boot', False))
if partition['boot'] and partition['swap']:
module.fail_json(msg="Can not set partition to "
"boot and swap simultaneously.")
# TODO(pas-ha) add more validation, e.g.
# - only one boot partition?
# - no more than 4 primary partitions on msdos table
# - no more than one extended partition on msdos table
# - estimate and validate available space
def main():
module = AnsibleModule(
argument_spec=dict(
device=dict(required=True, type='str'),
dryrun=dict(required=False, default=False, type='bool'),
new_label=dict(required=False, default=False, type='bool'),
label=dict(required=False, default='msdos', choices=[
"bsd", "dvh", "gpt", "loop", "mac", "msdos", "pc98", "sun"]),
partitions=dict(
required=False, type='list')
),
supports_check_mode=True)
device = module.params['device']
dryrun = module.params['dryrun']
new_label = module.params['new_label']
label = module.params['label']
if not new_label:
label = False
partitions = module.params['partitions'] or []
try:
validate_partitions(module, partitions)
except Exception as e:
module.fail_json(msg="Malformed partitions arguments: %s" % e)
parted_args = construct_parted_args(dict(device=device, label=label,
partitions=partitions))
command = [module.get_bin_path('parted', required=True)]
if not (module.check_mode or dryrun):
command.extend(parted_args)
module.run_command(command, check_rc=True)
partitions_created = {p['name']: '%s%i' % (device, i + 1)
for i, p in enumerate(partitions)}
module.exit_json(changed=not dryrun, created=partitions_created)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,104 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import string
import requests
# adapted from IPA
DEFAULT_CHUNK_SIZE = 1024 * 1024 # 1MB
class StreamingDownloader(object):
def __init__(self, url, chunksize, hash_algo=None):
if hash_algo is not None:
self.hasher = hashlib.new(hash_algo)
else:
self.hasher = None
self.chunksize = chunksize
resp = requests.get(url, stream=True)
if resp.status_code != 200:
raise Exception('Invalid response code: %s' % resp.status_code)
self._request = resp
def __iter__(self):
for chunk in self._request.iter_content(chunk_size=self.chunksize):
if self.hasher is not None:
self.hasher.update(chunk)
yield chunk
def checksum(self):
if self.hasher is not None:
return self.hasher.hexdigest()
def stream_to_dest(url, dest, chunksize, hash_algo):
downloader = StreamingDownloader(url, chunksize, hash_algo)
with open(dest, 'wb+') as f:
for chunk in downloader:
f.write(chunk)
return downloader.checksum()
def main():
module = AnsibleModule(
argument_spec=dict(
url=dict(required=True, type='str'),
dest=dict(required=True, type='str'),
checksum=dict(required=False, type='str', default=''),
chunksize=dict(required=False, type='int',
default=DEFAULT_CHUNK_SIZE)
))
url = module.params['url']
dest = module.params['dest']
checksum = module.params['checksum']
chunksize = module.params['chunksize']
if checksum == '':
hash_algo, checksum = None, None
else:
try:
hash_algo, checksum = checksum.rsplit(':', 1)
except ValueError:
module.fail_json(msg='The checksum parameter has to be in format '
'"<algorithm>:<checksum>"')
checksum = checksum.lower()
if not all(c in string.hexdigits for c in checksum):
module.fail_json(msg='The checksum must be a valid HEX number')
if hash_algo not in hashlib.algorithms_available:
module.fail_json(msg="%s checksums are not supported" % hash_algo)
try:
actual_checksum = stream_to_dest(
url, dest, chunksize, hash_algo)
except Exception as e:
module.fail_json(msg=str(e))
else:
if hash_algo and actual_checksum != checksum:
module.fail_json(msg='Invalid dest checksum')
else:
module.exit_json(changed=True)
# NOTE(pas-ha) Ansible's module_utils.basic is licensed under BSD (2 clause)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,6 @@
- include: zap.yaml
tags:
- zap
- include: shred.yaml
tags:
- shred

View File

@@ -0,0 +1,6 @@
- name: clean block devices
become: yes
command: shred -f -z /dev/{{ item }}
async: 3600
poll: 30
with_items: "{{ ansible_devices }}"

View File

@@ -0,0 +1,4 @@
- name: wipe partition metadata
become: yes
command: sgdisk -Z /dev/{{ item }}
with_items: "{{ ansible_devices }}"

View File

@@ -0,0 +1,54 @@
#!/bin/sh
# code from DIB bash ramdisk
readonly target_disk=$1
readonly root_part=$2
readonly root_part_mount=/mnt/rootfs
# We need to run partprobe to ensure all partitions are visible
partprobe $target_disk
mkdir -p $root_part_mount
mount $root_part $root_part_mount
if [ $? != "0" ]; then
echo "Failed to mount root partition $root_part on $root_part_mount"
exit 1
fi
mkdir -p $root_part_mount/dev
mkdir -p $root_part_mount/sys
mkdir -p $root_part_mount/proc
mount -o bind /dev $root_part_mount/dev
mount -o bind /sys $root_part_mount/sys
mount -o bind /proc $root_part_mount/proc
# Find grub version
V=
if [ -x $root_part_mount/usr/sbin/grub2-install ]; then
V=2
fi
# Install grub
ret=1
if chroot $root_part_mount /bin/sh -c "/usr/sbin/grub$V-install ${target_disk}"; then
echo "Generating the grub configuration file"
# tell GRUB2 to preload its "lvm" module to gain LVM booting on direct-attached disks
if [ "$V" = "2" ]; then
echo "GRUB_PRELOAD_MODULES=lvm" >> $root_part_mount/etc/default/grub
fi
chroot $root_part_mount /bin/sh -c "/usr/sbin/grub$V-mkconfig -o /boot/grub$V/grub.cfg"
ret=$?
fi
umount $root_part_mount/dev
umount $root_part_mount/sys
umount $root_part_mount/proc
umount $root_part_mount
if [ $ret != "0" ]; then
echo "Installing grub bootloader failed"
fi
exit $ret

View File

@@ -0,0 +1,115 @@
#!/bin/sh
# Copyright 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(pas-ha) this is mostly copied over from Ironic Python Agent
# compared to the original file in IPA,
# all logging is disabled to let Ansible output the full trace.
# The places that call fail are commented out, to be replaced later
# with a different handler when making this script a real Ansible module
# TODO(pas-ha) rewrite this shell script to be a proper Ansible module
# This should work with almost any image that uses MBR partitioning and
# doesn't already have 3 or more partitions -- or else you'll no longer
# be able to create extended partitions on the disk.
# Takes one argument - block device
log() {
echo "`basename $0`: $@"
}
fail() {
log "Error: $@"
exit 1
}
MAX_DISK_PARTITIONS=128
MAX_MBR_SIZE_MB=2097152
DEVICE="$1"
[ -b $DEVICE ] || fail "(DEVICE) $DEVICE is not a block device"
# We need to run partx -u to ensure all partitions are visible so the
# following blkid command returns partitions just imaged to the device
partx -u $DEVICE # || fail "running partx -u $DEVICE"
# todo(jayf): partx -u doesn't work in all cases, but partprobe fails in
# devstack. We run both commands now as a temporary workaround for bug 1433812
# long term, this should all be refactored into python and share code with
# the other partition-modifying code in the agent.
partprobe $DEVICE || true
# Check for preexisting partition for configdrive
EXISTING_PARTITION=`/sbin/blkid -l -o device $DEVICE -t LABEL=config-2`
if [ $? = 0 ]; then
#log "Existing configdrive found on ${DEVICE} at ${EXISTING_PARTITION}"
ISO_PARTITION=$EXISTING_PARTITION
else
# Check if it is GPT partition and needs to be re-sized
parted $DEVICE print 2>&1 | grep "fix the GPT to use all of the space"
if [ $? = 0 ]; then
#log "Fixing GPT to use all of the space on device $DEVICE"
sgdisk -e $DEVICE #|| fail "move backup GPT data structures to the end of ${DEVICE}"
# Need to create new partition for config drive
# Not all images have partition numbers in sequence; there are holes.
# These holes get filled up when a new partition is created.
TEMP_DIR="$(mktemp -d)"
EXISTING_PARTITION_LIST=$TEMP_DIR/existing_partitions
UPDATED_PARTITION_LIST=$TEMP_DIR/updated_partitions
gdisk -l $DEVICE | grep -A$MAX_DISK_PARTITIONS "Number Start" | grep -v "Number Start" > $EXISTING_PARTITION_LIST
# Create small partition at the end of the device
#log "Adding configdrive partition to $DEVICE"
sgdisk -n 0:-64MB:0 $DEVICE #|| fail "creating configdrive on ${DEVICE}"
gdisk -l $DEVICE | grep -A$MAX_DISK_PARTITIONS "Number Start" | grep -v "Number Start" > $UPDATED_PARTITION_LIST
CONFIG_PARTITION_ID=`diff $EXISTING_PARTITION_LIST $UPDATED_PARTITION_LIST | tail -n1 |awk '{print $2}'`
ISO_PARTITION="${DEVICE}${CONFIG_PARTITION_ID}"
else
#log "Working on MBR only device $DEVICE"
# get total disk size, to detect if that exceeds 2TB msdos limit
disksize_bytes=$(blockdev --getsize64 $DEVICE)
disksize_mb=$(( ${disksize_bytes%% *} / 1024 / 1024))
startlimit=-64MiB
endlimit=-0
if [ "$disksize_mb" -gt "$MAX_MBR_SIZE_MB" ]; then
# Create small partition at 2TB limit
startlimit=$(($MAX_MBR_SIZE_MB - 65))
endlimit=$(($MAX_MBR_SIZE_MB - 1))
fi
#log "Adding configdrive partition to $DEVICE"
parted -a optimal -s -- $DEVICE mkpart primary ext2 $startlimit $endlimit #|| fail "creating configdrive on ${DEVICE}"
# Find partition we just created
# Dump all partitions, ignore empty ones, then get the last partition ID
ISO_PARTITION=`sfdisk --dump $DEVICE | grep -v ' 0,' | tail -n1 | awk -F ':' '{print $1}' | sed -e 's/\s*$//'` #|| fail "finding ISO partition created on ${DEVICE}"
# Wait for udev to pick up the partition
udevadm settle --exit-if-exists=$ISO_PARTITION
fi
fi
# Output the created/discovered partition for configdrive
echo "configdrive $ISO_PARTITION"

View File

@@ -0,0 +1,37 @@
- name: download configdrive data
get_url:
url: "{{ configdrive.location }}"
dest: /tmp/{{ inventory_hostname }}.gz.base64
async: 600
poll: 15
when: "{{ configdrive.type|default('') == 'url' }}"
- block:
- name: copy configdrive file to node
copy:
src: "{{ configdrive.location }}"
dest: /tmp/{{ inventory_hostname }}.gz.base64
- name: remove configdrive from conductor
delegate_to: conductor
file:
path: "{{ configdrive.location }}"
state: absent
when: "{{ configdrive.type|default('') == 'file' }}"
- name: unpack configdrive
shell: cat /tmp/{{ inventory_hostname }}.gz.base64 | base64 --decode | gunzip > /tmp/{{ inventory_hostname }}.cndrive
- name: prepare config drive partition
become: yes
script: partition_configdrive.sh {{ ironic_root_device }}
register: configdrive_partition_output
- name: test the output of configdrive partitioner
assert:
that:
- "{{ (configdrive_partition_output.stdout_lines | last).split() | length == 2 }}"
- "{{ (configdrive_partition_output.stdout_lines | last).split() | first == 'configdrive' }}"
- name: write configdrive
become: yes
command: dd if=/tmp/{{ inventory_hostname }}.cndrive of={{ (configdrive_partition_output.stdout_lines | last).split() | last }} bs=64K oflag=direct

View File

@@ -0,0 +1,11 @@
- name: fail if not enough memory to store downloaded image
fail:
msg: "The image size is too big, no free memory available"
when: "{{ ansible_memfree_mb }} < {{ image.mem_req }}"
- name: download image with checksum validation
get_url:
url: "{{ image.url }}"
dest: /tmp/{{ inventory_hostname }}.img
checksum: "{{ image.checksum|default(omit) }}"
async: 600
poll: 15

View File

@@ -0,0 +1,3 @@
- name: configure bootloader
become: yes
script: install_grub.sh {{ ironic_root_device }} {{ ironic_image_target }}

View File

@@ -0,0 +1,17 @@
- include: root-device.yaml
- include: parted.yaml
tags:
- parted
- include: download.yaml
when: "{{ image.disk_format != 'raw' }}"
- include: write.yaml
- include: configdrive.yaml
when: configdrive is defined
- include: grub.yaml
tags:
- parted

View File

@@ -0,0 +1,28 @@
- name: erase partition table
become: yes
command: dd if=/dev/zero of={{ ironic_root_device }} bs=512 count=36
when: "{{ not preserve_ephemeral|default('no')|bool }}"
- name: run parted
become: yes
parted:
device: "{{ ironic_root_device }}"
dryrun: "{{ preserve_ephemeral|default('no')|bool }}"
new_label: yes
label: msdos
partitions: "{{ ironic_partitions }}"
register: parts
- name: reset image target to root partition
set_fact:
ironic_image_target: "{{ parts.created.root }}"
- name: make swap
become: yes
command: mkswap -L swap1 {{ parts.created.swap }}
when: "{{ parts.created.swap is defined }}"
- name: format ephemeral partition
become: yes
command: mkfs -F -t {{ ephemeral_format }} -L ephemeral0 {{ parts.created.ephemeral }}
when: "{{ parts.created.ephemeral is defined and not preserve_ephemeral|default('no')|bool }}"

View File

@@ -0,0 +1,7 @@
- set_fact:
ironic_root_device: /dev/{{ ansible_devices.keys()[0] }}
when: ironic_root_device is undefined
- set_fact:
ironic_image_target: "{{ ironic_root_device }}"
when: ironic_image_target is undefined

View File

@@ -0,0 +1,19 @@
- name: convert and write
become: yes
command: qemu-img convert -t directsync -O host_device /tmp/{{ inventory_hostname }}.img {{ ironic_image_target }}
async: 400
poll: 10
when: "{{ image.disk_format != 'raw' }}"
- name: stream to target
become: yes
stream_url:
url: "{{ image.url }}"
dest: "{{ ironic_image_target }}"
checksum: "md5:{{ image.checksum }}"
async: 600
poll: 15
when: "{{ image.disk_format == 'raw' }}"
- name: flush
command: sync

View File

@@ -0,0 +1,6 @@
- name: soft power off
become: yes
shell: sleep 5 && poweroff
async: 1
poll: 0
ignore_errors: true

View File

@@ -0,0 +1,10 @@
- name: waiting for node
become: false
delegate_to: conductor
wait_for:
host: "{{ ansible_ssh_host }}"
port: 22
search_regex: OpenSSH
delay: 10
timeout: 400
connect_timeout: 15

View File

@@ -0,0 +1 @@
ansible>=2.1

View File

@@ -0,0 +1,778 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import image_service
from ironic.common import states
from ironic.common import utils as com_utils
from ironic.conductor import task_manager
from ironic.conductor import utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import pxe
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as object_utils
from ironic_lib import utils as irlib_utils
import mock
from oslo_config import cfg
from ironic_staging_drivers.ansible import deploy as ansible_deploy
CONF = cfg.CONF
INSTANCE_INFO = {
'image_source': 'fake-image',
'image_url': 'http://image',
'image_checksum': 'checksum',
'image_disk_format': 'qcow2',
'root_gb': 5,
}
DRIVER_INFO = {
'deploy_kernel': 'glance://deploy_kernel_uuid',
'deploy_ramdisk': 'glance://deploy_ramdisk_uuid',
'ansible_deploy_username': 'test',
'ansible_deploy_key_file': '/path/key',
}
DRIVER_INTERNAL_INFO = {
'ansible_cleaning_ip': 'http://127.0.0.1/',
'is_whole_disk_image': True,
'clean_steps': []
}
class TestAnsibleMethods(db_base.DbTestCase):
def setUp(self):
super(TestAnsibleMethods, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_ansible')
node = {
'driver': 'fake_ansible',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **node)
def test__parse_ansible_driver_info(self):
playbook, user, key = ansible_deploy._parse_ansible_driver_info(
self.node, 'deploy')
self.assertEqual(ansible_deploy.DEFAULT_PLAYBOOKS['deploy'], playbook)
self.assertEqual('test', user)
self.assertEqual('/path/key', key)
def test__parse_ansible_driver_info_no_playbook(self):
self.assertRaises(exception.IronicException,
ansible_deploy._parse_ansible_driver_info,
self.node, 'test')
@mock.patch.object(image_service, 'GlanceImageService', autospec=True)
def test_build_instance_info_for_deploy_glance_image(self, glance_mock):
i_info = self.node.instance_info
i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810'
self.node.instance_info = i_info
self.node.save()
image_info = {'checksum': 'aa', 'disk_format': 'qcow2'}
glance_mock.return_value.show = mock.Mock(spec_set=[],
return_value=image_info)
with task_manager.acquire(
self.context, self.node.uuid) as task:
ansible_deploy.build_instance_info_for_deploy(task)
glance_mock.assert_called_once_with(version=2,
context=task.context)
glance_mock.return_value.show.assert_called_once_with(
self.node.instance_info['image_source'])
glance_mock.return_value.swift_temp_url.assert_called_once_with(
image_info)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonglance_image(
self, validate_href_mock):
i_info = self.node.instance_info
driver_internal_info = self.node.driver_internal_info
i_info['image_source'] = 'http://image-ref'
i_info['image_checksum'] = 'aa'
i_info['root_gb'] = 10
driver_internal_info['is_whole_disk_image'] = True
self.node.instance_info = i_info
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
info = ansible_deploy.build_instance_info_for_deploy(task)
self.assertEqual(self.node.instance_info['image_source'],
info['image_url'])
validate_href_mock.assert_called_once_with(
mock.ANY, 'http://image-ref')
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonsupported_image(
self, validate_href_mock):
validate_href_mock.side_effect = iter(
[exception.ImageRefValidationFailed(
image_href='file://img.qcow2', reason='fail')])
i_info = self.node.instance_info
i_info['image_source'] = 'file://img.qcow2'
i_info['image_checksum'] = 'aa'
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
exception.ImageRefValidationFailed,
ansible_deploy.build_instance_info_for_deploy, task)
def test__get_node_ip(self):
dhcp_provider_mock = mock.Mock()
dhcp_factory.DHCPFactory._dhcp_provider = dhcp_provider_mock
dhcp_provider_mock.get_ip_addresses.return_value = ['ip']
with task_manager.acquire(self.context, self.node.uuid) as task:
ansible_deploy._get_node_ip(task)
dhcp_provider_mock.get_ip_addresses.assert_called_once_with(
task)
def test__get_node_ip_no_ip(self):
dhcp_provider_mock = mock.Mock()
dhcp_factory.DHCPFactory._dhcp_provider = dhcp_provider_mock
dhcp_provider_mock.get_ip_addresses.return_value = []
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.FailedToGetIPAddressOnPort,
ansible_deploy._get_node_ip, task)
def test__get_node_ip_multiple_ip(self):
dhcp_provider_mock = mock.Mock()
dhcp_factory.DHCPFactory._dhcp_provider = dhcp_provider_mock
dhcp_provider_mock.get_ip_addresses.return_value = ['ip1', 'ip2']
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InstanceDeployFailure,
ansible_deploy._get_node_ip, task)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
return_value=states.POWER_OFF)
def test__reboot_and_finish_deploy(self, get_pow_state_mock,
power_action_mock):
self.config(group='ansible',
post_deploy_get_power_state_retry_interval=0)
with task_manager.acquire(self.context, self.node.uuid) as task:
ansible_deploy._reboot_and_finish_deploy(task)
get_pow_state_mock.assert_called_once_with(task)
power_action_mock.assert_called_once_with(task, states.POWER_ON)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
return_value=states.POWER_ON)
def test__reboot_and_finish_deploy_retry(self, get_pow_state_mock,
power_action_mock):
self.config(group='ansible',
post_deploy_get_power_state_retry_interval=0)
with task_manager.acquire(self.context, self.node.uuid) as task:
ansible_deploy._reboot_and_finish_deploy(task)
get_pow_state_mock.assert_called_with(task)
self.assertEqual(
CONF.ansible.post_deploy_get_power_state_retries + 1,
len(get_pow_state_mock.mock_calls))
expected_power_calls = [((task, states.POWER_OFF),),
((task, states.POWER_ON),)]
self.assertEqual(expected_power_calls,
power_action_mock.call_args_list)
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
@mock.patch.object(os.path, 'join', return_value='/path/to/playbook',
autospec=True)
def test__run_playbook(self, path_join_mock, execute_mock):
extra_vars = {"ironic_nodes": [{"name": self.node["uuid"],
"ip": "127.0.0.1", "user": "test"}]}
ansible_deploy._run_playbook('deploy', extra_vars, '/path/to/key')
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=%s' % CONF.ansible.config_file_path,
'ansible-playbook', '/path/to/playbook', '-i',
ansible_deploy.INVENTORY_FILE, '-e', json.dumps(extra_vars),
'--private-key=/path/to/key', '-vvvv')
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
@mock.patch.object(os.path, 'join', return_value='/path/to/playbook',
autospec=True)
def test__run_playbook_tags(self, path_join_mock, execute_mock):
extra_vars = {"ironic_nodes": [{"name": self.node["uuid"],
"ip": "127.0.0.1", "user": "test"}]}
ansible_deploy._run_playbook('deploy', extra_vars, '/path/to/key',
tags=['wait'])
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=%s' % CONF.ansible.config_file_path,
'ansible-playbook', '/path/to/playbook', '-i',
ansible_deploy.INVENTORY_FILE, '-e', json.dumps(extra_vars),
'--tags=wait', '--private-key=/path/to/key', '-vvvv')
@mock.patch.object(deploy_utils, 'check_for_missing_params',
autospec=True)
def test__parse_partitioning_info(self, check_missing_param_mock):
expected_info = {
'ironic_partitions':
[{'boot': 'yes', 'swap': 'no',
'size_mib': 1024 * INSTANCE_INFO['root_gb'],
'name': 'root'}]}
i_info = ansible_deploy._parse_partitioning_info(self.node)
check_missing_param_mock.assert_called_once_with(
expected_info, mock.ANY)
self.assertEqual(expected_info, i_info)
@mock.patch.object(deploy_utils, 'check_for_missing_params',
autospec=True)
def test__parse_partitioning_info_swap(self, check_missing_param_mock):
in_info = dict(INSTANCE_INFO)
in_info['swap_mb'] = 128
self.node.instance_info = in_info
self.node.save()
expected_info = {
'ironic_partitions':
[{'boot': 'yes', 'swap': 'no',
'size_mib': 1024 * INSTANCE_INFO['root_gb'],
'name': 'root'},
{'boot': 'no', 'swap': 'yes',
'size_mib': 128, 'name': 'swap'}]}
i_info = ansible_deploy._parse_partitioning_info(self.node)
check_missing_param_mock.assert_called_once_with(
expected_info, mock.ANY)
self.assertEqual(expected_info, i_info)
@mock.patch.object(deploy_utils, 'check_for_missing_params',
autospec=True)
def test__parse_partitioning_info_invalid_param(self,
check_missing_param_mock):
in_info = dict(INSTANCE_INFO)
in_info['root_gb'] = 'five'
self.node.instance_info = in_info
self.node.save()
self.assertRaises(exception.InvalidParameterValue,
ansible_deploy._parse_partitioning_info,
self.node)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
@mock.patch.object(ansible_deploy, '_reboot_and_finish_deploy',
autospec=True)
@mock.patch.object(utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
@mock.patch.object(ansible_deploy, '_parse_partitioning_info',
autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_variables', autospec=True)
def test__deploy(self, prepare_vars_mock, parse_part_info_mock,
parse_dr_info_mock, prepare_extra_mock,
run_playbook_mock, set_boot_device_mock,
finish_deploy_mock, clean_ramdisk_mock):
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'],
DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
'test_u')]}
prepare_extra_mock.return_value = ironic_nodes
_vars = {
'url': 'image_url',
'checksum': 'aa'}
prepare_vars_mock.return_value = _vars
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.extra = {'ham': 'spam'}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
ansible_deploy._deploy(task, '127.0.0.1')
prepare_vars_mock.assert_called_once_with(task)
parse_part_info_mock.assert_called_once_with(task.node)
parse_dr_info_mock.assert_called_once_with(task.node)
prepare_extra_mock.assert_called_once_with(
[(self.node['uuid'], '127.0.0.1', 'test_u', {'ham': 'spam'})],
variables=_vars)
run_playbook_mock.assert_called_once_with(
'test_pl', {'ironic_nodes': [
(self.node['uuid'],
DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
'test_u')]}, 'test_k',
notags=['wait'])
set_boot_device_mock.assert_called_once_with(
task, 'disk', persistent=True)
finish_deploy_mock.assert_called_once_with(task)
clean_ramdisk_mock.assert_called_once_with(task)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
@mock.patch.object(ansible_deploy, '_reboot_and_finish_deploy',
autospec=True)
@mock.patch.object(utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
@mock.patch.object(ansible_deploy, '_parse_partitioning_info',
autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_variables', autospec=True)
def test__deploy_iwdi(self, prepare_vars_mock, parse_part_info_mock,
parse_dr_info_mock, prepare_extra_mock,
run_playbook_mock, set_boot_device_mock,
finish_deploy_mock, clean_ramdisk_mock):
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'],
DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
'test_u')]}
prepare_extra_mock.return_value = ironic_nodes
_vars = {
'url': 'image_url',
'checksum': 'aa'}
prepare_vars_mock.return_value = _vars
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = True
self.node.driver_internal_info = driver_internal_info
self.node.extra = {'ham': 'spam'}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
ansible_deploy._deploy(task, '127.0.0.1')
prepare_vars_mock.assert_called_once_with(task)
self.assertFalse(parse_part_info_mock.called)
parse_dr_info_mock.assert_called_once_with(task.node)
prepare_extra_mock.assert_called_once_with(
[(self.node['uuid'], '127.0.0.1', 'test_u', {'ham': 'spam'})],
variables=_vars)
run_playbook_mock.assert_called_once_with(
'test_pl', {'ironic_nodes': [
(self.node['uuid'],
DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
'test_u')]}, 'test_k',
notags=['wait', 'parted'])
set_boot_device_mock.assert_called_once_with(
task, 'disk', persistent=True)
finish_deploy_mock.assert_called_once_with(task)
clean_ramdisk_mock.assert_called_once_with(task)
class TestAnsibleDeploy(db_base.DbTestCase):
def setUp(self):
super(TestAnsibleDeploy, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_ansible')
self.driver = ansible_deploy.AnsibleDeploy()
node = {
'driver': 'fake_ansible',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **node)
def test_get_properties(self):
self.assertEqual(ansible_deploy.COMMON_PROPERTIES,
self.driver.get_properties())
@mock.patch.object(deploy_utils, 'check_for_missing_params',
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate(self, pxe_boot_validate_mock, check_params_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.validate(task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
check_params_mock.assert_called_once_with(
{'instance_info.image_source': INSTANCE_INFO['image_source']},
mock.ANY)
@mock.patch.object(deploy_utils, 'get_boot_option',
return_value='netboot', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_not_iwdi_netboot(self, pxe_boot_validate_mock,
get_boot_mock):
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
get_boot_mock.assert_called_once_with(task.node)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_deploy_wait(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.deploy(task)
self.assertEqual(driver_return, states.DEPLOYWAIT)
power_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(ansible_deploy, '_deploy', autospec=True)
@mock.patch.object(ansible_deploy, '_get_node_ip',
return_value='127.0.0.1', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_deploy_done(self, power_mock, get_ip_mock, deploy_mock):
self.config(group='ansible', use_ramdisk_callback=False)
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.deploy(task)
self.assertEqual(driver_return, states.DEPLOYDONE)
power_mock.assert_called_once_with(task, states.REBOOT)
get_ip_mock.assert_called_once_with(task)
deploy_mock.assert_called_once_with(task, '127.0.0.1')
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_tear_down(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.tear_down(task)
power_mock.assert_called_once_with(task, states.POWER_OFF)
self.assertEqual(driver_return, states.DELETED)
@mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
return_value={'op1': 'test1'}, autospec=True)
@mock.patch.object(ansible_deploy, 'build_instance_info_for_deploy',
return_value={'test': 'test'}, autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
def test_prepare(self, pxe_prepare_ramdisk_mock,
build_instance_info_mock, build_options_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYING
self.driver.prepare(task)
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
task, {'op1': 'test1'})
self.node.refresh()
self.assertEqual('test', self.node.instance_info['test'])
@mock.patch.object(ansible_deploy, '_get_configdrive_path',
return_value='/path/test', autospec=True)
@mock.patch.object(irlib_utils, 'unlink_without_raise', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
def test_clean_up(self, pxe_clean_up_mock, unlink_mock,
get_cfdrive_path_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.clean_up(task)
pxe_clean_up_mock.assert_called_once_with(task)
get_cfdrive_path_mock.assert_called_once_with(self.node['uuid'])
unlink_mock.assert_called_once_with('/path/test')
@mock.patch.object(ansible_deploy, '_get_clean_steps', autospec=True)
def test_get_clean_steps(self, get_clean_steps_mock):
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'},
{'priority': 99, 'interface': 'deploy',
'step': 'erase_devices_metadata'},
]
get_clean_steps_mock.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
get_clean_steps_mock.assert_called_once_with(
task, interface='deploy',
override_priorities={
'erase_devices': None,
'erase_devices_metadata': None})
self.assertEqual(mock_steps, steps)
@mock.patch.object(ansible_deploy, '_get_clean_steps', autospec=True)
def test_get_clean_steps_priority(self, mock_get_clean_steps):
self.config(erase_devices_priority=9, group='deploy')
self.config(erase_devices_metadata_priority=98, group='deploy')
mock_steps = [{'priority': 9, 'interface': 'deploy',
'step': 'erase_devices'},
{'priority': 98, 'interface': 'deploy',
'step': 'erase_devices_metadata'},
]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(
task, interface='deploy',
override_priorities={'erase_devices': 9,
'erase_devices_metadata': 98})
self.assertEqual(mock_steps, steps)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
def test_execute_clean_step(self, parse_driver_info_mock,
prepare_extra_mock, run_playbook_mock):
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'args': {'tags': ['clean']}}
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'],
DRIVER_INTERNAL_INFO['ansible_cleaning_ip'],
'test_u', {})]}
prepare_extra_mock.return_value = ironic_nodes
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.execute_clean_step(task, step)
parse_driver_info_mock.assert_called_once_with(
task.node, action='clean')
prepare_extra_mock.assert_called_once_with(
ironic_nodes['ironic_nodes'])
run_playbook_mock.assert_called_once_with(
'test_pl', ironic_nodes, 'test_k', tags=['clean'])
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
def test_execute_clean_step_no_ip(self, parse_driver_info_mock,
run_playbook_mock):
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'tags': ['clean']}
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
del driver_internal_info['ansible_cleaning_ip']
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.NodeCleaningFailure,
self.driver.execute_clean_step, task, step)
parse_driver_info_mock.assert_called_once_with(
task.node, action='clean')
self.assertFalse(run_playbook_mock.called)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
return_value={'op1': 'test1'}, autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
@mock.patch.object(deploy_utils, 'prepare_cleaning_ports', autospec=True)
def test_prepare_cleaning_callback(
self, prepare_cleaning_ports_mock, prepare_ramdisk_mock,
            build_options_mock, power_action_mock,
set_node_cleaning_steps, run_playbook_mock):
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'tags': ['clean']}
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['clean_steps'] = [step]
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
state = self.driver.prepare_cleaning(task)
set_node_cleaning_steps.assert_called_once_with(task)
prepare_cleaning_ports_mock.assert_called_once_with(task)
            build_options_mock.assert_called_once_with(task.node)
prepare_ramdisk_mock.assert_called_once_with(
task, {'op1': 'test1'})
power_action_mock.assert_called_once_with(task, states.REBOOT)
self.assertFalse(run_playbook_mock.called)
self.assertEqual(states.CLEANWAIT, state)
@mock.patch.object(utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(deploy_utils, 'prepare_cleaning_ports', autospec=True)
def test_prepare_cleaning_callback_no_steps(self,
prepare_cleaning_ports_mock,
set_node_cleaning_steps):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.prepare_cleaning(task)
set_node_cleaning_steps.assert_called_once_with(task)
self.assertFalse(prepare_cleaning_ports_mock.called)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
@mock.patch.object(ansible_deploy, '_get_node_ip',
return_value='127.0.0.1', autospec=True)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
return_value={'op1': 'test1'}, autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
@mock.patch.object(deploy_utils, 'prepare_cleaning_ports', autospec=True)
def test_prepare_cleaning(self, prepare_cleaning_ports_mock,
                              prepare_ramdisk_mock, build_options_mock,
power_action_mock, run_playbook_mock,
get_ip_mock, parse_driver_info_mock,
prepare_extra_mock):
self.config(group='ansible', use_ramdisk_callback=False)
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'],
'127.0.0.1',
'test_u', {})]}
prepare_extra_mock.return_value = ironic_nodes
with task_manager.acquire(self.context, self.node.uuid) as task:
state = self.driver.prepare_cleaning(task)
prepare_cleaning_ports_mock.assert_called_once_with(task)
            build_options_mock.assert_called_once_with(task.node)
prepare_ramdisk_mock.assert_called_once_with(
task, {'op1': 'test1'})
power_action_mock.assert_called_once_with(task, states.REBOOT)
get_ip_mock.assert_called_once_with(task)
parse_driver_info_mock.assert_called_once_with(
task.node, action='clean')
prepare_extra_mock.assert_called_once_with(
ironic_nodes['ironic_nodes'])
run_playbook_mock.assert_called_once_with(
'test_pl', ironic_nodes, 'test_k', tags=['wait'])
self.assertEqual(None, state)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
@mock.patch.object(deploy_utils, 'tear_down_cleaning_ports',
autospec=True)
def test_tear_down_cleaning(self, tear_down_utils_mock,
clean_ramdisk_mock, power_action_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.tear_down_cleaning(task)
power_action_mock.assert_called_once_with(task, states.POWER_OFF)
clean_ramdisk_mock.assert_called_once_with(task)
tear_down_utils_mock.assert_called_once_with(task)
@mock.patch.object(ansible_deploy, 'LOG', autospec=True)
def test_heartbeat_not_wait_state(self, log_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.heartbeat(task, 'http://127.0.0.1')
log_mock.warning.assert_called_once_with(
mock.ANY, {'node': task.node['uuid'],
'state': task.node['provision_state']})
@mock.patch.object(ansible_deploy, 'LOG', autospec=True)
@mock.patch.object(ansible_deploy, '_deploy', autospec=True)
def test_heartbeat_deploy_wait(self, deploy_mock, log_mock):
self.node['provision_state'] = states.DEPLOYWAIT
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.process_event = mock.Mock()
self.driver.heartbeat(task, 'http://127.0.0.1')
deploy_mock.assert_called_once_with(task, '127.0.0.1')
log_mock.info.assert_called_once_with(mock.ANY, task.node['uuid'])
self.assertEqual([mock.call('resume'), mock.call('done')],
task.process_event.mock_calls)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(ansible_deploy, 'LOG', autospec=True)
@mock.patch.object(ansible_deploy, '_deploy',
side_effect=Exception('Boo'), autospec=True)
def test_heartbeat_deploy_wait_fail(self, deploy_mock, log_mock,
set_fail_state_mock):
self.node['provision_state'] = states.DEPLOYWAIT
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.process_event = mock.Mock()
self.driver.heartbeat(task, 'http://127.0.0.1')
deploy_mock.assert_called_once_with(task, '127.0.0.1')
log_mock.exception.assert_called_once_with(mock.ANY)
self.assertEqual([mock.call('resume')],
task.process_event.mock_calls)
set_fail_state_mock.assert_called_once_with(task, mock.ANY,
collect_logs=False)
@mock.patch.object(ansible_deploy, '_notify_conductor_resume_clean',
autospec=True)
def test_heartbeat_clean_wait(self, notify_resume_clean_mock):
self.node['provision_state'] = states.CLEANWAIT
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.process_event = mock.Mock()
self.driver.heartbeat(task, 'http://127.0.0.1')
notify_resume_clean_mock.assert_called_once_with(task)
@mock.patch.object(ansible_deploy, '_notify_conductor_resume_clean',
side_effect=Exception('Boo'), autospec=True)
@mock.patch.object(utils, 'cleaning_error_handler', autospec=True)
def test_heartbeat_clean_wait_fail(self, cleaning_error_mock,
notify_resume_clean_mock):
self.node['provision_state'] = states.CLEANWAIT
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.process_event = mock.Mock()
self.driver.heartbeat(task, 'http://127.0.0.1')
notify_resume_clean_mock.assert_called_once_with(task)
cleaning_error_mock.assert_called_once_with(task, mock.ANY)
@mock.patch.object(ansible_deploy, '_notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(ansible_deploy, '_deploy', autospec=True)
@mock.patch.object(ansible_deploy, 'LOG', autospec=True)
def test_heartbeat_maintenance(self, log_mock, deploy_mock,
notify_clean_resume_mock):
self.node['maintenance'] = True
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.heartbeat(task, 'http://127.0.0.1')
self.node['provision_state'] = states.CLEANWAIT
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.heartbeat(task, 'http://127.0.0.1')
self.node['provision_state'] = states.DEPLOYWAIT
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.heartbeat(task, 'http://127.0.0.1')
self.assertFalse(log_mock.warning.called)
self.assertFalse(deploy_mock.called)
self.assertFalse(notify_clean_resume_mock.called)

View File

@ -0,0 +1,22 @@
---
features:
- |
    Added the Ansible-deploy driver.
    Supported features of Agent-based deploy drivers:
    * Network separation
    * Whole disk images
    * Partition images with localboot
    * Cleaning, both automated and manual with cleaning steps
    * Configdrive partition, for both whole disk and partition images
    Main known shortcomings in comparison with IPA-based drivers:
    * Is not asynchronous
    * Clean steps cannot be aborted
    * Does not support UEFI/secure/trusted boot (support is planned)
    * Does not support root_device_hints (support is planned)
    * Does not honor the partition type capability for partition images
      (always msdos, gpt support is planned)
    * Does not support partition images with netboot
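
For context only (not part of this change), a minimal sketch of the instance_info a partition-image deploy consumes. The field names come from the tests in this commit; the concrete values here are illustrative.

# Illustrative only -- names taken from this change, values made up.
instance_info = {
    'image_source': 'file://img.qcow2',   # image URL (glance/http/file)
    'image_checksum': 'aa',               # checksum of that image
    'root_gb': 5,                         # root partition size, GiB
    'swap_mb': 128,                       # optional swap partition, MiB
}
# _parse_partitioning_info() turns this into the 'ironic_partitions'
# variable handed to the playbook, roughly:
# [{'name': 'root', 'boot': 'yes', 'swap': 'no', 'size_mib': 5 * 1024},
#  {'name': 'swap', 'boot': 'no', 'swap': 'yes', 'size_mib': 128}]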

View File

@ -38,6 +38,10 @@ ironic.drivers =
fake_iboot_fake = ironic_staging_drivers.iboot:FakeIBootFakeDriver
pxe_iboot_iscsi = ironic_staging_drivers.iboot:PXEIBootISCSIDriver
pxe_iboot_agent = ironic_staging_drivers.iboot:PXEIBootAgentDriver
fake_ansible = ironic_staging_drivers.ansible:FakeAnsibleDriver
pxe_ipmitool_ansible = ironic_staging_drivers.ansible:AnsibleAndIPMIToolDriver
pxe_ssh_ansible = ironic_staging_drivers.ansible:AnsibleAndSSHDriver
pxe_libvirt_ansible = ironic_staging_drivers.ansible:AnsibleAndLibvirtDriver
[build_sphinx]
source-dir = doc/source
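
As a quick sanity check (again, not part of this change), the entry points registered above can be discovered with the stock pkg_resources API once the package is installed; a rough sketch:

# Rough sketch, assuming only the standard pkg_resources API; the driver
# names are the ones registered under 'ironic.drivers' in setup.cfg above.
import pkg_resources

for ep in pkg_resources.iter_entry_points('ironic.drivers'):
    if ep.name.endswith('_ansible') or ep.name == 'fake_ansible':
        driver_cls = ep.load()   # e.g. AnsibleAndIPMIToolDriver
        print(ep.name, '->', driver_cls.__name__)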