Port the nova PXE driver to ironic

Implements: blueprint equivalent-pxe-driver

Change-Id: I59cf6e10ff00b3787e2bd60082ca872348157ec8
Ghe Rivero 2013-06-07 13:32:17 +02:00 committed by Ghe Rivero
parent c4de596b11
commit 17b828796e
12 changed files with 1457 additions and 1094 deletions


@@ -0,0 +1,5 @@
# ironic-rootwrap command filters to manipulate images
# This file should be owned by (and only-writeable by) the root user
# ironic/common/images.py: 'qemu-img'
qemu-img: CommandFilter, qemu-img, root
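For context, this CommandFilter entry is what lets the unprivileged ironic process run qemu-img as root through rootwrap. A minimal sketch of the call path, mirroring the utils.execute() call in convert_image() added below in ironic/common/images.py when run_as_root is requested (the paths are made up):

from ironic.common import utils

# rootwrap finds the 'qemu-img' CommandFilter above and re-executes
# the command as root on ironic's behalf.
utils.execute('qemu-img', 'convert', '-O', 'raw',
              '/tmp/source.qcow2', '/tmp/dest.img',
              run_as_root=True)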


@@ -202,6 +202,18 @@ class InvalidMAC(Invalid):
message = _("Expected a MAC address but received %(mac)s.")
class InstanceDeployFailure(Invalid):
message = _("Failed to deploy instance: %(reason)s")
class ImageUnacceptable(Invalid):
message = _("Image %(image_id)s is unacceptable: %(reason)s")
class ImageConvertFailed(Invalid):
message = _("Failed to convert image %(image_id)s: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):

ironic/common/images.py

@@ -0,0 +1,230 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handling of VM disk images.
"""
import os
import re
from oslo.config import cfg
from ironic.common import exception
from ironic.common import image_service as service
from ironic.common import utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
from ironic.openstack.common import strutils
LOG = logging.getLogger(__name__)
image_opts = [
cfg.BoolOpt('force_raw_images',
default=True,
help='Force backing images to raw format'),
]
CONF = cfg.CONF
CONF.register_opts(image_opts)
class QemuImgInfo(object):
BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
r"\s+(.*?)\)\s*$"), re.I)
TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I)
def __init__(self, cmd_output=None):
details = self._parse(cmd_output or '')
self.image = details.get('image')
self.backing_file = details.get('backing_file')
self.file_format = details.get('file_format')
self.virtual_size = details.get('virtual_size')
self.cluster_size = details.get('cluster_size')
self.disk_size = details.get('disk_size')
self.snapshots = details.get('snapshot_list', [])
self.encryption = details.get('encryption')
def __str__(self):
lines = [
'image: %s' % self.image,
'file_format: %s' % self.file_format,
'virtual_size: %s' % self.virtual_size,
'disk_size: %s' % self.disk_size,
'cluster_size: %s' % self.cluster_size,
'backing_file: %s' % self.backing_file,
]
if self.snapshots:
lines.append("snapshots: %s" % self.snapshots)
return "\n".join(lines)
def _canonicalize(self, field):
# Standardize on underscores/lc/no dash and no spaces
# since qemu seems to have mixed outputs here... and
# this format allows for better integration with python
# - ie for usage in kwargs and such...
field = field.lower().strip()
return re.sub('[ -]', '_', field)
def _extract_bytes(self, details):
# Replace it with the byte amount
real_size = self.SIZE_RE.search(details)
if real_size:
details = real_size.group(1)
try:
details = strutils.to_bytes(details)
except TypeError:
pass
return details
def _extract_details(self, root_cmd, root_details, lines_after):
real_details = root_details
if root_cmd == 'backing_file':
# Replace it with the real backing file
backing_match = self.BACKING_FILE_RE.match(root_details)
if backing_match:
real_details = backing_match.group(2).strip()
elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
# Replace it with the byte amount (if we can convert it)
real_details = self._extract_bytes(root_details)
elif root_cmd == 'file_format':
real_details = real_details.strip().lower()
elif root_cmd == 'snapshot_list':
# Next line should be a header, starting with 'ID'
if not lines_after or not lines_after[0].startswith("ID"):
msg = _("Snapshot list encountered but no header found!")
raise ValueError(msg)
del lines_after[0]
real_details = []
# This is the sprintf pattern we will try to match
# "%-10s%-20s%7s%20s%15s"
# ID TAG VM SIZE DATE VM CLOCK (current header)
while lines_after:
line = lines_after[0]
line_pieces = line.split()
if len(line_pieces) != 6:
break
# Check against this pattern in the final position
# "%02d:%02d:%02d.%03d"
date_pieces = line_pieces[5].split(":")
if len(date_pieces) != 3:
break
real_details.append({
'id': line_pieces[0],
'tag': line_pieces[1],
'vm_size': line_pieces[2],
'date': line_pieces[3],
'vm_clock': line_pieces[4] + " " + line_pieces[5],
})
del lines_after[0]
return real_details
def _parse(self, cmd_output):
# Analysis done of qemu-img.c to figure out what is going on here
# Find all points start with some chars and then a ':' then a newline
# and then handle the results of those 'top level' items in a separate
# function.
#
# TODO(harlowja): newer versions might have a json output format
# we should switch to that whenever possible.
# see: http://bit.ly/XLJXDX
contents = {}
lines = [x for x in cmd_output.splitlines() if x.strip()]
while lines:
line = lines.pop(0)
top_level = self.TOP_LEVEL_RE.match(line)
if top_level:
root = self._canonicalize(top_level.group(1))
if not root:
continue
root_details = top_level.group(2).strip()
details = self._extract_details(root, root_details, lines)
contents[root] = details
return contents
def qemu_img_info(path):
"""Return an object containing the parsed output from qemu-img info."""
if not os.path.exists(path):
return QemuImgInfo()
out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
return QemuImgInfo(out)
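For orientation, a minimal sketch of how QemuImgInfo parses the (pre-JSON) text output of qemu-img info; the sample output and values below are invented for illustration:

sample = """image: disk.img
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
backing file: base.img (actual path: /var/lib/images/base.img)"""

info = QemuImgInfo(sample)
# info.file_format  -> 'qcow2'      ('file format' is canonicalized to file_format)
# info.virtual_size -> 1073741824   (byte count taken from the parentheses)
# info.backing_file -> '/var/lib/images/base.img'  (extracted by BACKING_FILE_RE)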
def convert_image(source, dest, out_format, run_as_root=False):
"""Convert image to other format."""
cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
utils.execute(*cmd, run_as_root=run_as_root)
def fetch(context, image_href, path, image_service=None):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
if not image_service:
image_service = service.Service(version=1, context=context)
with fileutils.remove_path_on_error(path):
with open(path, "wb") as image_file:
image_service.download(image_href, image_file)
def fetch_to_raw(context, image_href, path, image_service=None):
path_tmp = "%s.part" % path
fetch(context, image_href, path_tmp, image_service)
image_to_raw(image_href, path, path_tmp)
def image_to_raw(image_href, path, path_tmp):
with fileutils.remove_path_on_error(path_tmp):
data = qemu_img_info(path_tmp)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_href)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(image_id=image_href,
reason=_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt,
'backing_file': backing_file})
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
with fileutils.remove_path_on_error(staged):
convert_image(path_tmp, staged, 'raw')
os.unlink(path_tmp)
data = qemu_img_info(staged)
if data.file_format != "raw":
raise exception.ImageConvertFailed(image_id=image_href,
reason=_("Converted to raw, but format is now %s") %
data.file_format)
os.rename(staged, path)
else:
os.rename(path_tmp, path)
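A short usage sketch of the download path above (the image reference and target path are made up, in the style of the test fixtures later in this change). fetch() writes to "<path>.part" first; image_to_raw() then inspects the result with qemu-img and, when force_raw_images is set and the format is not raw, converts it before renaming it into place.

from ironic.common import images
from ironic.openstack.common import context

ctx = context.get_admin_context()
# Downloads to '.../fake_instance_name/disk.part' first, then converts and
# renames the image to the final 'disk' path.
images.fetch_to_raw(ctx, 'glance://image_uuid',
                    '/var/lib/ironic/images/fake_instance_name/disk')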


@@ -18,32 +18,510 @@
PXE Driver and supporting meta-classes.
"""
from Cheetah import Template
import datetime
import os
import tempfile
from oslo.config import cfg
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common import image_service as service
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.drivers import base
from ironic.openstack.common import context
from ironic.openstack.common import fileutils
from ironic.openstack.common import jsonutils as json
from ironic.openstack.common import lockutils
from ironic.openstack.common import log as logging
from ironic.openstack.common import loopingcall
from ironic.openstack.common import timeutils
pxe_opts = [
cfg.StrOpt('deploy_kernel',
help='Default kernel image ID used in deployment phase'),
cfg.StrOpt('deploy_ramdisk',
help='Default ramdisk image ID used in deployment phase'),
cfg.StrOpt('net_config_template',
default='$pybasedir/ironic/net-dhcp.ubuntu.template',
help='Template file for injected network config'),
cfg.StrOpt('pxe_append_params',
help='additional append parameters for baremetal PXE boot'),
cfg.StrOpt('pxe_config_template',
default='$pybasedir/drivers/modules/pxe_config.template',
help='Template file for PXE configuration'),
cfg.IntOpt('pxe_deploy_timeout',
help='Timeout for PXE deployments. Default: 0 (unlimited)',
default=0),
cfg.StrOpt('tftp_root',
default='/tftpboot',
help='Ironic compute node\'s tftp root path'),
cfg.StrOpt('images_path',
default='/var/lib/ironic/images/',
help='Directory where images are stored on disk'),
cfg.StrOpt('tftp_master_path',
default='/tftpboot/master_images',
help='Directory where master tftp images are stored on disk'),
cfg.StrOpt('instance_master_path',
default='/var/lib/ironic/master_images',
help='Directory where master instance images are stored on disk')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(pxe_opts, group='pxe')
CONF.import_opt('use_ipv6', 'ironic.netconf')
def _parse_driver_info(node):
"""Gets the driver-specific Node deployment info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node to validate.
:returns: A dict with the driver_info values.
"""
info = json.loads(node.get('driver_info', '')).get('pxe')
d_info = {}
d_info['instance_name'] = info.get('instance_name', None)
d_info['image_source'] = info.get('image_source', None)
d_info['deploy_kernel'] = info.get('deploy_kernel',
CONF.pxe.deploy_kernel)
d_info['deploy_ramdisk'] = info.get('deploy_ramdisk',
CONF.pxe.deploy_ramdisk)
d_info['root_gb'] = info.get('root_gb', None)
missing_info = []
for label in d_info:
if not d_info[label]:
missing_info.append(label)
if missing_info:
raise exception.InvalidParameterValue(_(
"Can not validate PXE bootloader. The following paramenters "
"were not passed to ironic: %s") % missing_info)
#TODO(ghe): Should we get rid of swap partition?
d_info['swap_mb'] = info.get('swap_mb', 1)
d_info['key_data'] = info.get('key_data', None)
return d_info
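For reference, the 'pxe' namespace inside a node's driver_info that _parse_driver_info() expects, mirroring the test fixture in ironic/tests/db/utils.py updated by this change (the values are examples):

driver_info = json.dumps({
    'pxe': {
        'instance_name': 'fake_instance_name',
        'image_source': 'glance://image_uuid',
        'deploy_kernel': 'glance://deploy_kernel_uuid',
        'deploy_ramdisk': 'glance://deploy_ramdisk_uuid',
        'root_gb': 100,
        # 'swap_mb' and 'key_data' are optional; swap_mb defaults to 1.
    }
})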
def _build_pxe_config(node, pxe_info):
"""Build the PXE config file for a node
This method builds the PXE boot configuration file for a node,
given all the required parameters.
The resulting file has both a "deploy" and "boot" label, which correspond
to the two phases of booting. This may be extended later.
:param node: the Node to build the PXE configuration for.
:param pxe_info: a dict of {label: [image_uuid, tftp_path]} for the images.
:returns: A formatted string with the file content.
"""
LOG.debug(_("Building PXE config for deployment %s.") % node['id'])
cheetah = Template.Template
pxe_options = {
'deployment_id': node['id'],
'deployment_key': utils.random_alnum(32),
'deployment_iscsi_iqn': "iqn-%s" % node['instance_uuid'],
'deployment_aki_path': pxe_info['deploy_kernel'][1],
'deployment_ari_path': pxe_info['deploy_ramdisk'][1],
'aki_path': pxe_info['kernel'][1],
'ari_path': pxe_info['ramdisk'][1],
'pxe_append_params': CONF.pxe.pxe_append_params,
}
pxe_config = str(cheetah(
open(CONF.pxe.pxe_config_template).read(),
searchList=[{'pxe_options': pxe_options,
'ROOT': '${ROOT}',
}]))
return pxe_config
def _get_node_mac_addresses(task, node):
"""Get all mac addresses for a node.
:param task: a TaskManager instance.
:param node: the Node to act upon.
:returns: A list of MAC addresses in the format xx:xx:xx:xx:xx:xx.
"""
for r in task.resources:
if r.node.id == node['id']:
return [p.address for p in r.ports]
def _get_pxe_mac_path(mac):
"""Convert a MAC address into a PXE config file name.
:param mac: A mac address string in the format xx:xx:xx:xx:xx:xx.
:returns: the path to the config file.
"""
return os.path.join(
CONF.pxe.tftp_root,
'pxelinux.cfg',
"01-" + mac.replace(":", "-").lower()
)
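For example, with the default tftp_root the mapping looks like this (illustrative MAC address):

_get_pxe_mac_path('00:11:22:33:44:55')  # -> '/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55'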
def _get_pxe_config_file_path(instance_uuid):
"""Generate the path for an instances PXE config file."""
return os.path.join(CONF.pxe.tftp_root, instance_uuid, 'config')
def _get_image_dir_path(d_info):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.pxe.images_path, d_info['instance_name'])
def _get_image_file_path(d_info):
"""Generate the full path for an instances disk."""
return os.path.join(_get_image_dir_path(d_info), 'disk')
@lockutils.synchronized('master_image', 'ironic-')
def _link_master_image(path, dest_path):
"""Create a link from path to dest_path using locking to
avoid image manipulation during the process.
"""
if os.path.exists(path):
os.link(path, dest_path)
@lockutils.synchronized('master_image', 'ironic-')
def _unlink_master_image(path):
#TODO(ghe): keep images for a while (a kind of local cache).
# If an image has been used, it's likely to be used again; while there
# is no space pressure we can keep it, so next time only a new link
# needs to be created.
# Replacement algorithms to look at: disk space (as the trigger),
# LRU, age-based...
# os.statvfs
# heapq.nlargest(1, [(f, os.stat('./' + f).st_ctime) for f in
# os.listdir('.') if os.stat('./' + f).st_nlink == 1], key=lambda s: s[1])
if os.path.exists(path) and os.stat(path).st_nlink == 1:
utils.unlink_without_raise(path)
@lockutils.synchronized('master_image', 'ironic-')
def _create_master_image(tmp_path, master_uuid, path):
"""With recently download image, use it as master image, and link to
instances uuid. Uses file locking to avoid image maniputalion
during the process.
"""
if not os.path.exists(master_uuid):
os.link(tmp_path, master_uuid)
os.link(master_uuid, path)
os.unlink(tmp_path)
@lockutils.synchronized('get_image', 'ironic-')
def _download_in_progress(lock_file):
"""Get image file lock to avoid downloading the same image
simultaneously.
"""
if not os.path.exists(lock_file):
open(lock_file, 'w')
return False
else:
return True
@lockutils.synchronized('get_image', 'ironic-')
def _remove_download_in_progress_lock(lock_file):
"""Removes image file lock to indicate that image download has finished
and we can start to use it.
"""
fileutils.delete_if_exists(lock_file)
def _get_image(ctx, path, uuid, master_path=None, image_service=None):
#TODO(ghe): Revise this logic and document the process. Bug #1199665
# When master_path is defined, we save the images in that directory, using
# the image uuid as the file name. Each deployment that uses one of these
# images creates a hard link to keep track of it. When the link count of a
# master image drops to 1, it can be deleted.
#TODO(ghe): hard links and link counting do not behave the same on all filesystems
#TODO(ghe): timeout and retry for downloads
def _wait_for_download():
if not os.path.exists(lock_file):
raise loopingcall.LoopingCallDone()
# If the download of the needed image is in progress (lock file present),
# we wait until the lock disappears and then create the link.
if master_path is None:
#NOTE(ghe): We don't share images between instances/hosts
images.fetch_to_raw(ctx, uuid, path, image_service)
else:
master_uuid = os.path.join(master_path,
service_utils.parse_image_ref(uuid)[0])
lock_file = os.path.join(master_path, master_uuid + '.lock')
_link_master_image(master_uuid, path)
if not os.path.exists(path):
fileutils.ensure_tree(master_path)
if not _download_in_progress(lock_file):
with fileutils.remove_path_on_error(lock_file):
#TODO(ghe): logging when image cannot be created
fd, tmp_path = tempfile.mkstemp(dir=master_path)
os.close(fd)
images.fetch_to_raw(ctx, uuid, tmp_path, image_service)
_create_master_image(tmp_path, master_uuid, path)
_remove_download_in_progress_lock(lock_file)
else:
#TODO(ghe): expiration time
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_download)
timer.start(interval=1).wait()
_link_master_image(master_uuid, path)
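A sketch of the on-disk state the master-image cache maintains, assuming the default pxe.instance_master_path and pxe.images_path values (names are illustrative):

/var/lib/ironic/master_images/<image_uuid>        master copy; gains one hard link per deployment using it
/var/lib/ironic/master_images/<image_uuid>.lock   present only while a download is in progress
/var/lib/ironic/images/<instance_name>/disk       hard link created for the instance by _link_master_image()

When the last deployment using a master image is torn down, its link count drops back to 1 and _unlink_master_image() removes it.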
def _cache_tftp_images(ctx, node, pxe_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
d_info = _parse_driver_info(node)
fileutils.ensure_tree(
os.path.join(CONF.pxe.tftp_root, node['instance_uuid']))
LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
d_info['instance_name'])
for label in pxe_info:
(uuid, path) = pxe_info[label]
if not os.path.exists(path):
_get_image(ctx, path, uuid, CONF.pxe.tftp_master_path, None)
def _cache_instance_image(ctx, node):
"""Fetch the instance's image from Glance
This method pulls the relevant AMI and associated kernel and ramdisk,
and the deploy kernel and ramdisk from Glance, and writes them
to the appropriate places on local disk.
Both sets of kernel and ramdisk are needed for PXE booting, so these
are stored under CONF.pxe.tftp_root.
At present, the AMI is cached and certain files are injected.
Debian/ubuntu-specific assumptions are made regarding the injected
files. In a future revision, this functionality will be replaced by a
more scalable and os-agnostic approach: the deployment ramdisk will
fetch from Glance directly, and write its own last-mile configuration.
"""
d_info = _parse_driver_info(node)
fileutils.ensure_tree(_get_image_dir_path(d_info))
image_path = _get_image_file_path(d_info)
uuid = d_info['image_source']
LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
{'ami': uuid, 'name': d_info['instance_name']})
if not os.path.exists(image_path):
_get_image(ctx, image_path, uuid, CONF.pxe.instance_master_path)
return (uuid, image_path)
def _get_tftp_image_info(node):
"""Generate the paths for tftp files for this instance
Raises IronicException if
- instance does not contain kernel_id or ramdisk_id
- deploy_kernel_id or deploy_ramdisk_id can not be read from
driver_info and defaults are not set
"""
#TODO(ghe): Called multiple times. Should we store image_info?
d_info = _parse_driver_info(node)
image_info = {
'deploy_kernel': [None, None],
'deploy_ramdisk': [None, None],
}
for label in image_info:
image_info[label][0] = str(d_info[label]).split('/')[-1]
image_info[label][1] = os.path.join(CONF.pxe.tftp_root,
node['instance_uuid'], label)
ctx = context.get_admin_context()
glance_service = service.Service(version=1, context=ctx)
iproperties = glance_service.show(d_info['image_source'])['properties']
for label in ('kernel', 'ramdisk'):
image_info[label] = [None, None]
image_info[label][0] = str(iproperties[label + '_id']).split('/')[-1]
image_info[label][1] = os.path.join(CONF.pxe.tftp_root,
node['instance_uuid'], label)
return image_info
def _cache_images(node, pxe_info):
"""Prepare all the images for this instance."""
ctx = context.get_admin_context()
#TODO(ghe): parallelize downloads
#TODO(ghe): Embedded image client in ramdisk
# - Get rid of iscsi, image location in baremetal service node and
# image service, no master image, no image outdated...
# - security concerns
_cache_tftp_images(ctx, node, pxe_info)
_cache_instance_image(ctx, node)
#TODO(ghe): file injection
# http://lists.openstack.org/pipermail/openstack-dev/2013-May/008728.html
# http://lists.openstack.org/pipermail/openstack-dev/2013-July/011769.html
# _inject_into_image(d_info, network_info, injected_files, admin_password)
def _destroy_images(d_info):
"""Delete instance's image file."""
image_uuid = service_utils.parse_image_ref(d_info['image_source'])[0]
utils.unlink_without_raise(_get_image_file_path(d_info))
utils.rmtree_without_raise(_get_image_dir_path(d_info))
master_image = os.path.join(CONF.pxe.instance_master_path, image_uuid)
_unlink_master_image(master_image)
def _create_pxe_config(task, node, pxe_info):
"""Generate pxe configuration file and link mac ports to it for
tftp booting.
"""
fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root,
node['instance_uuid']))
fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root,
'pxelinux.cfg'))
pxe_config_file_path = _get_pxe_config_file_path(node['instance_uuid'])
pxe_config = _build_pxe_config(node, pxe_info)
utils.write_to_file(pxe_config_file_path, pxe_config)
for port in _get_node_mac_addresses(task, node):
mac_path = _get_pxe_mac_path(port)
utils.unlink_without_raise(mac_path)
utils.create_link_without_raise(pxe_config_file_path, mac_path)
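For reference, the resulting tftp layout mirrors the one documented in the removed nova driver's activate_bootloader() docstring (shown further down in this diff), roughly:

/tftpboot/
    <instance_uuid>/
        kernel
        ramdisk
        deploy_kernel
        deploy_ramdisk
        config
    pxelinux.cfg/
        01-<mac> -> ../<instance_uuid>/config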
class PXEDeploy(base.DeployInterface):
"""PXE Deploy Interface: just a stub until the real driver is ported."""
def validate(self, nodes):
pass
def deploy(self, task, nodes):
pass
def tear_down(self, task, nodes):
pass
def validate(self, node):
"""Validate the driver-specific Node deployment info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node to validate.
:raises: InvalidParameterValue.
"""
_parse_driver_info(node)
def deploy(self, task, node):
"""Perform a deployment to a node.
Given a node with complete metadata, deploy the indicated image
to the node.
:param task: a TaskManager instance.
:param node: the Node to act upon.
"""
pxe_info = _get_tftp_image_info(node)
_create_pxe_config(task, node, pxe_info)
_cache_images(node, pxe_info)
local_status = {'error': '', 'started': False}
def _wait_for_deploy():
"""Called at an interval until the deployment completes."""
try:
node.refresh()
status = node['task_state']
if (status == states.DEPLOYING
and local_status['started'] is False):
LOG.info(_("PXE deploy started for instance %s")
% node['instance_uuid'])
local_status['started'] = True
elif status in (states.DEPLOYDONE,
states.ACTIVE):
LOG.info(_("PXE deploy completed for instance %s")
% node['instance_uuid'])
raise loopingcall.LoopingCallDone()
elif status == states.DEPLOYFAIL:
local_status['error'] = _("PXE deploy failed for"
" instance %s")
except exception.NodeNotFound:
local_status['error'] = _("Baremetal node deleted"
"while waiting for deployment"
" of instance %s")
if (CONF.pxe.pxe_deploy_timeout and
timeutils.utcnow() > expiration):
local_status['error'] = _("Timeout reached while waiting for "
"PXE deploy of instance %s")
if local_status['error']:
raise loopingcall.LoopingCallDone()
expiration = timeutils.utcnow() + datetime.timedelta(
seconds=CONF.pxe.pxe_deploy_timeout)
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
timer.start(interval=1).wait()
if local_status['error']:
raise exception.InstanceDeployFailure(
local_status['error'] % node['instance_uuid'])
def tear_down(self, task, node):
"""Tear down a previous deployment.
Given a node that has been previously deployed to,
do all cleanup and tear down necessary to "un-deploy" that node.
:param task: a TaskManager instance.
:param node: the Node to act upon.
"""
#FIXME(ghe): Getting the image info may fail if the image was deleted from glance
# Retrieve image info and store in db
# If we keep master images, no need to get the info, we may ignore this
pxe_info = _get_tftp_image_info(node)
d_info = _parse_driver_info(node)
for label in pxe_info:
(uuid, path) = pxe_info[label]
master_path = os.path.join(CONF.pxe.tftp_master_path, uuid)
utils.unlink_without_raise(path)
_unlink_master_image(master_path)
utils.unlink_without_raise(_get_pxe_config_file_path(
node['instance_uuid']))
for port in _get_node_mac_addresses(task, node):
mac_path = _get_pxe_mac_path(port)
utils.unlink_without_raise(mac_path)
utils.rmtree_without_raise(
os.path.join(CONF.pxe.tftp_root, node['instance_uuid']))
_destroy_images(d_info)
class PXERescue(base.RescueInterface):
"""PXE Rescue Interface: just a stub until the real driver is ported."""
def validate(self, nodes):
def validate(self, node):
pass
def rescue(self, task, nodes):
def rescue(self, task, node):
pass
def unrescue(self, task, nodes):
def unrescue(self, task, node):
pass
@@ -58,7 +536,7 @@ class IPMIVendorPassthru(base.VendorInterface):
if method == 'set_boot_device':
return node.driver.vendor._set_boot_device(
task, node,
args.get('device'),
args.get('persistent'))
kwargs.get('device'),
kwargs.get('persistent'))
else:
return


@@ -2,7 +2,7 @@ default deploy
label deploy
kernel ${pxe_options.deployment_aki_path}
append initrd=${pxe_options.deployment_ari_path} selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn=${pxe_options.deployment_iscsi_iqn} deployment_id=${pxe_options.deployment_id} deployment_key=${pxe_options.deployment_key} troubleshoot=0 ${pxe_options.pxe_append_params}
append initrd=${pxe_options.deployment_ari_path} selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn=${pxe_options.deployment_iscsi_iqn} deployment_id=${pxe_options.deployment_id} deployment_key=${pxe_options.deployment_key} ${pxe_options.pxe_append_params}
ipappend 3


@@ -1,492 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for PXE bare-metal nodes.
"""
import datetime
import os
from oslo.config import cfg
from nova.compute import instance_types
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic import db
from ironic.manager import base
from ironic.openstack.common.db import exception as db_exc
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
from ironic.openstack.common import loopingcall
from ironic.openstack.common import timeutils
pxe_opts = [
cfg.StrOpt('deploy_kernel',
help='Default kernel image ID used in deployment phase'),
cfg.StrOpt('deploy_ramdisk',
help='Default ramdisk image ID used in deployment phase'),
cfg.StrOpt('net_config_template',
default='$pybasedir/ironic/net-dhcp.ubuntu.template',
help='Template file for injected network config'),
cfg.StrOpt('pxe_append_params',
help='additional append parameters for baremetal PXE boot'),
cfg.StrOpt('pxe_config_template',
default='$pybasedir/ironic/pxe_config.template',
help='Template file for PXE configuration'),
cfg.IntOpt('pxe_deploy_timeout',
help='Timeout for PXE deployments. Default: 0 (unlimited)',
default=0),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(pxe_opts)
CONF.import_opt('use_ipv6', 'ironic.netconf')
CHEETAH = None
def _get_cheetah():
global CHEETAH
if CHEETAH is None:
from Cheetah import Template
CHEETAH = Template.Template
return CHEETAH
def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
deployment_aki_path, deployment_ari_path,
aki_path, ari_path):
"""Build the PXE config file for a node
This method builds the PXE boot configuration file for a node,
given all the required parameters.
The resulting file has both a "deploy" and "boot" label, which correspond
to the two phases of booting. This may be extended later.
"""
LOG.debug(_("Building PXE config for deployment %s.") % deployment_id)
pxe_options = {
'deployment_id': deployment_id,
'deployment_key': deployment_key,
'deployment_iscsi_iqn': deployment_iscsi_iqn,
'deployment_aki_path': deployment_aki_path,
'deployment_ari_path': deployment_ari_path,
'aki_path': aki_path,
'ari_path': ari_path,
'pxe_append_params': CONF.pxe_append_params,
}
cheetah = _get_cheetah()
pxe_config = str(cheetah(
open(CONF.pxe_config_template).read(),
searchList=[{'pxe_options': pxe_options,
'ROOT': '${ROOT}',
}]))
return pxe_config
def build_network_config(network_info):
# TODO(deva): fix assumption that device names begin with "eth"
# and fix assumption about ordering
try:
assert isinstance(network_info, list)
except AssertionError:
network_info = [network_info]
interfaces = []
for id, (network, mapping) in enumerate(network_info):
address_v6 = None
gateway_v6 = None
netmask_v6 = None
if CONF.use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
netmask_v6 = mapping['ip6s'][0]['netmask']
gateway_v6 = mapping['gateway_v6']
interface = {
'name': 'eth%d' % id,
'address': mapping['ips'][0]['ip'],
'gateway': mapping['gateway'],
'netmask': mapping['ips'][0]['netmask'],
'dns': ' '.join(mapping['dns']),
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
}
interfaces.append(interface)
cheetah = _get_cheetah()
network_config = str(cheetah(
open(CONF.net_config_template).read(),
searchList=[
{'interfaces': interfaces,
'use_ipv6': CONF.use_ipv6,
}
]))
return network_config
def get_deploy_aki_id(instance_type):
return instance_type.get('extra_specs', {}).\
get('baremetal:deploy_kernel_id', CONF.deploy_kernel)
def get_deploy_ari_id(instance_type):
return instance_type.get('extra_specs', {}).\
get('baremetal:deploy_ramdisk_id', CONF.deploy_ramdisk)
def get_image_dir_path(instance):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'])
def get_image_file_path(instance):
"""Generate the full path for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'], 'disk')
def get_pxe_config_file_path(instance):
"""Generate the path for an instances PXE config file."""
return os.path.join(CONF.tftp_root, instance['uuid'], 'config')
def get_partition_sizes(instance):
instance_type = instance_types.extract_instance_type(instance)
root_mb = instance_type['root_gb'] * 1024
swap_mb = instance_type['swap']
# NOTE(deva): For simpler code paths on the deployment side,
# we always create a swap partition. If the flavor
# does not specify any swap, we default to 1MB
if swap_mb < 1:
swap_mb = 1
return (root_mb, swap_mb)
def get_pxe_mac_path(mac):
"""Convert a MAC address into a PXE config file name."""
return os.path.join(
CONF.tftp_root,
'pxelinux.cfg',
"01-" + mac.replace(":", "-").lower()
)
def get_tftp_image_info(instance, instance_type):
"""Generate the paths for tftp files for this instance
Raises NovaException if
- instance does not contain kernel_id or ramdisk_id
- deploy_kernel_id or deploy_ramdisk_id can not be read from
instance_type['extra_specs'] and defaults are not set
"""
image_info = {
'kernel': [None, None],
'ramdisk': [None, None],
'deploy_kernel': [None, None],
'deploy_ramdisk': [None, None],
}
try:
image_info['kernel'][0] = str(instance['kernel_id'])
image_info['ramdisk'][0] = str(instance['ramdisk_id'])
image_info['deploy_kernel'][0] = get_deploy_aki_id(instance_type)
image_info['deploy_ramdisk'][0] = get_deploy_ari_id(instance_type)
except KeyError:
pass
missing_labels = []
for label in image_info.keys():
(uuid, path) = image_info[label]
if not uuid:
missing_labels.append(label)
else:
image_info[label][1] = os.path.join(CONF.tftp_root,
instance['uuid'], label)
if missing_labels:
raise exception.NovaException(_(
"Can not activate PXE bootloader. The following boot parameters "
"were not passed to baremetal driver: %s") % missing_labels)
return image_info
class PXE(base.NodeDriver):
"""PXE bare metal driver."""
def __init__(self, virtapi):
super(PXE, self).__init__(virtapi)
def _collect_mac_addresses(self, context, node):
macs = set()
for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
if nic['address']:
macs.add(nic['address'])
return sorted(macs)
def _cache_tftp_images(self, context, instance, image_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(CONF.tftp_root, instance['uuid']))
LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
instance['name'])
for label in image_info.keys():
(uuid, path) = image_info[label]
utils.cache_image(
context=context,
target=path,
image_id=uuid,
user_id=instance['user_id'],
project_id=instance['project_id'],
)
def _cache_image(self, context, instance, image_meta):
"""Fetch the instance's image from Glance
This method pulls the relevant AMI and associated kernel and ramdisk,
and the deploy kernel and ramdisk from Glance, and writes them
to the appropriate places on local disk.
Both sets of kernel and ramdisk are needed for PXE booting, so these
are stored under CONF.tftp_root.
At present, the AMI is cached and certain files are injected.
Debian/ubuntu-specific assumptions are made regarding the injected
files. In a future revision, this functionality will be replaced by a
more scalable and os-agnostic approach: the deployment ramdisk will
fetch from Glance directly, and write its own last-mile configuration.
"""
fileutils.ensure_tree(get_image_dir_path(instance))
image_path = get_image_file_path(instance)
LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
{'ami': image_meta['id'], 'name': instance['name']})
utils.cache_image(context=context,
target=image_path,
image_id=image_meta['id'],
user_id=instance['user_id'],
project_id=instance['project_id']
)
return [image_meta['id'], image_path]
def _inject_into_image(self, context, node, instance, network_info,
injected_files=None, admin_password=None):
"""Inject last-mile configuration into instances image
Much of this method is a hack around DHCP and cloud-init
not working together with baremetal provisioning yet.
"""
# NOTE(deva): We assume that if we're not using a kernel,
# then the target partition is the first partition
partition = None
if not instance['kernel_id']:
partition = "1"
ssh_key = None
if 'key_data' in instance and instance['key_data']:
ssh_key = str(instance['key_data'])
if injected_files is None:
injected_files = []
else:
# NOTE(deva): copy so we dont modify the original
injected_files = list(injected_files)
net_config = build_network_config(network_info)
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
LOG.debug(_("Injecting files into image for instance %(name)s") %
{'name': instance['name']})
utils.inject_into_image(
image=get_image_file_path(instance),
key=ssh_key,
net=net_config,
metadata=instance['metadata'],
admin_password=admin_password,
files=injected_files,
partition=partition,
)
def cache_images(self, context, node, instance,
admin_password, image_meta, injected_files, network_info):
"""Prepare all the images for this instance."""
instance_type = self.virtapi.instance_type_get(
context, instance['instance_type_id'])
tftp_image_info = get_tftp_image_info(instance, instance_type)
self._cache_tftp_images(context, instance, tftp_image_info)
self._cache_image(context, instance, image_meta)
self._inject_into_image(context, node, instance, network_info,
injected_files, admin_password)
def destroy_images(self, context, node, instance):
"""Delete instance's image file."""
utils.unlink_without_raise(get_image_file_path(instance))
utils.rmtree_without_raise(get_image_dir_path(instance))
def activate_bootloader(self, context, node, instance):
"""Configure PXE boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
and stored in /tftpboot/{uuid}/
This method writes the instances config file, and then creates
symlinks for each MAC address in the instance.
By default, the complete layout looks like this:
/tftpboot/
./{uuid}/
kernel
ramdisk
deploy_kernel
deploy_ramdisk
config
./pxelinux.cfg/
{mac} -> ../{uuid}/config
"""
instance_type = self.virtapi.instance_type_get(
context, instance['instance_type_id'])
image_info = get_tftp_image_info(instance, instance_type)
(root_mb, swap_mb) = get_partition_sizes(instance)
pxe_config_file_path = get_pxe_config_file_path(instance)
image_file_path = get_image_file_path(instance)
deployment_key = utils.random_alnum(32)
deployment_iscsi_iqn = "iqn-%s" % instance['uuid']
db.bm_node_update(context, node['id'],
{'deploy_key': deployment_key,
'image_path': image_file_path,
'pxe_config_path': pxe_config_file_path,
'root_mb': root_mb,
'swap_mb': swap_mb})
pxe_config = build_pxe_config(
node['id'],
deployment_key,
deployment_iscsi_iqn,
image_info['deploy_kernel'][1],
image_info['deploy_ramdisk'][1],
image_info['kernel'][1],
image_info['ramdisk'][1],
)
utils.write_to_file(pxe_config_file_path, pxe_config)
macs = self._collect_mac_addresses(context, node)
for mac in macs:
mac_path = get_pxe_mac_path(mac)
utils.unlink_without_raise(mac_path)
utils.create_link_without_raise(pxe_config_file_path, mac_path)
def deactivate_bootloader(self, context, node, instance):
"""Delete PXE bootloader images and config."""
try:
db.bm_node_update(context, node['id'],
{'deploy_key': None,
'image_path': None,
'pxe_config_path': None,
'root_mb': 0,
'swap_mb': 0})
except exception.NodeNotFound:
pass
# NOTE(danms): the instance_type extra_specs do not need to be
# present/correct at deactivate time, so pass something empty
# to avoid an extra lookup
instance_type = dict(extra_specs={
'baremetal:deploy_ramdisk_id': 'ignore',
'baremetal:deploy_kernel_id': 'ignore'})
try:
image_info = get_tftp_image_info(instance, instance_type)
except exception.NovaException:
pass
else:
for label in image_info.keys():
(uuid, path) = image_info[label]
utils.unlink_without_raise(path)
utils.unlink_without_raise(get_pxe_config_file_path(instance))
try:
macs = self._collect_mac_addresses(context, node)
except db_exc.DBError:
pass
else:
for mac in macs:
utils.unlink_without_raise(get_pxe_mac_path(mac))
utils.rmtree_without_raise(
os.path.join(CONF.tftp_root, instance['uuid']))
def activate_node(self, context, node, instance):
"""Wait for PXE deployment to complete."""
locals = {'error': '', 'started': False}
def _wait_for_deploy():
"""Called at an interval until the deployment completes."""
try:
row = db.bm_node_get(context, node['id'])
if instance['uuid'] != row.get('instance_uuid'):
locals['error'] = _("Node associated with another instance"
" while waiting for deploy of %s")
raise loopingcall.LoopingCallDone()
status = row.get('task_state')
if (status == states.DEPLOYING
and locals['started'] is False):
LOG.info(_("PXE deploy started for instance %s")
% instance['uuid'])
locals['started'] = True
elif status in (states.DEPLOYDONE,
states.ACTIVE):
LOG.info(_("PXE deploy completed for instance %s")
% instance['uuid'])
raise loopingcall.LoopingCallDone()
elif status == states.DEPLOYFAIL:
locals['error'] = _("PXE deploy failed for instance %s")
except exception.NodeNotFound:
locals['error'] = _("Baremetal node deleted while waiting "
"for deployment of instance %s")
if (CONF.pxe_deploy_timeout and
timeutils.utcnow() > expiration):
locals['error'] = _("Timeout reached while waiting for "
"PXE deploy of instance %s")
if locals['error']:
raise loopingcall.LoopingCallDone()
expiration = timeutils.utcnow() + datetime.timedelta(
seconds=CONF.pxe_deploy_timeout)
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
timer.start(interval=1).wait()
if locals['error']:
raise exception.InstanceDeployFailure(
locals['error'] % instance['uuid'])
def deactivate_node(self, context, node, instance):
pass


@@ -47,9 +47,11 @@ ssh_info = json.dumps(
pxe_info = json.dumps(
{
'pxe': {
"image_path": "/path/to/image.qcow2",
"image_source": "glance://image-uuid",
"deploy_image_source": "glance://deploy-image-uuid",
"instance_name": "fake_instance_name",
"image_source": "glance://image_uuid",
"deploy_kernel": "glance://deploy_kernel_uuid",
"deploy_ramdisk": "glance://deploy_ramdisk_uuid",
"root_gb": 100,
}
})


@@ -0,0 +1,11 @@
default deploy
label deploy
kernel /tftpboot/instance_uuid_123/deploy_kernel
append initrd=/tftpboot/instance_uuid_123/deploy_ramdisk selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn=iqn-instance_uuid_123 deployment_id=123 deployment_key=0123456789ABCDEFGHIJKLMNOPQRSTUV test_param
ipappend 3
label boot
kernel /tftpboot/instance_uuid_123/kernel
append initrd=/tftpboot/instance_uuid_123/ramdisk root=${ROOT} ro test_param


@@ -16,7 +16,610 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for PXE driver.
"""Test class for PXE driver."""
TODO
"""
import mox
import os
import tempfile
import threading
import time
from oslo.config import cfg
from ironic.common import exception
from ironic.common.glance_service import base_image_service
from ironic.common.glance_service import service_utils
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.db import api as dbapi
from ironic.drivers.modules import pxe
from ironic.openstack.common import context
from ironic.openstack.common import fileutils
from ironic.openstack.common import jsonutils as json
from ironic.tests import base
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
CONF = cfg.CONF
INFO_DICT = json.loads(db_utils.pxe_info).get('pxe')
class PXEValidateParametersTestCase(base.TestCase):
def test__parse_driver_info_good(self):
# make sure we get back the expected things
node = db_utils.get_test_node(
driver='fake_pxe',
driver_info=db_utils.pxe_info)
info = pxe._parse_driver_info(node)
self.assertIsNotNone(info.get('instance_name'))
self.assertIsNotNone(info.get('image_source'))
self.assertIsNotNone(info.get('deploy_kernel'))
self.assertIsNotNone(info.get('deploy_ramdisk'))
self.assertIsNotNone(info.get('root_gb'))
self.mox.VerifyAll()
def test__parse_driver_info_missing_instance_name(self):
# make sure error is raised when info is missing
tmp_dict = dict(INFO_DICT)
del tmp_dict['instance_name']
info = json.dumps({'pxe': tmp_dict})
node = db_utils.get_test_node(driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
pxe._parse_driver_info,
node)
self.mox.VerifyAll()
def test__parse_driver_info_missing_instance_source(self):
# make sure error is raised when info is missing
tmp_dict = dict(INFO_DICT)
del tmp_dict['image_source']
info = json.dumps({'pxe': tmp_dict})
node = db_utils.get_test_node(driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
pxe._parse_driver_info,
node)
self.mox.VerifyAll()
def test__parse_driver_info_missing_deploy_kernel(self):
# make sure error is raised when info is missing
tmp_dict = dict(INFO_DICT)
del tmp_dict['deploy_kernel']
info = json.dumps({'pxe': tmp_dict})
node = db_utils.get_test_node(driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
pxe._parse_driver_info,
node)
self.mox.VerifyAll()
def test__parse_driver_info_missing_deploy_ramdisk(self):
# make sure error is raised when info is missing
tmp_dict = dict(INFO_DICT)
del tmp_dict['deploy_ramdisk']
info = json.dumps({'pxe': tmp_dict})
node = db_utils.get_test_node(driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
pxe._parse_driver_info,
node)
self.mox.VerifyAll()
def test__parse_driver_info_missing_root_gb(self):
# make sure error is raised when info is missing
tmp_dict = dict(INFO_DICT)
del tmp_dict['root_gb']
info = json.dumps({'pxe': tmp_dict})
node = db_utils.get_test_node(driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
pxe._parse_driver_info,
node)
self.mox.VerifyAll()
def test__get_pxe_mac_path(self):
mac = '00:11:22:33:44:55:66'
self.assertEqual(pxe._get_pxe_mac_path(mac),
'/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66')
def test__link_master_image(self):
temp_dir = tempfile.mkdtemp()
orig_path = os.path.join(temp_dir, 'orig_path')
dest_path = os.path.join(temp_dir, 'dest_path')
open(orig_path, 'w').close()
pxe._link_master_image(orig_path, dest_path)
self.assertTrue(os.path.exists(dest_path))
self.assertEqual(os.stat(dest_path).st_nlink, 2)
def test__unlink_master_image(self):
temp_dir = tempfile.mkdtemp()
orig_path = os.path.join(temp_dir, 'orig_path')
open(orig_path, 'w').close()
pxe._unlink_master_image(orig_path)
self.assertFalse(os.path.exists(orig_path))
def test__create_master_image(self):
temp_dir = tempfile.mkdtemp()
master_path = os.path.join(temp_dir, 'master_path')
instance_path = os.path.join(temp_dir, 'instance_path')
tmp_path = os.path.join(temp_dir, 'tmp_path')
open(tmp_path, 'w').close()
pxe._create_master_image(tmp_path, master_path, instance_path)
self.assertTrue(os.path.exists(master_path))
self.assertTrue(os.path.exists(instance_path))
self.assertFalse(os.path.exists(tmp_path))
self.assertEqual(os.stat(master_path).st_nlink, 2)
def test__download_in_progress(self):
temp_dir = tempfile.mkdtemp()
lock_file = os.path.join(temp_dir, 'lock_file')
self.assertFalse(pxe._download_in_progress(lock_file))
self.assertTrue(os.path.exists(lock_file))
def test__download_in_progress_wait(self):
try:
CONF.set_default('auth_strategy', 'keystone')
except Exception:
opts = [
cfg.StrOpt('auth_strategy', default='keystone'),
]
CONF.register_opts(opts)
ctx = context.RequestContext(auth_token=True)
uuid = 'instance_uuid'
temp_dir = tempfile.mkdtemp()
master_path = os.path.join(temp_dir, 'master_path')
instance_path = os.path.join(temp_dir, 'instance_path')
os.mkdir(master_path)
os.mkdir(instance_path)
lock_file = os.path.join(master_path, 'instance_uuid.lock')
open(lock_file, 'w').close()
class handler_deploying(threading.Thread):
def __init__(self, lock_file):
threading.Thread.__init__(self)
self.lock_file = lock_file
def run(self):
time.sleep(2)
open(os.path.join(master_path, 'instance_uuid'), 'w').close()
pxe._remove_download_in_progress_lock(self.lock_file)
handler = handler_deploying(lock_file)
handler.start()
pxe._get_image(ctx, os.path.join(instance_path, 'instance_uuid'),
uuid, master_path)
self.assertFalse(os.path.exists(lock_file))
self.assertTrue(os.path.exists(os.path.join(instance_path,
'instance_uuid')))
self.assertEqual(os.stat(os.path.join(master_path, 'instance_uuid')).
st_nlink, 2)
class PXEPrivateMethodsTestCase(base.TestCase):
def setUp(self):
super(PXEPrivateMethodsTestCase, self).setUp()
self.node = db_utils.get_test_node(
driver='fake_pxe',
driver_info=db_utils.pxe_info,
instance_uuid='instance_uuid_123',
id=123)
def test__get_tftp_image_info(self):
properties = {'properties': {u'kernel_id': u'instance_kernel_uuid',
u'ramdisk_id': u'instance_ramdisk_uuid'}}
expected_info = {'ramdisk':
['instance_ramdisk_uuid',
'/tftpboot/instance_uuid_123/ramdisk'],
'kernel':
['instance_kernel_uuid',
'/tftpboot/instance_uuid_123/kernel'],
'deploy_ramdisk':
['deploy_ramdisk_uuid',
'/tftpboot/instance_uuid_123/deploy_ramdisk'],
'deploy_kernel':
['deploy_kernel_uuid',
'/tftpboot/instance_uuid_123/deploy_kernel']}
self.mox.StubOutWithMock(base_image_service.BaseImageService, '_show')
base_image_service.BaseImageService._show(
'glance://image_uuid',
method='get').AndReturn(properties)
self.mox.ReplayAll()
image_info = pxe._get_tftp_image_info(self.node)
self.assertEqual(image_info, expected_info)
def test__build_pxe_config(self):
instance_uuid = 'instance_uuid_123'
CONF.set_default('pxe_append_params', 'test_param', group='pxe')
template = 'ironic/tests/drivers/pxe_config.template'
pxe_config_template = open(template, 'r').read()
self.mox.StubOutWithMock(utils, 'random_alnum')
utils.random_alnum(32).AndReturn('0123456789ABCDEFGHIJKLMNOPQRSTUV')
image_info = {'deploy_kernel': ['deploy_kernel',
CONF.pxe.tftp_root + '/' +
instance_uuid + '/deploy_kernel'],
'deploy_ramdisk': ['deploy_ramdisk',
CONF.pxe.tftp_root + '/' +
instance_uuid + '/deploy_ramdisk'],
'kernel': ['kernel_id',
CONF.pxe.tftp_root + '/' + instance_uuid +
'/kernel'],
'ramdisk': ['ramdisk_id',
CONF.pxe.tftp_root + '/' + instance_uuid +
'/ramdisk']
}
self.mox.ReplayAll()
pxe_config = pxe._build_pxe_config(self.node, image_info)
self.assertEqual(pxe_config, pxe_config_template)
def test__get_pxe_config_file_path(self):
self.assertEqual('/tftpboot/instance_uuid_123/config',
pxe._get_pxe_config_file_path('instance_uuid_123'))
def test__get_image_dir_path(self):
node = db_utils.get_test_node(
driver='fake_pxe',
driver_info=db_utils.pxe_info,
)
info = pxe._parse_driver_info(node)
self.assertEqual('/var/lib/ironic/images/fake_instance_name',
pxe._get_image_dir_path(info))
def test__get_image_file_path(self):
node = db_utils.get_test_node(
driver='fake_pxe',
driver_info=db_utils.pxe_info,
)
info = pxe._parse_driver_info(node)
self.assertEqual('/var/lib/ironic/images/fake_instance_name/disk',
pxe._get_image_file_path(info))
def test__cache_tftp_images_master_path(self):
temp_dir = tempfile.mkdtemp()
CONF.set_default('tftp_root', temp_dir, group='pxe')
CONF.set_default('tftp_master_path', os.path.join(temp_dir,
'tftp_master_path'),
group='pxe')
image_info = {'deploy_kernel': ['deploy_kernel', temp_dir +
'/instance_uuid_123/deploy_kernel']}
fileutils.ensure_tree(CONF.pxe.tftp_master_path)
fd, tmp_master_image = tempfile.mkstemp(dir=CONF.pxe.tftp_master_path)
self.mox.StubOutWithMock(images, 'fetch_to_raw')
self.mox.StubOutWithMock(tempfile, 'mkstemp')
tempfile.mkstemp(dir=CONF.pxe.tftp_master_path).\
AndReturn((fd, tmp_master_image))
images.fetch_to_raw(None, 'deploy_kernel', tmp_master_image, None).\
AndReturn(None)
self.mox.ReplayAll()
pxe._cache_tftp_images(None, self.node, image_info)
self.mox.VerifyAll()
def test__cache_tftp_images_no_master_path(self):
temp_dir = tempfile.mkdtemp()
CONF.set_default('tftp_root', temp_dir, group='pxe')
CONF.set_default('tftp_master_path', None, group='pxe')
image_info = {'deploy_kernel': ['deploy_kernel',
os.path.join(temp_dir,
'instance_uuid_123/deploy_kernel')]}
self.mox.StubOutWithMock(images, 'fetch_to_raw')
images.fetch_to_raw(None, 'deploy_kernel',
os.path.join(temp_dir,
'instance_uuid_123/deploy_kernel'),
None).AndReturn(None)
self.mox.ReplayAll()
pxe._cache_tftp_images(None, self.node, image_info)
self.mox.VerifyAll()
def test__cache_instance_images_no_master_path(self):
temp_dir = tempfile.mkdtemp()
CONF.set_default('images_path', temp_dir, group='pxe')
CONF.set_default('instance_master_path', None, group='pxe')
self.mox.StubOutWithMock(images, 'fetch_to_raw')
images.fetch_to_raw(None, 'glance://image_uuid',
os.path.join(temp_dir,
'fake_instance_name/disk'),
None).AndReturn(None)
self.mox.ReplayAll()
(uuid, image_path) = pxe._cache_instance_image(None, self.node)
self.mox.VerifyAll()
self.assertEqual(uuid, 'glance://image_uuid')
self.assertEqual(image_path,
os.path.join(temp_dir,
'fake_instance_name/disk'))
def test__cache_instance_images_master_path(self):
temp_dir = tempfile.mkdtemp()
CONF.set_default('images_path', temp_dir, group='pxe')
CONF.set_default('instance_master_path',
os.path.join(temp_dir, 'instance_master_path'),
group='pxe')
fileutils.ensure_tree(CONF.pxe.instance_master_path)
fd, tmp_master_image = tempfile.mkstemp(
dir=CONF.pxe.instance_master_path)
self.mox.StubOutWithMock(images, 'fetch_to_raw')
self.mox.StubOutWithMock(tempfile, 'mkstemp')
self.mox.StubOutWithMock(service_utils, 'parse_image_ref')
tempfile.mkstemp(dir=CONF.pxe.instance_master_path).\
AndReturn((fd, tmp_master_image))
images.fetch_to_raw(None, 'glance://image_uuid',
tmp_master_image,
None).\
AndReturn(None)
service_utils.parse_image_ref('glance://image_uuid').\
AndReturn(('image_uuid', None, None, None))
self.mox.ReplayAll()
(uuid, image_path) = pxe._cache_instance_image(None, self.node)
self.mox.VerifyAll()
self.assertEqual(uuid, 'glance://image_uuid')
self.assertEqual(image_path, temp_dir + '/fake_instance_name/disk')
def test__get_image_download_in_progress(self):
def _create_instance_path(*args):
open(master_path, 'w').close()
return True
temp_dir = tempfile.mkdtemp()
instance_path = os.path.join(temp_dir, 'instance_path')
fileutils.ensure_tree(temp_dir)
master_uuid = 'instance_uuid'
master_path = os.path.join(temp_dir, master_uuid)
lock_file = os.path.join(temp_dir, 'instance_uuid.lock')
self.mox.StubOutWithMock(pxe, '_download_in_progress')
pxe._download_in_progress(lock_file).\
WithSideEffects(_create_instance_path).\
AndReturn(True)
self.mox.ReplayAll()
pxe._get_image(None, instance_path, master_uuid, temp_dir)
self.mox.VerifyAll()
self.assertTrue(os.path.exists(instance_path))
class PXEDriverTestCase(db_base.DbTestCase):
def setUp(self):
super(PXEDriverTestCase, self).setUp()
self.driver = mgr_utils.get_mocked_node_manager(driver='fake_pxe')
self.node = db_utils.get_test_node(
driver='fake_pxe',
driver_info=db_utils.pxe_info,
instance_uuid='instance_uuid_123')
self.dbapi = dbapi.get_instance()
self.dbapi.create_node(self.node)
def test_validate_good(self):
with task_manager.acquire([self.node['uuid']], shared=True) as task:
task.resources[0].driver.deploy.validate(self.node)
def test_validate_fail(self):
tmp_dict = dict(INFO_DICT)
del tmp_dict['image_source']
self.node['driver_info'] = json.dumps({'pxe': tmp_dict})
with task_manager.acquire([self.node['uuid']], shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.resources[0].driver.deploy.validate,
self.node)
def test__get_nodes_mac_addresses(self):
ports = []
ports.append(
self.dbapi.create_port(
db_utils.get_test_port(
id=6,
address='aa:bb:cc',
uuid='bb43dc0b-03f2-4d2e-ae87-c02d7f33cc53',
node_id='123')))
ports.append(
self.dbapi.create_port(
db_utils.get_test_port(
id=7,
address='dd:ee:ff',
uuid='4fc26c0b-03f2-4d2e-ae87-c02d7f33c234',
node_id='123')))
with task_manager.acquire([self.node['uuid']]) as task:
node_macs = pxe._get_node_mac_addresses(task, self.node)
self.assertEqual(node_macs, ['aa:bb:cc', 'dd:ee:ff'])
def test_deploy_good(self):
class node_dict(dict):
@staticmethod
def refresh():
pass
self.node = node_dict(self.node)
self.mox.StubOutWithMock(pxe, '_create_pxe_config')
self.mox.StubOutWithMock(pxe, '_cache_images')
self.mox.StubOutWithMock(pxe, '_get_tftp_image_info')
pxe._get_tftp_image_info(self.node).AndReturn(None)
pxe._create_pxe_config(mox.IgnoreArg(), self.node, None).\
AndReturn(None)
pxe._cache_images(self.node, None).AndReturn(None)
self.mox.ReplayAll()
class handler_deploying(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
self.node = node
def run(self):
self.node['task_state'] = states.DEPLOYING
time.sleep(2)
self.node['task_state'] = states.ACTIVE
handler = handler_deploying(self.node)
handler.start()
with task_manager.acquire([self.node['uuid']], shared=False) as task:
task.resources[0].driver.deploy.deploy(task, self.node)
self.mox.VerifyAll()
def test_deploy_fail(self):
class node_dict(dict):
@staticmethod
def refresh():
pass
self.node = node_dict(self.node)
self.mox.StubOutWithMock(pxe, '_create_pxe_config')
self.mox.StubOutWithMock(pxe, '_cache_images')
self.mox.StubOutWithMock(pxe, '_get_tftp_image_info')
pxe._get_tftp_image_info(self.node).AndReturn(None)
pxe._create_pxe_config(mox.IgnoreArg(), self.node, None).\
AndReturn(None)
pxe._cache_images(self.node, None).AndReturn(None)
self.mox.ReplayAll()
class handler_deploying(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
self.node = node
def run(self):
self.node['task_state'] = states.DEPLOYING
time.sleep(2)
self.node['task_state'] = states.DEPLOYFAIL
handler = handler_deploying(self.node)
handler.start()
with task_manager.acquire([self.node['uuid']], shared=False) as task:
self.assertRaises(exception.InstanceDeployFailure,
task.resources[0].driver.deploy.deploy,
task,
self.node)
self.mox.VerifyAll()
def test_deploy_timeout_fail(self):
class node_dict(dict):
@staticmethod
def refresh():
pass
self.node = node_dict(self.node)
self.mox.StubOutWithMock(pxe, '_create_pxe_config')
self.mox.StubOutWithMock(pxe, '_cache_images')
self.mox.StubOutWithMock(pxe, '_get_tftp_image_info')
pxe._get_tftp_image_info(self.node).AndReturn(None)
pxe._create_pxe_config(mox.IgnoreArg(), self.node, None).\
AndReturn(None)
pxe._cache_images(self.node, None).AndReturn(None)
self.mox.ReplayAll()
CONF.set_default('pxe_deploy_timeout', 2, group='pxe')
with task_manager.acquire([self.node['uuid']], shared=False) as task:
self.assertRaises(exception.InstanceDeployFailure,
task.resources[0].driver.deploy.deploy,
task,
self.node)
self.mox.VerifyAll()
def tear_down_config(self, master=None):
temp_dir = tempfile.mkdtemp()
CONF.set_default('tftp_root', temp_dir, group='pxe')
CONF.set_default('images_path', temp_dir, group='pxe')
ports = []
ports.append(
self.dbapi.create_port(
db_utils.get_test_port(
id=6,
address='aa:bb:cc',
uuid='bb43dc0b-03f2-4d2e-ae87-c02d7f33cc53',
node_id='123')))
d_kernel_path = os.path.join(temp_dir,
'instance_uuid_123/deploy_kernel')
image_info = {'deploy_kernel': ['deploy_kernel_uuid', d_kernel_path]}
self.mox.StubOutWithMock(pxe, '_get_tftp_image_info')
pxe._get_tftp_image_info(self.node).AndReturn(image_info)
self.mox.ReplayAll()
pxecfg_dir = os.path.join(temp_dir, 'pxelinux.cfg')
os.makedirs(pxecfg_dir)
instance_dir = os.path.join(temp_dir, 'instance_uuid_123')
image_dir = os.path.join(temp_dir, 'fake_instance_name')
os.makedirs(instance_dir)
os.makedirs(image_dir)
config_path = os.path.join(instance_dir, 'config')
deploy_kernel_path = os.path.join(instance_dir, 'deploy_kernel')
pxe_mac_path = os.path.join(pxecfg_dir, '01-aa-bb-cc')
image_path = os.path.join(image_dir, 'disk')
open(config_path, 'w').close()
os.link(config_path, pxe_mac_path)
if master:
tftp_master_dir = os.path.join(temp_dir, 'tftp_master')
instance_master_dir = os.path.join(temp_dir, 'instance_master')
CONF.set_default('tftp_master_path', tftp_master_dir, group='pxe')
CONF.set_default('instance_master_path', instance_master_dir,
group='pxe')
os.makedirs(tftp_master_dir)
os.makedirs(instance_master_dir)
master_deploy_kernel_path = os.path.join(tftp_master_dir,
'deploy_kernel_uuid')
master_instance_path = os.path.join(instance_master_dir,
'image_uuid')
open(master_deploy_kernel_path, 'w').close()
open(master_instance_path, 'w').close()
os.link(master_deploy_kernel_path, deploy_kernel_path)
os.link(master_instance_path, image_path)
if master == 'in_use':
deploy_kernel_link = os.path.join(temp_dir,
'deploy_kernel_link')
image_link = os.path.join(temp_dir, 'image_link')
os.link(master_deploy_kernel_path, deploy_kernel_link)
os.link(master_instance_path, image_link)
else:
CONF.set_default('tftp_master_path', '', group='pxe')
CONF.set_default('instance_master_path', '', group='pxe')
open(deploy_kernel_path, 'w').close()
open(image_path, 'w').close()
with task_manager.acquire([self.node['uuid']], shared=False) as task:
task.resources[0].driver.deploy.tear_down(task, self.node)
self.mox.VerifyAll()
assert_false_path = [config_path, deploy_kernel_path, image_path,
pxe_mac_path, image_dir, instance_dir]
for path in assert_false_path:
self.assertFalse(os.path.exists(path))
return temp_dir
def test_tear_down_no_master_images(self):
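# With no master paths configured, only per-instance files exist and
# all of them must be removed.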
self.tear_down_config(master=None)
def test_tear_down_master_images_not_in_use(self):
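# The master copies are referenced only by this instance, so tear_down
# should remove them as well.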
temp_dir = self.tear_down_config(master='not_in_use')
master_d_kernel_path = os.path.join(temp_dir,
'tftp_master/deploy_kernel_uuid')
master_instance_path = os.path.join(temp_dir,
'instance_master/image_uuid')
self.assertFalse(os.path.exists(master_d_kernel_path))
self.assertFalse(os.path.exists(master_instance_path))
def test_tear_down_master_images_in_use(self):
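# An extra hardlink still references each master copy, so tear_down
# must leave the masters in place.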
temp_dir = self.tear_down_config(master='in_use')
master_d_kernel_path = os.path.join(temp_dir,
'tftp_master/deploy_kernel_uuid')
master_instance_path = os.path.join(temp_dir,
'instance_master/image_uuid')
self.assertTrue(os.path.exists(master_d_kernel_path))
self.assertTrue(os.path.exists(master_instance_path))
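
# The three tear-down tests above rely on hardlink reference counting: the
# per-instance copy is always unlinked, while the shared copy under
# tftp_master_path / instance_master_path is removed only when nothing else
# still links to it. A minimal sketch of that bookkeeping follows; the helper
# name is illustrative, not the driver's actual function.
import os


def _unlink_master_if_unused(instance_path, master_path):
    # Drop the per-instance hardlink first.
    if os.path.exists(instance_path):
        os.unlink(instance_path)
    # st_nlink counts the hardlinks that remain; once only the master copy
    # itself is left, it can be removed as well.
    if os.path.exists(master_path) and os.stat(master_path).st_nlink == 1:
        os.unlink(master_path)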

View File

@ -1,583 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal pxe driver."""
import os
import mox
from oslo.config import cfg
from testtools import matchers
from ironic.common import exception
from ironic.common import states
from ironic import db
from ironic.manager import pxe
from ironic.openstack.common.db import exception as db_exc
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.image import fake as fake_image
from ironic.tests import utils
from ironic.virt.disk import api as disk_api
from ironic.virt import fake as fake_virt
CONF = cfg.CONF
COMMON_CONFIG = dict(
firewall_driver='ironic.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_CONFIG = dict(
driver='ironic.pxe.PXE',
instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='ironic.fake.FakePowerManager',
vif_driver='ironic.fake.FakeVifDriver',
volume_driver='ironic.fake.FakeVolumeDriver',
)
class BareMetalPXETestCase(db_base.BMDBTestCase):
def setUp(self):
super(BareMetalPXETestCase, self).setUp()
self.config(**COMMON_CONFIG)
self.config(**BAREMETAL_CONFIG)
self.driver = pxe.PXE(fake_virt.FakeVirtAPI())
fake_image.stub_out_image_service(self.stubs)
self.addCleanup(fake_image.FakeImageService_reset)
self.context = utils.get_test_admin_context()
self.test_block_device_info = None,
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info(),
self.node_info = db_utils.new_bm_node(
service_host='test_host',
cpus=4,
memory_mb=2048,
prov_mac_address='11:11:11:11:11:11',
)
self.nic_info = [
{'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
'port_no': 1},
{'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
'port_no': 2},
]
def _create_node(self):
self.node = db.bm_node_create(self.context, self.node_info)
for nic in self.nic_info:
db.bm_interface_create(
self.context,
self.node['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
self.instance['node'] = self.node['id']
self.spawn_params = dict(
admin_password='test_pass',
block_device_info=self.test_block_device_info,
context=self.context,
image_meta=utils.get_test_image_info(None,
self.instance),
injected_files=[('/fake/path', 'hello world')],
instance=self.instance,
network_info=self.test_network_info,
)
class PXEClassMethodsTestCase(BareMetalPXETestCase):
def test_build_pxe_config(self):
args = {
'deployment_id': 'aaa',
'deployment_key': 'bbb',
'deployment_iscsi_iqn': 'ccc',
'deployment_aki_path': 'ddd',
'deployment_ari_path': 'eee',
'aki_path': 'fff',
'ari_path': 'ggg',
}
config = pxe.build_pxe_config(**args)
self.assertThat(config, matchers.StartsWith('default deploy'))
# deploy bits are in the deploy section
start = config.index('label deploy')
end = config.index('label boot')
self.assertThat(config[start:end], matchers.MatchesAll(
matchers.Contains('kernel ddd'),
matchers.Contains('initrd=eee'),
matchers.Contains('deployment_id=aaa'),
matchers.Contains('deployment_key=bbb'),
matchers.Contains('iscsi_target_iqn=ccc'),
matchers.Not(matchers.Contains('kernel fff')),
))
# boot bits are in the boot section
start = config.index('label boot')
self.assertThat(config[start:], matchers.MatchesAll(
matchers.Contains('kernel fff'),
matchers.Contains('initrd=ggg'),
matchers.Not(matchers.Contains('kernel ddd')),
))
def test_build_network_config(self):
net = utils.get_test_network_info(1)
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertIn('eth1', config)
def test_build_network_config_dhcp(self):
self.config(
net_config_template='$pybasedir/ironic/'
'net-dhcp.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0][1]['ips'][0]['ip'] = '1.2.3.4'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet dhcp', config)
self.assertNotIn('address 1.2.3.4', config)
def test_build_network_config_static(self):
self.config(
net_config_template='$pybasedir/ironic/'
'net-static.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0][1]['ips'][0]['ip'] = '1.2.3.4'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('address 1.2.3.4', config)
def test_image_dir_path(self):
self.assertEqual(
pxe.get_image_dir_path(self.instance),
os.path.join(CONF.instances_path, 'instance-00000001'))
def test_image_file_path(self):
self.assertEqual(
pxe.get_image_file_path(self.instance),
os.path.join(
CONF.instances_path, 'instance-00000001', 'disk'))
def test_pxe_config_file_path(self):
self.instance['uuid'] = 'aaaa-bbbb-cccc'
self.assertEqual(
pxe.get_pxe_config_file_path(self.instance),
os.path.join(CONF.baremetal.tftp_root,
'aaaa-bbbb-cccc', 'config'))
def test_pxe_mac_path(self):
self.assertEqual(
pxe.get_pxe_mac_path('23:45:67:89:AB'),
os.path.join(CONF.baremetal.tftp_root,
'pxelinux.cfg', '01-23-45-67-89-ab'))
def test_get_instance_deploy_ids(self):
self.instance['extra_specs'] = {
'baremetal:deploy_kernel_id': 'aaaa',
'baremetal:deploy_ramdisk_id': 'bbbb',
}
self.config(deploy_kernel="fail", group='baremetal')
self.config(deploy_ramdisk="fail", group='baremetal')
self.assertEqual(
pxe.get_deploy_aki_id(self.instance), 'aaaa')
self.assertEqual(
pxe.get_deploy_ari_id(self.instance), 'bbbb')
def test_get_default_deploy_ids(self):
self.instance['extra_specs'] = {}
self.config(deploy_kernel="aaaa", group='baremetal')
self.config(deploy_ramdisk="bbbb", group='baremetal')
self.assertEqual(
pxe.get_deploy_aki_id(self.instance), 'aaaa')
self.assertEqual(
pxe.get_deploy_ari_id(self.instance), 'bbbb')
def test_get_partition_sizes(self):
# default "kinda.big" instance
sizes = pxe.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1024)
def test_swap_not_zero(self):
# override swap to 0
instance_type = utils.get_test_instance_type(self.context)
instance_type['swap'] = 0
self.instance = utils.get_test_instance(self.context, instance_type)
sizes = pxe.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1)
def test_get_tftp_image_info(self):
instance_type = utils.get_test_instance_type()
# Raises an exception when options are neither specified
# on the instance nor in configuration file
CONF.baremetal.deploy_kernel = None
CONF.baremetal.deploy_ramdisk = None
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, instance_type)
# Test that other non-true values also raise an exception
CONF.baremetal.deploy_kernel = ""
CONF.baremetal.deploy_ramdisk = ""
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, instance_type)
# Even if the instance includes kernel_id and ramdisk_id,
# we still need deploy_kernel_id and deploy_ramdisk_id.
# If those aren't present in instance[], and not specified in
# config file, then we raise an exception.
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, instance_type)
# If an instance doesn't specify deploy_kernel_id or deploy_ramdisk_id,
# but defaults are set in the config file, we should use those.
# Here, we confirm both that all four values were set
# and that the proper paths are getting set for all of them
CONF.baremetal.deploy_kernel = 'cccc'
CONF.baremetal.deploy_ramdisk = 'dddd'
base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
res = pxe.get_tftp_image_info(self.instance, instance_type)
expected = {
'kernel': ['aaaa', os.path.join(base, 'kernel')],
'ramdisk': ['bbbb', os.path.join(base, 'ramdisk')],
'deploy_kernel': ['cccc', os.path.join(base, 'deploy_kernel')],
'deploy_ramdisk': ['dddd',
os.path.join(base, 'deploy_ramdisk')],
}
self.assertEqual(res, expected)
# If deploy_kernel_id and deploy_ramdisk_id are specified on
# image extra_specs, this should override any default configuration.
# Note that it is passed on the 'instance' object, despite being
# inherited from the instance_types_extra_specs table.
extra_specs = {
'baremetal:deploy_kernel_id': 'eeee',
'baremetal:deploy_ramdisk_id': 'ffff',
}
instance_type['extra_specs'] = extra_specs
res = pxe.get_tftp_image_info(self.instance, instance_type)
self.assertEqual(res['deploy_kernel'][0], 'eeee')
self.assertEqual(res['deploy_ramdisk'][0], 'ffff')
# However, if invalid values are passed on the image extra_specs,
# this should still raise an exception.
extra_specs = {
'baremetal:deploy_kernel_id': '',
'baremetal:deploy_ramdisk_id': '',
}
instance_type['extra_specs'] = extra_specs
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, instance_type)
class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
def test_collect_mac_addresses(self):
self._create_node()
address_list = [nic['address'] for nic in self.nic_info]
address_list.sort()
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(macs, address_list)
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
instance_type = utils.get_test_instance_type()
extra_specs = {
'baremetal:deploy_kernel_id': 'cccc',
'baremetal:deploy_ramdisk_id': 'dddd',
}
instance_type['extra_specs'] = extra_specs
image_info = pxe.get_tftp_image_info(self.instance, instance_type)
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(os.path.join(CONF.baremetal.tftp_root,
self.instance['uuid'])).AndReturn(True)
for uuid, path in [image_info[label] for label in image_info]:
os.path.exists(path).AndReturn(True)
self.mox.ReplayAll()
self.driver._cache_tftp_images(
self.context, self.instance, image_info)
self.mox.VerifyAll()
def test_cache_image(self):
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(pxe.get_image_dir_path(self.instance)).\
AndReturn(True)
os.path.exists(pxe.get_image_file_path(self.instance)).\
AndReturn(True)
self.mox.ReplayAll()
image_meta = utils.get_test_image_info(
self.context, self.instance)
self.driver._cache_image(
self.context, self.instance, image_meta)
self.mox.VerifyAll()
def test_inject_into_image(self):
# NOTE(deva): we could also test this method by stubbing
# nova.virt.disk.api._inject_*_into_fs
self._create_node()
files = []
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
net_info = utils.get_test_network_info(1)
net = pxe.build_network_config(net_info)
admin_password = 'fake password'
self.mox.StubOutWithMock(disk_api, 'inject_data')
disk_api.inject_data(
admin_password=admin_password,
image=pxe.get_image_file_path(self.instance),
key='fake ssh key',
metadata=None,
partition=None,
net=net,
files=files, # this is what we're really testing
).AndReturn(True)
self.mox.ReplayAll()
self.driver._inject_into_image(
self.context, self.node, self.instance,
network_info=net_info,
admin_password=admin_password,
injected_files=None)
self.mox.VerifyAll()
class PXEPublicMethodsTestCase(BareMetalPXETestCase):
def test_cache_images(self):
self._create_node()
self.mox.StubOutWithMock(self.driver.virtapi, 'instance_type_get')
self.mox.StubOutWithMock(pxe, "get_tftp_image_info")
self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
self.mox.StubOutWithMock(self.driver, "_cache_image")
self.mox.StubOutWithMock(self.driver, "_inject_into_image")
self.driver.virtapi.instance_type_get(
self.context, self.instance['instance_type_id']).AndReturn({})
pxe.get_tftp_image_info(self.instance, {}).AndReturn([])
self.driver._cache_tftp_images(self.context, self.instance, [])
self.driver._cache_image(self.context, self.instance, [])
self.driver._inject_into_image(self.context, self.node, self.instance,
self.test_network_info, None, '')
self.mox.ReplayAll()
self.driver.cache_images(
self.context, self.node, self.instance,
admin_password='',
image_meta=[],
injected_files=None,
network_info=self.test_network_info,
)
self.mox.VerifyAll()
def test_destroy_images(self):
self._create_node()
self.mox.StubOutWithMock(utils, 'unlink_without_raise')
self.mox.StubOutWithMock(utils, 'rmtree_without_raise')
utils.unlink_without_raise(pxe.get_image_file_path(self.instance))
utils.rmtree_without_raise(pxe.get_image_dir_path(self.instance))
self.mox.ReplayAll()
self.driver.destroy_images(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_bootloader_passes_details(self):
self._create_node()
macs = [nic['address'] for nic in self.nic_info]
macs.sort()
image_info = {
'deploy_kernel': [None, 'aaaa'],
'deploy_ramdisk': [None, 'bbbb'],
'kernel': [None, 'cccc'],
'ramdisk': [None, 'dddd'],
}
self.instance['uuid'] = 'fake-uuid'
iqn = "iqn-%s" % self.instance['uuid']
pxe_config = 'this is a fake pxe config'
pxe_path = pxe.get_pxe_config_file_path(self.instance)
pxe.get_image_file_path(self.instance)
self.mox.StubOutWithMock(self.driver.virtapi, 'instance_type_get')
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
self.mox.StubOutWithMock(pxe, 'get_partition_sizes')
self.mox.StubOutWithMock(utils, 'random_alnum')
self.mox.StubOutWithMock(pxe, 'build_pxe_config')
self.mox.StubOutWithMock(utils, 'write_to_file')
self.mox.StubOutWithMock(utils, 'create_link_without_raise')
self.driver.virtapi.instance_type_get(
self.context, self.instance['instance_type_id']).AndReturn({})
pxe.get_tftp_image_info(self.instance, {}).AndReturn(image_info)
pxe.get_partition_sizes(self.instance).AndReturn((0, 0))
utils.random_alnum(32).AndReturn('alnum')
pxe.build_pxe_config(
self.node['id'], 'alnum', iqn,
'aaaa', 'bbbb', 'cccc', 'dddd').AndReturn(pxe_config)
utils.write_to_file(pxe_path, pxe_config)
for mac in macs:
utils.create_link_without_raise(
pxe_path, pxe.get_pxe_mac_path(mac))
self.mox.ReplayAll()
self.driver.activate_bootloader(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_and_deactivate_bootloader(self):
self._create_node()
instance_type = {
'extra_specs': {
'baremetal:deploy_kernel_id': 'eeee',
'baremetal:deploy_ramdisk_id': 'ffff',
}
}
self.instance['uuid'] = 'fake-uuid'
self.mox.StubOutWithMock(self.driver.virtapi, 'instance_type_get')
self.mox.StubOutWithMock(utils, 'write_to_file')
self.mox.StubOutWithMock(utils, 'create_link_without_raise')
self.mox.StubOutWithMock(utils, 'unlink_without_raise')
self.mox.StubOutWithMock(utils, 'rmtree_without_raise')
self.driver.virtapi.instance_type_get(
self.context, self.instance['instance_type_id']).AndReturn(
instance_type)
# create the config file
utils.write_to_file(mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root))
# unlink and link the 2 interfaces
for i in range(2):
utils.unlink_without_raise(mox.Or(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root)))
utils.create_link_without_raise(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root))
# unlink all 2 interfaces, 4 images, and the config file
for i in range(7):
utils.unlink_without_raise(mox.Or(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root)))
utils.rmtree_without_raise(mox.StrContains('fake-uuid'))
self.mox.ReplayAll()
# activate and deactivate the bootloader
# and check the deployment task_state in the database
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
self.driver.activate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is not None)
self.driver.deactivate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
self.mox.VerifyAll()
def test_deactivate_bootloader_for_nonexistent_instance(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
pxe_path = pxe.get_pxe_config_file_path(self.instance)
self.mox.StubOutWithMock(utils, 'unlink_without_raise')
self.mox.StubOutWithMock(utils, 'rmtree_without_raise')
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
extra_specs = dict(extra_specs={
'baremetal:deploy_ramdisk_id': 'ignore',
'baremetal:deploy_kernel_id': 'ignore'})
pxe.get_tftp_image_info(self.instance, extra_specs).\
AndRaise(exception.NovaException)
utils.unlink_without_raise(pxe_path)
self.driver._collect_mac_addresses(self.context, self.node).\
AndRaise(db_exc.DBError)
utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
self.mox.ReplayAll()
self.driver.deactivate_bootloader(
self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_node(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
self.config(pxe_deploy_timeout=1, group='baremetal')
db.bm_node_update(self.context, 1,
{'task_state': states.DEPLOYING,
'instance_uuid': 'fake-uuid'})
# test timeout
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
# test DEPLOYDONE
db.bm_node_update(self.context, 1,
{'task_state': states.DEPLOYDONE})
self.driver.activate_node(self.context, self.node, self.instance)
# test no deploy -- state is just ACTIVE
db.bm_node_update(self.context, 1,
{'task_state': states.ACTIVE})
self.driver.activate_node(self.context, self.node, self.instance)
# test node gone
db.bm_node_destroy(self.context, 1)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)

View File

@ -0,0 +1,96 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from ironic.common import exception
from ironic.common import images
from ironic.common import utils
from ironic.openstack.common import fileutils
from ironic.tests import base
class IronicImagesTestCase(base.TestCase):
def test_fetch_raw_image(self):
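# Drives fetch_to_raw() against three targets: a qcow2 that needs
# converting, an already-raw image, and an image with a backing file,
# which must be rejected.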
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
def fake_rename(old, new):
self.executes.append(('mv', old, new))
def fake_unlink(path):
self.executes.append(('rm', path))
def fake_rm_on_error(path):
self.executes.append(('rm', '-f', path))
def fake_qemu_img_info(path):
class FakeImgInfo(object):
pass
file_format = path.split('.')[-1]
if file_format == 'part':
file_format = path.split('.')[-2]
elif file_format == 'converted':
file_format = 'raw'
if 'backing' in path:
backing_file = 'backing'
else:
backing_file = None
FakeImgInfo.file_format = file_format
FakeImgInfo.backing_file = backing_file
return FakeImgInfo()
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os, 'rename', fake_rename)
self.stubs.Set(os, 'unlink', fake_unlink)
self.stubs.Set(images, 'fetch', lambda *_: None)
self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
self.stubs.Set(fileutils, 'delete_if_exists', fake_rm_on_error)
context = 'opaque context'
image_id = '4'
target = 't.qcow2'
self.executes = []
expected_commands = [('qemu-img', 'convert', '-O', 'raw',
't.qcow2.part', 't.qcow2.converted'),
('rm', 't.qcow2.part'),
('mv', 't.qcow2.converted', 't.qcow2')]
images.fetch_to_raw(context, image_id, target)
self.assertEqual(self.executes, expected_commands)
target = 't.raw'
self.executes = []
expected_commands = [('mv', 't.raw.part', 't.raw')]
images.fetch_to_raw(context, image_id, target)
self.assertEqual(self.executes, expected_commands)
target = 'backing.qcow2'
self.executes = []
expected_commands = [('rm', '-f', 'backing.qcow2.part')]
self.assertRaises(exception.ImageUnacceptable,
images.fetch_to_raw,
context, image_id, target)
self.assertEqual(self.executes, expected_commands)
del self.executes
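
# Taken together, the expected command lists above pin down the flow this
# test assumes for images.fetch_to_raw(): download to a ".part" file, inspect
# it with qemu-img, refuse images that declare a backing file, convert
# anything that is not already raw, then rename the result into place. A
# rough sketch of that flow under those assumptions (argument handling
# simplified, force_raw_images taken as enabled; not the module's exact code):
def _fetch_to_raw_sketch(context, image_id, path):
    path_tmp = "%s.part" % path
    images.fetch(context, image_id, path_tmp)
    data = images.qemu_img_info(path_tmp)
    if data.backing_file is not None:
        # Backing files are not acceptable for a flat raw deploy image.
        fileutils.delete_if_exists(path_tmp)
        raise exception.ImageUnacceptable(
            image_id=image_id,
            reason="fmt=%s backed by: %s" % (data.file_format,
                                             data.backing_file))
    if data.file_format != "raw":
        # Convert to raw in a staging file, then move it into place.
        staged = "%s.converted" % path
        utils.execute('qemu-img', 'convert', '-O', 'raw', path_tmp, staged)
        os.unlink(path_tmp)
        os.rename(staged, path)
    else:
        os.rename(path_tmp, path)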

View File

@ -22,3 +22,4 @@ websockify>=0.5.1,<0.6
oslo.config>=1.1.0
pecan>=0.2.0
WSME>=0.5b2
Cheetah>=2.4.4