Re-add support for configdrive

Re-add support for configdrive usage that was previously
dropped.

This commit adds functional tests for configdrive usage
via the nova-lxd tempest plugin.

Change-Id: Ie2e857437df3032e0daa23ed302febb9497fa7d1
Signed-off-by: Chuck Short <chuck.short@canonical.com>
This commit is contained in:
Chuck Short 2016-09-21 10:37:56 -04:00
parent 6283816360
commit 6d365ac537
4 changed files with 277 additions and 3 deletions

View File

@ -77,6 +77,7 @@ class LXDDriverTest(test.NoDBTestCase):
self.CONF = self.CONF_patcher.start()
self.CONF.instances_path = '/path/to/instances'
self.CONF.my_ip = '0.0.0.0'
self.CONF.config_drive_format = 'iso9660'
# NOTE: mock out fileutils to ensure that unit tests don't try
# to manipulate the filesystem (breaks in package builds).
@ -136,10 +137,12 @@ class LXDDriverTest(test.NoDBTestCase):
self.assertEqual(['mock-instance-1', 'mock-instance-2'], instances)
def test_spawn(self):
@mock.patch('nova.virt.configdrive.required_by')
def test_spawn(self, configdrive):
def container_get(*args, **kwargs):
raise lxdcore_exceptions.LXDAPIException(MockResponse(404))
self.client.containers.get.side_effect = container_get
configdrive.return_value = False
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(ctx, name='test')
@ -199,7 +202,60 @@ class LXDDriverTest(test.NoDBTestCase):
ctx, instance, image_meta, injected_files, admin_password,
None, None)
def test_spawn_profile_fail(self):
@mock.patch('nova.virt.configdrive.required_by')
def test_spawn_with_configdrive(self, configdrive):
    """spawn builds and attaches a config drive when one is required.

    When ``configdrive.required_by`` returns True, spawn must build the
    drive via ``_add_configdrive`` and update the container profile with
    the new disk device (checked via ``client.profiles.get``).
    """
    def container_get(*args, **kwargs):
        raise lxdcore_exceptions.LXDAPIException(MockResponse(404))
    self.client.containers.get.side_effect = container_get
    configdrive.return_value = True
    ctx = context.get_admin_context()
    instance = fake_instance.fake_instance_obj(ctx, name='test')
    image_meta = mock.Mock()
    injected_files = mock.Mock()
    admin_password = mock.Mock()
    network_info = [mock.Mock()]
    block_device_info = mock.Mock()

    lxd_driver = driver.LXDDriver(None)
    lxd_driver.init_host(None)
    # XXX: rockstar (6 Jul 2016) - There are a number of XXX comments
    # related to these calls in spawn. They require some work before we
    # can take out these mocks and follow the real codepaths.
    lxd_driver.setup_image = mock.Mock()
    lxd_driver.vif_driver = mock.Mock()
    lxd_driver.firewall_driver = mock.Mock()
    lxd_driver._add_ephemeral = mock.Mock()
    lxd_driver.create_profile = mock.Mock(return_value={
        'name': instance.name, 'config': {}, 'devices': {}})
    lxd_driver._add_configdrive = mock.Mock()

    lxd_driver.spawn(
        ctx, instance, image_meta, injected_files, admin_password,
        network_info, block_device_info)

    lxd_driver.setup_image.assert_called_once_with(
        ctx, instance, image_meta)
    lxd_driver.vif_driver.plug.assert_called_once_with(
        instance, network_info[0])
    lxd_driver.create_profile.assert_called_once_with(
        instance, network_info, block_device_info)
    fd = lxd_driver.firewall_driver
    fd.setup_basic_filtering.assert_called_once_with(
        instance, network_info)
    fd.prepare_instance_filter.assert_called_once_with(
        instance, network_info)
    fd.apply_instance_filter.assert_called_once_with(
        instance, network_info)
    lxd_driver._add_ephemeral.assert_called_once_with(
        block_device_info, lxd_driver.client.host_info, instance)
    configdrive.assert_called_once_with(instance)
    # _add_configdrive is mocked out above; the original test never
    # verified spawn actually invoked it. Assert the call and its
    # arguments so the config-drive codepath is really exercised.
    lxd_driver._add_configdrive.assert_called_once_with(
        ctx, instance, injected_files, admin_password, network_info)
    lxd_driver.client.profiles.get.assert_called_once_with(instance.name)
@mock.patch('nova.virt.configdrive.required_by')
def test_spawn_profile_fail(self, configdrive):
"""Cleanup is called when profile creation fails."""
def container_get(*args, **kwargs):
raise lxdcore_exceptions.LXDAPIException(MockResponse(404))
@ -207,6 +263,7 @@ class LXDDriverTest(test.NoDBTestCase):
def side_effect(*args, **kwargs):
raise lxdcore_exceptions.LXDAPIException(MockResponse(200))
self.client.containers.get.side_effect = container_get
configdrive.return_value = False
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(ctx, name='test')
image_meta = mock.Mock()
@ -230,7 +287,8 @@ class LXDDriverTest(test.NoDBTestCase):
lxd_driver.cleanup.assert_called_once_with(
ctx, instance, network_info, block_device_info)
def test_spawn_container_fail(self):
@mock.patch('nova.virt.configdrive.required_by')
def test_spawn_container_fail(self, configdrive):
"""Cleanup is called when container creation fails."""
def container_get(*args, **kwargs):
raise lxdcore_exceptions.LXDAPIException(MockResponse(404))
@ -238,6 +296,7 @@ class LXDDriverTest(test.NoDBTestCase):
def side_effect(*args, **kwargs):
raise lxdcore_exceptions.LXDAPIException(MockResponse(200))
self.client.containers.get.side_effect = container_get
configdrive.return_value = False
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(ctx, name='test')
image_meta = mock.Mock()

View File

@ -38,6 +38,7 @@ from nova.network import model as network_model
from nova import objects
from nova.virt import driver
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
@ -49,7 +50,9 @@ from nova.virt.lxd import vif as lxd_vif
from nova.virt.lxd import session
from nova.virt.lxd import utils as container_utils
from nova.api.metadata import base as instance_metadata
from nova.compute import arch
from nova.virt import configdrive
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import vm_mode
@ -312,6 +315,22 @@ class LXDDriver(driver.ComputeDriver):
# and hasn't really been audited. It may need a cleanup
lxd_config = self.client.host_info
self._add_ephemeral(block_device_info, lxd_config, instance)
if configdrive.required_by(instance):
configdrive_path = self._add_configdrive(
context, instance,
injected_files, admin_password,
network_info)
profile = self.client.profiles.get(instance.name)
config_drive = {
'configdrive': {
'path': '/var/lib/cloud/data',
'source': configdrive_path,
'type': 'disk',
}
}
profile.devices.update(config_drive)
profile.save()
container.start()
@ -1069,6 +1088,72 @@ class LXDDriver(driver.ComputeDriver):
utils.execute('umount', lvm_path, run_as_root=True)
utils.execute('lvremove', '-f', lvm_path, run_as_root=True)
def _add_configdrive(self, context, instance,
                     injected_files, admin_password, network_info):
    """Create configdrive for the instance.

    Builds an iso9660 config drive for ``instance``, then mounts it and
    copies its contents into a plain directory under the instance path,
    so the container can bind-mount the directory without keeping the
    ISO mounted for the instance's whole life cycle.

    :param context: nova request context used for metadata generation.
    :param instance: the nova instance object being spawned.
    :param injected_files: files to inject into the drive contents.
    :param admin_password: if set, exposed as ``admin_pass`` metadata.
    :param network_info: network metadata written to the drive.
    :returns: path of the populated configdrive directory.
    :raises: ``exception.ConfigDriveUnsupportedFormat`` if the deployment
        is configured for any format other than iso9660.
    """
    # Only iso9660 is supported; fail early on any other configuration.
    if CONF.config_drive_format != 'iso9660':
        raise exception.ConfigDriveUnsupportedFormat(
            format=CONF.config_drive_format)

    # NOTE(review): 'volatile.last_state.idmap' is a comma-separated
    # uid/gid mapping string; index 2, field 1 presumably yields the
    # host-side id that owns the container's files — TODO confirm
    # against the LXD idmap format.
    container = self.client.containers.get(instance.name)
    container_id_map = container.config[
        'volatile.last_state.idmap'].split(',')
    storage_id = container_id_map[2].split(':')[1]

    extra_md = {}
    if admin_password:
        extra_md['admin_pass'] = admin_password

    inst_md = instance_metadata.InstanceMetadata(
        instance, content=injected_files, extra_md=extra_md,
        network_info=network_info, request_context=context)

    # The ISO is written inside the instance directory; it is only an
    # intermediate artifact — its contents are copied out below.
    iso_path = os.path.join(
        container_utils.get_instance_dir(instance.name),
        'configdrive.iso')

    with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
        try:
            cdb.make_drive(iso_path)
        except processutils.ProcessExecutionError as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Creating config drive failed with '
                              'error: %s'),
                          e, instance=instance)

    configdrive_dir = \
        container_utils.get_container_configdrive(instance.name)
    if not os.path.exists(configdrive_dir):
        fileutils.ensure_tree(configdrive_dir)

    # Loop-mount the ISO read-only as the current user, copy everything
    # into configdrive_dir, then hand ownership to the container's
    # mapped id so the guest can read it.
    with utils.tempdir() as tmpdir:
        mounted = False
        try:
            _, err = utils.execute('mount',
                                   '-o',
                                   'loop,uid=%d,gid=%d' % (os.getuid(),
                                                           os.getgid()),
                                   iso_path, tmpdir,
                                   run_as_root=True)
            mounted = True

            # Copy and adjust the files from the ISO so that we
            # dont have the ISO mounted during the life cycle of the
            # instance and the directory can be removed once the instance
            # is terminated
            for ent in os.listdir(tmpdir):
                shutil.copytree(os.path.join(tmpdir, ent),
                                os.path.join(configdrive_dir, ent))
            utils.execute('chmod', '-R', '775', configdrive_dir,
                          run_as_root=True)
            utils.execute('chown', '-R', storage_id, configdrive_dir,
                          run_as_root=True)
        finally:
            # Always unmount, even if the copy or chown failed, so no
            # stale loop mount is left behind.
            if mounted:
                utils.execute('umount', tmpdir, run_as_root=True)

    return configdrive_dir
def _save_lxd_image(self, instance, image_id):
"""Creates an LXD image from the LXD container

View File

@ -52,3 +52,7 @@ def get_container_rootfs(instance):
def get_container_rescue(instance):
    """Return the rootfs path of *instance*'s rescue container."""
    containers_dir = os.path.join(CONF.lxd.root_dir, 'containers')
    return os.path.join(containers_dir, instance, 'rootfs')
def get_container_configdrive(instance):
    """Return the configdrive directory path for *instance*."""
    instance_dir = os.path.join(CONF.instances_path, instance)
    return os.path.join(instance_dir, 'configdrive')

View File

@ -0,0 +1,126 @@
# Copyright 2016 Canonical Ltd
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import test_utils
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class TestServerBasicOps(manager.ScenarioTest):
"""The test suite for server basic operations
This smoke test case follows this basic set of operations:
* Create a keypair for use in launching an instance
* Create a security group to control network access in instance
* Add simple permissive rules to the security group
* Launch an instance
* Perform ssh to instance
* Verify metadata service
* Verify metadata on config_drive
* Terminate the instance
"""
def setUp(self):
    """Cache the configuration values the scenario relies on."""
    super(TestServerBasicOps, self).setUp()
    compute_conf = CONF.compute
    validation_conf = CONF.validation
    self.image_ref = compute_conf.image_ref
    self.flavor_ref = compute_conf.flavor_ref
    self.run_ssh = validation_conf.run_validation
    self.ssh_user = validation_conf.image_ssh_user
def verify_ssh(self, keypair):
    """Attach a floating IP and open an ssh session to the instance."""
    if not self.run_ssh:
        return
    # Obtain a floating IP
    self.fip = self.create_floating_ip(self.instance)['ip']
    # Check ssh
    self.ssh_client = self.get_remote_client(
        ip_address=self.fip,
        username=self.ssh_user,
        private_key=keypair['private_key'])
def verify_metadata(self):
    """Poll the EC2 metadata service from inside the guest over ssh."""
    if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
        # Verify metadata service
        md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'

        def exec_cmd_and_verify_output():
            # Probe run by call_until_true; a truthy return stops the
            # retry loop, None means "try again".
            cmd = 'curl ' + md_url
            result = self.ssh_client.exec_command(cmd)
            if result:
                msg = ('Failed while verifying metadata on server. Result '
                       'of command "%s" is NOT "%s".' % (cmd, self.fip))
                self.assertEqual(self.fip, result, msg)
                # NOTE(review): indentation reconstructed from a diff
                # rendering — this return is assumed to be inside the
                # `if result:` branch (as in upstream tempest), so an
                # empty curl response is retried; confirm.
                return 'Verification is successful!'

        if not test_utils.call_until_true(exec_cmd_and_verify_output,
                                          CONF.compute.build_timeout,
                                          CONF.compute.build_interval):
            raise exceptions.TimeoutException('Timed out while waiting to '
                                              'verify metadata on server. '
                                              '%s is empty.' % md_url)
def verify_metadata_on_config_drive(self):
    """Check the config drive exposes the metadata the server booted with."""
    if not (self.run_ssh and CONF.compute_feature_enabled.config_drive):
        return
    # Verify metadata on config_drive
    cmd_md = 'cat /var/lib/cloud/data/openstack/latest/meta_data.json'
    metadata = json.loads(self.ssh_client.exec_command(cmd_md))
    self.assertIn('meta', metadata)
    msg = ('Failed while verifying metadata on config_drive on server.'
           ' Result of command "%s" is NOT "%s".' % (cmd_md, self.md))
    self.assertEqual(self.md, metadata['meta'], msg)
def verify_networkdata_on_config_drive(self):
    """Check the config drive exposes well-formed network_data.json."""
    if not (self.run_ssh and CONF.compute_feature_enabled.config_drive):
        return
    # Verify network data on config_drive
    cmd_md = 'cat /var/lib/cloud/data/openstack/latest/network_data.json'
    network_data = json.loads(self.ssh_client.exec_command(cmd_md))
    for section in ('services', 'links', 'networks'):
        self.assertIn(section, network_data)
    # TODO(clarkb) construct network_data from known network
    # instance info and do direct comparison.
@test.idempotent_id('7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_server_basic_ops(self):
    """Boot a server with a config drive and verify its metadata.

    Creates keypair and security group, boots the instance with the
    config drive enabled, verifies ssh/metadata/config-drive contents,
    then deletes the server.
    """
    keypair = self.create_keypair()
    self.security_group = self._create_security_group()
    security_groups = [{'name': self.security_group['name']}]
    # Metadata the later verify_* steps compare against.
    self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
    server = self.create_server(
        image_id=self.image_ref,
        flavor=self.flavor_ref,
        key_name=keypair['name'],
        security_groups=security_groups,
        config_drive=CONF.compute_feature_enabled.config_drive,
        metadata=self.md,
        wait_until='ACTIVE')
    self.instance = server
    self.verify_ssh(keypair)
    self.verify_metadata()
    self.verify_metadata_on_config_drive()
    self.verify_networkdata_on_config_drive()
    self.servers_client.delete_server(self.instance['id'])