Add support for resize and cold migration of emulated TPM files

When using emulated TPM, libvirt will store the persistent TPM data
under '/var/lib/libvirt/swtpm/<instance_uuid>', which is owned by the
"tss" or "root" user depending on how libvirt is configured (the parent
directory, '/var/lib/libvirt/swtpm', is always owned by root). When
doing a resize or a cold migration between nodes, this data needs to be
copied to the other node to ensure that the TPM data is not lost.
Libvirt won't do this automatically for us since cold migrations, or
offline migrations in libvirt lingo, do not currently support "copying
non-shared storage or other file based storages", which includes the
vTPM device [1].

To complicate things further, even if migration/resize is supported,
only the user that nova-compute runs as is guaranteed to have SSH keys
set up for passwordless access, and that user is only guaranteed to be
able to copy files to the instance directory on the dest node.

The solution is to have nova (via privsep) copy the TPM files into the
local instance directory on the source host and change their ownership.
This is handled through an additional call in
'migrate_disk_and_power_off'. Running as itself, nova then copies the
files into the instance directory on the dest host. Nova then (once
again, via privsep) changes the ownership back and moves the files to
where libvirt expects to find them. This second step is handled by
'finish_migration'. Confirming the resize will result in the original
TPM data at '/var/lib/libvirt/swtpm' being deleted by libvirt and the
copied TPM data in the instance directory being cleaned up by nova (via
'confirm_migration'), while reverting will result in the same happening
on the other host.
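
For orientation, the following is a rough, simplified sketch of the two
helpers this flow relies on (the real implementations live in
nova/virt/libvirt/utils.py and are exercised by the new unit tests
below; privsep plumbing and error handling are trimmed, and
'copy_image' stands in for the existing scp/cp helper in that module):

    import grp
    import os
    import pwd

    from oslo_utils import fileutils

    import nova.conf
    from nova import exception
    import nova.privsep.path

    CONF = nova.conf.CONF
    VTPM_DIR = '/var/lib/libvirt/swtpm'

    def save_and_migrate_vtpm_dir(instance_uuid, inst_base_resize,
                                  inst_base, dest, on_execute,
                                  on_completion):
        # Source host, called from 'migrate_disk_and_power_off'.
        vtpm_dir = os.path.join(VTPM_DIR, instance_uuid)
        if not os.path.exists(vtpm_dir):
            return  # the instance has no emulated TPM data

        swtpm_dir = os.path.join(inst_base_resize, 'swtpm')
        fileutils.ensure_tree(swtpm_dir)
        nova.privsep.path.move_tree(vtpm_dir, swtpm_dir)
        # Hand the files to the nova user so they can be copied to the
        # dest node.
        nova.privsep.path.chown(
            swtpm_dir, os.geteuid(), os.getegid(), recursive=True)
        copy_image(swtpm_dir, inst_base, host=dest,
                   on_execute=on_execute, on_completion=on_completion)

    def restore_vtpm_dir(swtpm_dir):
        # Dest host, called from 'finish_migration'; hand the files
        # back to the swtpm user/group and move them to where libvirt
        # expects to find them.
        if not os.path.isdir(swtpm_dir):
            raise exception.Invalid(
                'vTPM directory %s is not valid' % swtpm_dir)
        if not os.path.exists(VTPM_DIR):
            nova.privsep.path.makedirs(VTPM_DIR)
            nova.privsep.path.chmod(VTPM_DIR, 0o711)
        uid = pwd.getpwnam(CONF.libvirt.swtpm_user).pw_uid
        gid = grp.getgrnam(CONF.libvirt.swtpm_group).gr_gid
        nova.privsep.path.chown(swtpm_dir, uid, gid, recursive=True)
        nova.privsep.path.move_tree(swtpm_dir, VTPM_DIR)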

Part of blueprint add-emulated-virtual-tpm

[1] https://libvirt.org/migration.html#offline

Change-Id: I9b053919bb499c308912c8c9bff4c1fc396c1193
Signed-off-by: Chris Friesen <chris.friesen@windriver.com>
Co-authored-by: Stephen Finucane <stephenfin@redhat.com>
Stephen Finucane 2020-08-24 20:08:26 +01:00
parent b2fbaa8767
commit eb819c8c63
12 changed files with 713 additions and 195 deletions


@ -81,7 +81,6 @@ class MigrateServerController(wsgi.Controller):
except ( except (
exception.InstanceIsLocked, exception.InstanceIsLocked,
exception.InstanceNotReady, exception.InstanceNotReady,
exception.OperationNotSupportedForVTPM,
exception.ServiceUnavailable, exception.ServiceUnavailable,
) as e: ) as e:
raise exc.HTTPConflict(explanation=e.format_message()) raise exc.HTTPConflict(explanation=e.format_message())


@ -968,7 +968,6 @@ class ServersController(wsgi.Controller):
exception.InstanceIsLocked, exception.InstanceIsLocked,
exception.InstanceNotReady, exception.InstanceNotReady,
exception.MixedInstanceNotSupportByComputeService, exception.MixedInstanceNotSupportByComputeService,
exception.OperationNotSupportedForVTPM,
exception.ServiceUnavailable, exception.ServiceUnavailable,
) as e: ) as e:
raise exc.HTTPConflict(explanation=e.format_message()) raise exc.HTTPConflict(explanation=e.format_message())


@ -4015,31 +4015,6 @@ class API(base.Base):
if same_instance_type and flavor_id: if same_instance_type and flavor_id:
raise exception.CannotResizeToSameFlavor() raise exception.CannotResizeToSameFlavor()
# NOTE(stephenfin): We use this instead of the 'reject_vtpm_instances'
# decorator since the operation can differ depending on args, and for
# resize we have two flavors to worry about
if same_instance_type:
if hardware.get_vtpm_constraint(
current_instance_type, instance.image_meta,
):
raise exception.OperationNotSupportedForVTPM(
instance_uuid=instance.uuid,
operation=instance_actions.MIGRATE)
else:
if hardware.get_vtpm_constraint(
current_instance_type, instance.image_meta,
):
raise exception.OperationNotSupportedForVTPM(
instance_uuid=instance.uuid,
operation=instance_actions.RESIZE)
if hardware.get_vtpm_constraint(
new_instance_type, instance.image_meta,
):
raise exception.OperationNotSupportedForVTPM(
instance_uuid=instance.uuid,
operation=instance_actions.RESIZE)
# ensure there is sufficient headroom for upsizes # ensure there is sufficient headroom for upsizes
if flavor_id: if flavor_id:
self._check_quota_for_upsize(context, instance, self._check_quota_for_upsize(context, instance,


@ -1429,6 +1429,40 @@ libvirt_vtpm_opts = [
default=False, default=False,
help=""" help="""
Enable emulated TPM (Trusted Platform Module) in guests. Enable emulated TPM (Trusted Platform Module) in guests.
"""),
cfg.StrOpt('swtpm_user',
default='tss',
help="""
User that swtpm binary runs as.
When using emulated TPM, the ``swtpm`` binary will run to emulate a TPM
device. The user this binary runs as depends on libvirt configuration, with
``tss`` being the default.
In order to support cold migration and resize, nova needs to know what user
the swtpm binary is running as in order to ensure that files get the proper
ownership after being moved between nodes.
Related options:
* ``swtpm_group`` must also be set.
"""),
cfg.StrOpt('swtpm_group',
default='tss',
help="""
Group that swtpm binary runs as.
When using emulated TPM, the ``swtpm`` binary will run to emulate a TPM
device. The user this binary runs as depends on libvirt configuration, with
``tss`` being the default.
In order to support cold migration and resize, nova needs to know what group
the swtpm binary is running as in order to ensure that files get the proper
ownership after being moved between nodes.
Related options:
* ``swtpm_user`` must also be set.
"""), """),
] ]
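
As a hypothetical example (values illustrative, not defaults), a
deployment where libvirt runs swtpm under a non-default account would
pair these options in nova.conf on the compute hosts:

    [libvirt]
    swtpm_enabled = True
    swtpm_user = swtpm
    swtpm_group = swtpm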


@ -14,11 +14,9 @@
# under the License. # under the License.
import mock import mock
import shutil
from castellan.common.objects import passphrase from castellan.common.objects import passphrase
from castellan.key_manager import key_manager from castellan.key_manager import key_manager
import fixtures
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import uuidutils from oslo_utils import uuidutils
from oslo_utils import versionutils from oslo_utils import versionutils
@ -129,14 +127,12 @@ class VTPMServersTest(base.ServersTestBase):
super().setUp() super().setUp()
original_which = shutil.which # mock the '_check_vtpm_support' function which validates things like
# the presence of users on the host, none of which makes sense here
def which(cmd, *args, **kwargs): _p = mock.patch(
if cmd == 'swtpm': 'nova.virt.libvirt.driver.LibvirtDriver._check_vtpm_support')
return True self.mock_conn = _p.start()
return original_which(cmd, *args, **kwargs) self.addCleanup(_p.stop)
self.useFixture(fixtures.MonkeyPatch('shutil.which', which))
self.key_mgr = crypto._get_key_manager() self.key_mgr = crypto._get_key_manager()
@ -287,9 +283,26 @@ class VTPMServersTest(base.ServersTestBase):
'.migrate_disk_and_power_off', return_value='{}', '.migrate_disk_and_power_off', return_value='{}',
): ):
# resize the server to a new flavor *with* vTPM # resize the server to a new flavor *with* vTPM
self.assertRaises( server = self._resize_server(server, flavor_id=flavor_id)
client.OpenStackApiException,
self._resize_server, server, flavor_id=flavor_id) # ensure our instance's system_metadata field and key manager inventory
# is updated to reflect the new vTPM requirement
self.assertInstanceHasSecret(server)
# revert the instance rather than confirming it, and ensure the secret
# is correctly cleaned up
# TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
# probably be less...dumb
with mock.patch(
'nova.virt.libvirt.driver.LibvirtDriver'
'.migrate_disk_and_power_off', return_value='{}',
):
# revert back to the old flavor *without* vTPM
server = self._revert_resize(server)
# ensure we delete the new key since we no longer need it
self.assertInstanceHasNoSecret(server)
def test_resize_server__vtpm_to_no_vtpm(self): def test_resize_server__vtpm_to_no_vtpm(self):
self.computes = {} self.computes = {}
@ -313,10 +326,26 @@ class VTPMServersTest(base.ServersTestBase):
'.migrate_disk_and_power_off', return_value='{}', '.migrate_disk_and_power_off', return_value='{}',
): ):
# resize the server to a new flavor *without* vTPM # resize the server to a new flavor *without* vTPM
# TODO(stephenfin): Add support for this operation server = self._resize_server(server, flavor_id=flavor_id)
self.assertRaises(
client.OpenStackApiException, # ensure we still have the key for the vTPM device in storage in case
self._resize_server, server, flavor_id=flavor_id) # we revert
self.assertInstanceHasSecret(server)
# confirm the instance and ensure the secret is correctly cleaned up
# TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
# probably be less...dumb
with mock.patch(
'nova.virt.libvirt.driver.LibvirtDriver'
'.migrate_disk_and_power_off', return_value='{}',
):
# revert back to the old flavor *with* vTPM
server = self._confirm_resize(server)
# ensure we have finally deleted the key for the vTPM device since
# there is no going back now
self.assertInstanceHasNoSecret(server)
def test_migrate_server(self): def test_migrate_server(self):
self.computes = {} self.computes = {}
@ -337,10 +366,10 @@ class VTPMServersTest(base.ServersTestBase):
'.migrate_disk_and_power_off', return_value='{}', '.migrate_disk_and_power_off', return_value='{}',
): ):
# cold migrate the server # cold migrate the server
# TODO(stephenfin): Add support for this operation self._migrate_server(server)
self.assertRaises(
client.OpenStackApiException, # ensure nothing has changed
self._migrate_server, server) self.assertInstanceHasSecret(server)
def test_live_migrate_server(self): def test_live_migrate_server(self):
self.computes = {} self.computes = {}


@ -138,11 +138,6 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
allowed=0) allowed=0)
self._test_migrate_exception(exc_info, webob.exc.HTTPForbidden) self._test_migrate_exception(exc_info, webob.exc.HTTPForbidden)
def test_migrate_vtpm_not_supported(self):
exc_info = exception.OperationNotSupportedForVTPM(
instance_uuid=uuids.instance, operation='foo')
self._test_migrate_exception(exc_info, webob.exc.HTTPConflict)
def _test_migrate_live_succeeded(self, param): def _test_migrate_live_succeeded(self, param):
instance = self._stub_instance_get() instance = self._stub_instance_get()


@ -856,17 +856,6 @@ class ServerActionsControllerTestV21(test.TestCase):
self.controller._action_resize, self.controller._action_resize,
self.req, FAKE_UUID, body=body) self.req, FAKE_UUID, body=body)
@mock.patch('nova.compute.api.API.resize')
def test_resize__vtpm_rejected(self, mock_resize):
"""Test that 'OperationNotSupportedForVTPM' exception is translated."""
mock_resize.side_effect = exception.OperationNotSupportedForVTPM(
instance_uuid=uuids.instance, operation='foo')
body = {'resize': {'flavorRef': 'http://localhost/3'}}
self.assertRaises(
webob.exc.HTTPConflict,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_confirm_resize_server(self): def test_confirm_resize_server(self):
body = dict(confirmResize=None) body = dict(confirmResize=None)


@ -66,7 +66,6 @@ from nova.tests.unit import matchers
from nova.tests.unit.objects import test_flavor from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_migration from nova.tests.unit.objects import test_migration
from nova import utils from nova import utils
from nova.virt import hardware
from nova.volume import cinder from nova.volume import cinder
@ -2137,35 +2136,6 @@ class _ComputeAPIUnitTestMixIn(object):
project_values={'cores': 1, 'ram': 2560}, project_values={'cores': 1, 'ram': 2560},
project_id=fake_inst.project_id, user_id=fake_inst.user_id) project_id=fake_inst.project_id, user_id=fake_inst.user_id)
@mock.patch(
'nova.compute.api.API.get_instance_host_status',
new=mock.Mock(return_value=fields_obj.HostStatus.UP))
@mock.patch(
'nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
def test_resize__with_vtpm(self, mock_get_flavor):
"""Ensure resizes are rejected if either flavor requests vTPM."""
fake_inst = self._create_instance_obj()
current_flavor = fake_inst.flavor
new_flavor = self._create_flavor(
id=200, flavorid='new-flavor-id', name='new_flavor',
disabled=False, extra_specs={'hw:tpm_version': '2.0'})
mock_get_flavor.return_value = new_flavor
orig_get_vtpm_constraint = hardware.get_vtpm_constraint
with mock.patch.object(hardware, 'get_vtpm_constraint') as get_vtpm:
get_vtpm.side_effect = orig_get_vtpm_constraint
self.assertRaises(
exception.OperationNotSupportedForVTPM,
self.compute_api.resize,
self.context, fake_inst, flavor_id=new_flavor.flavorid)
get_vtpm.assert_has_calls([
mock.call(current_flavor, mock.ANY),
mock.call(new_flavor, mock.ANY),
])
@mock.patch('nova.compute.api.API.get_instance_host_status', @mock.patch('nova.compute.api.API.get_instance_host_status',
new=mock.Mock(return_value=fields_obj.HostStatus.UP)) new=mock.Mock(return_value=fields_obj.HostStatus.UP))
@mock.patch('nova.compute.utils.is_volume_backed_instance', @mock.patch('nova.compute.utils.is_volume_backed_instance',
@ -2214,28 +2184,6 @@ class _ComputeAPIUnitTestMixIn(object):
self._test_migrate(host_name='target_host', self._test_migrate(host_name='target_host',
allow_cross_cell_resize=True) allow_cross_cell_resize=True)
@mock.patch(
'nova.compute.api.API.get_instance_host_status',
new=mock.Mock(return_value=fields_obj.HostStatus.UP))
@mock.patch(
'nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
def test_migrate__with_vtpm(self):
"""Ensure migrations are rejected if instance uses vTPM."""
flavor = self._create_flavor(
extra_specs={'hw:tpm_version': '2.0'})
instance = self._create_instance_obj(flavor=flavor)
orig_get_vtpm_constraint = hardware.get_vtpm_constraint
with mock.patch.object(hardware, 'get_vtpm_constraint') as get_vtpm:
get_vtpm.side_effect = orig_get_vtpm_constraint
self.assertRaises(
exception.OperationNotSupportedForVTPM,
self.compute_api.resize,
self.context, instance)
get_vtpm.assert_called_once_with(flavor, mock.ANY)
@mock.patch('nova.compute.api.API.get_instance_host_status', @mock.patch('nova.compute.api.API.get_instance_host_status',
new=mock.Mock(return_value=fields_obj.HostStatus.UP)) new=mock.Mock(return_value=fields_obj.HostStatus.UP))
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host', @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host',


@ -1527,7 +1527,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
exc = self.assertRaises(exception.InvalidConfiguration, exc = self.assertRaises(exception.InvalidConfiguration,
drvr.init_host, 'dummyhost') drvr.init_host, 'dummyhost')
self.assertIn("vTPM support requires '[libvirt] virt_type' of 'qemu' " self.assertIn("vTPM support requires '[libvirt] virt_type' of 'qemu' "
"or 'kvm'; found lxc.", six.text_type(exc)) "or 'kvm'; found 'lxc'.", six.text_type(exc))
@mock.patch.object(host.Host, 'has_min_version') @mock.patch.object(host.Host, 'has_min_version')
def test__check_vtpm_support_old_qemu(self, mock_version): def test__check_vtpm_support_old_qemu(self, mock_version):
@ -1570,9 +1570,68 @@ class LibvirtConnTestCase(test.NoDBTestCase,
[mock.call('swtpm_setup'), mock.call('swtpm')], [mock.call('swtpm_setup'), mock.call('swtpm')],
) )
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('shutil.which')
@mock.patch('pwd.getpwnam')
def test__check_vtpm_support_invalid_user(
self, mock_getpwnam, mock_which, mock_version,
):
"""Test checking for vTPM support when the configured user is
invalid.
"""
self.flags(
swtpm_user='lionel', swtpm_enabled=True, virt_type='kvm',
group='libvirt')
mock_which.return_value = True
mock_getpwnam.side_effect = KeyError
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
exc = self.assertRaises(
exception.InvalidConfiguration,
drvr.init_host, "dummyhost")
self.assertIn(
"The user configured in '[libvirt] swtpm_user' does not exist "
"on this host; expected 'lionel'.",
str(exc),
)
mock_getpwnam.assert_called_with('lionel')
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch('shutil.which')
@mock.patch('pwd.getpwnam')
@mock.patch('grp.getgrnam')
def test__check_vtpm_support_invalid_group(
self, mock_getgrnam, mock_getpwnam, mock_which, mock_version,
):
"""Test checking for vTPM support when the configured group is
invalid.
"""
self.flags(
swtpm_group='admins', swtpm_enabled=True, virt_type='kvm',
group='libvirt')
mock_which.return_value = True
mock_getgrnam.side_effect = KeyError
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
exc = self.assertRaises(
exception.InvalidConfiguration,
drvr.init_host, "dummyhost")
self.assertIn(
"The group configured in '[libvirt] swtpm_group' does not exist "
"on this host; expected 'admins'.",
str(exc),
)
mock_getgrnam.assert_called_with('admins')
@mock.patch.object(host.Host, 'has_min_version') @mock.patch.object(host.Host, 'has_min_version')
@mock.patch('shutil.which') @mock.patch('shutil.which')
def test__check_vtpm_support(self, mock_which, mock_version): @mock.patch('pwd.getpwnam')
@mock.patch('grp.getgrnam')
def test__check_vtpm_support(
self, mock_getgrnam, mock_getpwnam, mock_which, mock_version,
):
"""Test checking for vTPM support when everything is configured """Test checking for vTPM support when everything is configured
correctly. correctly.
""" """
@ -21897,8 +21956,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
context.get_admin_context(), ins_ref, '10.0.0.2', context.get_admin_context(), ins_ref, '10.0.0.2',
flavor_obj, None) flavor_obj, None)
@mock.patch(('nova.virt.libvirt.driver.LibvirtDriver.' @mock.patch('nova.virt.libvirt.utils.save_and_migrate_vtpm_dir')
'_get_instance_disk_info')) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_get_instance_disk_info')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr', @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr',
return_value='10.0.0.1') return_value='10.0.0.1')
@ -21910,9 +21970,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def _test_migrate_disk_and_power_off( def _test_migrate_disk_and_power_off(
self, ctxt, flavor_obj, mock_execute, mock_exists, mock_rename, self, ctxt, flavor_obj, mock_execute, mock_exists, mock_rename,
mock_is_shared, mock_get_host_ip, mock_destroy, mock_is_shared, mock_get_host_ip, mock_destroy,
mock_get_disk_info, block_device_info=None, mock_get_disk_info, mock_vtpm, block_device_info=None,
params_for_instance=None): params_for_instance=None):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection """Test for nova.virt.libvirt.driver.LivirtConnection
.migrate_disk_and_power_off. .migrate_disk_and_power_off.
""" """
@ -21925,13 +21985,19 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
out = self.drvr.migrate_disk_and_power_off( out = self.drvr.migrate_disk_and_power_off(
ctxt, instance, '10.0.0.2', flavor_obj, None, ctxt, instance, '10.0.0.2', flavor_obj, None,
block_device_info=block_device_info) block_device_info=block_device_info)
self.assertEqual(out, disk_info_text) self.assertEqual(out, disk_info_text)
mock_vtpm.assert_called_with(
instance.uuid, mock.ANY, mock.ANY, '10.0.0.2', mock.ANY, mock.ANY)
# dest is same host case # dest is same host case
out = self.drvr.migrate_disk_and_power_off( out = self.drvr.migrate_disk_and_power_off(
ctxt, instance, '10.0.0.1', flavor_obj, None, ctxt, instance, '10.0.0.1', flavor_obj, None,
block_device_info=block_device_info) block_device_info=block_device_info)
self.assertEqual(out, disk_info_text) self.assertEqual(out, disk_info_text)
mock_vtpm.assert_called_with(
instance.uuid, mock.ANY, mock.ANY, '10.0.0.1', mock.ANY, mock.ANY)
def test_migrate_disk_and_power_off(self): def test_migrate_disk_and_power_off(self):
flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor = {'root_gb': 10, 'ephemeral_gb': 20}
@ -22049,6 +22115,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.drvr.migrate_disk_and_power_off, self.drvr.migrate_disk_and_power_off,
None, instance, '10.0.0.1', flavor_obj, None) None, instance, '10.0.0.1', flavor_obj, None)
@mock.patch('nova.virt.libvirt.utils.save_and_migrate_vtpm_dir')
@mock.patch('oslo_concurrency.processutils.execute') @mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('os.rename') @mock.patch('os.rename')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
@ -22062,7 +22129,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_disk_info, mock_get_disk_info,
mock_destroy, mock_destroy,
mock_rename, mock_rename,
mock_execute): mock_execute,
mock_vtpm):
self.convert_file_called = False self.convert_file_called = False
flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0} flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0}
flavor_obj = objects.Flavor(**flavor) flavor_obj = objects.Flavor(**flavor)
@ -22084,8 +22152,13 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
context.get_admin_context(), instance, '10.0.0.2', context.get_admin_context(), instance, '10.0.0.2',
flavor_obj, None) flavor_obj, None)
dest = '10.0.0.2' if not shared_storage else None
self.assertTrue(mock_is_shared_storage.called) self.assertTrue(mock_is_shared_storage.called)
mock_destroy.assert_called_once_with(instance) mock_destroy.assert_called_once_with(instance)
mock_vtpm.assert_called_once_with(
instance.uuid, mock.ANY, mock.ANY, dest, mock.ANY, mock.ANY)
disk_info_text = jsonutils.dumps(disk_info) disk_info_text = jsonutils.dumps(disk_info)
self.assertEqual(out, disk_info_text) self.assertEqual(out, disk_info_text)
@ -22331,6 +22404,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock.call(_path_qcow, path)]) mock.call(_path_qcow, path)])
@mock.patch.object(libvirt_driver.LibvirtDriver, '_allocate_mdevs') @mock.patch.object(libvirt_driver.LibvirtDriver, '_allocate_mdevs')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_finish_migration_vtpm')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_inject_data') @mock.patch.object(libvirt_driver.LibvirtDriver, '_inject_data')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver, @mock.patch.object(libvirt_driver.LibvirtDriver,
@ -22351,6 +22425,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_raw_to_qcow2, mock_raw_to_qcow2,
mock_create_guest_with_network, mock_create_guest_with_network,
mock_get_info, mock_inject_data, mock_get_info, mock_inject_data,
mock_finish_vtpm,
mock_alloc_mdevs, mock_alloc_mdevs,
power_on=True, resize_instance=False): power_on=True, resize_instance=False):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
@ -22386,9 +22461,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
libvirt_guest.Guest('fake_dom') libvirt_guest.Guest('fake_dom')
self.drvr.finish_migration( self.drvr.finish_migration(
context.get_admin_context(), migration, instance, self.context, migration, instance, disk_info_text, [], image_meta,
disk_info_text, [], image_meta, resize_instance, mock.ANY, bdi, power_on)
resize_instance, mock.ANY, bdi, power_on)
# Assert that we converted the root, ephemeral, and swap disks # Assert that we converted the root, ephemeral, and swap disks
instance_path = libvirt_utils.get_instance_path(instance) instance_path = libvirt_utils.get_instance_path(instance)
@ -22420,6 +22494,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
if 'disk.config' in disks: if 'disk.config' in disks:
self.assertFalse(disks['disk.config'].import_file.called) self.assertFalse(disks['disk.config'].import_file.called)
mock_finish_vtpm.assert_called_once_with(self.context, instance)
# We shouldn't be injecting data during migration # We shouldn't be injecting data during migration
self.assertFalse(mock_inject_data.called) self.assertFalse(mock_inject_data.called)
@ -22445,6 +22521,128 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test_finish_migration_power_off(self): def test_finish_migration_power_off(self):
self._test_finish_migration(power_on=False) self._test_finish_migration(power_on=False)
def _test_finish_migration_vtpm(self, old_flavor, new_flavor,
instance=None):
if instance is None:
instance = self._create_instance()
instance.old_flavor = old_flavor or instance.flavor
instance.new_flavor = new_flavor or instance.flavor
self.drvr._finish_migration_vtpm(context.get_admin_context(), instance)
@mock.patch('shutil.rmtree')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.utils.restore_vtpm_dir')
@mock.patch('os.path.exists')
def test_finish_migration_vtpm(
self, mock_exists, mock_restore, mock_get_path, mock_rmtree,
):
mock_exists.return_value = True
mock_get_path.return_value = 'dummy'
flavor = objects.Flavor(
extra_specs={'hw:tpm_model': 'tpm-tis', 'hw:tpm_version': '2.0'})
instance = self._create_instance()
self._test_finish_migration_vtpm(flavor, flavor,
instance=instance)
mock_rmtree.assert_not_called()
path = 'dummy/swtpm/' + instance.uuid
mock_restore.assert_called_once_with(path)
@mock.patch('shutil.rmtree')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.utils.restore_vtpm_dir')
@mock.patch('os.path.exists')
def test_finish_migration_vtpm_version_change(
self, mock_exists, mock_restore, mock_get_path, mock_rmtree,
):
mock_exists.return_value = True
mock_get_path.return_value = 'dummy'
old_flavor = objects.Flavor(
extra_specs={'hw:tpm_model': 'tpm-tis', 'hw:tpm_version': '2.0'},
)
new_flavor = objects.Flavor(
extra_specs={'hw:tpm_model': 'tpm-tis', 'hw:tpm_version': '1.2'},
)
instance = self._create_instance()
self._test_finish_migration_vtpm(
old_flavor, new_flavor, instance=instance)
path = 'dummy/swtpm/' + instance.uuid
mock_rmtree.assert_called_once_with(path)
mock_restore.assert_not_called()
@mock.patch('shutil.rmtree')
@mock.patch('nova.virt.libvirt.utils.restore_vtpm_dir')
@mock.patch('os.path.exists')
def test_finish_migration_vtpm_no_tpm(
self, mock_exists, mock_restore, mock_rmtree,
):
mock_exists.return_value = True
old_flavor = objects.Flavor(
extra_specs={'hw:tpm_model': 'tpm-tis', 'hw:tpm_version': '2.0'},
)
new_flavor = objects.Flavor(extra_specs={})
self._test_finish_migration_vtpm(old_flavor, new_flavor)
mock_rmtree.assert_called_once()
mock_restore.assert_not_called()
@mock.patch('shutil.rmtree')
@mock.patch('nova.virt.libvirt.utils.restore_vtpm_dir')
@mock.patch('os.path.exists')
def test_finish_migration_vtpm_no_swtpm_dir(
self, mock_exists, mock_restore, mock_rmtree,
):
mock_exists.return_value = False
self._test_finish_migration_vtpm(None, None)
mock_rmtree.assert_not_called()
mock_restore.assert_not_called()
@mock.patch.object(libvirt_utils, 'restore_vtpm_dir')
@mock.patch('os.path.exists')
@mock.patch('os.path.join')
@mock.patch.object(libvirt_utils, 'get_instance_path')
def test_finish_revert_migration_vtpm__no_vtpm_back_to_vtpm(
self, mock_path, mock_join, mock_exists, mock_restore_vtpm,
):
"""From new flavor with no vTPM back to old flavor with vTPM."""
instance = self._create_instance()
instance.old_flavor = objects.Flavor(
extra_specs={'hw:tpm_model': 'tpm-tis', 'hw:tpm_version': '2.0'})
instance.new_flavor = instance.flavor
self.drvr._finish_revert_migration_vtpm(self.context, instance)
mock_path.assert_called_once_with(instance)
mock_exists.assert_called_once_with(mock_join.return_value)
mock_restore_vtpm.assert_called_once_with(mock_join.return_value)
@mock.patch('nova.crypto.delete_vtpm_secret')
def test_finish_revert_migration_vtpm__vtpm_back_to_no_vtpm(
self, mock_delete_vtpm,
):
"""From new flavor with vTPM back to old flavor with no vTPM."""
instance = self._create_instance()
instance.old_flavor = instance.flavor
instance.new_flavor = objects.Flavor(
extra_specs={'hw:tpm_model': 'tpm-tis', 'hw:tpm_version': '2.0'})
self.drvr._finish_revert_migration_vtpm(self.context, instance)
mock_delete_vtpm.assert_called_once_with(self.context, instance)
@mock.patch('nova.crypto.delete_vtpm_secret')
@mock.patch.object(libvirt_utils, 'restore_vtpm_dir')
def test_finish_revert_migration_vtpm__no_vtpm(
self, mock_restore_vtpm, mock_delete_vtpm,
):
"""Neither flavor has vTPM requests."""
instance = self._create_instance()
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
self.drvr._finish_revert_migration_vtpm(self.context, instance)
mock_restore_vtpm.assert_not_called()
mock_delete_vtpm.assert_not_called()
def _test_finish_revert_migration(self, power_on, migration): def _test_finish_revert_migration(self, power_on, migration):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_revert_migration. .finish_revert_migration.
@ -22498,10 +22696,12 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
with utils.tempdir() as tmpdir: with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir) self.flags(instances_path=tmpdir)
ins_ref = self._create_instance() instance = self._create_instance()
os.mkdir(os.path.join(tmpdir, ins_ref['name'])) instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
os.mkdir(os.path.join(tmpdir, instance.name))
libvirt_xml_path = os.path.join(tmpdir, libvirt_xml_path = os.path.join(tmpdir,
ins_ref['name'], instance.name,
'libvirt.xml') 'libvirt.xml')
f = open(libvirt_xml_path, 'w') f = open(libvirt_xml_path, 'w')
f.close() f.close()
@ -22522,11 +22722,12 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
('network-vif-plugged', uuids.normal_vif)] ('network-vif-plugged', uuids.normal_vif)]
with mock.patch.object( with mock.patch.object(
self.drvr, '_get_all_assigned_mediated_devices', self.drvr, '_get_all_assigned_mediated_devices',
return_value={}) as mock_get_a_mdevs: return_value={}
) as mock_get_a_mdevs:
self.drvr.finish_revert_migration( self.drvr.finish_revert_migration(
context.get_admin_context(), ins_ref, network_info, self.context, instance, network_info, migration,
migration, None, power_on) power_on=power_on)
self.assertTrue(self.fake_create_guest_called) self.assertTrue(self.fake_create_guest_called)
mock_get_a_mdevs.assert_called_once_with(mock.ANY) mock_get_a_mdevs.assert_called_once_with(mock.ANY)
@ -22558,34 +22759,41 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
drvr.image_backend = mock.Mock() drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend drvr.image_backend.by_name.return_value = drvr.image_backend
context = 'fake_context' context = 'fake_context'
ins_ref = self._create_instance() instance = self._create_instance()
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
migration = objects.Migration(source_compute='fake-host1', migration = objects.Migration(source_compute='fake-host1',
dest_compute='fake-host2') dest_compute='fake-host2')
with test.nested( with test.nested(
mock.patch.object(os.path, 'exists', return_value=backup_made), mock.patch.object(os.path, 'exists', return_value=backup_made),
mock.patch.object(libvirt_utils, 'get_instance_path'), mock.patch.object(libvirt_utils, 'get_instance_path'),
mock.patch.object(os, 'rename'), mock.patch.object(os, 'rename'),
mock.patch.object(drvr, '_create_guest_with_network'), mock.patch.object(drvr, '_create_guest_with_network'),
mock.patch.object(drvr, '_get_guest_xml'), mock.patch.object(drvr, '_get_guest_xml'),
mock.patch.object(shutil, 'rmtree'), mock.patch.object(shutil, 'rmtree'),
mock.patch.object(loopingcall, 'FixedIntervalLoopingCall'), mock.patch.object(loopingcall, 'FixedIntervalLoopingCall'),
mock.patch.object(drvr, '_get_all_assigned_mediated_devices', mock.patch.object(drvr, '_get_all_assigned_mediated_devices',
return_value={}), return_value={}),
) as (mock_stat, mock_path, mock_rename, mock_cdn, mock_ggx, mock.patch.object(drvr, '_finish_revert_migration_vtpm'),
mock_rmtree, mock_looping_call, mock_get_a_mdevs): ) as (
mock_stat, mock_path, mock_rename, mock_cdn, mock_ggx,
mock_rmtree, mock_looping_call, mock_get_a_mdevs, mock_vtpm,
):
mock_path.return_value = '/fake/foo' mock_path.return_value = '/fake/foo'
if del_inst_failed: if del_inst_failed:
mock_rmtree.side_effect = OSError(errno.ENOENT, mock_rmtree.side_effect = OSError(errno.ENOENT,
'test exception') 'test exception')
drvr.finish_revert_migration(context, ins_ref,
network_model.NetworkInfo(), drvr.finish_revert_migration(
migration) context, instance, network_model.NetworkInfo(), migration)
mock_vtpm.assert_called_once_with(context, instance)
if backup_made: if backup_made:
mock_rename.assert_called_once_with('/fake/foo_resize', mock_rename.assert_called_once_with('/fake/foo_resize',
'/fake/foo') '/fake/foo')
else: else:
self.assertFalse(mock_rename.called) mock_rename.assert_not_called()
def test_finish_revert_migration_after_crash(self): def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(backup_made=True) self._test_finish_revert_migration_after_crash(backup_made=True)
@ -22609,6 +22817,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
image_meta = {"disk_format": "raw", image_meta = {"disk_format": "raw",
"properties": {"hw_disk_bus": "ide"}} "properties": {"hw_disk_bus": "ide"}}
instance = self._create_instance() instance = self._create_instance()
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
migration = objects.Migration(source_compute='fake-host1', migration = objects.Migration(source_compute='fake-host1',
dest_compute='fake-host2') dest_compute='fake-host2')
@ -22625,15 +22835,17 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
return_value={}), return_value={}),
) as (mock_img_bkend, mock_cdan, mock_gifsm, mock_ggxml, ) as (mock_img_bkend, mock_cdan, mock_gifsm, mock_ggxml,
mock_get_a_mdevs): mock_get_a_mdevs):
drvr.finish_revert_migration('', instance, drvr.finish_revert_migration(
network_model.NetworkInfo(), self.context, instance, network_model.NetworkInfo(), migration,
migration, power_on=False) power_on=False)
def test_finish_revert_migration_snap_backend(self): def test_finish_revert_migration_snap_backend(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock() drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend drvr.image_backend.by_name.return_value = drvr.image_backend
ins_ref = self._create_instance() instance = self._create_instance()
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
migration = objects.Migration(source_compute='fake-host1', migration = objects.Migration(source_compute='fake-host1',
dest_compute='fake-host2') dest_compute='fake-host2')
@ -22644,9 +22856,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock.patch.object(drvr, '_get_all_assigned_mediated_devices'), mock.patch.object(drvr, '_get_all_assigned_mediated_devices'),
) as (mock_image, mock_cdn, mock_ggx, mock_get_a_mdevs): ) as (mock_image, mock_cdn, mock_ggx, mock_get_a_mdevs):
mock_image.return_value = {'disk_format': 'raw'} mock_image.return_value = {'disk_format': 'raw'}
drvr.finish_revert_migration('', ins_ref, drvr.finish_revert_migration(
network_model.NetworkInfo(), self.context, instance, network_model.NetworkInfo(), migration,
migration, power_on=False) power_on=False)
drvr.image_backend.rollback_to_snap.assert_called_once_with( drvr.image_backend.rollback_to_snap.assert_called_once_with(
libvirt_utils.RESIZE_SNAPSHOT_NAME) libvirt_utils.RESIZE_SNAPSHOT_NAME)
@ -22657,7 +22869,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock() drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend drvr.image_backend.by_name.return_value = drvr.image_backend
ins_ref = self._create_instance() instance = self._create_instance()
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
migration = objects.Migration(source_compute='fake-host1', migration = objects.Migration(source_compute='fake-host1',
dest_compute='fake-host2') dest_compute='fake-host2')
@ -22670,9 +22884,10 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_image.return_value = {'disk_format': 'raw'} mock_image.return_value = {'disk_format': 'raw'}
drvr.image_backend.rollback_to_snap.side_effect = ( drvr.image_backend.rollback_to_snap.side_effect = (
exception.SnapshotNotFound(snapshot_id='testing')) exception.SnapshotNotFound(snapshot_id='testing'))
self.assertRaises(exception.SnapshotNotFound, self.assertRaises(
drvr.finish_revert_migration, exception.SnapshotNotFound,
'', ins_ref, None, migration, power_on=False) drvr.finish_revert_migration,
self.context, instance, None, migration, power_on=False)
drvr.image_backend.remove_snap.assert_not_called() drvr.image_backend.remove_snap.assert_not_called()
def test_finish_revert_migration_snap_backend_image_does_not_exist(self): def test_finish_revert_migration_snap_backend_image_does_not_exist(self):
@ -22680,7 +22895,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
drvr.image_backend = mock.Mock() drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend drvr.image_backend.by_name.return_value = drvr.image_backend
drvr.image_backend.exists.return_value = False drvr.image_backend.exists.return_value = False
ins_ref = self._create_instance() instance = self._create_instance()
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
migration = objects.Migration(source_compute='fake-host1', migration = objects.Migration(source_compute='fake-host1',
dest_compute='fake-host2') dest_compute='fake-host2')
@ -22692,9 +22909,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock.patch.object(drvr, '_get_all_assigned_mediated_devices'), mock.patch.object(drvr, '_get_all_assigned_mediated_devices'),
) as (mock_rbd, mock_image, mock_cdn, mock_ggx, mock_get_a_mdevs): ) as (mock_rbd, mock_image, mock_cdn, mock_ggx, mock_get_a_mdevs):
mock_image.return_value = {'disk_format': 'raw'} mock_image.return_value = {'disk_format': 'raw'}
drvr.finish_revert_migration('', ins_ref, drvr.finish_revert_migration(
network_model.NetworkInfo(), self.context, instance, network_model.NetworkInfo(), migration,
migration, power_on=False) power_on=False)
self.assertFalse(drvr.image_backend.rollback_to_snap.called) self.assertFalse(drvr.image_backend.rollback_to_snap.called)
self.assertFalse(drvr.image_backend.remove_snap.called) self.assertFalse(drvr.image_backend.remove_snap.called)
@ -22715,7 +22932,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
@mock.patch('time.sleep', new=mock.Mock()) @mock.patch('time.sleep', new=mock.Mock())
def test_cleanup_resize_same_host(self): def test_cleanup_resize_same_host(self):
CONF.set_override('policy_dirs', [], group='oslo_policy') CONF.set_override('policy_dirs', [], group='oslo_policy')
ins_ref = self._create_instance({'host': CONF.host}) instance = self._create_instance({'host': CONF.host})
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock() drvr.image_backend = mock.Mock()
@ -22730,15 +22949,17 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_path.return_value = '/fake/inst' mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize( drvr._cleanup_resize(
self.context, ins_ref, _fake_network_info(self)) self.context, instance, _fake_network_info(self))
mock_get_path.assert_called_once_with(ins_ref) mock_get_path.assert_called_once_with(instance)
self.assertEqual(5, mock_rmtree.call_count) self.assertEqual(5, mock_rmtree.call_count)
@mock.patch('time.sleep', new=mock.Mock()) @mock.patch('time.sleep', new=mock.Mock())
def test_cleanup_resize_not_same_host(self): def test_cleanup_resize_not_same_host(self):
CONF.set_override('policy_dirs', [], group='oslo_policy') CONF.set_override('policy_dirs', [], group='oslo_policy')
host = 'not' + CONF.host host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host}) instance = self._create_instance({'host': host})
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
fake_net = _fake_network_info(self) fake_net = _fake_network_info(self)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@ -22758,11 +22979,12 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_exists.return_value = True mock_exists.return_value = True
mock_get_path.return_value = '/fake/inst' mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(self.context, ins_ref, fake_net) drvr._cleanup_resize(self.context, instance, fake_net)
mock_get_path.assert_called_once_with(ins_ref)
mock_get_path.assert_called_once_with(instance)
self.assertEqual(5, mock_rmtree.call_count) self.assertEqual(5, mock_rmtree.call_count)
mock_undef.assert_called_once_with(ins_ref) mock_undef.assert_called_once_with(instance)
mock_unplug.assert_called_once_with(ins_ref, fake_net) mock_unplug.assert_called_once_with(instance, fake_net)
@mock.patch('time.sleep', new=mock.Mock()) @mock.patch('time.sleep', new=mock.Mock())
def test_cleanup_resize_not_same_host_volume_backed(self): def test_cleanup_resize_not_same_host_volume_backed(self):
@ -22772,7 +22994,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
""" """
CONF.set_override('policy_dirs', [], group='oslo_policy') CONF.set_override('policy_dirs', [], group='oslo_policy')
host = 'not' + CONF.host host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host}) instance = self._create_instance({'host': host})
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
fake_net = _fake_network_info(self) fake_net = _fake_network_info(self)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@ -22793,17 +23017,21 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_exists.return_value = True mock_exists.return_value = True
mock_get_path.return_value = '/fake/inst' mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(self.context, ins_ref, fake_net) drvr._cleanup_resize(self.context, instance, fake_net)
mock_get_path.assert_called_once_with(ins_ref)
mock_get_path.assert_called_once_with(instance)
self.assertEqual(5, mock_rmtree.call_count) self.assertEqual(5, mock_rmtree.call_count)
mock_undef.assert_called_once_with(ins_ref) mock_undef.assert_called_once_with(instance)
mock_unplug.assert_called_once_with(ins_ref, fake_net) mock_unplug.assert_called_once_with(instance, fake_net)
@mock.patch('time.sleep', new=mock.Mock()) @mock.patch('time.sleep', new=mock.Mock())
def test_cleanup_resize_snap_backend(self): def test_cleanup_resize_snap_backend(self):
CONF.set_override('policy_dirs', [], group='oslo_policy') CONF.set_override('policy_dirs', [], group='oslo_policy')
self.flags(images_type='rbd', group='libvirt') self.flags(images_type='rbd', group='libvirt')
ins_ref = self._create_instance({'host': CONF.host}) instance = self._create_instance({'host': CONF.host})
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock() drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend drvr.image_backend.by_name.return_value = drvr.image_backend
@ -22818,16 +23046,19 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_path.return_value = '/fake/inst' mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize( drvr._cleanup_resize(
self.context, ins_ref, _fake_network_info(self)) self.context, instance, _fake_network_info(self))
mock_get_path.assert_called_once_with(ins_ref) mock_get_path.assert_called_once_with(instance)
mock_remove.assert_called_once_with( mock_remove.assert_called_once_with(
libvirt_utils.RESIZE_SNAPSHOT_NAME) libvirt_utils.RESIZE_SNAPSHOT_NAME)
self.assertEqual(5, mock_rmtree.call_count) self.assertEqual(5, mock_rmtree.call_count)
@mock.patch('time.sleep', new=mock.Mock()) @mock.patch('time.sleep', new=mock.Mock())
def test_cleanup_resize_snap_backend_image_does_not_exist(self): def test_cleanup_resize_snap_backend_image_does_not_exist(self):
CONF.set_override('policy_dirs', [], group='oslo_policy') CONF.set_override('policy_dirs', [], group='oslo_policy')
ins_ref = self._create_instance({'host': CONF.host}) instance = self._create_instance({'host': CONF.host})
instance.old_flavor = instance.flavor
instance.new_flavor = instance.flavor
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock() drvr.image_backend = mock.Mock()
drvr.image_backend.by_name.return_value = drvr.image_backend drvr.image_backend.by_name.return_value = drvr.image_backend
@ -22846,8 +23077,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_path.return_value = '/fake/inst' mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize( drvr._cleanup_resize(
self.context, ins_ref, _fake_network_info(self)) self.context, instance, _fake_network_info(self))
mock_get_path.assert_called_once_with(ins_ref) mock_get_path.assert_called_once_with(instance)
self.assertFalse(mock_remove.called) self.assertFalse(mock_remove.called)
self.assertEqual(5, mock_rmtree.call_count) self.assertEqual(5, mock_rmtree.call_count)
mock_rmtree.assert_has_calls([mock.call('/fake/inst_resize', mock_rmtree.assert_has_calls([mock.call('/fake/inst_resize',


@ -14,7 +14,9 @@
# under the License. # under the License.
import functools import functools
import grp
import os import os
import pwd
import tempfile import tempfile
import ddt import ddt
@ -741,3 +743,96 @@ sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
# we shouldn't see the hyperthreading trait since that's a valid trait # we shouldn't see the hyperthreading trait since that's a valid trait
# but not a CPU flag # but not a CPU flag
self.assertEqual(set(['3dnow', 'sse2']), traits) self.assertEqual(set(['3dnow', 'sse2']), traits)
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch('nova.privsep.path.chown')
@mock.patch('nova.privsep.path.move_tree')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('os.path.exists', return_value=True)
def test_save_migrate_vtpm(
self, mock_exists, mock_ensure, mock_move, mock_chown, mock_copy,
):
def _on_execute():
pass
def _on_completion():
pass
libvirt_utils.save_and_migrate_vtpm_dir(
uuids.instance, 'base_resize', 'base', 'host', _on_execute,
_on_completion,
)
vtpm_dir = f'/var/lib/libvirt/swtpm/{uuids.instance}'
swtpm_dir = 'base_resize/swtpm'
mock_exists.assert_called_once_with(vtpm_dir)
mock_ensure.assert_called_once_with(swtpm_dir)
mock_move.assert_called_once_with(vtpm_dir, swtpm_dir)
mock_chown.assert_called_once_with(
swtpm_dir, os.geteuid(), os.getegid(), recursive=True,
)
mock_copy.assert_called_once_with(
swtpm_dir, 'base', host='host', on_completion=_on_completion,
on_execute=_on_execute,
)
@mock.patch('nova.privsep.path.move_tree')
@mock.patch('nova.privsep.path.chown')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch('os.path.exists', return_value=False)
def test_save_migrate_vtpm_not_enabled(
self, mock_exists, mock_copy_image, mock_chown, mock_move,
):
def _dummy():
pass
libvirt_utils.save_and_migrate_vtpm_dir(
uuids.instance, 'base_resize', 'base', 'host', _dummy, _dummy,
)
mock_exists.assert_called_once_with(
f'/var/lib/libvirt/swtpm/{uuids.instance}')
mock_copy_image.assert_not_called()
mock_chown.assert_not_called()
mock_move.assert_not_called()
@mock.patch('grp.getgrnam')
@mock.patch('pwd.getpwnam')
@mock.patch('nova.privsep.path.chmod')
@mock.patch('nova.privsep.path.makedirs')
@mock.patch('nova.privsep.path.move_tree')
@mock.patch('nova.privsep.path.chown')
@mock.patch('os.path.exists')
@mock.patch('os.path.isdir')
def _test_restore_vtpm(
self, exists, mock_isdir, mock_exists, mock_chown, mock_move,
mock_makedirs, mock_chmod, mock_getpwnam, mock_getgrnam,
):
mock_exists.return_value = exists
mock_isdir.return_value = True
mock_getpwnam.return_value = pwd.struct_passwd(
('swtpm', '*', 1234, 1234, None, '/home/test', '/bin/bash'))
mock_getgrnam.return_value = grp.struct_group(('swtpm', '*', 4321, []))
libvirt_utils.restore_vtpm_dir('dummy')
if not exists:
mock_makedirs.assert_called_once_with(libvirt_utils.VTPM_DIR)
mock_chmod.assert_called_once_with(libvirt_utils.VTPM_DIR, 0o711)
mock_getpwnam.assert_called_once_with(CONF.libvirt.swtpm_user)
mock_getgrnam.assert_called_once_with(CONF.libvirt.swtpm_group)
mock_chown.assert_called_with('dummy', 1234, 4321, recursive=True)
mock_move.assert_called_with('dummy', libvirt_utils.VTPM_DIR)
def test_restore_vtpm(self):
self._test_restore_vtpm(True)
def test_restore_vtpm_not_exist(self):
self._test_restore_vtpm(False)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.isdir', return_value=False)
def test_restore_vtpm_notdir(self, mock_isdir, mock_exists):
self.assertRaises(exception.Invalid,
libvirt_utils.restore_vtpm_dir, 'dummy')


@ -33,6 +33,7 @@ import copy
import errno import errno
import functools import functools
import glob import glob
import grp
import itertools import itertools
import operator import operator
import os import os
@ -783,7 +784,7 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.libvirt.virt_type not in ('qemu', 'kvm'): if CONF.libvirt.virt_type not in ('qemu', 'kvm'):
msg = _( msg = _(
"vTPM support requires '[libvirt] virt_type' of 'qemu' or " "vTPM support requires '[libvirt] virt_type' of 'qemu' or "
"'kvm'; found %s.") "'kvm'; found '%s'.")
raise exception.InvalidConfiguration(msg % CONF.libvirt.virt_type) raise exception.InvalidConfiguration(msg % CONF.libvirt.virt_type)
if not self._host.has_min_version( if not self._host.has_min_version(
@ -808,6 +809,25 @@ class LibvirtDriver(driver.ComputeDriver):
"'swtpm_setup' binaries could not be found on PATH.") "'swtpm_setup' binaries could not be found on PATH.")
raise exception.InvalidConfiguration(msg) raise exception.InvalidConfiguration(msg)
# The user and group must be valid on this host for cold migration and
# resize to function.
try:
pwd.getpwnam(CONF.libvirt.swtpm_user)
except KeyError:
msg = _(
"The user configured in '[libvirt] swtpm_user' does not exist "
"on this host; expected '%s'.")
raise exception.InvalidConfiguration(msg % CONF.libvirt.swtpm_user)
try:
grp.getgrnam(CONF.libvirt.swtpm_group)
except KeyError:
msg = _(
"The group configured in '[libvirt] swtpm_group' does not "
"exist on this host; expected '%s'.")
raise exception.InvalidConfiguration(
msg % CONF.libvirt.swtpm_group)
LOG.debug('Enabling emulated TPM support') LOG.debug('Enabling emulated TPM support')
@staticmethod @staticmethod
@ -1579,6 +1599,29 @@ class LibvirtDriver(driver.ComputeDriver):
enforce_multipath=True, enforce_multipath=True,
host=CONF.host) host=CONF.host)
def _cleanup_resize_vtpm(
self,
context: nova_context.RequestContext,
instance: 'objects.Instance',
) -> None:
"""Handle vTPM when confirming a migration or resize.
If the old flavor has vTPM and the new one doesn't, there are keys to
be deleted.
"""
old_vtpm_config = hardware.get_vtpm_constraint(
instance.old_flavor, instance.image_meta)
new_vtpm_config = hardware.get_vtpm_constraint(
instance.new_flavor, instance.image_meta)
if old_vtpm_config and not new_vtpm_config:
# the instance no longer cares for its vTPM so delete the related
# secret; the deletion of the instance directory and undefining of
# the domain will take care of the TPM files themselves
LOG.info('New flavor no longer requests vTPM; deleting secret.')
crypto.delete_vtpm_secret(context, instance)
# TODO(stephenfin): Fold this back into its only caller, cleanup_resize
def _cleanup_resize(self, context, instance, network_info): def _cleanup_resize(self, context, instance, network_info):
inst_base = libvirt_utils.get_instance_path(instance) inst_base = libvirt_utils.get_instance_path(instance)
target = inst_base + '_resize' target = inst_base + '_resize'
@ -1588,6 +1631,9 @@ class LibvirtDriver(driver.ComputeDriver):
if vpmems: if vpmems:
self._cleanup_vpmems(vpmems) self._cleanup_vpmems(vpmems)
# Remove any old vTPM data, if necessary
self._cleanup_resize_vtpm(context, instance)
# Deletion can fail over NFS, so retry the deletion as required. # Deletion can fail over NFS, so retry the deletion as required.
# Set maximum attempt as 5, most test can remove the directory # Set maximum attempt as 5, most test can remove the directory
# for the second time. # for the second time.
@ -10354,6 +10400,12 @@ class LibvirtDriver(driver.ComputeDriver):
dst_disk_info_path, dst_disk_info_path,
host=dest, on_execute=on_execute, host=dest, on_execute=on_execute,
on_completion=on_completion) on_completion=on_completion)
# Handle migration of vTPM data if needed
libvirt_utils.save_and_migrate_vtpm_dir(
instance.uuid, inst_base_resize, inst_base, dest,
on_execute, on_completion)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base, self._cleanup_remote_migration(dest, inst_base,
@ -10376,9 +10428,62 @@ class LibvirtDriver(driver.ComputeDriver):
images.convert_image(path, path_qcow, 'raw', 'qcow2') images.convert_image(path, path_qcow, 'raw', 'qcow2')
os.rename(path_qcow, path) os.rename(path_qcow, path)
def finish_migration(self, context, migration, instance, disk_info, def _finish_migration_vtpm(
network_info, image_meta, resize_instance, self,
allocations, block_device_info=None, power_on=True): context: nova_context.RequestContext,
instance: 'objects.Instance',
) -> None:
"""Handle vTPM when migrating or resizing an instance.
Handle the case where we're resizing between different versions of TPM,
or enabling/disabling TPM.
"""
old_vtpm_config = hardware.get_vtpm_constraint(
instance.old_flavor, instance.image_meta)
new_vtpm_config = hardware.get_vtpm_constraint(
instance.new_flavor, instance.image_meta)
if old_vtpm_config:
# we had a vTPM in the old flavor; figure out if we need to do
# anything with it
inst_base = libvirt_utils.get_instance_path(instance)
swtpm_dir = os.path.join(inst_base, 'swtpm', instance.uuid)
copy_swtpm_dir = True
if old_vtpm_config != new_vtpm_config:
# we had vTPM in the old flavor but the new flavor either
# doesn't or has different config; delete old TPM data and let
# libvirt create new data
if os.path.exists(swtpm_dir):
LOG.info(
'Old flavor and new flavor have different vTPM '
'configuration; removing existing vTPM data.')
copy_swtpm_dir = False
shutil.rmtree(swtpm_dir)
# apparently shutil.rmtree() isn't reliable on NFS so don't rely
# only on path existence here.
if copy_swtpm_dir and os.path.exists(swtpm_dir):
libvirt_utils.restore_vtpm_dir(swtpm_dir)
elif new_vtpm_config:
# we've requested vTPM in the new flavor and didn't have one
# previously so we need to create a new secret
crypto.ensure_vtpm_secret(context, instance)
def finish_migration(
self,
context: nova_context.RequestContext,
migration: 'objects.Migration',
instance: 'objects.Instance',
disk_info: str,
network_info: network_model.NetworkInfo,
image_meta: 'objects.ImageMeta',
resize_instance: bool,
allocations: ty.Dict[str, ty.Any],
block_device_info: ty.Optional[ty.Dict[str, ty.Any]] = None,
power_on: bool = True,
) -> None:
"""Complete the migration process on the destination host."""
LOG.debug("Starting finish_migration", instance=instance) LOG.debug("Starting finish_migration", instance=instance)
block_disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, block_disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
@ -10403,8 +10508,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Convert raw disks to qcow2 if migrating to host which uses
# qcow2 from host which uses raw.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
for info in jsonutils.loads(disk_info):
path = info['path']
disk_name = os.path.basename(path)
@ -10450,6 +10554,9 @@ class LibvirtDriver(driver.ComputeDriver):
# Does the guest need to be assigned some vGPU mediated devices ?
mdevs = self._allocate_mdevs(allocations)
# Handle the case where the guest has emulated TPM
self._finish_migration_vtpm(context, instance)
xml = self._get_guest_xml(context, instance, network_info,
block_disk_info, image_meta,
block_device_info=block_device_info,
@ -10484,11 +10591,45 @@ class LibvirtDriver(driver.ComputeDriver):
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, context, instance, network_info,
migration, block_device_info=None,
power_on=True):
LOG.debug("Starting finish_revert_migration",
instance=instance)
def _finish_revert_migration_vtpm(
self,
context: nova_context.RequestContext,
instance: 'objects.Instance',
) -> None:
"""Handle vTPM differences when reverting a migration or resize.
We should either restore any saved vTPM persistent storage files or
delete the secret created for a vTPM the instance is now losing.
"""
old_vtpm_config = hardware.get_vtpm_constraint(
instance.old_flavor, instance.image_meta)
new_vtpm_config = hardware.get_vtpm_constraint(
instance.new_flavor, instance.image_meta)
if old_vtpm_config:
# the instance had a vTPM before resize and should have one again;
# move the previously-saved vTPM data back to its proper location
inst_base = libvirt_utils.get_instance_path(instance)
swtpm_dir = os.path.join(inst_base, 'swtpm', instance.uuid)
if os.path.exists(swtpm_dir):
libvirt_utils.restore_vtpm_dir(swtpm_dir)
elif new_vtpm_config:
# the instance gained a vTPM and must now lose it; delete the vTPM
# secret, knowing that libvirt will take care of everything else on
# the destination side
crypto.delete_vtpm_secret(context, instance)
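To make the first branch concrete, here is a path-only sketch of where the saved data sits on the source host at this point, assuming the default instances_path of /var/lib/nova/instances and a made-up UUID (by the time this handler runs, the instance's *_resize directory is assumed to have been renamed back to its regular path, which is why swtpm_dir above is built from inst_base):

    import os

    # hypothetical values, for illustration only
    instance_uuid = '11111111-2222-3333-4444-555555555555'
    inst_base = os.path.join('/var/lib/nova/instances', instance_uuid)

    # saved by save_and_migrate_vtpm_dir() before the resize:
    saved = os.path.join(inst_base, 'swtpm', instance_uuid)
    # restore_vtpm_dir() re-owns it and moves it back to where swtpm expects it:
    restored = os.path.join('/var/lib/libvirt/swtpm', instance_uuid)
    print(saved, '->', restored)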
def finish_revert_migration(
self,
context: nova_context.RequestContext,
instance: 'objects.Instance',
network_info: network_model.NetworkInfo,
migration: 'objects.Migration',
block_device_info: ty.Optional[ty.Dict[str, ty.Any]] = None,
power_on: bool = True,
) -> None:
"""Finish the second half of reverting a resize on the source host."""
LOG.debug('Starting finish_revert_migration', instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
@ -10507,6 +10648,8 @@ class LibvirtDriver(driver.ComputeDriver):
root_disk.rollback_to_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
self._finish_revert_migration_vtpm(context, instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
instance.image_meta,

View File
@ -19,7 +19,9 @@
# under the License.
import errno
import grp
import os
import pwd
import re
import typing as ty
import uuid
@ -31,12 +33,14 @@ from oslo_utils import fileutils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import fields as obj_fields
import nova.privsep.fs
import nova.privsep.idmapshift
import nova.privsep.libvirt
import nova.privsep.path
from nova.scheduler import utils as scheduler_utils
from nova import utils
from nova.virt import images
@ -98,6 +102,9 @@ CPU_TRAITS_MAPPING = {
# Reverse CPU_TRAITS_MAPPING
TRAITS_CPU_MAPPING = {v: k for k, v in CPU_TRAITS_MAPPING.items()}
# global directory for emulated TPM
VTPM_DIR = '/var/lib/libvirt/swtpm/'
def create_image(
disk_format: str, path: str, size: ty.Union[str, int],
@ -658,3 +665,77 @@ def get_flags_by_flavor_specs(flavor: 'objects.Flavor') -> ty.Set[str]:
if trait in TRAITS_CPU_MAPPING]
return set(flags)
def save_and_migrate_vtpm_dir(
instance_uuid: str,
inst_base_resize: str,
inst_base: str,
dest: str,
on_execute: ty.Callable,
on_completion: ty.Callable,
) -> None:
"""Save vTPM data to instance directory and migrate to the destination.
If the instance has vTPM enabled, then we need to save its vTPM data
locally (to allow for revert) and then migrate the data to the dest node.
Do so by copying vTPM data from the swtpm data directory to a resize
working directory, $inst_base_resize, and then copying this to the remote
directory at $dest:$inst_base.
:param instance_uuid: The instance's UUID.
:param inst_base_resize: The instance's base resize working directory.
:param inst_base: The instance's base directory on the destination host.
:param dest: Destination host.
:param on_execute: Callback method to store PID of process in cache.
:param on_completion: Callback method to remove PID of process from cache.
:returns: None.
"""
vtpm_dir = os.path.join(VTPM_DIR, instance_uuid)
if not os.path.exists(vtpm_dir):
return
# We likely need to create the instance swtpm directory on the dest node
# with ownership that is not the user running nova. We only have
# permissions to copy files to <instance_path> on the dest node so we need
# to get creative.
# First, make a new directory in the local instance directory
swtpm_dir = os.path.join(inst_base_resize, 'swtpm')
fileutils.ensure_tree(swtpm_dir)
# Now move the per-instance swtpm persistent files into the
# local instance directory.
nova.privsep.path.move_tree(vtpm_dir, swtpm_dir)
# Now adjust ownership.
nova.privsep.path.chown(
swtpm_dir, os.geteuid(), os.getegid(), recursive=True)
# Copy the swtpm subtree to the remote instance directory
copy_image(
swtpm_dir, inst_base, host=dest, on_execute=on_execute,
on_completion=on_completion)
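A path-level sketch of the three hops performed above for a single instance, assuming the default instances_path of /var/lib/nova/instances; the UUID and destination hostname are made up, and the final hop is the copy_image() call shown above:

    import os

    # hypothetical values, for illustration only
    instance_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
    dest = 'compute-2'
    inst_base = os.path.join('/var/lib/nova/instances', instance_uuid)
    inst_base_resize = inst_base + '_resize'

    # 1. persistent state written by swtpm while the guest runs (tss/root owned):
    src = os.path.join('/var/lib/libvirt/swtpm', instance_uuid)
    # 2. moved and re-owned into the resize working directory so nova can read it:
    staged = os.path.join(inst_base_resize, 'swtpm', instance_uuid)
    # 3. copied into the instance directory on the destination host:
    remote = '%s:%s' % (dest, os.path.join(inst_base, 'swtpm', instance_uuid))
    print(' -> '.join([src, staged, remote]))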
def restore_vtpm_dir(swtpm_dir: str) -> None:
"""Given a saved TPM directory, restore it where libvirt can find it.
:param swtpm_dir: Path to the swtpm directory.
:returns: None
"""
# Ensure global swtpm dir exists with suitable
# permissions/ownership
if not os.path.exists(VTPM_DIR):
nova.privsep.path.makedirs(VTPM_DIR)
nova.privsep.path.chmod(VTPM_DIR, 0o711)
elif not os.path.isdir(VTPM_DIR):
msg = _(
'Guest wants emulated TPM but host path %s is not a directory.')
raise exception.Invalid(msg % VTPM_DIR)
# These can raise KeyError but they're validated by the driver on startup.
uid = pwd.getpwnam(CONF.libvirt.swtpm_user).pw_uid
gid = grp.getgrnam(CONF.libvirt.swtpm_group).gr_gid
# Set ownership of instance-specific files
nova.privsep.path.chown(swtpm_dir, uid, gid, recursive=True)
# Move instance-specific directory to global dir
nova.privsep.path.move_tree(swtpm_dir, VTPM_DIR)
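The uid/gid lookup above depends on the [libvirt] swtpm_user and swtpm_group configuration options, which default to 'tss'. A minimal standalone sketch of that lookup under the default configuration; on a host without the swtpm packages and the tss account it raises KeyError, which is why the driver validates these options at startup:

    import grp
    import pwd

    # assumes the default [libvirt]/swtpm_user and swtpm_group of 'tss'
    uid = pwd.getpwnam('tss').pw_uid
    gid = grp.getgrnam('tss').gr_gid
    print(uid, gid)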