libvirt: make live migration possible with Virtuozzo
Live migration for the libvirt vz driver works the same way as for
qemu. However, some migration capabilities are supported differently:

* Virtuozzo containers don't use a backing file, so we shouldn't
  check that it exists during pre live migration.
* The migrate_disks parameter doesn't make sense for Virtuozzo, as
  the backend determines whether a specific block device should be
  migrated or not.
* Passing a new domain XML to the migration method is not supported.

Implements: blueprint live-migration-vz-driver
Change-Id: I89ecdef13ad47800abc8a5158f8834e46750b9ea
Signed-off-by: Pavel Gluschak <pglushchak@virtuozzo.com>
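The sketch below is not part of the change itself; it is a minimal, illustrative Python summary of how the three Virtuozzo-specific differences above map onto the driver flow (the actual changes follow in the diff). The helper names pick_migration_uri, should_update_guest_xml and should_collect_disk_paths are hypothetical; only the parallels+tcp://%s/system URI scheme and the virt_type check mirror the patch.

    # Hypothetical helpers mirroring the parallels-specific branches in the
    # patch; illustrative only, not part of the nova libvirt driver API.

    VZ_VIRT_TYPE = "parallels"

    def pick_migration_uri(virt_type, dest):
        # A 'parallels+tcp' scheme is added alongside the qemu/xen schemes.
        uris = {
            'kvm': 'qemu+tcp://%s/system',
            'qemu': 'qemu+tcp://%s/system',
            'xen': 'xenmigr://%s/system',
            'parallels': 'parallels+tcp://%s/system',
        }
        uri = uris.get(virt_type)
        return uri % dest if uri else None

    def should_update_guest_xml(virt_type):
        # The vz driver does not accept a new domain XML on migration,
        # so XML rewriting is skipped for parallels.
        return virt_type != VZ_VIRT_TYPE

    def should_collect_disk_paths(virt_type, block_migration):
        # migrate_disks is meaningless for Virtuozzo: the backend decides
        # which block devices to migrate, so disk paths are not collected.
        return block_migration and virt_type != VZ_VIRT_TYPE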
@@ -313,8 +313,8 @@ driver-impl-vmware=missing
 driver-notes-vmware=https://bugs.launchpad.net/nova/+bug/1192192
 driver-impl-hyperv=complete
 driver-impl-ironic=missing
-driver-impl-libvirt-vz-vm=missing
-driver-impl-libvirt-vz-ct=missing
+driver-impl-libvirt-vz-vm=complete
+driver-impl-libvirt-vz-ct=complete
 
 [operation.force-live-migration-to-complete]
 title=Force live migration to complete
@@ -7638,6 +7638,60 @@ class LibvirtConnTestCase(test.NoDBTestCase):
             self.context, instance_ref, 'dest',
             False, migrate_data, guest, [])
 
+    def test_live_migration_parallels_no_new_xml(self):
+        self.flags(virt_type='parallels', group='libvirt')
+        self.flags(enabled=False, group='vnc')
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+        instance_dict = dict(self.test_instance)
+        instance_dict.update({'host': 'fake',
+                              'power_state': power_state.RUNNING,
+                              'vm_state': vm_states.ACTIVE})
+        instance = objects.Instance(**instance_dict)
+        migrate_data = objects.LibvirtLiveMigrateData(
+            block_migration=False)
+        dom_mock = mock.MagicMock()
+        guest = libvirt_guest.Guest(dom_mock)
+        drvr._live_migration_operation(self.context, instance, 'dest',
+                                       False, migrate_data, guest, [])
+        # when new xml is not passed we fall back to migrateToURI
+        dom_mock.migrateToURI.assert_called_once_with(
+            drvr._live_migration_uri('dest'),
+            flags=0, bandwidth=0)
+
+    @mock.patch.object(utils, 'spawn')
+    @mock.patch.object(host.Host, 'get_guest')
+    @mock.patch.object(fakelibvirt.Connection, '_mark_running')
+    @mock.patch.object(libvirt_driver.LibvirtDriver,
+                       '_live_migration_monitor')
+    @mock.patch.object(libvirt_driver.LibvirtDriver,
+                       '_live_migration_copy_disk_paths')
+    def test_live_migration_parallels_no_migrate_disks(self,
+                                                       mock_copy_disk_paths,
+                                                       mock_monitor,
+                                                       mock_running,
+                                                       mock_guest,
+                                                       mock_thread):
+        self.flags(virt_type='parallels', group='libvirt')
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+        instance_dict = dict(self.test_instance)
+        instance_dict.update({'host': 'fake',
+                              'power_state': power_state.RUNNING,
+                              'vm_state': vm_states.ACTIVE})
+        instance = objects.Instance(**instance_dict)
+        migrate_data = objects.LibvirtLiveMigrateData(
+            block_migration=True)
+        dom = fakelibvirt.Domain(drvr._get_connection(), '<domain/>', True)
+        guest = libvirt_guest.Guest(dom)
+        mock_guest.return_value = guest
+        drvr._live_migration(self.context, instance, 'dest',
+                             lambda: None, lambda: None, True,
+                             migrate_data)
+        self.assertFalse(mock_copy_disk_paths.called)
+        mock_thread.assert_called_once_with(
+            drvr._live_migration_operation,
+            self.context, instance, 'dest', True,
+            migrate_data, guest, [])
+
     def test_live_migration_update_volume_xml(self):
         self.compute = manager.ComputeManager()
         instance_dict = dict(self.test_instance)
@@ -7798,9 +7852,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
             ('xen', 'xenmigr://%s/system'),
             ('kvm', 'qemu+tcp://%s/system'),
             ('qemu', 'qemu+tcp://%s/system'),
+            ('parallels', 'parallels+tcp://%s/system'),
             # anything else will return None
             ('lxc', None),
-            ('parallels', None),
         )
         dest = 'destination'
         for hyperv, uri in hypervisor_uri_map:
@@ -9420,6 +9474,17 @@ class LibvirtConnTestCase(test.NoDBTestCase):
         # Assert that we did nothing
         self.assertEqual({}, fake_backend.created_disks)
 
+    @mock.patch.object(libvirt_driver.LibvirtDriver,
+                       '_fetch_instance_kernel_ramdisk')
+    def test_create_images_and_backing_parallels(self, mock_fetch):
+        self.flags(virt_type='parallels', group='libvirt')
+        instance = objects.Instance(**self.test_instance)
+        instance.vm_mode = fields.VMMode.EXE
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+        drvr._create_images_and_backing(self.context, instance,
+                                        '/fake/instance/dir', None)
+        self.assertFalse(mock_fetch.called)
+
     def _generate_target_ret(self, target_connect_addr=None):
         target_ret = {
         'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
@@ -681,12 +681,11 @@ class LibvirtDriver(driver.ComputeDriver):
 
     @staticmethod
     def _live_migration_uri(dest):
-        # Only Xen and QEMU support live migration, see
-        # https://libvirt.org/migration.html#scenarios for reference
         uris = {
             'kvm': 'qemu+tcp://%s/system',
             'qemu': 'qemu+tcp://%s/system',
             'xen': 'xenmigr://%s/system',
+            'parallels': 'parallels+tcp://%s/system',
         }
         virt_type = CONF.libvirt.virt_type
         uri = CONF.libvirt.live_migration_uri or uris.get(virt_type)
@@ -6036,11 +6035,13 @@ class LibvirtDriver(driver.ComputeDriver):
                     migrate_uri = self._migrate_uri(dest)
 
             params = None
-            new_xml_str = libvirt_migrate.get_updated_guest_xml(
-                # TODO(sahid): It's not a really good idea to pass
-                # the method _get_volume_config and we should to find
-                # a way to avoid this in future.
-                guest, migrate_data, self._get_volume_config)
+            new_xml_str = None
+            if CONF.libvirt.virt_type != "parallels":
+                new_xml_str = libvirt_migrate.get_updated_guest_xml(
+                    # TODO(sahid): It's not a really good idea to pass
+                    # the method _get_volume_config and we should to find
+                    # a way to avoid this in future.
+                    guest, migrate_data, self._get_volume_config)
             if self._host.has_min_version(
                     MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
                 params = {
@@ -6483,7 +6484,8 @@ class LibvirtDriver(driver.ComputeDriver):
 
         disk_paths = []
         device_names = []
-        if migrate_data.block_migration:
+        if (migrate_data.block_migration and
+                CONF.libvirt.virt_type != "parallels"):
             disk_paths, device_names = self._live_migration_copy_disk_paths(
                 context, instance, guest)
 
@@ -6792,6 +6794,12 @@ class LibvirtDriver(driver.ComputeDriver):
                           not available.
 
         """
+
+        # Virtuozzo containers don't use backing file
+        if (CONF.libvirt.virt_type == "parallels" and
+                instance.vm_mode == fields.VMMode.EXE):
+            return
 
         if not disk_info:
             disk_info = []
 
@@ -0,0 +1,4 @@
+---
+features:
+  - Live migration is supported for both Virtuozzo containers
+    and virtual machines when using virt_type=parallels.