libvirt: Remove usage of migrateToURI{2} APIs
The recently updated minimum required libvirt version (1.3.1; in commit
403320b
-- libvirt: Bump MIN_{LIBVIRT,QEMU}_VERSION for "Rocky") brings
in the newer libvirt migration API, migrateToURI3(). The newer API was
explicitly designed[*] to be backward compatible with the older variant.
So remove the usage of the older variants:
migrateToURI()
migrateToURI2()
And just stick to the newer API -- migrateToURI3().
Clean up the following:
- Add the 'migrate_disks' and 'destination_xml' parameters, and remove
the no longer needed 'domain_xml' parameter from the Nova migrate() method.
- Remove or fix various unit tests to use migrateToURI3().
- Stub nova.virt.libvirt.guest.Guest.migrate() correctly in
nova/tests/unit/virt/test_virt_drivers.py.
[*] https://libvirt.org/git/?p=libvirt.git;a=commit;h=4bf62f4 --
Extensible migration APIs
Change-Id: Id9ee1feeadf612fa79c3d280cee3a614a74a00a7
Signed-off-by: Kashyap Chamarthy <kchamart@redhat.com>
This commit is contained in:
parent
bf497cc474
commit
4b3e877210
|
@ -78,7 +78,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
|
|||
|
||||
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_volume_connector')
|
||||
@mock.patch('nova.virt.libvirt.guest.Guest.get_job_info')
|
||||
@mock.patch.object(fakelibvirt.Domain, 'migrateToURI2')
|
||||
@mock.patch.object(fakelibvirt.Domain, 'migrateToURI3')
|
||||
@mock.patch('nova.virt.libvirt.host.Host.get_connection')
|
||||
@mock.patch('nova.virt.disk.api.get_disk_size', return_value=1024)
|
||||
@mock.patch('os.path.getsize', return_value=1024)
|
||||
|
|
|
@ -754,20 +754,6 @@ class Domain(object):
|
|||
self._def['vcpu'],
|
||||
123456789]
|
||||
|
||||
def migrateToURI(self, desturi, flags, dname, bandwidth):
|
||||
raise make_libvirtError(
|
||||
libvirtError,
|
||||
"Migration always fails for fake libvirt!",
|
||||
error_code=VIR_ERR_INTERNAL_ERROR,
|
||||
error_domain=VIR_FROM_QEMU)
|
||||
|
||||
def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth):
|
||||
raise make_libvirtError(
|
||||
libvirtError,
|
||||
"Migration always fails for fake libvirt!",
|
||||
error_code=VIR_ERR_INTERNAL_ERROR,
|
||||
error_domain=VIR_FROM_QEMU)
|
||||
|
||||
def migrateToURI3(self, dconnuri, params, flags):
|
||||
raise make_libvirtError(
|
||||
libvirtError,
|
||||
|
|
|
@ -9600,6 +9600,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock_min_version):
|
||||
self.compute = manager.ComputeManager()
|
||||
instance_ref = self.test_instance
|
||||
target_connection = '127.0.0.2'
|
||||
|
||||
xml_tmpl = ("<domain type='kvm'>"
|
||||
"<devices>"
|
||||
|
@ -9628,7 +9629,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
|
||||
disk_paths = ['vda', 'vdb']
|
||||
params = {
|
||||
'migrate_disks': ['vda', 'vdb'],
|
||||
'migrate_uri': 'tcp://127.0.0.2',
|
||||
'migrate_disks': disk_paths,
|
||||
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
|
||||
'destination_xml': target_xml,
|
||||
}
|
||||
|
@ -9639,7 +9641,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
graphics_listen_addr_vnc='10.0.0.1',
|
||||
graphics_listen_addr_spice='10.0.0.2',
|
||||
serial_listen_addr='127.0.0.1',
|
||||
target_connect_addr=None,
|
||||
target_connect_addr=target_connection,
|
||||
bdms=[],
|
||||
block_migration=False)
|
||||
dom = fakelibvirt.virDomain
|
||||
|
@ -9647,36 +9649,42 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||
self.assertRaises(fakelibvirt.libvirtError,
|
||||
drvr._live_migration_operation,
|
||||
self.context, instance_ref, 'dest',
|
||||
self.context, instance_ref, target_connection,
|
||||
False, migrate_data, guest, disk_paths,
|
||||
bandwidth=bandwidth)
|
||||
mock_xml.assert_called_once_with(
|
||||
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
|
||||
mock_migrateToURI3.assert_called_once_with(
|
||||
drvr._live_migration_uri('dest'),
|
||||
drvr._live_migration_uri(target_connection),
|
||||
params=params, flags=0)
|
||||
|
||||
def test_live_migration_parallels_no_new_xml(self):
|
||||
self.flags(virt_type='parallels', group='libvirt')
|
||||
self.flags(enabled=False, group='vnc')
|
||||
target_connection = None
|
||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
|
||||
instance_dict = dict(self.test_instance)
|
||||
instance_dict.update({'host': 'fake',
|
||||
'power_state': power_state.RUNNING,
|
||||
'vm_state': vm_states.ACTIVE})
|
||||
instance = objects.Instance(**instance_dict)
|
||||
|
||||
params = {
|
||||
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW
|
||||
}
|
||||
migrate_data = objects.LibvirtLiveMigrateData(
|
||||
target_connect_addr=target_connection,
|
||||
block_migration=False)
|
||||
dom_mock = mock.MagicMock()
|
||||
guest = libvirt_guest.Guest(dom_mock)
|
||||
_bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
|
||||
drvr._live_migration_operation(self.context, instance, 'dest',
|
||||
False, migrate_data, guest, [],
|
||||
drvr._live_migration_operation(self.context, instance,
|
||||
target_connection, False,
|
||||
migrate_data, guest, None,
|
||||
bandwidth=_bandwidth)
|
||||
# when new xml is not passed we fall back to migrateToURI
|
||||
dom_mock.migrateToURI.assert_called_once_with(
|
||||
drvr._live_migration_uri('dest'),
|
||||
flags=0, bandwidth=_bandwidth)
|
||||
dom_mock.migrateToURI3.assert_called_once_with(
|
||||
drvr._live_migration_uri(target_connection),
|
||||
params=params, flags=0)
|
||||
|
||||
@mock.patch.object(utils, 'spawn')
|
||||
@mock.patch.object(host.Host, 'get_guest')
|
||||
|
@ -9717,18 +9725,34 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
guest.migrate_configure_max_speed.assert_called_once_with(
|
||||
CONF.libvirt.live_migration_bandwidth)
|
||||
|
||||
def test_live_migration_update_volume_xml(self):
|
||||
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
|
||||
@mock.patch.object(nova.virt.libvirt.migration,
|
||||
'get_updated_guest_xml', return_value='')
|
||||
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
|
||||
def test_live_migration_update_volume_xml(self, mock_xml,
|
||||
mock_updated_guest_xml,
|
||||
mock_migrateToURI3):
|
||||
self.compute = manager.ComputeManager()
|
||||
instance_dict = dict(self.test_instance)
|
||||
instance_dict.update({'host': 'fake',
|
||||
'power_state': power_state.RUNNING,
|
||||
'vm_state': vm_states.ACTIVE})
|
||||
instance_ref = objects.Instance(**instance_dict)
|
||||
instance_ref = self.test_instance
|
||||
target_connection = '127.0.0.2'
|
||||
|
||||
target_xml = self.device_xml_tmpl.format(
|
||||
device_path='/dev/disk/by-path/'
|
||||
'ip-1.2.3.4:3260-iqn.'
|
||||
'cde.67890.opst-lun-Z')
|
||||
# start test
|
||||
|
||||
# Prepare mocks
|
||||
mock_xml.return_value = target_xml
|
||||
|
||||
disk_paths = ['vda', 'vdb']
|
||||
params = {
|
||||
'migrate_disks': disk_paths,
|
||||
'migrate_uri': 'tcp://127.0.0.2',
|
||||
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
|
||||
'destination_xml': target_xml
|
||||
}
|
||||
|
||||
# Start test
|
||||
connection_info = {
|
||||
u'driver_volume_type': u'iscsi',
|
||||
u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
|
||||
|
@ -9746,30 +9770,24 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
connection_info=connection_info)
|
||||
migrate_data = objects.LibvirtLiveMigrateData(
|
||||
serial_listen_addr='',
|
||||
target_connect_addr=None,
|
||||
target_connect_addr=target_connection,
|
||||
bdms=[bdm],
|
||||
block_migration=False)
|
||||
|
||||
dom = fakelibvirt.virDomain
|
||||
guest = libvirt_guest.Guest(dom)
|
||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||
test_mock = mock.MagicMock()
|
||||
guest = libvirt_guest.Guest(test_mock)
|
||||
|
||||
with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \
|
||||
mget_info,\
|
||||
mock.patch.object(drvr._host, '_get_domain') as mget_domain,\
|
||||
mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\
|
||||
mock.patch.object(
|
||||
libvirt_migrate, 'get_updated_guest_xml') as mupdate:
|
||||
|
||||
mget_info.side_effect = exception.InstanceNotFound(
|
||||
instance_id='foo')
|
||||
mget_domain.return_value = test_mock
|
||||
test_mock.XMLDesc.return_value = target_xml
|
||||
self.assertFalse(drvr._live_migration_operation(
|
||||
self.context, instance_ref, 'dest', False,
|
||||
migrate_data, guest, [],
|
||||
libvirt_driver.MIN_MIGRATION_SPEED_BW))
|
||||
mupdate.assert_called_once_with(
|
||||
_bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
|
||||
mock_updated_guest_xml.return_value = target_xml
|
||||
drvr._live_migration_operation(
|
||||
self.context, instance_ref, target_connection,
|
||||
False, migrate_data, guest, disk_paths,
|
||||
_bandwidth)
|
||||
mock_migrateToURI3.assert_called_once_with(
|
||||
drvr._live_migration_uri(target_connection),
|
||||
params=params, flags=0)
|
||||
mock_updated_guest_xml.assert_called_once_with(
|
||||
guest, migrate_data, mock.ANY, get_vif_config=None)
|
||||
|
||||
def test_live_migration_update_vifs_xml(self):
|
||||
|
@ -10115,6 +10133,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock_min_version):
|
||||
self.compute = manager.ComputeManager()
|
||||
instance_ref = self.test_instance
|
||||
target_connection = '127.0.0.2'
|
||||
|
||||
xml_tmpl = ("<domain type='kvm'>"
|
||||
"<devices>"
|
||||
|
@ -10137,6 +10156,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
|
||||
disk_paths = ['vda', 'vdb']
|
||||
params = {
|
||||
'migrate_uri': 'tcp://127.0.0.2',
|
||||
'migrate_disks': ['vda', 'vdb'],
|
||||
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
|
||||
'destination_xml': target_xml,
|
||||
|
@ -10148,7 +10168,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
graphics_listen_addr_vnc='10.0.0.1',
|
||||
graphics_listen_addr_spice='10.0.0.2',
|
||||
serial_listen_addr='9.0.0.12',
|
||||
target_connect_addr=None,
|
||||
target_connect_addr=target_connection,
|
||||
bdms=[],
|
||||
block_migration=False,
|
||||
serial_listen_ports=[10200])
|
||||
|
@ -10157,13 +10177,13 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||
self.assertRaises(fakelibvirt.libvirtError,
|
||||
drvr._live_migration_operation,
|
||||
self.context, instance_ref, 'dest',
|
||||
self.context, instance_ref, target_connection,
|
||||
False, migrate_data, guest, disk_paths,
|
||||
bandwidth=bandwidth)
|
||||
mock_xml.assert_called_once_with(
|
||||
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
|
||||
mock_migrateToURI3.assert_called_once_with(
|
||||
drvr._live_migration_uri('dest'),
|
||||
drvr._live_migration_uri(target_connection),
|
||||
params=params, flags=0)
|
||||
|
||||
def test_live_migration_fails_without_serial_console_address(self):
|
||||
|
@ -10203,12 +10223,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
def test_live_migration_uses_migrateToURI3(
|
||||
self, mock_old_xml, mock_new_xml, mock_migrateToURI3,
|
||||
mock_min_version):
|
||||
|
||||
target_connection = '127.0.0.2'
|
||||
# Preparing mocks
|
||||
disk_paths = ['vda', 'vdb']
|
||||
params = {
|
||||
'migrate_uri': 'tcp://127.0.0.2',
|
||||
'migrate_disks': ['vda', 'vdb'],
|
||||
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
|
||||
'destination_xml': '',
|
||||
}
|
||||
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
|
||||
|
||||
|
@ -10217,7 +10239,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
graphics_listen_addr_vnc='0.0.0.0',
|
||||
graphics_listen_addr_spice='0.0.0.0',
|
||||
serial_listen_addr='127.0.0.1',
|
||||
target_connect_addr=None,
|
||||
target_connect_addr=target_connection,
|
||||
bdms=[],
|
||||
block_migration=False)
|
||||
|
||||
|
@ -10228,11 +10250,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
instance = objects.Instance(**self.test_instance)
|
||||
self.assertRaises(fakelibvirt.libvirtError,
|
||||
drvr._live_migration_operation,
|
||||
self.context, instance, 'dest',
|
||||
self.context, instance, target_connection,
|
||||
False, migrate_data, guest, disk_paths,
|
||||
libvirt_driver.MIN_MIGRATION_SPEED_BW)
|
||||
mock_migrateToURI3.assert_called_once_with(
|
||||
drvr._live_migration_uri('dest'),
|
||||
drvr._live_migration_uri(target_connection),
|
||||
params=params, flags=0)
|
||||
|
||||
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
|
||||
|
@ -10242,11 +10264,13 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
def _test_live_migration_block_migration_flags(self,
|
||||
device_names, expected_flags,
|
||||
mock_old_xml, mock_min_version, mock_migrateToURI3):
|
||||
|
||||
target_connection = '127.0.0.2'
|
||||
migrate_data = objects.LibvirtLiveMigrateData(
|
||||
graphics_listen_addr_vnc='0.0.0.0',
|
||||
graphics_listen_addr_spice='0.0.0.0',
|
||||
serial_listen_addr='127.0.0.1',
|
||||
target_connect_addr=None,
|
||||
target_connect_addr=target_connection,
|
||||
bdms=[],
|
||||
block_migration=True)
|
||||
|
||||
|
@ -10257,17 +10281,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
|
||||
_bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
|
||||
instance = objects.Instance(**self.test_instance)
|
||||
drvr._live_migration_operation(self.context, instance, 'dest',
|
||||
drvr._live_migration_operation(self.context, instance,
|
||||
target_connection,
|
||||
True, migrate_data, guest,
|
||||
device_names, _bandwidth)
|
||||
|
||||
params = {
|
||||
'migrate_uri': 'tcp://127.0.0.2',
|
||||
'migrate_disks': device_names,
|
||||
'bandwidth': _bandwidth,
|
||||
'destination_xml': '<xml/>',
|
||||
}
|
||||
if not params['migrate_disks']:
|
||||
del params['migrate_disks']
|
||||
|
||||
mock_migrateToURI3.assert_called_once_with(
|
||||
drvr._live_migration_uri('dest'), params=params,
|
||||
drvr._live_migration_uri(target_connection), params=params,
|
||||
flags=expected_flags)
|
||||
|
||||
def test_live_migration_block_migration_with_devices(self):
|
||||
|
@ -10300,38 +10329,42 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
self, mock_old_xml, mock_new_xml,
|
||||
mock_migrateToURI3, mock_min_version):
|
||||
self.flags(live_migration_tunnelled=True, group='libvirt')
|
||||
|
||||
target_connection = None
|
||||
device_names = ['disk1', 'disk2']
|
||||
|
||||
# Preparing mocks
|
||||
disk_paths = []
|
||||
_bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
|
||||
# Since we are passing the VIR_MIGRATE_TUNNELLED flag, the
|
||||
# 'parms' dict will not (as expected) contain 'migrate_disks'
|
||||
params = {
|
||||
'bandwidth': _bandwidth,
|
||||
'destination_xml': '',
|
||||
'bandwidth': _bandwidth
|
||||
}
|
||||
# Start test
|
||||
migrate_data = objects.LibvirtLiveMigrateData(
|
||||
graphics_listen_addr_vnc='0.0.0.0',
|
||||
graphics_listen_addr_spice='0.0.0.0',
|
||||
serial_listen_addr='127.0.0.1',
|
||||
target_connect_addr=None,
|
||||
target_connect_addr=target_connection,
|
||||
bdms=[],
|
||||
block_migration=True)
|
||||
|
||||
dom = fakelibvirt.virDomain
|
||||
guest = libvirt_guest.Guest(dom)
|
||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||
|
||||
drvr._parse_migration_flags()
|
||||
instance = objects.Instance(**self.test_instance)
|
||||
drvr._live_migration_operation(self.context, instance, 'dest',
|
||||
True, migrate_data, guest, disk_paths,
|
||||
_bandwidth)
|
||||
drvr._live_migration_operation(self.context, instance,
|
||||
target_connection, True, migrate_data,
|
||||
guest, device_names, _bandwidth)
|
||||
|
||||
expected_flags = (fakelibvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
|
||||
fakelibvirt.VIR_MIGRATE_PERSIST_DEST |
|
||||
fakelibvirt.VIR_MIGRATE_TUNNELLED |
|
||||
fakelibvirt.VIR_MIGRATE_PEER2PEER |
|
||||
fakelibvirt.VIR_MIGRATE_LIVE)
|
||||
mock_migrateToURI3.assert_called_once_with(
|
||||
drvr._live_migration_uri('dest'),
|
||||
drvr._live_migration_uri(target_connection),
|
||||
params=params, flags=expected_flags)
|
||||
|
||||
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
|
||||
|
@ -10344,24 +10377,26 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
# Prepare data
|
||||
self.compute = manager.ComputeManager()
|
||||
instance_ref = self.test_instance
|
||||
|
||||
# Prepare mocks
|
||||
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
|
||||
target_connection = '127.0.0.2'
|
||||
|
||||
disk_paths = ['vda', 'vdb']
|
||||
params = {
|
||||
'migrate_uri': 'tcp://127.0.0.2',
|
||||
'migrate_disks': disk_paths,
|
||||
'bandwidth': libvirt_driver.MIN_MIGRATION_SPEED_BW,
|
||||
'destination_xml': '<xml/>',
|
||||
}
|
||||
|
||||
# Prepare mocks
|
||||
mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
|
||||
|
||||
# Start test
|
||||
bandwidth = libvirt_driver.MIN_MIGRATION_SPEED_BW
|
||||
migrate_data = objects.LibvirtLiveMigrateData(
|
||||
graphics_listen_addr_vnc='10.0.0.1',
|
||||
graphics_listen_addr_spice='10.0.0.2',
|
||||
serial_listen_addr='127.0.0.1',
|
||||
target_connect_addr=None,
|
||||
target_connect_addr=target_connection,
|
||||
bdms=[],
|
||||
block_migration=False)
|
||||
dom = fakelibvirt.virDomain
|
||||
|
@ -10369,11 +10404,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||
self.assertRaises(fakelibvirt.libvirtError,
|
||||
drvr._live_migration_operation,
|
||||
self.context, instance_ref, 'dest',
|
||||
self.context, instance_ref, target_connection,
|
||||
False, migrate_data, guest, disk_paths,
|
||||
bandwidth=bandwidth)
|
||||
mock_migrateToURI3.assert_called_once_with(
|
||||
drvr._live_migration_uri('dest'),
|
||||
drvr._live_migration_uri(target_connection),
|
||||
params=params, flags=0)
|
||||
|
||||
@mock.patch('shutil.rmtree')
|
||||
|
|
|
@ -631,29 +631,28 @@ class GuestTestCase(test.NoDBTestCase):
|
|||
self.guest.pause()
|
||||
self.domain.suspend.assert_called_once_with()
|
||||
|
||||
def test_migrate_v1(self):
|
||||
self.guest.migrate('an-uri', flags=1, bandwidth=2)
|
||||
self.domain.migrateToURI.assert_called_once_with(
|
||||
'an-uri', flags=1, bandwidth=2)
|
||||
|
||||
def test_migrate_v2(self):
|
||||
self.guest.migrate('an-uri', domain_xml='</xml>', flags=1, bandwidth=2)
|
||||
self.domain.migrateToURI2.assert_called_once_with(
|
||||
'an-uri', miguri=None, dxml='</xml>', flags=1, bandwidth=2)
|
||||
|
||||
def test_migrate_v3(self):
|
||||
self.guest.migrate('an-uri', domain_xml='</xml>',
|
||||
params={'p1': 'v1'}, flags=1, bandwidth=2)
|
||||
self.guest.migrate('an-uri', flags=1, migrate_uri='dest-uri',
|
||||
migrate_disks='disk1',
|
||||
destination_xml='</xml>',
|
||||
bandwidth=2)
|
||||
self.domain.migrateToURI3.assert_called_once_with(
|
||||
'an-uri', flags=1, params={'p1': 'v1', 'bandwidth': 2})
|
||||
'an-uri', flags=1, params={'migrate_uri': 'dest-uri',
|
||||
'migrate_disks': 'disk1',
|
||||
'destination_xml': '</xml>',
|
||||
'bandwidth': 2})
|
||||
|
||||
@testtools.skipIf(not six.PY2, 'libvirt python3 bindings accept unicode')
|
||||
def test_migrate_v3_unicode(self):
|
||||
self.guest.migrate('an-uri', domain_xml=u'</xml>',
|
||||
params={'p1': u'v1', 'p2': 'v2', 'p3': 3},
|
||||
flags=1, bandwidth=2)
|
||||
self.guest.migrate('an-uri', flags=1, migrate_uri='dest-uri',
|
||||
migrate_disks=[u"disk1", u"disk2"],
|
||||
destination_xml='</xml>',
|
||||
bandwidth=2)
|
||||
self.domain.migrateToURI3.assert_called_once_with(
|
||||
'an-uri', flags=1, params={'p1': 'v1', 'p2': 'v2', 'p3': 3,
|
||||
'an-uri', flags=1, params={'migrate_uri': 'dest-uri',
|
||||
'migrate_disks': ['disk1',
|
||||
'disk2'],
|
||||
'destination_xml': '</xml>',
|
||||
'bandwidth': 2})
|
||||
|
||||
def test_abort_job(self):
|
||||
|
|
|
@ -133,8 +133,9 @@ class _FakeDriverBackendTestCase(object):
|
|||
self.stub_out('nova.virt.libvirt.guest.Guest.detach_device_with_retry',
|
||||
fake_detach_device_with_retry)
|
||||
self.stub_out('nova.virt.libvirt.guest.Guest.migrate',
|
||||
lambda self, destination, migrate_uri, params, flags,
|
||||
domain_xml, bandwidth: None)
|
||||
lambda self, destination, migrate_uri=None,
|
||||
migrate_disks=None, destination_xml=None, flags=0,
|
||||
bandwidth=0: None)
|
||||
# We can't actually make a config drive v2 because ensure_tree has
|
||||
# been faked out
|
||||
self.stub_out('nova.virt.configdrive.ConfigDriveBuilder.make_drive',
|
||||
|
@ -631,7 +632,8 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
|
|||
migration=migration, bdms=[], block_migration=False,
|
||||
serial_listen_addr='127.0.0.1')
|
||||
self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
|
||||
lambda *a: None, lambda *a: None,
|
||||
lambda *a, **b: None,
|
||||
lambda *a, **b: None,
|
||||
migrate_data=migrate_data)
|
||||
|
||||
@catch_notimplementederror
|
||||
|
|
|
@ -1158,10 +1158,6 @@ class ComputeDriver(object):
|
|||
- setup_basic_filtering (for nova-basic, etc.)
|
||||
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
|
||||
|
||||
to_xml may have to be called since it defines PROJNET, PROJMASK.
|
||||
but libvirt migrates those value through migrateToURI(),
|
||||
so , no need to be called.
|
||||
|
||||
Don't use thread for this method since migration should
|
||||
not be started when setting-up filtering rules operations
|
||||
are not completed.
|
||||
|
|
|
@ -7066,10 +7066,7 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
# a way to avoid this in future.
|
||||
guest, migrate_data, self._get_volume_config,
|
||||
get_vif_config=get_vif_config)
|
||||
params = {
|
||||
'destination_xml': new_xml_str,
|
||||
'migrate_disks': device_names,
|
||||
}
|
||||
|
||||
# NOTE(pkoniszewski): Because of precheck which blocks
|
||||
# tunnelled block live migration with mapped volumes we
|
||||
# can safely remove migrate_disks when tunnelling is on.
|
||||
|
@ -7079,8 +7076,10 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
# supported in tunnelled block live migration. Also we
|
||||
# cannot fallback to migrateToURI2 in this case because of
|
||||
# bug #1398999
|
||||
#
|
||||
# TODO(kchamart) Move the following bit to guest.migrate()
|
||||
if (migration_flags & libvirt.VIR_MIGRATE_TUNNELLED != 0):
|
||||
params.pop('migrate_disks')
|
||||
device_names = []
|
||||
|
||||
# TODO(sahid): This should be in
|
||||
# post_live_migration_at_source but no way to retrieve
|
||||
|
@ -7096,8 +7095,8 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
guest.migrate(self._live_migration_uri(dest),
|
||||
migrate_uri=migrate_uri,
|
||||
flags=migration_flags,
|
||||
params=params,
|
||||
domain_xml=new_xml_str,
|
||||
migrate_disks=device_names,
|
||||
destination_xml=new_xml_str,
|
||||
bandwidth=bandwidth)
|
||||
LOG.debug("Migrate API has completed", instance=instance)
|
||||
|
||||
|
|
|
@ -601,14 +601,14 @@ class Guest(object):
|
|||
"""
|
||||
self._domain.suspend()
|
||||
|
||||
def migrate(self, destination, migrate_uri=None, params=None, flags=0,
|
||||
domain_xml=None, bandwidth=0):
|
||||
def migrate(self, destination, migrate_uri=None, migrate_disks=None,
|
||||
destination_xml=None, flags=0, bandwidth=0):
|
||||
"""Migrate guest object from its current host to the destination
|
||||
|
||||
:param destination: URI of host destination where guest will be migrate
|
||||
:param migrate_uri: URI for invoking the migration
|
||||
:param params: optional dict containing migration parameters such as
|
||||
"destination_xml" and "migrate_disks"
|
||||
:param migrate_disks: List of disks to be migrated
|
||||
:param destination_xml: The guest XML to be used on the target host
|
||||
:param flags: May be one of more of the following:
|
||||
VIR_MIGRATE_LIVE Do not pause the VM during migration
|
||||
VIR_MIGRATE_PEER2PEER Direct connection between source &
|
||||
|
@ -636,14 +636,20 @@ class Guest(object):
|
|||
VIR_MIGRATE_UNSAFE Force migration even if it is considered
|
||||
unsafe.
|
||||
VIR_MIGRATE_OFFLINE Migrate offline
|
||||
:param domain_xml: Changing guest configuration during migration
|
||||
:param bandwidth: The maximun bandwidth in MiB/s
|
||||
:param bandwidth: The maximum bandwidth in MiB/s
|
||||
"""
|
||||
if domain_xml is None:
|
||||
self._domain.migrateToURI(
|
||||
destination, flags=flags, bandwidth=bandwidth)
|
||||
else:
|
||||
if params:
|
||||
params = {}
|
||||
# In migrateToURI3 these parameters are extracted from the
|
||||
# `params` dict
|
||||
params['bandwidth'] = bandwidth
|
||||
|
||||
if destination_xml:
|
||||
params['destination_xml'] = destination_xml
|
||||
if migrate_disks:
|
||||
params['migrate_disks'] = migrate_disks
|
||||
if migrate_uri:
|
||||
params['migrate_uri'] = migrate_uri
|
||||
|
||||
# Due to a quirk in the libvirt python bindings,
|
||||
# VIR_MIGRATE_NON_SHARED_INC with an empty migrate_disks is
|
||||
# interpreted as "block migrate all writable disks" rather than
|
||||
|
@ -656,13 +662,7 @@ class Guest(object):
|
|||
not params.get('migrate_disks')):
|
||||
flags &= ~libvirt.VIR_MIGRATE_NON_SHARED_INC
|
||||
|
||||
# In migrateToURI3 these parameters are extracted from the
|
||||
# `params` dict
|
||||
if migrate_uri:
|
||||
params['migrate_uri'] = migrate_uri
|
||||
params['bandwidth'] = bandwidth
|
||||
|
||||
# In the python2 libvirt bindings, strings passed to
|
||||
# In the Python2 libvirt bindings, strings passed to
|
||||
# migrateToURI3 via params must not be unicode.
|
||||
if six.PY2:
|
||||
params = {key: str(value) if isinstance(value, unicode) # noqa
|
||||
|
@ -671,10 +671,6 @@ class Guest(object):
|
|||
|
||||
self._domain.migrateToURI3(
|
||||
destination, params=params, flags=flags)
|
||||
else:
|
||||
self._domain.migrateToURI2(
|
||||
destination, miguri=migrate_uri, dxml=domain_xml,
|
||||
flags=flags, bandwidth=bandwidth)
|
||||
|
||||
def abort_job(self):
|
||||
"""Requests to abort current background job"""
|
||||
|
|
Loading…
Reference in New Issue