xenapi: Use XAPI pool instead of aggregate pool for shared SR migration

xenapi is going to support pool-based multi-host OpenStack
environments. This patch removes the dependency on the old
aggregate-based pools and adds support for XAPI pools.
Also includes unit test changes.
Updated the configuring-migrations document:
https://docs.openstack.org/nova/latest/admin/configuring-migrations.html#configuring-migrations-xenserver-shared-storage
Other related documents will be updated in a follow-up patch.

Implements: blueprint live-migration-in-xapi-pool
Change-Id: I2c492c46e85c1df96aa7fdc12cdee0b1c7ba775e
naichuans 2018-03-19 07:50:36 +00:00
parent 324899c621
commit 9b7affcac1
6 changed files with 211 additions and 138 deletions


@@ -334,7 +334,7 @@ Shared storage
- **Compatible XenServer hypervisors**.
For more information, see the `Requirements for Creating Resource Pools
<http://docs.vmd.citrix.com/XenServer/6.0.0/1.0/en_gb/reference.html#pooling_homogeneity_requirements>`_
<https://docs.citrix.com/en-us/xenserver/7-1.html#pooling_homogeneity_requirements>`_
section of the XenServer Administrator's Guide.
- **Shared storage**.
@@ -343,14 +343,12 @@ Shared storage
.. note::
For the supported NFS versions, see the `NFS VHD
<http://docs.vmd.citrix.com/XenServer/6.0.0/1.0/en_gb/reference.html#id1002701>`_
For the supported NFS versions, see the `NFS and SMB
<https://docs.citrix.com/en-us/xenserver/7-1.html#id1002701>`_
section of the XenServer Administrator's Guide.
To use shared storage live migration with XenServer hypervisors, the hosts must
be joined to a XenServer pool. To create that pool, a host aggregate must be
created with specific metadata. This metadata is used by the XAPI plug-ins to
establish the pool.
be joined to a XenServer pool.
.. rubric:: Using shared storage live migrations with XenServer Hypervisors
@@ -366,34 +364,12 @@ establish the pool.
sr_matching_filter=default-sr:true
#. Create a host aggregate. This command creates the aggregate, and then
displays a table that contains the ID of the new aggregate
#. To add a host to a pool, you need to know the pool master's IP address,
user name, and password. Run the command below on the XenServer host:
.. code-block:: console
$ openstack aggregate create --zone AVAILABILITY_ZONE POOL_NAME
Add metadata to the aggregate, to mark it as a hypervisor pool
.. code-block:: console
$ openstack aggregate set --property hypervisor_pool=true AGGREGATE_ID
$ openstack aggregate set --property operational_state=created AGGREGATE_ID
Make the first compute node part of that aggregate
.. code-block:: console
$ openstack aggregate add host AGGREGATE_ID MASTER_COMPUTE_NAME
The host is now part of a XenServer pool.
#. Add hosts to the pool
.. code-block:: console
$ openstack aggregate add host AGGREGATE_ID COMPUTE_HOST_NAME
$ xe pool-join master-address=MASTER_IP master-username=root master-password=MASTER_PASSWORD
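Equivalently, the join can be scripted through XAPI's Python bindings.
A minimal sketch, run on the joining host (credentials and the master
address are placeholders):

.. code-block:: python

   import XenAPI

   # Connect to the local XAPI of the host that should join the pool.
   session = XenAPI.xapi_local()
   session.xenapi.login_with_password('root', 'LOCAL_PASSWORD')
   try:
       # pool.join enrolls this host as a slave of the given pool
       # master, mirroring the xe pool-join command above.
       session.xenapi.pool.join('MASTER_IP', 'root', 'MASTER_PASSWORD')
   finally:
       session.xenapi.session.logout()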
.. note::


@@ -1504,19 +1504,26 @@ class XenstoreCallsTestCase(VMOpsTestBase):
class LiveMigrateTestCase(VMOpsTestBase):
@mock.patch.object(vmops.VMOps, '_get_network_ref')
@mock.patch.object(vmops.VMOps, '_ensure_host_in_aggregate')
@mock.patch.object(vm_utils, 'host_in_this_pool')
def _test_check_can_live_migrate_destination_shared_storage(
self,
shared,
mock_ensure_host,
mock_is_same_pool,
mock_net_ref):
fake_instance = {"name": "fake_instance", "host": "fake_host"}
fake_instance = objects.Instance(host="fake_host")
block_migration = None
disk_over_commit = False
ctxt = 'ctxt'
mock_net_ref.return_value = 'fake_net_ref'
if shared:
mock_is_same_pool.return_value = True
else:
mock_is_same_pool.return_value = False
with mock.patch.object(self._session, 'get_rec') as fake_sr_rec:
with mock.patch.object(self._session, 'get_rec') as fake_sr_rec, \
mock.patch.object(self._session, 'host.get_by_name_label') \
as fake_get_ref:
fake_get_ref.return_value = ['fake_host_ref']
fake_sr_rec.return_value = {'shared': shared}
migrate_data_ret = self.vmops.check_can_live_migrate_destination(
ctxt, fake_instance, block_migration, disk_over_commit)
@@ -1535,50 +1542,83 @@ class LiveMigrateTestCase(VMOpsTestBase):
self._test_check_can_live_migrate_destination_shared_storage(False)
@mock.patch.object(vmops.VMOps, '_get_network_ref')
@mock.patch.object(vmops.VMOps, '_ensure_host_in_aggregate',
side_effect=exception.MigrationPreCheckError(reason=""))
def test_check_can_live_migrate_destination_block_migration(
self,
mock_ensure_host,
mock_net_ref):
fake_instance = {"name": "fake_instance", "host": "fake_host"}
self,
mock_net_ref):
fake_instance = objects.Instance(host="fake_host")
block_migration = None
disk_over_commit = False
ctxt = 'ctxt'
mock_net_ref.return_value = 'fake_net_ref'
migrate_data_ret = self.vmops.check_can_live_migrate_destination(
ctxt, fake_instance, block_migration, disk_over_commit)
with mock.patch.object(self._session, 'host.get_by_name_label') \
as fake_get_ref:
fake_get_ref.return_value = ['fake_host_ref']
migrate_data_ret = self.vmops.check_can_live_migrate_destination(
ctxt, fake_instance, block_migration, disk_over_commit)
self.assertTrue(migrate_data_ret.block_migration)
self.assertEqual(vm_utils.safe_find_sr(self._session),
migrate_data_ret.destination_sr_ref)
self.assertEqual({'value': 'fake_migrate_data'},
migrate_data_ret.migrate_send_data)
self.assertEqual({'': 'fake_net_ref'},
migrate_data_ret.vif_uuid_map)
self.assertTrue(migrate_data_ret.block_migration)
self.assertEqual(vm_utils.safe_find_sr(self._session),
migrate_data_ret.destination_sr_ref)
self.assertEqual({'value': 'fake_migrate_data'},
migrate_data_ret.migrate_send_data)
self.assertEqual({'': 'fake_net_ref'},
migrate_data_ret.vif_uuid_map)
@mock.patch.object(vmops.objects.AggregateList, 'get_by_host')
def test_get_host_uuid_from_aggregate_no_aggr(self, mock_get_by_host):
mock_get_by_host.return_value = objects.AggregateList(objects=[])
context = "ctx"
hostname = "other_host"
self.assertRaises(exception.MigrationPreCheckError,
self.vmops._get_host_uuid_from_aggregate,
context, hostname)
@mock.patch.object(vmops.VMOps, '_migrate_receive')
@mock.patch.object(vm_utils, 'safe_find_sr')
@mock.patch.object(vmops.VMOps, '_get_network_ref')
def test_no_hosts_found_with_the_name_label(self,
mock_get_network_ref,
mock_safe_find_sr,
mock_migrate_receive):
# Can't find the dest host in the current pool, so do block live migrate
fake_instance = objects.Instance(host="fake_host")
mock_migrate_receive.return_value = {'fake_key': 'fake_data'}
mock_safe_find_sr.return_value = 'fake_destination_sr_ref'
mock_get_network_ref.return_value = 'fake_net_ref'
block_migration = None
disk_over_commit = False
ctxt = 'ctxt'
with mock.patch.object(self._session, 'host.get_by_name_label') \
as fake_get_ref:
fake_get_ref.return_value = []
migrate_data_ret = self.vmops.check_can_live_migrate_destination(
ctxt, fake_instance, block_migration, disk_over_commit)
self.assertTrue(migrate_data_ret.block_migration)
self.assertEqual(migrate_data_ret.vif_uuid_map,
{'': 'fake_net_ref'})
@mock.patch.object(vmops.objects.AggregateList, 'get_by_host')
def test_get_host_uuid_from_aggregate_bad_aggr(self, mock_get_by_host):
context = "ctx"
hostname = "other_host"
fake_aggregate_obj = objects.Aggregate(hosts=['fake'],
metadata={'this': 'that'})
fake_aggr_list = objects.AggregateList(objects=[fake_aggregate_obj])
mock_get_by_host.return_value = fake_aggr_list
def test_multiple_hosts_found_with_same_name(self):
# More than one host found with the dest host name, raise exception
fake_instance = objects.Instance(host="fake_host")
block_migration = None
disk_over_commit = False
ctxt = 'ctxt'
with mock.patch.object(self._session, 'host.get_by_name_label') \
as fake_get_ref:
fake_get_ref.return_value = ['fake_host_ref1', 'fake_host_ref2']
self.assertRaises(exception.MigrationPreCheckError,
self.vmops.check_can_live_migrate_destination,
ctxt, fake_instance, block_migration,
disk_over_commit)
self.assertRaises(exception.MigrationPreCheckError,
self.vmops._get_host_uuid_from_aggregate,
context, hostname)
@mock.patch.object(vm_utils, 'host_in_this_pool')
def test_request_pool_migrate_to_outer_pool_host(self, mock_is_same_pool):
# Caller asks for no block live migrate while the dest host is not in
# the same pool as the src host, raise exception
fake_instance = objects.Instance(host="fake_host")
block_migration = False
disk_over_commit = False
ctxt = 'ctxt'
mock_is_same_pool.return_value = False
with mock.patch.object(self._session, 'host.get_by_name_label') \
as fake_get_ref:
fake_get_ref.return_value = ['fake_host_ref1']
self.assertRaises(exception.MigrationPreCheckError,
self.vmops.check_can_live_migrate_destination,
ctxt, fake_instance, block_migration,
disk_over_commit)
@mock.patch.object(vmops.VMOps, 'create_interim_networks')
@mock.patch.object(vmops.VMOps, 'connect_block_device_volumes')


@@ -3498,15 +3498,21 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
self.assertTrue(fake_strip_base_mirror_from_vdis.called)
mock_post_action.assert_called_once_with(fake_instance)
def test_check_can_live_migrate_destination_with_block_migration(self):
@mock.patch.object(vm_utils, 'host_in_this_pool')
def test_check_can_live_migrate_destination_with_block_migration(
self,
mock_same_pool):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_instance = objects.Instance(host="fake_host")
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
with mock.patch.object(self.conn._vmops._session, "host_ref") as \
fake_host_ref, mock.patch.object(
self.conn._vmops, '_get_network_ref') as \
fake_get_network_ref:
fake_get_network_ref, mock.patch.object(
self.conn._vmops, '_get_host_opaque_ref'):
fake_host_ref.return_value = 'fake_host_ref'
fake_get_network_ref.return_value = 'fake_network_ref'
expected = {'block_migration': True,
@@ -3518,7 +3524,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
}
result = self.conn.check_can_live_migrate_destination(
self.context,
{'host': 'host'},
fake_instance,
{}, {},
True, False)
result.is_volume_backed = False
@@ -3528,6 +3534,8 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_instance = objects.Instance(host="fake_host")
for pif_ref in xenapi_fake.get_all('PIF'):
pif_rec = xenapi_fake.get_record('PIF', pif_ref)
pif_rec['IP'] = ''
@@ -3537,17 +3545,20 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
self.context, fake_instance,
{}, {},
True, False)
def test_check_can_live_migrate_destination_block_migration_fails(self):
fake_instance = objects.Instance(host="fake_host")
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
self.context, fake_instance,
{}, {},
True, False)
@@ -3662,42 +3673,69 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
{'host': 'host'},
dest_check_data)
@mock.patch.object(objects.AggregateList, 'get_by_host')
def test_check_can_live_migrate_works(self, mock_get_by_host):
@mock.patch.object(vm_utils, 'host_in_this_pool')
def test_check_can_live_migrate_works(self,
mock_host_in_this_pool):
# The dest host is in the same pool as the src host, so do a non-block
# live migrate
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(self.conn._vmops._session, "host_ref") as \
fake_host_ref, \
mock.patch.object(self.conn._vmops,
'_get_network_ref') as fake_get_network_ref:
fake_host_ref.return_value = 'fake_host_ref'
fake_get_network_ref.return_value = 'fake_network_ref'
metadata = {'host': 'test_host_uuid'}
aggregate = objects.Aggregate(metadata=metadata)
aggregate_list = objects.AggregateList(objects=[aggregate])
mock_get_by_host.return_value = aggregate_list
instance = objects.Instance(host='host')
self.conn.check_can_live_migrate_destination(
self.context, instance, None, None)
mock_get_by_host.assert_called_once_with(
self.context, CONF.host, key='hypervisor_pool')
mock_host_in_this_pool.side_effect = [True, True]
with mock.patch.object(self.conn._vmops, "_get_host_opaque_ref") as \
fake_get_host_opaque_ref, \
mock.patch.object(self.conn._vmops, '_get_network_ref') as \
fake_get_network_ref, \
mock.patch.object(self.conn._vmops._session, 'get_rec') as \
fake_get_rec:
fake_host_ref = 'fake_host_ref'
fake_get_host_opaque_ref.return_value = fake_host_ref
fake_network_ref = 'fake_network_ref'
fake_get_network_ref.return_value = fake_network_ref
fake_get_rec.return_value = {'shared': True}
fake_host_name = 'fake_host'
instance = objects.Instance(host=fake_host_name)
@mock.patch.object(objects.AggregateList, 'get_by_host')
def test_check_can_live_migrate_fails(self, mock_get_by_host):
# Set block_migration to None to enable pool check, then do pooled
# live migrate
dest_check_data = self.conn.check_can_live_migrate_destination(
self.context, instance, 'fake_src_compute_info',
'fake_dst_compute_info', None, None)
self.assertFalse(dest_check_data.block_migration)
self.assertEqual(dest_check_data.vif_uuid_map,
{'': fake_network_ref})
fake_get_host_opaque_ref.assert_called_once_with(fake_host_name)
mock_host_in_this_pool.assert_called_once_with(
self.conn._vmops._session, fake_host_ref)
fake_get_network_ref.assert_called_once()
@mock.patch.object(vm_utils, 'host_in_this_pool')
def test_check_can_live_migrate_fails(self, mock_host_in_this_pool):
# Caller asks for no block live migrate while the dest host is not in
# the same pool as the src host, raise exception
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
mock_host_in_this_pool.return_value = False
with mock.patch.object(self.conn._vmops, "_get_host_opaque_ref") as \
fake_get_host_opaque_ref, \
mock.patch.object(self.conn._vmops, '_get_network_ref') as \
fake_get_network_ref:
fake_host_ref = 'fake_host_ref'
fake_get_host_opaque_ref.return_value = fake_host_ref
fake_network_ref = 'fake_network_ref'
fake_get_network_ref.return_value = fake_network_ref
fake_host_name = 'fake_host'
instance = objects.Instance(host=fake_host_name)
metadata = {'dest_other': 'test_host_uuid'}
aggregate = objects.Aggregate(metadata=metadata)
aggregate_list = objects.AggregateList(objects=[aggregate])
mock_get_by_host.return_value = aggregate_list
# Set block_migration to False to do pooled live migrate
self.assertRaises(exception.MigrationPreCheckError,
self.conn.check_can_live_migrate_destination,
self.context, instance, 'fake_src_compute_info',
'fake_dst_compute_info', False, None)
instance = objects.Instance(host='host')
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, instance, None, None)
mock_get_by_host.assert_called_once_with(
self.context, CONF.host, key='hypervisor_pool')
fake_get_host_opaque_ref.assert_called_once_with(fake_host_name)
mock_host_in_this_pool.assert_called_once_with(
self.conn._vmops._session, fake_host_ref)
fake_get_network_ref.assert_not_called()
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
@@ -3713,7 +3751,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
def fake_get_host_opaque_ref(destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
@@ -3881,7 +3919,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(conn)
def fake_get_host_opaque_ref(context, destination):
def fake_get_host_opaque_ref(destination):
return "fake_ref"
self.stubs.Set(conn._vmops, "_get_host_opaque_ref",


@@ -302,8 +302,8 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
if vifs:
# Still has vifs attached to this network
for remain_vif in vifs:
# if remain vifs are on the local server, give up all the
# operations. If remain vifs are on the remote hosts, keep
# if the remaining vifs are on the local server, give up all the
# operations. If the remaining vifs are on remote hosts, keep
# the network and delete the bridge
if self._get_host_by_vif(remain_vif) == self._session.host_ref:
return
@@ -521,9 +521,10 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
def create_vif_interim_network(self, vif):
net_name = self.get_vif_interim_net_name(vif['id'])
# In a pooled environment, make the network to be shared to ensure it
# can also be used in the target host while live migration. It will
# make no change if the environment is not pooled.
# In a pooled environment, make the network shared in order to ensure
# it can also be used on the target host during live migration.
# The "assume_network_is_shared" flag has no effect in environments
# that are not pooled.
network_rec = {'name_label': net_name,
'name_description': "interim network for vif[%s]"
% vif['id'],
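The hunk is truncated here; a minimal sketch of how the record might
carry that flag, assuming (per the comment above) that
"assume_network_is_shared" is set in the network's other_config. This
is an illustration, not the verbatim patch:

    # Hypothetical completion of network_rec with the shared flag set.
    network_rec = {'name_label': net_name,
                   'name_description': "interim network for vif[%s]"
                                       % vif['id'],
                   'other_config': {'assume_network_is_shared': 'true'}}
    network_ref = self._session.network.create(network_rec)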


@ -57,7 +57,6 @@ from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
@@ -2208,25 +2207,18 @@ class VMOps(object):
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
aggregate_list = objects.AggregateList.get_by_host(
context, CONF.host, key=pool_states.POOL_FLAG)
reason = _('Destination host:%s must be in the same '
'aggregate as the source server') % hostname
if len(aggregate_list) == 0:
def _get_host_opaque_ref(self, hostname):
host_ref_set = self._session.host.get_by_name_label(hostname)
# If xenapi can't get the host ref by the name label, the
# destination host is not in the same pool as the source host.
if host_ref_set is None or host_ref_set == []:
return None
# There should be only one host with the name; otherwise it would
# be ambiguous which host is required
if len(host_ref_set) > 1:
reason = _('Multiple hosts have the same hostname: %s.') % hostname
raise exception.MigrationPreCheckError(reason=reason)
if hostname not in aggregate_list[0].metadata:
raise exception.MigrationPreCheckError(reason=reason)
return aggregate_list[0].metadata[hostname]
def _ensure_host_in_aggregate(self, context, hostname):
self._get_host_uuid_from_aggregate(context, hostname)
def _get_host_opaque_ref(self, context, hostname):
host_uuid = self._get_host_uuid_from_aggregate(context, hostname)
return self._session.call_xenapi("host.get_by_uuid", host_uuid)
return host_ref_set[0]
def _get_host_ref_no_aggr(self):
# Pull the current host ref from Dom0's resident_on field. This
@@ -2312,13 +2304,20 @@
"""
dest_check_data = objects.XenapiLiveMigrateData()
src = instance_ref.host
def _host_in_this_pool(host_name_label):
host_ref = self._get_host_opaque_ref(host_name_label)
if not host_ref:
return False
return vm_utils.host_in_this_pool(self._session, host_ref)
# Check whether the migration happens within a xapi pool
pooled_migrate = _host_in_this_pool(src)
# Notes(eliqiao): if block_migration is None, we calculate it
# by checking if src and dest node are in same aggregate
# by checking whether the src and dest nodes are in the same xapi pool
if block_migration is None:
src = instance_ref['host']
try:
self._ensure_host_in_aggregate(ctxt, src)
except exception.MigrationPreCheckError:
if not pooled_migrate:
block_migration = True
else:
sr_ref = vm_utils.safe_find_sr(self._session)
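vm_utils.host_in_this_pool itself sits outside this hunk; a minimal
sketch of the kind of membership check it performs (an assumption, not
the verbatim implementation):

    def host_in_this_pool(session, host_ref):
        # A host belongs to the connected xapi pool iff its ref shows
        # up among the host records visible through this session.
        return host_ref in session.host.get_all_records()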
@@ -2332,11 +2331,13 @@
self._session)
else:
dest_check_data.block_migration = False
src = instance_ref['host']
# TODO(eliqiao): There is still one case where block_migration is
# passed by the admin user, so we need this check until the
# block_migration flag is removed from the API
self._ensure_host_in_aggregate(ctxt, src)
if not pooled_migrate:
reason = _("Destination host is not in the same shared storage "
"pool as source host %s.") % src
raise exception.MigrationPreCheckError(reason=reason)
# TODO(johngarbutt) we currently assume
# instance is on a SR shared with other destination
# block migration work will be able to resolve this
@@ -2580,8 +2581,14 @@ class VMOps(object):
for sr_ref in iscsi_srs:
volume_utils.forget_sr(self._session, sr_ref)
else:
host_ref = self._get_host_opaque_ref(context,
destination_hostname)
host_ref = self._get_host_opaque_ref(destination_hostname)
if not host_ref:
LOG.exception(_("Destination host %s was not found in the"
" same shared storage pool as source "
"host."), destination_hostname)
raise exception.MigrationError(
reason=_('No host with name %s found')
% destination_hostname)
self._session.call_xenapi("VM.pool_migrate", vm_ref,
host_ref, {"live": "true"})
post_method(context, instance, destination_hostname,
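The hunk ends mid-call above; for reference, the XAPI operation at the
heart of the pooled path can be sketched standalone (refs are
hypothetical, session setup omitted):

    # VM.pool_migrate moves a running VM to another host in the same
    # xapi pool; {"live": "true"} requests live (no-downtime) migration.
    session.xenapi.VM.pool_migrate(vm_ref, host_ref, {"live": "true"})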


@@ -0,0 +1,11 @@
---
features:
- |
XenServer pools with shared storage used to be handled through Nova
host aggregates; however, this support was not adequately tested and
became broken in a previous release.
Live migration with shared storage is now possible using native
XAPI pools, independently of Nova's aggregates.
The XenAPI/Nova integration expects one nova-compute service per
XenServer host, so a nova-compute must be run for each slave
XenServer host in the pool as well as for the master.