Clear out NPIV metadata on reschedule

When a VM was rescheduled onto a different host, its NPIV metadata
persisted in the instance's system metadata, and the stale entries broke
the volume attach on the new host. This change clears the system metadata
on a full terminate of the NPIV connection. A full terminate is when the
VM is being destroyed. This makes the spawn on a new host fully 'fresh'.

Change-Id: I35348bbfffc8cb4693e6d0822e7a0c6ff42de032
Closes-Bug: 1510251
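For context, the NPIV driver keeps its fabric port mappings in the
instance's system metadata under per-fabric keys (the tests below use
npiv_adpt_wwpns_A), with each value holding physical-WWPN/virtual-WWPN
triples. A minimal sketch of the stale-metadata situation this change
targets, with made-up WWPN values:

    # Hypothetical illustration only: the key name matches the tests below;
    # the WWPN values are invented. Each value is a (phys, virt, virt) triple.
    system_metadata = {
        'npiv_adpt_wwpns_A': '21000024FF649104,c05076079cff0e56,c05076079cff0e57',
    }

    # On the new host the physical ports have different WWPNs, so the stored
    # physical WWPN (first element of each triple) matches nothing here.
    new_host_phys_wwpns = {'21000024FF649105'}
    stale = all(value.split(',')[0] not in new_host_phys_wwpns
                for value in system_metadata.values())
    print(stale)  # True -> the spawn must be treated as an initial WWPN setup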
parent 903ceabb32
commit b8199619f2
@@ -228,7 +228,7 @@ class TestNPIVAdapter(test_vol.TestVolumeAdapter):
     def test_is_initial_wwpn(self, mock_fabric_meta):
         # The deleting state is for roll back on spawn. Migrating is a
         # scenario where you can't be creating new wwpns
-        mock_fabric_meta.return_value = [('phys', 'virt1 virt2')]
+        mock_fabric_meta.return_value = [('21000024FF649104', 'virt1 virt2')]
         bad_states = [task_states.DELETING, task_states.MIGRATING]
         for state in bad_states:
             self.vol_drv.instance.task_state = state
@@ -238,8 +238,8 @@ class TestNPIVAdapter(test_vol.TestVolumeAdapter):
             # Task state should still be bad.
             self.assertFalse(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a'))
 
-        # Set a good task state, but should still be false due to WWPNs not yet
-        # being assigned.
+        # Set a good task state, but it fails due to the WWPNs already being
+        # hosted.
         self.vol_drv.instance.task_state = task_states.NETWORKING
         self.assertFalse(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a'))
 
@@ -248,6 +248,12 @@ class TestNPIVAdapter(test_vol.TestVolumeAdapter):
         mock_fabric_meta.return_value = []
         self.assertTrue(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a'))
 
+        # Validate that it has fabric metadata of a different host, and
+        # therefore is still a valid initial wwpn. It is initial because it
+        # simulates a reschedule on a new host.
+        mock_fabric_meta.return_value = [('BAD_WWPN', 'virt1 virt2')]
+        self.assertTrue(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a'))
+
         # And now no task state.
         self.vol_drv.instance.task_state = None
         self.assertTrue(self.vol_drv._is_initial_wwpn(npiv.FS_UNMAPPED, 'a'))
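Taken together, the assertions above walk the new decision order in
_is_initial_wwpn. A dependency-free restatement of that flow, with string
stand-ins for the npiv and task_states constants and a callable standing
in for the _hosts_wwpn helper:

    # Sketch only; constant names and the hosts_wwpn callable are stand-ins.
    def is_initial_wwpn(fc_state, task_state, port_maps, hosts_wwpn):
        if fc_state != 'FS_UNMAPPED':
            return False
        if task_state in ('deleting', 'migrating'):
            return False
        # Metadata from a prior host that this host's ports do not back is
        # stale, so the WWPN still counts as 'initial' (the BAD_WWPN case).
        if port_maps and hosts_wwpn(port_maps):
            return False
        return True

    print(is_initial_wwpn('FS_UNMAPPED', None,
                          [('BAD_WWPN', 'virt1 virt2')],
                          lambda pm: False))  # True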
@@ -467,6 +473,10 @@ class TestNPIVAdapter(test_vol.TestVolumeAdapter):
         self.vol_drv._set_fabric_meta('A', port_map)
         self.assertEqual(self.vol_drv.instance.system_metadata, expected)
 
+        # Clear out the metadata and make sure it sticks.
+        self.vol_drv._set_fabric_meta('A', [])
+        self.assertEqual(self.vol_drv.instance.system_metadata, {})
+
     def test_get_fabric_meta(self):
         system_meta = {'npiv_adpt_wwpns_A':
                        '1,aa,AA,2,bb,BB,3,cc,CC,4,dd,DD',
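The new assertions rely on _set_fabric_meta deleting every key under the
fabric's prefix when handed an empty port map. A small sketch of that
prefix-clearing behavior on a plain dict (contents illustrative):

    # Nothing to write for an empty port map, so only the deletion runs.
    system_metadata = {'npiv_adpt_wwpns_A': '1,aa,AA', 'unrelated_key': 'kept'}
    meta_key_root = 'npiv_adpt_wwpns_A'
    for key in tuple(system_metadata):
        if key.startswith(meta_key_root):
            del system_metadata[key]
    print(system_metadata)  # {'unrelated_key': 'kept'}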
@@ -210,16 +210,48 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         :return: True if the invocation appears to be for a spawn/volume
                  action. False otherwise.
         """
-        if (fc_state == FS_UNMAPPED and
-                len(self._get_fabric_meta(fabric)) == 0 and
-                self.instance.task_state not in [task_states.DELETING,
-                                                 task_states.MIGRATING]):
-            LOG.info(_LI("Instance %(inst)s has not yet defined a WWPN on "
-                         "fabric %(fabric)s. Appropriate WWPNs will be "
-                         "generated."),
-                     {'inst': self.instance.name, 'fabric': fabric})
-            return True
+        # Easy fabric state check. If it is a state other than unmapped, it
+        # can't be an initial WWPN.
+        if fc_state != FS_UNMAPPED:
+            return False
+
+        # Easy state check. This is important in case of a rollback failure.
+        # If it is deleting or migrating, it definitely is not an initial
+        # WWPN.
+        if self.instance.task_state in [task_states.DELETING,
+                                        task_states.MIGRATING]:
+            return False
+
+        # Next, we have to check the fabric metadata. Having metadata
+        # indicates that we have been on at least a single host. However,
+        # a VM could be rescheduled. In that case, the 'physical WWPNs' won't
+        # match. So if any of the physical WWPNs are not supported by this
+        # host, we know that it is 'initial' for this host.
+        port_maps = self._get_fabric_meta(fabric)
+        if len(port_maps) > 0 and self._hosts_wwpn(port_maps):
+            return False
+
+        # At this point, it should be correct.
+        LOG.info(_LI("Instance %(inst)s has not yet defined a WWPN on "
+                     "fabric %(fabric)s. Appropriate WWPNs will be "
+                     "generated."),
+                 {'inst': self.instance.name, 'fabric': fabric})
+        return True
+
+    def _hosts_wwpn(self, port_maps):
+        """Determines if this system hosts the port maps.
+
+        Hosting the port map will be determined if one of the physical WWPNs
+        is hosted by one of the VIOSes.
+
+        :param port_maps: The list of port mappings for the given fabric.
+        """
+        vios_wraps = self.stg_ftsk.feed
+        if port_maps:
+            for port_map in port_maps:
+                for vios_w in vios_wraps:
+                    for pfc_port in vios_w.pfc_ports:
+                        if pfc_port.wwpn == port_map[0]:
+                            return True
         return False
 
     def _is_migration_wwpn(self, fc_state):
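The nested loops in _hosts_wwpn reduce to "does any VIOS physical FC port
back one of the stored physical WWPNs". A standalone sketch with
namedtuple stand-ins for the pypowervm wrappers (vios_wraps and pfc_ports
are real attributes in the driver; the classes here are fabricated for
illustration):

    from collections import namedtuple

    PfcPort = namedtuple('PfcPort', ['wwpn'])
    Vios = namedtuple('Vios', ['pfc_ports'])

    def hosts_wwpn(vios_wraps, port_maps):
        # True if any physical WWPN in the metadata is backed by a physical
        # FC port on one of this host's Virtual I/O Servers.
        for port_map in port_maps or []:
            for vios_w in vios_wraps:
                if any(p.wwpn == port_map[0] for p in vios_w.pfc_ports):
                    return True
        return False

    vioses = [Vios(pfc_ports=[PfcPort(wwpn='21000024FF649104')])]
    print(hosts_wwpn(vioses, [('21000024FF649104', 'virt1 virt2')]))  # True
    print(hosts_wwpn(vioses, [('BAD_WWPN', 'virt1 virt2')]))          # False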
@@ -480,12 +512,20 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                  {'fabric': fabric, 'meta': ",".join(meta_elems),
                   'inst': self.instance.name})
 
-        fabric_id_iter = 1
-        meta_key = self._sys_meta_fabric_key(fabric)
-        key_len = len(meta_key)
-        num_keys = self._get_num_keys(port_map)
-
-        for key in range(num_keys):
+        # Clear out the original metadata. We may be reducing the number of
+        # keys (ex. a reschedule), so we need to delete what we had before
+        # we add anything new.
+        meta_key_root = self._sys_meta_fabric_key(fabric)
+        for key in tuple(self.instance.system_metadata.keys()):
+            if key.startswith(meta_key_root):
+                del self.instance.system_metadata[key]
+
+        # Build up the mapping for the new keys.
+        fabric_id_iter = 1
+        meta_key = meta_key_root
+        key_len = len(meta_key)
+
+        for key in range(self._get_num_keys(port_map)):
             start_elem = 12 * (fabric_id_iter - 1)
             meta_value = ",".join(meta_elems[start_elem:start_elem + 12])
             self.instance.system_metadata[meta_key] = meta_value
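The rebuild loop slices meta_elems into chunks of 12 (four phys,virt,virt
triples), which keeps each value within nova's system_metadata value size
limit. A standalone sketch of the chunking, under the hypothetical
assumption that extra keys are numbered off the root key; the real key
suffixing happens in unchanged lines not shown in this diff:

    # 24 elements -> 2 keys of 12 elements each; values are illustrative.
    meta_elems = ['%02x' % i for i in range(24)]
    meta_key_root = 'npiv_adpt_wwpns_A'

    system_metadata = {}
    for i, start in enumerate(range(0, len(meta_elems), 12)):
        # Hypothetical suffix scheme, for illustration only.
        key = meta_key_root if i == 0 else '%s_%d' % (meta_key_root, i)
        system_metadata[key] = ','.join(meta_elems[start:start + 12])
    print(sorted(system_metadata))
    # ['npiv_adpt_wwpns_A', 'npiv_adpt_wwpns_A_1']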