Remove WWPN pre-mapping generation

The pypowervm API plans to remove the method that pre-builds the
WWPNs for a given VM.  Instead, the WWPNs are meant to be determined
by the mapping itself when the mgmt partition is mapped.

This change set updates the code so that nova-powervm delegates the
WWPN generation to the mgmt partition's mapping, using the
derive_base_npiv_map method instead.

This change set also fixes certain data currency issues within the
fabrics by moving the resp_wwpns generation into the overall
per-fabric for loop.

Closes-Bug: 1478587
Change-Id: I8917779f6d9225e6460c0fa64e855c96b55fb655
Drew Thorstensen
2015-07-27 09:06:08 -04:00
parent 1c411f4b5f
commit 52e38ee2b6
2 changed files with 76 additions and 97 deletions
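
The gist of the new per-fabric flow is sketched below.  This is only an
illustrative sketch based on the calls visible in the diff that follows;
the fabric_ports and ports_per_fabric arguments stand in for the driver's
_fabric_ports() and _ports_per_fabric() helpers and are not real pypowervm
parameters.

    # Illustrative sketch of the new per-fabric flow (see the diff below).
    # fabric_ports / ports_per_fabric stand in for the driver's
    # _fabric_ports() and _ports_per_fabric() helpers.
    from pypowervm.tasks import vfc_mapper as pvm_vfcm

    def map_fabric_via_mgmt(adapter, host_uuid, mgmt_uuid, vios_wraps,
                            fabric_ports, ports_per_fabric):
        # Let pypowervm derive the base virtual-to-physical port map rather
        # than pre-building WWPN pairs with build_wwpn_pair.
        port_maps = pvm_vfcm.derive_base_npiv_map(
            vios_wraps, fabric_ports, ports_per_fabric)
        # Mapping against the mgmt partition logs the WWPNs into the fabric
        # and returns the populated (physical WWPN, 'vWWPN1 vWWPN2') pairs.
        return pvm_vfcm.add_npiv_port_mappings(
            adapter, host_uuid, mgmt_uuid, port_maps)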


@@ -193,26 +193,27 @@ class TestNPIVAdapter(test.TestCase):
         # Verify
         self.assertEqual(0, self.adpt.update.call_count)
-    @mock.patch('pypowervm.tasks.vfc_mapper.build_wwpn_pair')
     @mock.patch('nova_powervm.virt.powervm.mgmt.get_mgmt_partition')
     @mock.patch('pypowervm.tasks.vfc_mapper.add_npiv_port_mappings')
-    def test_wwpns(self, mock_add_port, mock_mgmt_part, mock_build_wwpns):
+    def test_wwpns(self, mock_add_port, mock_mgmt_part):
         """Tests that new WWPNs get generated properly."""
         # Mock Data
         inst = mock.Mock()
         meta_key = self.vol_drv._sys_meta_fabric_key('A')
         inst.system_metadata = {meta_key: None}
-        mock_build_wwpns.return_value = ['aa', 'bb']
+        mock_add_port.return_value = [('21000024FF649104', 'AA BB'),
+                                      ('21000024FF649105', 'CC DD')]
         mock_vios = mock.MagicMock()
         mock_vios.uuid = '3443DB77-AED1-47ED-9AA5-3DB9C6CF7089'
         mock_mgmt_part.return_value = mock_vios
         self.adpt.read.return_value = self.vios_feed_resp
-        # invoke
+        # Invoke
         wwpns = self.vol_drv.wwpns(self.adpt, 'host_uuid', inst)
         # Check
-        self.assertListEqual(['aa', 'bb'], wwpns)
-        self.assertEqual('21000024FF649104,AA,BB',
+        self.assertListEqual(['AA', 'BB', 'CC', 'DD'], wwpns)
+        self.assertEqual('21000024FF649104,AA,BB,21000024FF649105,CC,DD',
                          inst.system_metadata[meta_key])
         xags = [pvm_vios.VIOS.xags.FC_MAPPING, pvm_vios.VIOS.xags.STORAGE]
         self.adpt.read.assert_called_once_with('VirtualIOServer', xag=xags)
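
The assertions above also pin down how the fabric metadata is flattened:
each (physical port WWPN, 'vWWPN1 vWWPN2') pair from the port mapping
becomes a comma-joined segment on the instance's system metadata.  The
helpers below are a hypothetical re-implementation of that round trip, for
illustration only; the driver's real _set_fabric_meta/_get_fabric_meta are
not part of this diff.

    # Hypothetical helpers mirroring the behaviour the assertions above imply.
    def flatten_port_maps(port_maps):
        # [('21000024FF649104', 'AA BB'), ('21000024FF649105', 'CC DD')]
        #   -> '21000024FF649104,AA,BB,21000024FF649105,CC,DD'
        return ','.join(','.join([phys] + virt.split())
                        for phys, virt in port_maps)

    def virtual_wwpns(meta_value):
        # Every third element is a physical port WWPN; the remainder are the
        # virtual WWPNs handed back to the volume connector.
        elems = meta_value.split(',')
        return [e for i, e in enumerate(elems) if i % 3 != 0]

    assert virtual_wwpns('phys1,a,b,phys2,c,d') == ['a', 'b', 'c', 'd']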
@@ -226,17 +227,18 @@ class TestNPIVAdapter(test.TestCase):
         self.assertEqual('mgmt_mapped',
                          self.vol_drv._get_fabric_state(inst, 'A'))
-    def test_wwpns_on_sys_meta(self):
+    @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.'
+                '_get_fabric_state')
+    def test_wwpns_on_sys_meta(self, mock_fabric_state):
         """Tests that previously stored WWPNs are returned."""
         # Mock
         inst = mock.MagicMock()
         inst.system_metadata = {self.vol_drv._sys_meta_fabric_key('A'):
                                 'phys1,a,b,phys2,c,d'}
+        mock_fabric_state.return_value = npiv.FS_INST_MAPPED
         # Invoke
         wwpns = self.vol_drv.wwpns(mock.ANY, 'host_uuid', inst)
         # Verify
         self.assertListEqual(['a', 'b', 'c', 'd'], wwpns)
-        fc_state = self.vol_drv._get_fabric_state(inst, 'A')
-        self.assertEqual(npiv.FS_UNMAPPED, fc_state)
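
Before reading the driver change below, the new per-fabric control flow can
be summarized as follows.  This is a condensed, standalone paraphrase of the
wwpns() loop in the diff that follows, not additional behaviour; the
callables stand in for the driver's _get_fabric_state, _get_fabric_meta,
_set_fabric_meta and _set_fabric_state helpers and for the derive-and-map
step, and the state strings are placeholders.

    # Condensed paraphrase of the new NPIVVolumeAdapter.wwpns() loop.
    def collect_wwpns(fabrics, get_state, get_meta, set_meta, set_state,
                      derive_and_map):
        resp_wwpns = []
        for fabric in fabrics:
            if get_state(fabric) == 'unmapped':
                # New fabric: derive the base map, log it in against the
                # mgmt partition, then remember the result on the instance.
                port_maps = derive_and_map(fabric)
                set_meta(fabric, port_maps)
                set_state(fabric, 'mgmt_mapped')
            else:
                # Fabric handled on an earlier call: reuse the stored map.
                port_maps = get_meta(fabric)
            if port_maps is not None:
                for _phys, virt in port_maps:
                    resp_wwpns.extend(virt.split())
        return resp_wwpns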


@@ -149,95 +149,81 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         :param instance: The nova instance.
         :returns: The list of WWPNs that need to be included in the zone set.
         """
-        # Check to see if all of the fabrics have had the metadata set.
-        # We check against all of the fabrics in case a fabric was later
-        # added to the system.
-        wwpn_keys = []
-        bad_fabric = False
-        for fabric in self._fabric_names():
-            metas = self._get_fabric_meta(instance, fabric)
-            if metas is None:
-                bad_fabric = True
-                break
-            # Must have been set. Extend the wwpn keys with each element.
-            for meta_val in metas:
-                wwpn_keys.extend(meta_val[1].split(' '))
-        # A 'bad_fabric' is when we had one fabric that was missing. So
-        # we just kick off a rebuild.
-        if not bad_fabric:
-            return wwpn_keys
-        # At this point, the WWPNs need to be logged into the fabric. But
-        # before that can be done, the mapping needs to be derived between
-        # the logical ports and the physical ports.
-        #
-        # This should be done on a per-fabric basis.
-        vios_resp = adapter.read(pvm_vios.VIOS.schema_type,
-                                 xag=[pvm_vios.VIOS.xags.FC_MAPPING,
-                                      pvm_vios.VIOS.xags.STORAGE])
-        vios_wraps = pvm_vios.VIOS.wrap(vios_resp)
+        vios_wraps, mgmt_uuid = None, None
         resp_wwpns = []
+        # If this is a new mapping altogether, the WWPNs need to be logged
+        # into the fabric so that Cinder can make use of them. This is a bit
+        # of a catch-22 because the LPAR doesn't exist yet. So a mapping will
+        # be created against the mgmt partition and then upon VM creation, the
+        # mapping will be moved over to the VM.
+        #
+        # If a mapping already exists, we can instead just pull the data off
+        # of the system metadata from the nova instance.
         for fabric in self._fabric_names():
-            v_port_wwpns = self._build_wwpns_for_fabric(adapter, host_uuid)
-            resp_wwpns.extend(v_port_wwpns)
+            fc_state = self._get_fabric_state(instance, fabric)
+            LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
                         "instance %(inst)s") %
+                     {'st': fc_state, 'inst': instance.name})
-            # Derive the virtual to physical port mapping
-            port_map = pvm_vfcm.derive_npiv_map(vios_wraps,
-                                                self._fabric_ports(fabric),
-                                                v_port_wwpns)
+            if (fc_state == FS_UNMAPPED and
+                    instance.task_state not in [task_states.DELETING]):
-            self._set_fabric_meta(instance, fabric, port_map)
+                # At this point we've determined that we need to do a mapping.
+                # So we go and obtain the mgmt uuid and the VIOS wrappers.
+                # We only do this for the first loop through so as to ensure
+                # that we do not keep invoking these expensive calls
+                # unnecessarily.
+                if mgmt_uuid is None:
+                    mgmt_uuid = mgmt.get_mgmt_partition(adapter).uuid
-            # Every loop through, we reverse the vios wrappers. This is done
-            # so that if Fabric A only has 1 port, it goes on the first VIOS.
-            # Then Fabric B would put its port on a different VIOS. As a form
-            # of multi pathing (so that your paths were not restricted to a
-            # single VIOS).
-            vios_wraps.reverse()
+                    # The VIOS wrappers are also not set at this point. Seed
+                    # them as well. Will get reused on subsequent loops.
+                    vios_resp = adapter.read(
+                        pvm_vios.VIOS.schema_type,
+                        xag=[pvm_vios.VIOS.xags.FC_MAPPING,
+                             pvm_vios.VIOS.xags.STORAGE])
+                    vios_wraps = pvm_vios.VIOS.wrap(vios_resp)
-            # Check if the fabrics are unmapped then we need to map it
-            # temporarily with the management partition.
-            self._add_npiv_mgmt_mappings(adapter, fabric, host_uuid, instance,
-                                         port_map)
+                # Derive the virtual to physical port mapping
+                port_maps = pvm_vfcm.derive_base_npiv_map(
+                    vios_wraps, self._fabric_ports(fabric),
+                    self._ports_per_fabric())
+                # Every loop through, we reverse the vios wrappers. This is
+                # done so that if Fabric A only has 1 port, it goes on the
+                # first VIOS. Then Fabric B would put its port on a different
+                # VIOS. As a form of multi pathing (so that your paths were
+                # not restricted to a single VIOS).
+                vios_wraps.reverse()
+                # Check if the fabrics are unmapped then we need to map it
+                # temporarily with the management partition.
+                LOG.info(_LI("Adding NPIV Mapping with mgmt partition for "
+                             "instance %s") % instance.name)
+                port_maps = pvm_vfcm.add_npiv_port_mappings(
+                    adapter, host_uuid, mgmt_uuid, port_maps)
+                # Set the fabric meta (which indicates on the instance how
+                # the fabric is mapped to the physical port) and the fabric
+                # state.
+                self._set_fabric_meta(instance, fabric, port_maps)
+                self._set_fabric_state(instance, fabric, FS_MGMT_MAPPED)
+            else:
+                # This specific fabric had been previously set. Just pull
+                # from the meta (as it is likely already mapped to the
+                # instance)
+                port_maps = self._get_fabric_meta(instance, fabric)
+            # Port map is set by either conditional, but may be set to None.
+            # If not None, then add the WWPNs to the response.
+            if port_maps is not None:
+                for mapping in port_maps:
+                    resp_wwpns.extend(mapping[1].split())
         # The return object needs to be a list for the volume connector.
         return resp_wwpns
-    def _add_npiv_mgmt_mappings(self, adapter, fabric, host_uuid, instance,
-                                npiv_port_map):
-        """Check if a single fabric is mapped if not it will add NPIV mappings.
-        The fabric will be mapped to the management partition so that NPIV
-        WWPNs are logged onto the FC physical fabric. This will allow the WWPNs
-        to properly zone via the cinder zone manager, as well as be available
-        the cinder storage host.
-        Once the mapping is done the fabric state is updated to FS_MGMT_MAPPED.
-        :param adapter: The pypowervm adapter.
-        :param fabric: fabric name
-        :param host_uuid: The pypowervm UUID of the host.
-        :param instance: The nova instance for which the mapping needs to
-                         be added
-        :param npiv_port_map: NPIV port mappings needs to be added.
-        """
-        fc_state = self._get_fabric_state(instance, fabric)
-        LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
-                     "instance=%(inst)s") %
-                 {'st': fc_state, 'inst': instance.name})
-        if fc_state == FS_UNMAPPED:
-            # Need to login the WWPNs in temporarily here with
-            # the mgmt_uuid
-            mg_wrap = mgmt.get_mgmt_partition(adapter)
-            LOG.info(_LI("Adding NPIV Mapping with mgmt partition for "
-                         "instance=%s") % instance.name)
-            pvm_vfcm.add_npiv_port_mappings(adapter, host_uuid,
-                                            mg_wrap.uuid, npiv_port_map)
-            self._set_fabric_state(instance, fabric, FS_MGMT_MAPPED)
-        return
     def _remove_npiv_mgmt_mappings(self, adapter, fabric, host_uuid, instance,
                                    npiv_port_map):
         """Remove the fabric from the management partition if necessary.
@@ -363,15 +349,6 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         """Returns the nova system metadata key for a given fabric."""
         return WWPN_SYSTEM_METADATA_KEY + '_' + fabric
-    def _build_wwpns_for_fabric(self, adapter, host_uuid):
-        """Builds all of the WWPNs that are needed for a given fabric."""
-        wwpns = []
-        i = 0
-        while i < self._ports_per_fabric():
-            wwpns.extend(pvm_vfcm.build_wwpn_pair(adapter, host_uuid))
-            i += 1
-        return wwpns
     def _fabric_names(self):
         """Returns a list of the fabric names."""
         return powervm.NPIV_FABRIC_WWPNS.keys()