Optimize the VIOS specification in driver

This change optimizes the use of the VIOSes in the driver.  It does so
by:
 - Removing an excess call to capture the VIOSes in the driver start up.
 - Querying only for the active VIOSes within the vscsi volume driver.

This not only makes the code run quicker, but also makes it more correct
functionally.  The driver no longer tries to connect to VIOSes that may
be down for maintenance purposes.

Change-Id: I718752385837c021c8d0f429ab71f4f7e27c0dd6
This commit is contained in:
Drew Thorstensen 2015-04-29 12:41:31 -05:00
parent d263057d32
commit 9916adf375
9 changed files with 56 additions and 60 deletions

View File

@ -57,10 +57,6 @@ class TestLocalDisk(test.TestCase):
self.apt = self.pypvm.apt
# Set up for the mocks for get_ls
self.mock_vio_name_map_p = mock.patch('nova_powervm.virt.powervm.vios.'
'get_vios_name_map')
self.mock_vio_name_map = self.mock_vio_name_map_p.start()
self.mock_vio_name_map.return_value = {'vio_name': 'vio_uuid'}
self.mock_vg_uuid_p = mock.patch('nova_powervm.virt.powervm.disk.'
'localdisk.LocalStorage.'
@ -74,7 +70,6 @@ class TestLocalDisk(test.TestCase):
# Tear down mocks
self.mock_vg_uuid_p.stop()
self.mock_vio_name_map_p.stop()
def get_ls(self, adpt):
return ld.LocalStorage({'adapter': adpt, 'host_uuid': 'host_uuid'})

View File

@ -90,7 +90,6 @@ class PowerVMComputeDriver(fixtures.Fixture):
def __init__(self):
pass
@mock.patch('nova_powervm.virt.powervm.vios.get_vios_name_map')
@mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage')
@mock.patch('pypowervm.wrappers.managed_system.find_entry_by_mtms')
def _init_host(self, *args):

View File

@ -101,9 +101,8 @@ class TestPowerVMDriver(test.TestCase):
self.assertIsNotNone(vol_connector['host'])
@mock.patch('pypowervm.wrappers.managed_system.find_entry_by_mtms')
@mock.patch('nova_powervm.virt.powervm.vios.get_vios_name_map')
@mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage')
def test_driver_init(self, mock_disk, mock_vio_name_map, mock_find):
def test_driver_init(self, mock_disk, mock_find):
"""Validates the PowerVM driver can be initialized for the host."""
drv = driver.PowerVMDriver(fake.FakeVirtAPI())
drv.init_host('FakeHost')

View File

@ -18,6 +18,7 @@
from nova import test
import os
from pypowervm.tests.wrappers.util import pvmhttp
from pypowervm.wrappers import base_partition as pvm_bp
from nova_powervm.tests.virt.powervm import fixtures as fx
from nova_powervm.virt.powervm import vios
@ -41,12 +42,14 @@ class TestVios(test.TestCase):
return pvmhttp.load_pvm_resp(file_path).get_response()
self.vios_feed_resp = resp(VIOS_FEED)
def test_get_vios_name_map(self):
def test_get_active_vioses(self):
self.adpt.read.return_value = self.vios_feed_resp
expected = {'IOServer - SN2125D4A':
'3443DB77-AED1-47ED-9AA5-3DB9C6CF7089'}
self.assertEqual(expected,
vios.get_vios_name_map(self.adpt, 'host_uuid'))
vioses = vios.get_active_vioses(self.adpt, 'host_uuid')
self.assertEqual(1, len(vioses))
vio = vioses[0]
self.assertEqual(pvm_bp.LPARState.RUNNING, vio.state)
self.assertEqual(pvm_bp.RMCState.ACTIVE, vio.rmc_state)
def test_get_physical_wwpns(self):
self.adpt.read.return_value = self.vios_feed_resp

View File

@ -117,9 +117,8 @@ class TestNPIVAdapter(test.TestCase):
inst, mock.ANY)
self.assertEqual(0, self.adpt.read.call_count)
@mock.patch('nova_powervm.virt.powervm.vios.get_vios_name_map')
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.wrap')
def test_connect_volume_no_map(self, mock_vio_wrap, mock_vio_name_map):
def test_connect_volume_no_map(self, mock_vio_wrap):
"""Tests that if the VFC Mapping exists, another is not added."""
# Mock Data
con_info = {'data': {'initiator_target_map': {'a': None,
@ -133,8 +132,6 @@ class TestNPIVAdapter(test.TestCase):
mock_vio_wrap.return_value = mock_vios
mock_vio_name_map.return_value = {'vios_name': 'vios_uuid'}
# Invoke
self.vol_drv.connect_volume(self.adpt, 'host_uuid', 'vm_uuid',
mock.MagicMock(), con_info)

View File

@ -58,24 +58,23 @@ class TestVSCSIAdapter(test.TestCase):
@mock.patch('pypowervm.tasks.hdisk.build_itls')
@mock.patch('pypowervm.tasks.hdisk.discover_hdisk')
@mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping')
@mock.patch('nova_powervm.virt.powervm.vios.get_vios_name_map')
def test_connect_volume(self, mock_vio_name_map, mock_add_vscsi_mapping,
def test_connect_volume(self, mock_add_vscsi_mapping,
mock_discover_hdisk, mock_build_itls):
con_info = {'data': {'initiator_target_map': {'i1': ['t1'],
'i2': ['t2', 't3']},
'target_lun': '1', 'volume_id': 'id'}}
mock_discover_hdisk.return_value = (
hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid')
mock_vio_name_map.return_value = {'vio_name': 'vio_uuid',
'vio_name1': 'vio_uuid1'}
self.adpt.read.return_value = self.vios_feed_resp
mock_instance = mock.Mock()
mock_instance.system_metadata = {}
vscsi.VscsiVolumeAdapter().connect_volume(None, 'host_uuid',
vscsi.VscsiVolumeAdapter().connect_volume(self.adpt, 'host_uuid',
'vm_uuid', mock_instance,
con_info)
# Confirm mapping called twice for two defined VIOS
self.assertEqual(2, mock_add_vscsi_mapping.call_count)
# Single mapping
self.assertEqual(1, mock_add_vscsi_mapping.call_count)
@mock.patch('pypowervm.tasks.hdisk.remove_hdisk')
@mock.patch('pypowervm.tasks.scsi_mapper.remove_pv_mapping')

View File

@ -46,7 +46,6 @@ from nova_powervm.virt.powervm.disk import driver as disk_dvr
from nova_powervm.virt.powervm import host as pvm_host
from nova_powervm.virt.powervm.tasks import storage as tf_stg
from nova_powervm.virt.powervm.tasks import vm as tf_vm
from nova_powervm.virt.powervm import vios
from nova_powervm.virt.powervm import vm
from nova_powervm.virt.powervm import volume as vol_attach
@ -86,9 +85,6 @@ class PowerVMDriver(driver.ComputeDriver):
# Initialize the UUID Cache. Lets not prime it at this time.
vm.UUIDCache(self.adapter)
# Get the map of the VIOS names to UUIDs
self.vios_map = vios.get_vios_name_map(self.adapter, self.host_uuid)
# Initialize the disk adapter. Sets self.disk_drv
self._get_disk_adapter()
self.image_api = image.API()

View File

@ -17,6 +17,7 @@
from oslo_config import cfg
from oslo_log import log as logging
from pypowervm.wrappers import base_partition as pvm_bp
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import virtual_io_server as pvm_vios
@ -24,24 +25,39 @@ from pypowervm.wrappers import virtual_io_server as pvm_vios
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# RMC must be either active or busy. Busy is allowed because that simply
# means that something is running against the VIOS at the moment...but
# it should recover shortly.
VALID_RMC_STATES = [pvm_bp.RMCState.ACTIVE, pvm_bp.RMCState.BUSY]
def get_vios_name_map(adapter, host_uuid):
"""Returns the map of VIOS names to UUIDs.
# Only a running state is OK for now.
VALID_VM_STATES = [pvm_bp.LPARState.RUNNING]
def get_active_vioses(adapter, host_uuid):
"""Returns a list of active Virtual I/O Server Wrappers for a host.
Active is defined as being powered on with an RMC state of 'active'.
:param adapter: The pypowervm adapter for the query.
:param host_uuid: The host server's UUID.
:return: A dictionary with all of the Virtual I/O Servers on the system.
The format is:
{
'vio_name': 'vio_uuid',
'vio2_name': 'vio2_uuid',
etc...
}
:return: List of VIOS wrappers.
"""
vio_feed_resp = adapter.read(pvm_ms.System.schema_type, root_id=host_uuid,
child_type=pvm_vios.VIOS.schema_type)
wrappers = pvm_vios.VIOS.wrap(vio_feed_resp)
return {wrapper.name: wrapper.uuid for wrapper in wrappers}
return [vio for vio in wrappers if is_vios_active(vio)]
def is_vios_active(vios):
"""Returns a boolean to indicate if the VIOS is active.
Active is defined as running with an RMC state of 'active'.
:param vios: The Virtual I/O Server wrapper to validate.
:return: Boolean
"""
return (vios.rmc_state in VALID_RMC_STATES and
vios.state in VALID_VM_STATES)
def get_physical_wwpns(adapter, ms_uuid):

View File

@ -26,9 +26,7 @@ from nova_powervm.virt.powervm.volume import driver as v_driver
import pypowervm.exceptions as pexc
from pypowervm.tasks import hdisk
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios
import six
@ -93,44 +91,42 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
for it_list in it_map.values():
t_wwpns.extend(it_list)
# Get VIOS map
vio_map = vios.get_vios_name_map(adapter, host_uuid)
# Get VIOS feed
vios_feed = vios.get_active_vioses(adapter, host_uuid)
# Iterate through host vios list to find valid hdisks and map to VM.
# TODO(IBM): The VIOS should only include the intersection with
# defined SCG targets when they are available.
for vios_name, vios_uuid in vio_map.iteritems():
for vio_wrap in vios_feed:
# TODO(IBM): Investigate if i_wwpns passed to discover_hdisk
# should be intersection with VIOS pfc_wwpns
itls = hdisk.build_itls(i_wwpns, t_wwpns, lun)
status, device_name, udid = hdisk.discover_hdisk(adapter,
vios_uuid, itls)
status, device_name, udid = hdisk.discover_hdisk(
adapter, vio_wrap.uuid, itls)
if device_name is not None and status in [
hdisk.LUAStatus.DEVICE_AVAILABLE,
hdisk.LUAStatus.FOUND_ITL_ERR]:
LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
'volume %(volume_id)s. Status code: %(status)s.') %
{'hdisk': device_name, 'vios': vios_name,
{'hdisk': device_name, 'vios': vio_wrap.name,
'volume_id': volume_id, 'status': str(status)})
self._add_mapping(adapter, host_uuid, vm_uuid, vios_uuid,
self._add_mapping(adapter, host_uuid, vm_uuid, vio_wrap.uuid,
device_name)
connection_info['data']['target_UDID'] = udid
self._set_udid(instance, vios_uuid, volume_id, udid)
self._set_udid(instance, vio_wrap.uuid, volume_id, udid)
LOG.info(_LI('Device attached: %s'), device_name)
hdisk_found = True
elif status == hdisk.LUAStatus.DEVICE_IN_USE:
LOG.warn(_LW('Discovered device %(dev)s for volume %(volume)s '
'on %(vios)s is in use Errorcode: %(status)s.'),
{'dev': device_name, 'volume': volume_id,
'vios': vios_name, 'status': str(status)})
'vios': vio_wrap.name, 'status': str(status)})
# A valid hdisk was not found so log and exit
if not hdisk_found:
msg = (_LW('Failed to discover valid hdisk on %(vios)s '
'for volume %(volume_id)s. status: '
'%(status)s.') % {'vios': vios_name,
'volume_id': volume_id,
'status': str(status)})
LOG.warn(msg)
msg = (_LE('Failed to discover valid hdisk on any Virtual I/O '
'Server for volume %(volume_id)s.') %
{'volume_id': volume_id})
LOG.error(msg)
if device_name is None:
device_name = 'None'
ex_args = {'backing_dev': device_name,
@ -171,11 +167,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
try:
# Get VIOS feed
vio_feed_resp = adapter.read(pvm_ms.System.schema_type,
root_id=host_uuid,
child_type=pvm_vios.VIOS.schema_type,
xag=[pvm_vios.VIOS.xags.STORAGE])
vios_feed = pvm_vios.VIOS.wrap(vio_feed_resp)
vios_feed = vios.get_active_vioses(adapter, host_uuid)
# Iterate through host vios list to find hdisks to disconnect.
for vio_wrap in vios_feed: