LPAR-specific SSP ops target VIOSes on same host

When nova is managing an SSP Cluster whose VIOSes span multiple
accessible hosts (e.g. nova is running against a single HMC managing
multiple hosts), it is important that operations specific to a
particular instance - such as creating and removing disk connections -
be invoked only on the VIOS(es) residing on the same host as that
instance.  This change set fixes the SSP Disk Driver to ensure this is
the case.

Ancillary to this change is an expansion and refactoring of the vm
utility to retrieve quick properties for an LPAR.  Invoking the new
vm.get_vm_qp method without the qprop parameter will return a dictionary
of all the quick properties available for the LPAR.

Change-Id: Ib1d02743baf041c9abc69b25201d33e516fa6cb3
This commit is contained in:
Eric Fried 2015-04-15 17:45:54 -05:00
parent 1f7b1c0544
commit ceca475b21
4 changed files with 168 additions and 47 deletions

View File

@ -231,12 +231,12 @@ class TestSSPDiskAdapter(test.TestCase):
def test_vios_uuids(self):
    """_vios_uuids/_any_vios_uuid, unrestricted and host-restricted."""
    ssp_stor = self._get_ssp_stor()
    # Unrestricted: every VIOS in the cluster is listed.
    vios_uuids = ssp_stor._vios_uuids()
    self.assertEqual(['58C9EB1D-7213-4956-A011-77D43CC4ACCC',
                      '6424120D-CA95-437D-9C18-10B06F4B3400'], vios_uuids)
    s = set()
    for i in range(1000):
        u = ssp_stor._any_vios_uuid()
        # Make sure we got a good value
        self.assertIn(u, vios_uuids)
        s.add(u)
    # _any_vios_uuid picks at random, so 1000 draws from two choices
    # should hit both.  This is not strictly
    # guaranteed to work, but the odds of failure should be infinitesimal.
    self.assertEqual(set(vios_uuids), s)

    # Now make sure the host-restricted versions work
    vios_uuids = ssp_stor._vios_uuids(
        host_uuid='67dca605-3923-34da-bd8f-26a378fc817f')
    self.assertEqual(['6424120D-CA95-437D-9C18-10B06F4B3400'], vios_uuids)
    s = set()
    for i in range(1000):
        u = ssp_stor._any_vios_uuid(
            host_uuid='67dca605-3923-34da-bd8f-26a378fc817f')
        # Make sure we get the good value every time
        self.assertIn(u, vios_uuids)
        s.add(u)
def test_capacity(self):
    """The disk adapter reports the SSP's total capacity."""
    adpt = self._get_ssp_stor()
    self.assertEqual(49.88, adpt.capacity)
@ -265,7 +277,7 @@ class TestSSPDiskAdapter(test.TestCase):
def verify_upload_new_lu(adap, vios_uuid, ssp1, stream, lu_name,
f_size):
self.assertIn(vios_uuid, ssp_stor._vios_uuids)
self.assertIn(vios_uuid, ssp_stor._vios_uuids())
self.assertEqual(ssp_stor._ssp_wrap, ssp1)
# 'image' + '_' + s/-/_/g(image['id']), per _get_image_name
self.assertEqual('image_image_id', lu_name)
@ -315,11 +327,24 @@ class TestSSPDiskAdapter(test.TestCase):
@mock.patch('pypowervm.wrappers.virtual_io_server.VSCSIMapping.'
            '_client_lpar_href')
@mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping')
@mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp')
def test_connect_disk(self, mock_vm_qp, mock_add_map, mock_href):
    """connect_disk maps the LU only via VIOSes on the LPAR's own host."""
    ms_uuid = '67dca605-3923-34da-bd8f-26a378fc817f'
    # The LPAR's AssociatedManagedSystem quick property names ms_uuid.
    mock_vm_qp.return_value = ('https://9.1.2.3:12443/rest/api/uom/'
                               'ManagedSystem/' + ms_uuid)

    def validate_add_vscsi_mapping(adapter, host_uuid, vios_uuid,
                                   lpar_uuid, inlu):
        # The mapping must target the LPAR's host and the one VIOS on it.
        self.assertEqual(ms_uuid, host_uuid)
        self.assertEqual('6424120D-CA95-437D-9C18-10B06F4B3400', vios_uuid)
        self.assertEqual('lpar_uuid', lpar_uuid)
        self.assertEqual(lu, inlu)
    mock_add_map.side_effect = validate_add_vscsi_mapping

    ssp_stor = self._get_ssp_stor()
    lu = ssp_stor._ssp_wrap.logical_units[0]
    ssp_stor.connect_disk(None, self.instance, lu, 'lpar_uuid')
    # Only the single VIOS residing on the LPAR's host is mapped.
    self.assertEqual(1, mock_add_map.call_count)
def test_delete_disks(self):
def _mk_img_lu(idx):
@ -357,35 +382,33 @@ class TestSSPDiskAdapter(test.TestCase):
self.assertEqual(1, self.apt.update_by_path.call_count)
@mock.patch('pypowervm.tasks.scsi_mapper.remove_lu_mapping')
@mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp')
def test_disconnect_image_disk(self, mock_vm_qp, mock_rm_lu_map):
    """Mappings are removed only from VIOSes on the LPAR's host."""
    ssp_stor = self._get_ssp_stor()
    # All quick properties are fetched in one shot.
    mock_vm_qp.return_value = dict(
        PartitionID='lpar_id',
        AssociatedManagedSystem='https://9.1.2.3:12443/rest/api/uom/'
                                'ManagedSystem/'
                                '67dca605-3923-34da-bd8f-26a378fc817f')

    def mklu(udid):
        # Build a minimal LU with a predictable UDID.
        lu = pvm_stg.LU.bld('lu_%s' % udid, 1)
        lu._udid('27%s' % udid)
        return lu

    lu1 = mklu('abc')
    lu2 = mklu('def')

    def remove_lu_mapping(adapter, vios_uuid, lpar_id, disk_prefixes=None):
        """Mock: validate args and return the LUs unmapped from the VIOS."""
        self.assertEqual(adapter, self.apt)
        self.assertEqual('lpar_id', lpar_id)
        # Only the VIOS on the LPAR's host should ever be touched.
        self.assertEqual('6424120D-CA95-437D-9C18-10B06F4B3400', vios_uuid)
        return [lu1, lu2]
    mock_rm_lu_map.side_effect = remove_lu_mapping

    lu_list = ssp_stor.disconnect_image_disk(None, None, None)
    self.assertEqual({lu1, lu2}, set(lu_list))
def test_shared_stg_calls(self):

View File

@ -259,3 +259,62 @@ class TestVM(test.TestCase):
# Validate (along with validate method above)
self.assertEqual(1, mock_crt_cna.call_count)
def test_get_vm_qp(self):
    """get_vm_qp: single property, all properties, and error translation."""
    def adapter_read(root_type, root_id=None, suffix_type=None,
                     suffix_parm=None):
        # Canned payload for the all-properties (suffix_parm=None) case.
        json_str = (u'{"IsVirtualServiceAttentionLEDOn":"false","Migration'
                    u'State":"Not_Migrating","CurrentProcessingUnits":0.1,'
                    u'"ProgressState":null,"PartitionType":"AIX/Linux","Pa'
                    u'rtitionID":1,"AllocatedVirtualProcessors":1,"Partiti'
                    u'onState":"not activated","RemoteRestartState":"Inval'
                    u'id","OperatingSystemVersion":"Unknown","AssociatedMa'
                    u'nagedSystem":"https://9.1.2.3:12443/rest/api/uom/Man'
                    u'agedSystem/98498bed-c78a-3a4f-b90a-4b715418fcb6","RM'
                    u'CState":"inactive","PowerManagementMode":null,"Parti'
                    u'tionName":"lpar-1-06674231-lpar","HasDedicatedProces'
                    u'sors":"false","ResourceMonitoringIPAddress":null,"Re'
                    u'ferenceCode":"00000000","CurrentProcessors":null,"Cu'
                    u'rrentMemory":512,"SharingMode":"uncapped"}')
        self.assertEqual('LogicalPartition', root_type)
        self.assertEqual('lpar_uuid', root_id)
        self.assertEqual('quick', suffix_type)
        resp = mock.MagicMock()
        if suffix_parm is None:
            resp.body = json_str
        elif suffix_parm == 'PartitionID':
            resp.body = '1'
        elif suffix_parm == 'CurrentProcessingUnits':
            resp.body = '0.1'
        elif suffix_parm == 'AssociatedManagedSystem':
            # The double quotes are important
            resp.body = ('"https://9.1.2.3:12443/rest/api/uom/ManagedSyste'
                         'm/98498bed-c78a-3a4f-b90a-4b715418fcb6"')
        else:
            self.fail('Unhandled quick property key %s' % suffix_parm)
        return resp

    ms_href = ('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/98498bed-'
               'c78a-3a4f-b90a-4b715418fcb6')
    self.apt.read.side_effect = adapter_read
    # Single-property fetches (PartitionID via the get_vm_id wrapper).
    self.assertEqual(1, vm.get_vm_id(self.apt, 'lpar_uuid'))
    self.assertEqual(ms_href, vm.get_vm_qp(self.apt, 'lpar_uuid',
                                           'AssociatedManagedSystem'))
    self.assertEqual(0.1, vm.get_vm_qp(self.apt, 'lpar_uuid',
                                       'CurrentProcessingUnits'))
    # Omitting qprop yields the full quick-property dictionary.
    qp_dict = vm.get_vm_qp(self.apt, 'lpar_uuid')
    self.assertEqual(ms_href, qp_dict['AssociatedManagedSystem'])
    self.assertEqual(1, qp_dict['PartitionID'])
    self.assertEqual(0.1, qp_dict['CurrentProcessingUnits'])
    # A REST 404 is translated to InstanceNotFound...
    resp = mock.MagicMock()
    resp.status = 404
    self.apt.read.side_effect = pvm_exc.Error('message', response=resp)
    self.assertRaises(exception.InstanceNotFound, vm.get_vm_qp, self.apt,
                      'lpar_uuid')
    # ...while any other pypowervm error is re-raised unchanged.
    resp.status = 500
    self.apt.read.side_effect = pvm_exc.Error('message', response=resp)
    self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt,
                      'lpar_uuid')

View File

@ -27,6 +27,7 @@ from nova_powervm.virt.powervm import vm
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.tasks import storage as tsk_stg
import pypowervm.util as pvm_u
import pypowervm.wrappers.cluster as pvm_clust
import pypowervm.wrappers.storage as pvm_stg
@ -117,12 +118,15 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
:return: A list of all the backing storage elements that were
disconnected from the I/O Server and VM.
"""
lpar_id = vm.get_vm_id(self.adapter, lpar_uuid)
lpar_qps = vm.get_vm_qp(self.adapter, lpar_uuid)
lpar_id = lpar_qps['PartitionID']
host_uuid = pvm_u.get_req_path_uuid(
lpar_qps['AssociatedManagedSystem'], preserve_case=True)
lu_set = set()
# The mappings will normally be the same on all VIOSes, unless a VIOS
# was down when a disk was added. So for the return value, we need to
# collect the union of all relevant mappings from all VIOSes.
for vios_uuid in self._vios_uuids:
for vios_uuid in self._vios_uuids(host_uuid=host_uuid):
for lu in tsk_map.remove_lu_mapping(
self.adapter, vios_uuid, lpar_id, disk_prefixes=disk_type):
lu_set.add(lu)
@ -210,7 +214,7 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
# Make the image LU only as big as the image.
stream = self._get_image_upload(context, image)
LOG.info(_LI('SSP: Uploading new image LU %s.') % luname)
lu, f_wrap = tsk_stg.upload_new_lu(self.adapter, self._any_vios_uuid,
lu, f_wrap = tsk_stg.upload_new_lu(self.adapter, self._any_vios_uuid(),
ssp, stream, luname, image['size'])
return lu
@ -227,9 +231,14 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
# Create the LU structure
lu = pvm_stg.LU.bld_ref(disk_info.name, disk_info.udid)
# Add the mapping to *each* VIOS on this host.
for vios_uuid in self._vios_uuids:
tsk_map.add_vscsi_mapping(self.adapter, self.host_uuid, vios_uuid,
# Add the mapping to *each* VIOS on the LPAR's host.
# Note that the LPAR's host is likely to be the same as self.host_uuid,
# but this is safer.
host_href = vm.get_vm_qp(self.adapter, lpar_uuid,
'AssociatedManagedSystem')
host_uuid = pvm_u.get_req_path_uuid(host_href, preserve_case=True)
for vios_uuid in self._vios_uuids(host_uuid=host_uuid):
tsk_map.add_vscsi_mapping(self.adapter, host_uuid, vios_uuid,
lpar_uuid, lu)
def extend_disk(self, context, instance, disk_info, size):
@ -357,20 +366,36 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
self._ssp_wrap = self._ssp_wrap.refresh(self.adapter)
return self._ssp_wrap
def _vios_uuids(self, host_uuid=None):
    """List the UUIDs of our cluster's VIOSes (on a specific host).

    (If a VIOS is not on this host, its URI and therefore its UUID will
    not be available in the pypowervm wrapper.)

    :param host_uuid: Restrict the response to VIOSes residing on the host
                      with the specified UUID.  If None/unspecified, VIOSes
                      on all hosts are included.
    :return: A list of VIOS UUID strings.
    """
    ret = []
    for n in self._cluster.nodes:
        if host_uuid:
            # The node's host UUID is the root UUID of its URI.
            node_host_uuid = pvm_u.get_req_path_uuid(
                n.vios_uri, preserve_case=True, root=True)
            if host_uuid != node_host_uuid:
                continue
        ret.append(n.vios_uuid)
    return ret
def _any_vios_uuid(self, host_uuid=None):
    """Pick one of the Cluster's VIOSes and return its UUID.

    Use when it doesn't matter which VIOS an operation is invoked against.
    Currently picks at random; may later be changed to use round-robin.

    :param host_uuid: Restrict the response to VIOSes residing on the host
                      with the specified UUID.  If None/unspecified, VIOSes
                      on all hosts are included.
    :return: A single VIOS UUID string.
    """
    return random.choice(self._vios_uuids(host_uuid=host_uuid))

View File

@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_config import cfg
from oslo_log import log as logging
@ -142,18 +144,7 @@ class InstanceInfo(hardware.InstanceInfo):
self.id = uuid
def _get_property(self, q_prop):
    """Fetch a single LPAR quick property via the shared helper.

    :param q_prop: Name of the quick property to retrieve.
    :return: The (JSON-decoded) value of the quick property.
    :raise InstanceNotFound: If the LPAR no longer exists (REST 404).
    """
    return get_vm_qp(self._adapter, self._uuid, q_prop)
@property
def state(self):
def get_vm_id(adapter, lpar_uuid):
    """Returns the client LPAR ID for a given UUID.

    :param adapter: The pypowervm adapter.
    :param lpar_uuid: The UUID for the LPAR.
    :returns: The system id (an integer value).
    """
    # Delegate to get_vm_qp so error handling (404 -> InstanceNotFound)
    # and JSON decoding live in one place.
    return get_vm_qp(adapter, lpar_uuid, qprop='PartitionID')
def get_vm_qp(adapter, lpar_uuid, qprop=None):
    """Returns one or all quick properties of an LPAR.

    :param adapter: The pypowervm adapter.
    :param lpar_uuid: The (powervm) UUID for the LPAR.
    :param qprop: The quick property key to return.  If specified, that
                  single property value is returned.  If None/unspecified,
                  all quick properties are returned in a dictionary.
    :return: Either a single quick property value or a dictionary of all
             quick properties.
    :raise InstanceNotFound: If the REST API reports 404 for the LPAR.
    """
    try:
        resp = adapter.read(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid,
                            suffix_type='quick', suffix_parm=qprop)
    except pvm_exc.Error as e:
        if e.response.status == 404:
            raise exception.InstanceNotFound(instance_id=lpar_uuid)
        LOG.exception(e)
        raise
    # The quick-property payload is JSON whether it is a single value or
    # the whole property dictionary.
    return json.loads(resp.body)
def _crt_lpar_builder(host_wrapper, instance, flavor):