SSP: Only consider this host's VIOSes

Previously, the SSP disk driver was gleaning the list of VIOS UUIDs from
the list of Nodes in the Cluster, and optionally restricting that list
to VIOSes from a specified host.

It will almost never be useful to know about VIOSes residing on a
different host.  Any attempt to talk to those VIOSes through the PowerVM
API will fail, as we don't have a pypowervm connection to that host.

Therefore this change set simplifies the retrieval of VIOS UUIDs: the
_vios_uuids method becomes a vios_uuids property, the host_uuid option
is eliminated, and the result is restricted to VIOSes on the current
host.

Change-Id: I2fe1e93b7039383889e56b809d5348ee2d5222a9
Author: Eric Fried
Date:   2015-05-18 17:11:32 -05:00
Parent: 1b649cc221
Commit: 8ee482c82a

3 changed files with 59 additions and 66 deletions
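At its core, the change turns _vios_uuids(host_uuid=None) into a vios_uuids
property that always filters on self.host_uuid. Condensed from the driver
diff below into a standalone sketch (the enclosing class and imports are
elided in the diff; pvm_u is pypowervm.util, as the driver imports it):

    @property
    def vios_uuids(self):
        """List the UUIDs of our cluster's VIOSes on this host."""
        ret = []
        for n in self._cluster.nodes:
            # Nodes that are down (or otherwise unreachable) may lack a
            # VIOS UUID and/or URI; they can't be used, so skip them.
            if not (n.vios_uuid and n.vios_uri):
                continue
            # The root UUID in the VIOS's URI is the UUID of the
            # ManagedSystem (host) that owns it.
            node_host_uuid = pvm_u.get_req_path_uuid(
                n.vios_uri, preserve_case=True, root=True)
            if self.host_uuid != node_host_uuid:
                continue
            ret.append(n.vios_uuid)
        return ret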

File 1 of 3:

@@ -88,6 +88,23 @@ BODY{
       <VirtualIOServerLevel kb="CUD" kxe="false">2.2.4.0</VirtualIOServerLevel>
       <VirtualIOServer kb="CUD" kxe="false" href="https://9.1.2.3:12443/rest/api/uom/ManagedSystem/67dca605-3923-34da-bd8f-26a378fc817f/VirtualIOServer/6424120D-CA95-437D-9C18-10B06F4B3400" rel="related"/>
     </Node>
+    <Node schemaVersion="V1_2_0">
+      <Metadata>
+        <Atom/>
+      </Metadata>
+      <HostName kb="CUD" kxe="false">bar.example.com</HostName>
+      <PartitionID kb="CUD" kxe="false">3</PartitionID>
+      <MachineTypeModelAndSerialNumber kxe="false" kb="CUD" schemaVersion="V1_2_0">
+        <Metadata>
+          <Atom/>
+        </Metadata>
+        <MachineType kb="CUR" kxe="false">8247</MachineType>
+        <Model kxe="false" kb="CUR">22L</Model>
+        <SerialNumber kb="CUR" kxe="false">2125D0A</SerialNumber>
+      </MachineTypeModelAndSerialNumber>
+      <VirtualIOServerLevel kb="CUD" kxe="false">2.2.4.0</VirtualIOServerLevel>
+      <VirtualIOServer kb="CUD" kxe="false" href="https://9.1.2.3:12443/rest/api/uom/ManagedSystem/67dca605-3923-34da-bd8f-26a378fc817f/VirtualIOServer/10B06F4B-437D-9C18-CA95-34006424120D" rel="related"/>
+    </Node>
   </Node>
 </Cluster:Cluster>
 </content>
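The new Node above deliberately lives on the same ManagedSystem
(67dca605-...) as the existing one, so its VIOS passes the new host filter.
That filter works by pulling the root UUID out of the VirtualIOServer href.
Roughly (a small illustration using the href from the fixture;
get_req_path_uuid is the same pypowervm utility the driver calls):

    import pypowervm.util as pvm_u

    href = ('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/'
            '67dca605-3923-34da-bd8f-26a378fc817f/VirtualIOServer/'
            '10B06F4B-437D-9C18-CA95-34006424120D')

    # root=True extracts the UUID of the first element in the REST path
    # (the ManagedSystem, i.e. the host) rather than the last (the VIOS).
    host_uuid = pvm_u.get_req_path_uuid(href, preserve_case=True, root=True)
    assert host_uuid == '67dca605-3923-34da-bd8f-26a378fc817f'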

File 2 of 3:

@@ -109,8 +109,9 @@ class TestSSPDiskAdapter(test.TestCase):
         cfg.CONF.set_override('cluster_name', 'clust1')
 
     def _get_ssp_stor(self):
-        ssp_stor = ssp.SSPDiskAdapter({'adapter': self.apt,
-                                       'host_uuid': 'host_uuid'})
+        ssp_stor = ssp.SSPDiskAdapter(
+            {'adapter': self.apt,
+             'host_uuid': '67dca605-3923-34da-bd8f-26a378fc817f'})
         return ssp_stor
 
     def _bld_resp(self, status=200, entry_or_list=None):
@@ -234,9 +235,10 @@ class TestSSPDiskAdapter(test.TestCase):
 
     def test_vios_uuids(self):
         ssp_stor = self._get_ssp_stor()
-        vios_uuids = ssp_stor._vios_uuids()
-        self.assertEqual(['58C9EB1D-7213-4956-A011-77D43CC4ACCC',
-                          '6424120D-CA95-437D-9C18-10B06F4B3400'], vios_uuids)
+        vios_uuids = ssp_stor.vios_uuids
+        self.assertEqual({'10B06F4B-437D-9C18-CA95-34006424120D',
+                          '6424120D-CA95-437D-9C18-10B06F4B3400'},
+                         set(vios_uuids))
         s = set()
         for i in range(1000):
             u = ssp_stor._any_vios_uuid()
@@ -247,32 +249,22 @@ class TestSSPDiskAdapter(test.TestCase):
         # guaranteed to work, but the odds of failure should be infinitesimal.
         self.assertEqual(set(vios_uuids), s)
 
-        # Now make sure the host-restricted versions work
-        vios_uuids = ssp_stor._vios_uuids(
-            host_uuid='67dca605-3923-34da-bd8f-26a378fc817f')
-        self.assertEqual(['6424120D-CA95-437D-9C18-10B06F4B3400'], vios_uuids)
-        s = set()
-        for i in range(1000):
-            u = ssp_stor._any_vios_uuid(
-                host_uuid='67dca605-3923-34da-bd8f-26a378fc817f')
-            # Make sure we get the good value every time
-            self.assertIn(u, vios_uuids)
-            s.add(u)
-
-        # Test VIOSs on other nodes, which won't have uuid or url
+        # Test VIOSes on other nodes, which won't have uuid or url
         with mock.patch.object(ssp_stor, '_cluster') as mock_clust:
             def mock_node(uuid, uri):
                 node = mock.MagicMock()
                 node.vios_uuid = uuid
                 node.vios_uri = uri
                 return node
-            node1 = mock_node(None, 'uri')
+            uri = ('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/67dca605-'
+                   '3923-34da-bd8f-26a378fc817f/VirtualIOServer/10B06F4B-437D-'
+                   '9C18-CA95-34006424120D')
+            node1 = mock_node(None, uri)
             node2 = mock_node('2', None)
             # This mock is good and should be returned
-            node3 = mock_node('3', 'uri')
+            node3 = mock_node('3', uri)
             mock_clust.nodes = [node1, node2, node3]
-            vios_uuids = ssp_stor._vios_uuids()
-            self.assertEqual(['3'], vios_uuids)
+            self.assertEqual(['3'], ssp_stor.vios_uuids)
 
     def test_capacity(self):
         ssp_stor = self._get_ssp_stor()
@@ -294,7 +286,7 @@ class TestSSPDiskAdapter(test.TestCase):
         img = dict(name='image-name', id='image-id', size=b2G)
 
         def verify_upload_new_lu(vios_uuid, ssp1, stream, lu_name, f_size):
-            self.assertIn(vios_uuid, ssp_stor._vios_uuids())
+            self.assertIn(vios_uuid, ssp_stor.vios_uuids)
             self.assertEqual(ssp_stor._ssp_wrap, ssp1)
             # 'image' + '_' + s/-/_/g(image['id']), per _get_image_name
             self.assertEqual('image_image_name', lu_name)
@@ -343,15 +335,13 @@ class TestSSPDiskAdapter(test.TestCase):
     @mock.patch('pypowervm.wrappers.virtual_io_server.VSCSIMapping.'
                 '_client_lpar_href')
     @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping')
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp')
-    def test_connect_disk(self, mock_vm_qp, mock_add_map, mock_href):
+    def test_connect_disk(self, mock_add_map, mock_href):
         ms_uuid = '67dca605-3923-34da-bd8f-26a378fc817f'
-        mock_vm_qp.return_value = ('https://9.1.2.3:12443/rest/api/uom/'
-                                   'ManagedSystem/' + ms_uuid)
 
         def validate_add_vscsi_mapping(host_uuid, vios_uuid, lpar_uuid, inlu):
             self.assertEqual(ms_uuid, host_uuid)
-            self.assertEqual('6424120D-CA95-437D-9C18-10B06F4B3400', vios_uuid)
+            self.assertIn(vios_uuid, ('6424120D-CA95-437D-9C18-10B06F4B3400',
+                                      '10B06F4B-437D-9C18-CA95-34006424120D'))
             self.assertEqual('lpar_uuid', lpar_uuid)
             self.assertEqual(lu, inlu)
         mock_add_map.side_effect = validate_add_vscsi_mapping
@@ -359,7 +349,7 @@ class TestSSPDiskAdapter(test.TestCase):
         ssp_stor = self._get_ssp_stor()
         lu = ssp_stor._ssp_wrap.logical_units[0]
         ssp_stor.connect_disk(None, self.instance, lu, 'lpar_uuid')
-        self.assertEqual(1, mock_add_map.call_count)
+        self.assertEqual(2, mock_add_map.call_count)
 
     def test_delete_disks(self):
         def _mk_img_lu(idx):
@@ -399,14 +389,10 @@ class TestSSPDiskAdapter(test.TestCase):
         self.assertEqual(1, self.apt.update_by_path.call_count)
 
     @mock.patch('pypowervm.tasks.scsi_mapper.remove_lu_mapping')
-    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp')
-    def test_disconnect_image_disk(self, mock_vm_qp, mock_rm_lu_map):
+    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
+    def test_disconnect_image_disk(self, mock_vm_id, mock_rm_lu_map):
         ssp_stor = self._get_ssp_stor()
-        mock_vm_qp.return_value = dict(
-            PartitionID='lpar_id',
-            AssociatedManagedSystem='https://9.1.2.3:12443/rest/api/uom/'
-                                    'ManagedSystem/'
-                                    '67dca605-3923-34da-bd8f-26a378fc817f')
+        mock_vm_id.return_value = 'lpar_id'
 
         def mklu(udid):
             lu = pvm_stg.LU.bld(None, 'lu_%s' % udid, 1)
@@ -420,7 +406,8 @@ class TestSSPDiskAdapter(test.TestCase):
             """Mock returning different sets of LUs for each VIOS."""
             self.assertEqual(adapter, self.apt)
             self.assertEqual('lpar_id', lpar_id)
-            self.assertEqual('6424120D-CA95-437D-9C18-10B06F4B3400', vios_uuid)
+            self.assertIn(vios_uuid, ('6424120D-CA95-437D-9C18-10B06F4B3400',
+                                      '10B06F4B-437D-9C18-CA95-34006424120D'))
             return [lu1, lu2]
 
         mock_rm_lu_map.side_effect = remove_lu_mapping

File 3 of 3:

@@ -118,15 +118,12 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         :return: A list of all the backing storage elements that were
                  disconnected from the I/O Server and VM.
         """
-        lpar_qps = vm.get_vm_qp(self.adapter, lpar_uuid)
-        lpar_id = lpar_qps['PartitionID']
-        host_uuid = pvm_u.get_req_path_uuid(
-            lpar_qps['AssociatedManagedSystem'], preserve_case=True)
+        lpar_id = vm.get_vm_id(self.adapter, lpar_uuid)
         lu_set = set()
         # The mappings will normally be the same on all VIOSes, unless a VIOS
         # was down when a disk was added. So for the return value, we need to
         # collect the union of all relevant mappings from all VIOSes.
-        for vios_uuid in self._vios_uuids(host_uuid=host_uuid):
+        for vios_uuid in self.vios_uuids:
             for lu in tsk_map.remove_lu_mapping(
                     self.adapter, vios_uuid, lpar_id, disk_prefixes=disk_type):
                 lu_set.add(lu)
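The comment in the hunk above is the reason disconnect iterates every VIOS on
the host rather than just one. A toy illustration of the union it collects,
assuming a hypothetical vios_b that missed a mapping while it was down:

    # Hypothetical mappings reported per VIOS: vios_b was down when lu3
    # was mapped, so only vios_a knows about it.
    maps_by_vios = {'vios_a': {'lu1', 'lu2', 'lu3'},
                    'vios_b': {'lu1', 'lu2'}}
    lu_set = set()
    for lus in maps_by_vios.values():
        lu_set |= lus  # union across all VIOSes on this host
    assert lu_set == {'lu1', 'lu2', 'lu3'}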
@@ -231,13 +228,10 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         lu = pvm_stg.LU.bld_ref(self.adapter, disk_info.name, disk_info.udid)
 
         # Add the mapping to *each* VIOS on the LPAR's host.
-        # Note that the LPAR's host is likely to be the same as self.host_uuid,
-        # but this is safer.
-        host_href = vm.get_vm_qp(self.adapter, lpar_uuid,
-                                 'AssociatedManagedSystem')
-        host_uuid = pvm_u.get_req_path_uuid(host_href, preserve_case=True)
-        for vios_uuid in self._vios_uuids(host_uuid=host_uuid):
-            tsk_map.add_vscsi_mapping(host_uuid, vios_uuid, lpar_uuid, lu)
+        # The LPAR's host has to be self.host_uuid, else the PowerVM API will
+        # fail.
+        for vios_uuid in self.vios_uuids:
+            tsk_map.add_vscsi_mapping(self.host_uuid, vios_uuid, lpar_uuid, lu)
 
     def extend_disk(self, context, instance, disk_info, size):
         """Extends the disk.
@@ -364,15 +358,14 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
             self._ssp_wrap = self._ssp_wrap.refresh()
         return self._ssp_wrap
 
-    def _vios_uuids(self, host_uuid=None):
-        """List the UUIDs of our cluster's VIOSes (on a specific host).
+    @property
+    def vios_uuids(self):
+        """List the UUIDs of our cluster's VIOSes on this host.
 
-        (If a VIOS is not on this host, its URI and therefore its UUID will not
-        be available in the pypowervm wrapper.)
+        (If a VIOS is not on this host, we can't interact with it, even if its
+        URI and therefore its UUID happen to be available in the pypowervm
+        wrapper.)
 
-        :param host_uuid: Restrict the response to VIOSes residing on the host
-                          with the specified UUID. If None/unspecified, VIOSes
-                          on all hosts are included.
         :return: A list of VIOS UUID strings.
         """
         ret = []
@@ -380,23 +373,19 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
             # Skip any nodes that we don't have the vios uuid or uri
             if not (n.vios_uuid and n.vios_uri):
                 continue
-            if host_uuid:
-                node_host_uuid = pvm_u.get_req_path_uuid(
-                    n.vios_uri, preserve_case=True, root=True)
-                if host_uuid != node_host_uuid:
-                    continue
+            node_host_uuid = pvm_u.get_req_path_uuid(
+                n.vios_uri, preserve_case=True, root=True)
+            if self.host_uuid != node_host_uuid:
+                continue
             ret.append(n.vios_uuid)
         return ret
 
-    def _any_vios_uuid(self, host_uuid=None):
+    def _any_vios_uuid(self):
         """Pick one of the Cluster's VIOSes and return its UUID.
 
         Use when it doesn't matter which VIOS an operation is invoked against.
         Currently picks at random; may later be changed to use round-robin.
 
-        :param host_uuid: Restrict the response to VIOSes residing on the host
-                          with the specified UUID. If None/unspecified, VIOSes
-                          on all hosts are included.
         :return: A single VIOS UUID string.
         """
-        return random.choice(self._vios_uuids(host_uuid=host_uuid))
+        return random.choice(self.vios_uuids)
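For context, _any_vios_uuid backs operations where any live VIOS on the host
will do, such as uploading a new image LU. A hedged sketch of such a call
site, with tsk_stg standing for pypowervm.tasks.storage, the argument list
mirroring the upload_new_lu signature exercised in the test file above, and
the two-element return value assumed (the new LU wrapper plus a File wrapper
for cleanup):

    # Any VIOS on this host can broker an SSP upload; pick one at random.
    lu, f_wrap = tsk_stg.upload_new_lu(
        self._any_vios_uuid(), self._ssp_wrap, stream, lu_name, f_size)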