PowerVM Driver: vSCSI Fibre Channel volume adapter
This change adds vSCSI Fibre Channel volume support via Cinder for the PowerVM virt driver. Attach, detach, and extend are the volume operations supported by the PowerVM vSCSI FC adapter. PowerVM CI volume tests are run on demand only; leave a review comment containing "powervm:volume-check" to trigger them.

Blueprint: powervm-vscsi
Change-Id: I632993abe70f9f98a032a35891b690db15ded6a0
parent d78055df0e
commit e997ca68b3
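Before the diff itself, a minimal orientation sketch (not part of this change) of how the three supported operations flow through the new volume adapter. It uses only names introduced below; `adapter` (a pypowervm Adapter) and `instance` (a nova Instance) are assumed to be supplied by the usual compute-manager plumbing.

# Sketch only: exercises the three operations this change supports.
from nova.virt.powervm import volume

def cycle_volume(adapter, instance, connection_info):
    # build_volume_driver() returns an FCVscsiVolumeAdapter for
    # 'fibre_channel' connections (see nova/virt/powervm/volume/__init__.py).
    vol_drv = volume.build_volume_driver(adapter, instance, connection_info)
    vol_drv.attach_volume()   # discover the hdisk and map it to the LPAR
    vol_drv.extend_volume()   # no-op on the compute node for vSCSI FC
    vol_drv.detach_volume()   # unmap and remove the hdisk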
@@ -55,3 +55,13 @@ processor, whereas 0.05 means 1/20th of a physical processor. E.g.:

  [powervm]
  proc_units_factor = 0.1
+
+
+Volume Support
+--------------
+Volume support is provided for the PowerVM virt driver via Cinder. Currently,
+the only supported volume protocol is `vSCSI`_ Fibre Channel. Attach, detach,
+and extend are the operations supported by the PowerVM vSCSI FC volume adapter.
+Boot from volume is not yet supported.
+
+.. _vSCSI: https://www.ibm.com/support/knowledgecenter/en/POWER8/p8hat/p8hat_virtualscsi.htm
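As context for the Volume Support docs above, the vSCSI FC adapter consumes a Cinder connection_info dict of roughly the following shape. The field names mirror the fake BDMs built in the unit tests later in this change; the WWPN and LUN values here are illustrative only.

# Illustrative connection_info for a vSCSI Fibre Channel volume.
connection_info = {
    'driver_volume_type': 'fibre_channel',  # required by build_volume_driver
    'data': {
        'volume_id': 'fake_vol1',
        'target_lun': 0,
        # initiator WWPN -> list of target WWPNs
        'initiator_target_map': {'21000024F5': ['50050768']},
    },
}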
@@ -176,7 +176,9 @@ xenserver=complete
vmware=complete
hyperv=complete
ironic=missing
-powervm=missing
+powervm=complete
+driver-notes-powervm=This is not tested for every CI run. Add a
+  "powervm:volume-check" comment to trigger a CI job running volume tests.

[operation.server-bdm]
title=Custom disk configurations on boot
@@ -111,7 +111,9 @@ driver-impl-hyperv=complete
driver-impl-ironic=missing
driver-impl-libvirt-vz-vm=complete
driver-impl-libvirt-vz-ct=missing
-driver-impl-powervm=missing
+driver-impl-powervm=complete
+driver-notes-powervm=This is not tested for every CI run. Add a
+  "powervm:volume-check" comment to trigger a CI job running volume tests.

[operation.attach-tagged-volume]
title=Attach tagged block device to instance
@@ -152,7 +154,9 @@ driver-impl-hyperv=complete
driver-impl-ironic=missing
driver-impl-libvirt-vz-vm=complete
driver-impl-libvirt-vz-ct=missing
-driver-impl-powervm=missing
+driver-impl-powervm=complete
+driver-notes-powervm=This is not tested for every CI run. Add a
+  "powervm:volume-check" comment to trigger a CI job running volume tests.

[operation.extend-volume]
title=Extend block volume attached to instance
@@ -179,7 +183,9 @@ driver-impl-hyperv=missing
driver-impl-ironic=missing
driver-impl-libvirt-vz-vm=unknown
driver-impl-libvirt-vz-ct=missing
-driver-impl-powervm=missing
+driver-impl-powervm=complete
+driver-notes-powervm=This is not tested for every CI run. Add a
+  "powervm:volume-check" comment to trigger a CI job running volume tests.

[operation.attach-interface]
title=Attach virtual network interface to instance
@@ -1113,7 +1119,9 @@ driver-impl-hyperv=complete
driver-impl-ironic=complete
driver-impl-libvirt-vz-vm=partial
driver-impl-libvirt-vz-ct=missing
-driver-impl-powervm=missing
+driver-impl-powervm=complete
+driver-notes-powervm=This is not tested for every CI run. Add a
+  "powervm:volume-check" comment to trigger a CI job running volume tests.

[storage.block.backend.fibrechannel]
title=Block storage over fibre channel
@@ -1136,7 +1144,9 @@ driver-impl-hyperv=complete
driver-impl-ironic=missing
driver-impl-libvirt-vz-vm=complete
driver-impl-libvirt-vz-ct=missing
-driver-impl-powervm=missing
+driver-impl-powervm=complete
+driver-notes-powervm=This is not tested for every CI run. Add a
+  "powervm:volume-check" comment to trigger a CI job running volume tests.

[storage.block.backend.iscsi]
title=Block storage over iSCSI
@@ -255,6 +255,11 @@ class VolumeAttachFailed(Invalid):
                "Reason: %(reason)s")


+class VolumeDetachFailed(Invalid):
+    msg_fmt = _("Volume %(volume_id)s could not be detached. "
+                "Reason: %(reason)s")
+
+
class MultiattachNotSupportedByVirtDriver(NovaException):
    # This exception indicates the compute hosting the instance does not
    # support multiattach volumes. This should generally be considered a
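A short sketch of how the new exception is meant to be raised, mirroring the VolumeAttachFailed usage that appears in fcvscsi.py later in this diff; the volume_id and reason values are illustrative.

from nova import exception as exc

def fail_detach(volume_id, reason):
    # Both exceptions format msg_fmt with volume_id and reason.
    raise exc.VolumeDetachFailed(volume_id=volume_id, reason=reason)

# e.g. fail_detach('a_volume_identifier', 'VM in invalid state') renders:
# "Volume a_volume_identifier could not be detached. Reason: VM in invalid state"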
@@ -1,4 +1,4 @@
-# Copyright 2015, 2017 IBM Corp.
+# Copyright 2015, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -320,3 +320,35 @@ class TestStorage(test.NoDBTestCase):
        tf.assert_called_once_with(
            name='remove_inst_disk_from_mgmt',
            requires=['stg_elem', 'vios_wrap', 'disk_path'])
+
+    def test_attach_volume(self):
+        vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}})
+
+        task = tf_stg.AttachVolume(vol_dvr)
+        task.execute()
+        vol_dvr.attach_volume.assert_called_once_with()
+
+        task.revert('result', 'flow failures')
+        vol_dvr.reset_stg_ftsk.assert_called_once_with()
+        vol_dvr.detach_volume.assert_called_once_with()
+
+        # Validate args on taskflow.task.Task instantiation
+        with mock.patch('taskflow.task.Task.__init__') as tf:
+            tf_stg.AttachVolume(vol_dvr)
+        tf.assert_called_once_with(name='attach_vol_1')
+
+    def test_detach_volume(self):
+        vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}})
+
+        task = tf_stg.DetachVolume(vol_dvr)
+        task.execute()
+        vol_dvr.detach_volume.assert_called_once_with()
+
+        task.revert('result', 'flow failures')
+        vol_dvr.reset_stg_ftsk.assert_called_once_with()
+        vol_dvr.detach_volume.assert_called_once_with()
+
+        # Validate args on taskflow.task.Task instantiation
+        with mock.patch('taskflow.task.Task.__init__') as tf:
+            tf_stg.DetachVolume(vol_dvr)
+        tf.assert_called_once_with(name='detach_vol_1')
@@ -16,6 +16,7 @@ from __future__ import absolute_import

import fixtures
import mock
+from oslo_serialization import jsonutils
from pypowervm import const as pvm_const
from pypowervm import exceptions as pvm_exc
from pypowervm.helpers import log_helper as pvm_hlp_log
@@ -24,14 +25,21 @@ from pypowervm.utils import transaction as pvm_tx
from pypowervm.wrappers import virtual_io_server as pvm_vios
import six

+from nova import block_device as nova_block_device
from nova import conf as cfg
from nova import exception
+from nova.objects import block_device as bdmobj
from nova import test
from nova.tests.unit.virt import powervm
+from nova.virt import block_device as nova_virt_bdm
+from nova.virt import driver as nova_driver
from nova.virt.driver import ComputeDriver
from nova.virt import hardware
from nova.virt.powervm.disk import ssp
from nova.virt.powervm import driver

CONF = cfg.CONF


class TestPowerVMDriver(test.NoDBTestCase):

@@ -68,7 +76,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
            self.drv.capabilities['supports_tagged_attach_interface'])
        self.assertFalse(
            self.drv.capabilities['supports_tagged_attach_volume'])
-        self.assertFalse(self.drv.capabilities['supports_extend_volume'])
+        self.assertTrue(self.drv.capabilities['supports_extend_volume'])
        self.assertFalse(self.drv.capabilities['supports_multiattach'])

    @mock.patch('nova.image.API')
@@ -134,6 +142,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
        mock_bhrfm.assert_called_once_with('sys')
        self.assertEqual('sys', self.drv.host_wrapper)

+    @mock.patch('nova.virt.powervm.tasks.storage.AttachVolume.execute')
    @mock.patch('nova.virt.powervm.tasks.network.PlugMgmtVif.execute')
    @mock.patch('nova.virt.powervm.tasks.network.PlugVifs.execute')
    @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM')
@@ -145,7 +154,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
                autospec=True)
    def test_spawn_ops(self, mock_scrub, mock_bldftsk, mock_crt_lpar,
                       mock_cdrb, mock_cfg_drv, mock_plug_vifs,
-                       mock_plug_mgmt_vif):
+                       mock_plug_mgmt_vif, mock_attach_vol):
        """Validates the 'typical' spawn flow of an instance."""
        mock_cdrb.return_value = True
        self.drv.host_wrapper = mock.Mock()
@@ -153,8 +162,10 @@ class TestPowerVMDriver(test.NoDBTestCase):
                                                 instance=True)
        mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
        mock_bldftsk.return_value = mock_ftsk
+        block_device_info = self._fake_bdms()
        self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
-                       'allocs', network_info='netinfo')
+                       'allocs', network_info='netinfo',
+                       block_device_info=block_device_info)
        mock_crt_lpar.assert_called_once_with(
            self.adp, self.drv.host_wrapper, self.inst)
        mock_bldftsk.assert_called_once_with(
@@ -168,6 +179,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
        self.drv.disk_dvr.attach_disk.assert_called_once_with(
            self.inst, self.drv.disk_dvr.create_disk_from_image.return_value,
            mock_ftsk)
+        self.assertEqual(2, mock_attach_vol.call_count)
        mock_cfg_drv.assert_called_once_with(self.adp)
        mock_cfg_drv.return_value.create_cfg_drv_vopt.assert_called_once_with(
            self.inst, 'files', 'netinfo', mock_ftsk, admin_pass='password',
@@ -175,13 +187,16 @@ class TestPowerVMDriver(test.NoDBTestCase):
        self.pwron.assert_called_once_with(self.adp, self.inst)

        mock_cfg_drv.reset_mock()
+        mock_attach_vol.reset_mock()

-        # No config drive
+        # No config drive, no bdms
        mock_cdrb.return_value = False
        self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
                       'allocs')
        mock_cfg_drv.assert_not_called()
+        mock_attach_vol.assert_not_called()

+    @mock.patch('nova.virt.powervm.tasks.storage.DetachVolume.execute')
    @mock.patch('nova.virt.powervm.tasks.network.UnplugVifs.execute')
    @mock.patch('nova.virt.powervm.vm.delete_lpar')
    @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM')
@@ -189,7 +204,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
    @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
                autospec=True)
    def test_destroy(self, mock_bldftsk, mock_cdrb, mock_cfgdrv,
-                     mock_dlt_lpar, mock_unplug):
+                     mock_dlt_lpar, mock_unplug, mock_detach_vol):
        """Validates PowerVM destroy."""
        self.drv.host_wrapper = mock.Mock()
        self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
@@ -197,10 +212,12 @@ class TestPowerVMDriver(test.NoDBTestCase):

        mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
        mock_bldftsk.return_value = mock_ftsk
+        block_device_info = self._fake_bdms()

        # Good path, with config drive, destroy disks
        mock_cdrb.return_value = True
-        self.drv.destroy('context', self.inst, [], block_device_info={})
+        self.drv.destroy('context', self.inst, [],
+                         block_device_info=block_device_info)
        self.pwroff.assert_called_once_with(
            self.adp, self.inst, force_immediate=True)
        mock_bldftsk.assert_called_once_with(
@@ -210,6 +227,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
        mock_cfgdrv.assert_called_once_with(self.adp)
        mock_cfgdrv.return_value.dlt_vopt.assert_called_once_with(
            self.inst, stg_ftsk=mock_bldftsk.return_value)
+        self.assertEqual(2, mock_detach_vol.call_count)
        self.drv.disk_dvr.detach_disk.assert_called_once_with(
            self.inst)
        self.drv.disk_dvr.delete_disks.assert_called_once_with(
@@ -223,13 +241,15 @@ class TestPowerVMDriver(test.NoDBTestCase):
        mock_cfgdrv.reset_mock()
        self.drv.disk_dvr.detach_disk.reset_mock()
        self.drv.disk_dvr.delete_disks.reset_mock()
+        mock_detach_vol.reset_mock()
        mock_dlt_lpar.reset_mock()

-        # No config drive, preserve disks
+        # No config drive, preserve disks, no block device info
        mock_cdrb.return_value = False
        self.drv.destroy('context', self.inst, [], block_device_info={},
                         destroy_disks=False)
        mock_cfgdrv.return_value.dlt_vopt.assert_not_called()
+        mock_detach_vol.assert_not_called()
        self.drv.disk_dvr.delete_disks.assert_not_called()

        # Non-forced power_off, since preserving disks
@@ -428,3 +448,93 @@ class TestPowerVMDriver(test.NoDBTestCase):
    def test_deallocate_networks_on_reschedule(self):
        candeallocate = self.drv.deallocate_networks_on_reschedule(mock.Mock())
        self.assertTrue(candeallocate)
+
+    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
+    def test_attach_volume(self, mock_vscsi_adpt):
+        """Validates the basic PowerVM attach volume."""
+        # BDMs
+        mock_bdm = self._fake_bdms()['block_device_mapping'][0]
+
+        with mock.patch.object(self.inst, 'save') as mock_save:
+            # Invoke the method.
+            self.drv.attach_volume('context', mock_bdm.get('connection_info'),
+                                   self.inst, mock.sentinel.stg_ftsk)
+
+        # Verify the connect volume was invoked
+        mock_vscsi_adpt.return_value.attach_volume.assert_called_once_with()
+        mock_save.assert_called_once_with()
+
+    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
+    def test_detach_volume(self, mock_vscsi_adpt):
+        """Validates the basic PowerVM detach volume."""
+        # BDMs
+        mock_bdm = self._fake_bdms()['block_device_mapping'][0]
+
+        # Invoke the method, good path test.
+        self.drv.detach_volume('context', mock_bdm.get('connection_info'),
+                               self.inst, mock.sentinel.stg_ftsk)
+        # Verify the disconnect volume was invoked
+        mock_vscsi_adpt.return_value.detach_volume.assert_called_once_with()
+
+    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
+    def test_extend_volume(self, mock_vscsi_adpt):
+        mock_bdm = self._fake_bdms()['block_device_mapping'][0]
+        self.drv.extend_volume(mock_bdm.get('connection_info'), self.inst)
+        mock_vscsi_adpt.return_value.extend_volume.assert_called_once_with()
+
+    def test_vol_drv_iter(self):
+        block_device_info = self._fake_bdms()
+        bdms = nova_driver.block_device_info_get_mapping(block_device_info)
+        vol_adpt = mock.Mock()
+
+        def _get_results(bdms):
+            # Patch so we get the same mock back each time.
+            with mock.patch('nova.virt.powervm.volume.fcvscsi.'
+                            'FCVscsiVolumeAdapter', return_value=vol_adpt):
+                return [
+                    (bdm, vol_drv) for bdm, vol_drv in self.drv._vol_drv_iter(
+                        'context', self.inst, bdms)]
+
+        results = _get_results(bdms)
+        self.assertEqual(
+            'fake_vol1',
+            results[0][0]['connection_info']['data']['volume_id'])
+        self.assertEqual(vol_adpt, results[0][1])
+        self.assertEqual(
+            'fake_vol2',
+            results[1][0]['connection_info']['data']['volume_id'])
+        self.assertEqual(vol_adpt, results[1][1])
+
+        # Test with empty bdms
+        self.assertEqual([], _get_results([]))
+
+    @staticmethod
+    def _fake_bdms():
+        def _fake_bdm(volume_id, target_lun):
+            connection_info = {'driver_volume_type': 'fibre_channel',
+                               'data': {'volume_id': volume_id,
+                                        'target_lun': target_lun,
+                                        'initiator_target_map':
+                                            {'21000024F5': ['50050768']}}}
+            mapping_dict = {'source_type': 'volume', 'volume_id': volume_id,
+                            'destination_type': 'volume',
+                            'connection_info':
+                                jsonutils.dumps(connection_info),
+                            }
+            bdm_dict = nova_block_device.BlockDeviceDict(mapping_dict)
+            bdm_obj = bdmobj.BlockDeviceMapping(**bdm_dict)
+
+            return nova_virt_bdm.DriverVolumeBlockDevice(bdm_obj)
+
+        bdm_list = [_fake_bdm('fake_vol1', 0), _fake_bdm('fake_vol2', 1)]
+        block_device_info = {'block_device_mapping': bdm_list}
+
+        return block_device_info
+
+    @mock.patch('nova.virt.powervm.volume.fcvscsi.wwpns', autospec=True)
+    def test_get_volume_connector(self, mock_wwpns):
+        vol_connector = self.drv.get_volume_connector(mock.Mock())
+        self.assertEqual(mock_wwpns.return_value, vol_connector['wwpns'])
+        self.assertFalse(vol_connector['multipath'])
+        self.assertEqual(vol_connector['host'], CONF.host)
+        self.assertIsNone(vol_connector['initiator'])
nova/tests/unit/virt/powervm/volume/__init__.py (new file, 0 lines)
nova/tests/unit/virt/powervm/volume/test_fcvscsi.py (new file, 456 lines)
@@ -0,0 +1,456 @@
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from pypowervm import const as pvm_const
from pypowervm.tasks import hdisk
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.utils import transaction as pvm_tx
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios

from nova import conf as cfg
from nova import exception as exc
from nova import test
from nova.virt.powervm.volume import fcvscsi

CONF = cfg.CONF

I_WWPN_1 = '21000024FF649104'
I_WWPN_2 = '21000024FF649105'


class TestVSCSIAdapter(test.NoDBTestCase):

    def setUp(self):
        super(TestVSCSIAdapter, self).setUp()

        self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
        self.wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
        self.ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
        self.ftsk.configure_mock(wrapper_tasks={'vios_uuid': self.wtsk})

        @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
        def init_vol_adpt(mock_pvm_uuid):
            con_info = {
                'serial': 'id',
                'data': {
                    'initiator_target_map': {
                        I_WWPN_1: ['t1'],
                        I_WWPN_2: ['t2', 't3']
                    },
                    'target_lun': '1',
                    'volume_id': 'a_volume_identifier',
                },
            }
            mock_inst = mock.MagicMock()
            mock_pvm_uuid.return_value = '1234'

            return fcvscsi.FCVscsiVolumeAdapter(
                self.adpt, mock_inst, con_info, stg_ftsk=self.ftsk)
        self.vol_drv = init_vol_adpt()

    @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS', autospec=True)
    def test_reset_stg_ftsk(self, mock_vios, mock_ftsk):
        self.vol_drv.reset_stg_ftsk('stg_ftsk')
        self.assertEqual('stg_ftsk', self.vol_drv.stg_ftsk)

        mock_vios.getter.return_value = 'getter'
        mock_ftsk.return_value = 'local_feed_task'
        self.vol_drv.reset_stg_ftsk()
        self.assertEqual('local_feed_task', self.vol_drv.stg_ftsk)
        mock_vios.getter.assert_called_once_with(
            self.adpt, xag=[pvm_const.XAG.VIO_SMAP])
        mock_ftsk.assert_called_once_with('local_feed_task', 'getter')

    @mock.patch('pypowervm.tasks.partition.get_physical_wwpns', autospec=True)
    def test_wwpns(self, mock_vio_wwpns):
        mock_vio_wwpns.return_value = ['aa', 'bb']
        wwpns = fcvscsi.wwpns(self.adpt)
        self.assertListEqual(['aa', 'bb'], wwpns)
        mock_vio_wwpns.assert_called_once_with(self.adpt, force_refresh=False)

    def test_set_udid(self):
        # Mock connection info
        self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY] = None

        # Set the UDID
        self.vol_drv._set_udid('udid')

        # Verify
        self.assertEqual(
            'udid', self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY])

    def test_get_udid(self):
        # Set the value to retrieve
        self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY] = 'udid'
        retrieved_udid = self.vol_drv._get_udid()
        # Check key found
        self.assertEqual('udid', retrieved_udid)

        # Check key not found
        self.vol_drv.connection_info['data'].pop(fcvscsi.UDID_KEY)
        retrieved_udid = self.vol_drv._get_udid()
        # Check key not found
        self.assertIsNone(retrieved_udid)

    @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
    @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
    def test_attach_volume(self, mock_feed_task, mock_get_wrap):
        mock_lpar_wrap = mock.MagicMock()
        mock_lpar_wrap.can_modify_io.return_value = True, None
        mock_get_wrap.return_value = mock_lpar_wrap
        mock_attach_ftsk = mock_feed_task.return_value

        # Pass if all vioses modified
        mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
                                          'vios2': {'vio_modified': True}}}
        mock_attach_ftsk.execute.return_value = mock_ret
        self.vol_drv.attach_volume()
        mock_feed_task.assert_called_once()
        mock_attach_ftsk.add_functor_subtask.assert_called_once_with(
            self.vol_drv._attach_volume_to_vio, provides='vio_modified',
            flag_update=False)
        mock_attach_ftsk.execute.assert_called_once()
        self.ftsk.execute.assert_called_once()

        mock_feed_task.reset_mock()
        mock_attach_ftsk.reset_mock()
        self.ftsk.reset_mock()

        # Pass if 1 vios modified
        mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
                                          'vios2': {'vio_modified': False}}}
        mock_attach_ftsk.execute.return_value = mock_ret
        self.vol_drv.attach_volume()
        mock_feed_task.assert_called_once()
        mock_attach_ftsk.add_functor_subtask.assert_called_once_with(
            self.vol_drv._attach_volume_to_vio, provides='vio_modified',
            flag_update=False)
        mock_attach_ftsk.execute.assert_called_once()
        self.ftsk.execute.assert_called_once()

        # Raise if no vios modified
        mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': False},
                                          'vios2': {'vio_modified': False}}}
        mock_attach_ftsk.execute.return_value = mock_ret
        self.assertRaises(exc.VolumeAttachFailed, self.vol_drv.attach_volume)

        # Raise if vm in invalid state
        mock_lpar_wrap.can_modify_io.return_value = False, None
        self.assertRaises(exc.VolumeAttachFailed, self.vol_drv.attach_volume)

    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
                '_set_udid')
    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
                '_add_append_mapping')
    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
                '_discover_volume_on_vios')
    @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
    def test_attach_volume_to_vio(self, mock_good_disc, mock_disc_vol,
                                  mock_add_map, mock_set_udid):
        # Setup mocks
        mock_vios = mock.MagicMock()
        mock_vios.uuid = 'uuid'
        mock_disc_vol.return_value = 'status', 'devname', 'udid'

        # Bad discovery
        mock_good_disc.return_value = False
        ret = self.vol_drv._attach_volume_to_vio(mock_vios)
        self.assertFalse(ret)
        mock_disc_vol.assert_called_once_with(mock_vios)
        mock_good_disc.assert_called_once_with('status', 'devname')

        # Good discovery
        mock_good_disc.return_value = True
        ret = self.vol_drv._attach_volume_to_vio(mock_vios)
        self.assertTrue(ret)
        mock_add_map.assert_called_once_with(
            'uuid', 'devname', tag='a_volume_identifier')
        mock_set_udid.assert_called_once_with('udid')

    def test_extend_volume(self):
        # Ensure the method is implemented
        self.vol_drv.extend_volume()

    @mock.patch('nova.virt.powervm.volume.fcvscsi.LOG')
    @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
    @mock.patch('pypowervm.tasks.hdisk.discover_hdisk', autospec=True)
    @mock.patch('pypowervm.tasks.hdisk.build_itls', autospec=True)
    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
                '_get_hdisk_itls')
    def test_discover_volume_on_vios(self, mock_get_itls, mock_build_itls,
                                     mock_disc_hdisk, mock_good_disc,
                                     mock_log):
        mock_vios = mock.MagicMock()
        mock_vios.uuid = 'uuid'
        mock_get_itls.return_value = 'v_wwpns', 't_wwpns', 'lun'
        mock_build_itls.return_value = 'itls'
        mock_disc_hdisk.return_value = 'status', 'devname', 'udid'

        # Good discovery
        mock_good_disc.return_value = True
        status, devname, udid = self.vol_drv._discover_volume_on_vios(
            mock_vios)
        self.assertEqual(mock_disc_hdisk.return_value[0], status)
        self.assertEqual(mock_disc_hdisk.return_value[1], devname)
        self.assertEqual(mock_disc_hdisk.return_value[2], udid)
        mock_get_itls.assert_called_once_with(mock_vios)
        mock_build_itls.assert_called_once_with('v_wwpns', 't_wwpns', 'lun')
        mock_disc_hdisk.assert_called_once_with(self.adpt, 'uuid', 'itls')
        mock_good_disc.assert_called_once_with('status', 'devname')
        mock_log.info.assert_called_once()
        mock_log.warning.assert_not_called()

        mock_log.reset_mock()

        # Bad discovery, not device in use status
        mock_good_disc.return_value = False
        self.vol_drv._discover_volume_on_vios(mock_vios)
        mock_log.warning.assert_not_called()
        mock_log.info.assert_not_called()

        # Bad discovery, device in use status
        mock_disc_hdisk.return_value = (hdisk.LUAStatus.DEVICE_IN_USE, 'dev',
                                        'udid')
        self.vol_drv._discover_volume_on_vios(mock_vios)
        mock_log.warning.assert_called_once()

    def test_get_hdisk_itls(self):
        """Validates the _get_hdisk_itls method."""

        mock_vios = mock.MagicMock()
        mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_1]

        i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
        self.assertListEqual([I_WWPN_1], i_wwpn)
        self.assertListEqual(['t1'], t_wwpns)
        self.assertEqual('1', lun)

        mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_2]
        i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
        self.assertListEqual([I_WWPN_2], i_wwpn)
        self.assertListEqual(['t2', 't3'], t_wwpns)

        mock_vios.get_active_pfc_wwpns.return_value = ['12345']
        i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
        self.assertListEqual([], i_wwpn)

    @mock.patch('pypowervm.wrappers.storage.PV', autospec=True)
    @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
                autospec=True)
    @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
    def test_add_append_mapping(self, mock_add_map, mock_bld_map, mock_pv):
        def test_afs(add_func):
            mock_vios = mock.create_autospec(pvm_vios.VIOS)
            self.assertEqual(mock_add_map.return_value, add_func(mock_vios))
            mock_pv.bld.assert_called_once_with(self.adpt, 'devname', tag=None)
            mock_bld_map.assert_called_once_with(
                None, mock_vios, self.vol_drv.vm_uuid,
                mock_pv.bld.return_value)
            mock_add_map.assert_called_once_with(
                mock_vios, mock_bld_map.return_value)

        self.wtsk.add_functor_subtask.side_effect = test_afs
        self.vol_drv._add_append_mapping('vios_uuid', 'devname')
        self.wtsk.add_functor_subtask.assert_called_once()

    @mock.patch('nova.virt.powervm.volume.fcvscsi.LOG.warning')
    @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
    @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
    def test_detach_volume(self, mock_feed_task, mock_get_wrap, mock_log):
        mock_lpar_wrap = mock.MagicMock()
        mock_lpar_wrap.can_modify_io.return_value = True, None
        mock_get_wrap.return_value = mock_lpar_wrap
        mock_detach_ftsk = mock_feed_task.return_value

        # Multiple vioses modified
        mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
                                          'vios2': {'vio_modified': True}}}
        mock_detach_ftsk.execute.return_value = mock_ret
        self.vol_drv.detach_volume()
        mock_feed_task.assert_called_once()
        mock_detach_ftsk.add_functor_subtask.assert_called_once_with(
            self.vol_drv._detach_vol_for_vio, provides='vio_modified',
            flag_update=False)
        mock_detach_ftsk.execute.assert_called_once_with()
        self.ftsk.execute.assert_called_once_with()
        mock_log.assert_not_called()

        # 1 vios modified
        mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
                                          'vios2': {'vio_modified': False}}}
        mock_detach_ftsk.execute.return_value = mock_ret
        self.vol_drv.detach_volume()
        mock_log.assert_not_called()

        # No vioses modified
        mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': False},
                                          'vios2': {'vio_modified': False}}}
        mock_detach_ftsk.execute.return_value = mock_ret
        self.vol_drv.detach_volume()
        mock_log.assert_called_once()

        # Raise if exception during execute
        mock_detach_ftsk.execute.side_effect = Exception()
        self.assertRaises(exc.VolumeDetachFailed, self.vol_drv.detach_volume)

        # Raise if vm in invalid state
        mock_lpar_wrap.can_modify_io.return_value = False, None
        self.assertRaises(exc.VolumeDetachFailed, self.vol_drv.detach_volume)

    @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
                '_discover_volume_on_vios')
    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
                '_add_remove_mapping')
    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
                '_add_remove_hdisk')
    @mock.patch('nova.virt.powervm.vm.get_vm_qp')
    def test_detach_vol_for_vio(self, mock_get_qp, mock_rm_hdisk, mock_rm_map,
                                mock_disc_vol, mock_good_disc):
        # Good detach, bdm data is found
        self.vol_drv._set_udid('udid')
        mock_vios = mock.MagicMock()
        mock_vios.uuid = 'vios_uuid'
        mock_vios.hdisk_from_uuid.return_value = 'devname'
        mock_get_qp.return_value = 'part_id'
        ret = self.vol_drv._detach_vol_for_vio(mock_vios)
        self.assertTrue(ret)
        mock_vios.hdisk_from_uuid.assert_called_once_with('udid')
        mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
        mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')

        mock_vios.reset_mock()
        mock_rm_map.reset_mock()
        mock_rm_hdisk.reset_mock()

        # Good detach, no udid
        self.vol_drv._set_udid(None)
        mock_disc_vol.return_value = 'status', 'devname', 'udid'
        mock_good_disc.return_value = True
        ret = self.vol_drv._detach_vol_for_vio(mock_vios)
        self.assertTrue(ret)
        mock_vios.hdisk_from_uuid.assert_not_called()
        mock_disc_vol.assert_called_once_with(mock_vios)
        mock_good_disc.assert_called_once_with('status', 'devname')
        mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
        mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')

        mock_vios.reset_mock()
        mock_disc_vol.reset_mock()
        mock_good_disc.reset_mock()
        mock_rm_map.reset_mock()
        mock_rm_hdisk.reset_mock()

        # Good detach, no device name
        self.vol_drv._set_udid('udid')
        mock_vios.hdisk_from_uuid.return_value = None
        ret = self.vol_drv._detach_vol_for_vio(mock_vios)
        self.assertTrue(ret)
        mock_vios.hdisk_from_uuid.assert_called_once_with('udid')
        mock_disc_vol.assert_called_once_with(mock_vios)
        mock_good_disc.assert_called_once_with('status', 'devname')
        mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
        mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')

        mock_rm_map.reset_mock()
        mock_rm_hdisk.reset_mock()

        # Bad detach, invalid state
        mock_good_disc.return_value = False
        ret = self.vol_drv._detach_vol_for_vio(mock_vios)
        self.assertFalse(ret)
        mock_rm_map.assert_not_called()
        mock_rm_hdisk.assert_not_called()

        # Bad detach, exception discovering volume on vios
        mock_disc_vol.side_effect = Exception()
        ret = self.vol_drv._detach_vol_for_vio(mock_vios)
        self.assertFalse(ret)
        mock_rm_map.assert_not_called()
        mock_rm_hdisk.assert_not_called()

    @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
    @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
    def test_add_remove_mapping(self, mock_rm_maps, mock_gen_match):
        def test_afs(rm_func):
            mock_vios = mock.create_autospec(pvm_vios.VIOS)
            self.assertEqual(mock_rm_maps.return_value, rm_func(mock_vios))
            mock_gen_match.assert_called_once_with(
                pvm_stor.PV, names=['devname'])
            mock_rm_maps.assert_called_once_with(
                mock_vios, 'vm_uuid', mock_gen_match.return_value)

        self.wtsk.add_functor_subtask.side_effect = test_afs
        self.vol_drv._add_remove_mapping('vm_uuid', 'vios_uuid', 'devname')
        self.wtsk.add_functor_subtask.assert_called_once()

    @mock.patch('pypowervm.tasks.hdisk.remove_hdisk', autospec=True)
    @mock.patch('taskflow.task.FunctorTask', autospec=True)
    @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
                '_check_host_mappings')
    def test_add_remove_hdisk(self, mock_check_maps, mock_functask,
                              mock_rm_hdisk):
        mock_vios = mock.MagicMock()
        mock_vios.uuid = 'uuid'
        mock_check_maps.return_value = True
        self.vol_drv._add_remove_hdisk(mock_vios, 'devname')
        mock_functask.assert_not_called()
        self.ftsk.add_post_execute.assert_not_called()
        mock_check_maps.assert_called_once_with(mock_vios, 'devname')
        self.assertEqual(0, mock_rm_hdisk.call_count)

        def test_functor_task(rm_hdisk, name=None):
            rm_hdisk()
            return 'functor_task'

        mock_check_maps.return_value = False
        mock_functask.side_effect = test_functor_task
        self.vol_drv._add_remove_hdisk(mock_vios, 'devname')
        mock_functask.assert_called_once()
        self.ftsk.add_post_execute.assert_called_once_with('functor_task')
        mock_rm_hdisk.assert_called_once_with(self.adpt, CONF.host,
                                              'devname', 'uuid')

    @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
    @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
    def test_check_host_mappings(self, mock_find_maps, mock_gen_match):
        mock_vios = mock.MagicMock()
        mock_vios.uuid = 'uuid2'
        mock_v1 = mock.MagicMock(scsi_mappings='scsi_maps_1', uuid='uuid1')
        mock_v2 = mock.MagicMock(scsi_mappings='scsi_maps_2', uuid='uuid2')
        mock_feed = [mock_v1, mock_v2]
        self.ftsk.feed = mock_feed

        # Multiple mappings found
        mock_find_maps.return_value = ['map1', 'map2']
        ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
        self.assertTrue(ret)
        mock_gen_match.assert_called_once_with(pvm_stor.PV, names=['devname'])
        mock_find_maps.assert_called_once_with('scsi_maps_2', None,
                                               mock_gen_match.return_value)

        # One mapping found
        mock_find_maps.return_value = ['map1']
        ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
        self.assertFalse(ret)

        # No mappings found
        mock_find_maps.return_value = []
        ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
        self.assertFalse(ret)
@@ -43,6 +43,8 @@ from nova.virt.powervm.tasks import network as tf_net
from nova.virt.powervm.tasks import storage as tf_stg
from nova.virt.powervm.tasks import vm as tf_vm
from nova.virt.powervm import vm
+from nova.virt.powervm import volume
+from nova.virt.powervm.volume import fcvscsi

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -71,7 +73,7 @@ class PowerVMDriver(driver.ComputeDriver):
            'supports_device_tagging': False,
            'supports_tagged_attach_interface': False,
            'supports_tagged_attach_volume': False,
-            'supports_extend_volume': False,
+            'supports_extend_volume': True,
            'supports_multiattach': False,
        }
        super(PowerVMDriver, self).__init__(virtapi)
@@ -215,6 +217,16 @@ class PowerVMDriver(driver.ComputeDriver):
        flow_spawn.add(tf_stg.AttachDisk(
            self.disk_dvr, instance, stg_ftsk=stg_ftsk))

+        # Extract the block devices.
+        bdms = driver.block_device_info_get_mapping(block_device_info)
+
+        # Determine if there are volumes to connect. If so, add a connection
+        # for each type.
+        for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms,
+                                               stg_ftsk=stg_ftsk):
+            # Connect the volume. This will update the connection_info.
+            flow_spawn.add(tf_stg.AttachVolume(vol_drv))
+
        # If the config drive is needed, add those steps. Should be done
        # after all the other I/O.
        if configdrive.required_by(instance):
@@ -275,7 +287,14 @@ class PowerVMDriver(driver.ComputeDriver):
        flow.add(tf_stg.DeleteVOpt(
            self.adapter, instance, stg_ftsk=stg_ftsk))

-        # TODO(thorst, efried) Add volume disconnect tasks
+        # Extract the block devices.
+        bdms = driver.block_device_info_get_mapping(block_device_info)
+
+        # Determine if there are volumes to detach. If so, remove each
+        # volume (within the transaction manager)
+        for bdm, vol_drv in self._vol_drv_iter(
+                context, instance, bdms, stg_ftsk=stg_ftsk):
+            flow.add(tf_stg.DetachVolume(vol_drv))

        # Detach the disk storage adapters
        flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))
@@ -492,3 +511,112 @@ class PowerVMDriver(driver.ComputeDriver):
        :returns: Boolean value. If True deallocate networks on reschedule.
        """
        return True
+
+    def attach_volume(self, context, connection_info, instance, mountpoint,
+                      disk_bus=None, device_type=None, encryption=None):
+        """Attach the volume to the instance using the connection_info.
+
+        :param context: security context
+        :param connection_info: Volume connection information from the block
+                                device mapping
+        :param instance: nova.objects.instance.Instance
+        :param mountpoint: Unused
+        :param disk_bus: Unused
+        :param device_type: Unused
+        :param encryption: Unused
+        """
+        self._log_operation('attach_volume', instance)
+
+        # Define the flow
+        flow = tf_lf.Flow("attach_volume")
+
+        # Build the driver
+        vol_drv = volume.build_volume_driver(self.adapter, instance,
+                                             connection_info)
+
+        # Add the volume attach to the flow.
+        flow.add(tf_stg.AttachVolume(vol_drv))
+
+        # Run the flow
+        tf_base.run(flow, instance=instance)
+
+        # The volume connector may have updated the system metadata. Save
+        # the instance to persist the data. Spawn/destroy auto saves instance,
+        # but the attach does not. Detach does not need this save - as the
+        # detach flows do not (currently) modify system metadata. May need
+        # to revise in the future as volume connectors evolve.
+        instance.save()
+
+    def detach_volume(self, context, connection_info, instance, mountpoint,
+                      encryption=None):
+        """Detach the volume attached to the instance.
+
+        :param context: security context
+        :param connection_info: Volume connection information from the block
+                                device mapping
+        :param instance: nova.objects.instance.Instance
+        :param mountpoint: Unused
+        :param encryption: Unused
+        """
+        self._log_operation('detach_volume', instance)
+
+        # Define the flow
+        flow = tf_lf.Flow("detach_volume")
+
+        # Get a volume adapter for this volume
+        vol_drv = volume.build_volume_driver(self.adapter, instance,
+                                             connection_info)
+
+        # Add a task to detach the volume
+        flow.add(tf_stg.DetachVolume(vol_drv))
+
+        # Run the flow
+        tf_base.run(flow, instance=instance)
+
+    def extend_volume(self, connection_info, instance):
+        """Extend the disk attached to the instance.
+
+        :param dict connection_info: The connection for the extended volume.
+        :param nova.objects.instance.Instance instance:
+            The instance whose volume gets extended.
+        :return: None
+        """
+
+        vol_drv = volume.build_volume_driver(
+            self.adapter, instance, connection_info)
+        vol_drv.extend_volume()
+
+    def _vol_drv_iter(self, context, instance, bdms, stg_ftsk=None):
+        """Yields a bdm and volume driver.
+
+        :param context: security context
+        :param instance: nova.objects.instance.Instance
+        :param bdms: block device mappings
+        :param stg_ftsk: storage FeedTask
+        """
+        # Get a volume driver for each volume
+        for bdm in bdms or []:
+            conn_info = bdm.get('connection_info')
+            vol_drv = volume.build_volume_driver(self.adapter, instance,
+                                                 conn_info,
+                                                 stg_ftsk=stg_ftsk)
+            yield bdm, vol_drv
+
+    def get_volume_connector(self, instance):
+        """Get connector information for the instance for attaching to volumes.
+
+        Connector information is a dictionary representing information about
+        the system that will be making the connection.
+
+        :param instance: nova.objects.instance.Instance
+        """
+        # Put the values in the connector
+        connector = {}
+        wwpn_list = fcvscsi.wwpns(self.adapter)
+
+        if wwpn_list is not None:
+            connector["wwpns"] = wwpn_list
+        connector["multipath"] = False
+        connector['host'] = CONF.host
+        connector['initiator'] = None
+
+        return connector
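For reference, the connector built by get_volume_connector() above ends up with the shape below, mirroring test_get_volume_connector earlier in this diff. The WWPN values are illustrative; in practice the list is whatever fcvscsi.wwpns() reports for the host's physical FC ports, and 'host' comes from CONF.host.

# Example of the dict get_volume_connector() returns (values illustrative).
connector = {
    'wwpns': ['21000024FF649104', '21000024FF649105'],
    'multipath': False,
    'host': 'powervm-host-1',  # CONF.host
    'initiator': None,         # always None for this driver
}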
@@ -1,4 +1,4 @@
-# Copyright 2015, 2017 IBM Corp.
+# Copyright 2015, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -19,12 +19,92 @@ from taskflow import task
from taskflow.types import failure as task_fail

from nova import exception
+from nova.virt import block_device
from nova.virt.powervm import media
from nova.virt.powervm import mgmt

LOG = logging.getLogger(__name__)


+class AttachVolume(task.Task):
+
+    """The task to attach a volume to an instance."""
+
+    def __init__(self, vol_drv):
+        """Create the task.
+
+        :param vol_drv: The volume driver. Ties the storage to a connection
+                        type (ex. vSCSI).
+        """
+        self.vol_drv = vol_drv
+        self.vol_id = block_device.get_volume_id(self.vol_drv.connection_info)
+
+        super(AttachVolume, self).__init__(name='attach_vol_%s' % self.vol_id)
+
+    def execute(self):
+        LOG.info('Attaching volume %(vol)s.', {'vol': self.vol_id},
+                 instance=self.vol_drv.instance)
+        self.vol_drv.attach_volume()
+
+    def revert(self, result, flow_failures):
+        LOG.warning('Rolling back attachment for volume %(vol)s.',
+                    {'vol': self.vol_id}, instance=self.vol_drv.instance)
+
+        # Note that the rollback is *instant*. Resetting the FeedTask ensures
+        # immediate rollback.
+        self.vol_drv.reset_stg_ftsk()
+        try:
+            # We attempt to detach in case we 'partially attached'. In
+            # the attach scenario, perhaps one of the Virtual I/O Servers
+            # was attached. This attempts to clear anything out to make sure
+            # the terminate attachment runs smoothly.
+            self.vol_drv.detach_volume()
+        except exception.VolumeDetachFailed:
+            # Does not block due to being in the revert flow.
+            LOG.exception("Unable to detach volume %s during rollback.",
+                          self.vol_id, instance=self.vol_drv.instance)
+
+
+class DetachVolume(task.Task):
+
+    """The task to detach a volume from an instance."""
+
+    def __init__(self, vol_drv):
+        """Create the task.
+
+        :param vol_drv: The volume driver. Ties the storage to a connection
+                        type (ex. vSCSI).
+        """
+        self.vol_drv = vol_drv
+        self.vol_id = self.vol_drv.connection_info['data']['volume_id']
+
+        super(DetachVolume, self).__init__(name='detach_vol_%s' % self.vol_id)
+
+    def execute(self):
+        LOG.info('Detaching volume %(vol)s.',
+                 {'vol': self.vol_id}, instance=self.vol_drv.instance)
+        self.vol_drv.detach_volume()
+
+    def revert(self, result, flow_failures):
+        LOG.warning('Reattaching volume %(vol)s on detach rollback.',
+                    {'vol': self.vol_id}, instance=self.vol_drv.instance)
+
+        # Note that the rollback is *instant*. Resetting the FeedTask ensures
+        # immediate rollback.
+        self.vol_drv.reset_stg_ftsk()
+        try:
+            # We try to reattach the volume here so that it maintains its
+            # linkage (in the hypervisor) to the VM. This makes it easier for
+            # operators to understand the linkage between the VMs and volumes
+            # in error scenarios. This is simply useful for debug purposes
+            # if there is an operational error.
+            self.vol_drv.attach_volume()
+        except exception.VolumeAttachFailed:
+            # Does not block due to being in the revert flow. See above.
+            LOG.exception("Unable to reattach volume %s during rollback.",
+                          self.vol_id, instance=self.vol_drv.instance)
+
+
class CreateDiskForImg(task.Task):

    """The Task to create the disk from an image in the storage."""
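To see where these tasks plug in, here is a trimmed sketch of the flow wiring used by PowerVMDriver.attach_volume in this same change. The tf_base.run() call appears in driver.py above; the `nova.virt.powervm.tasks.base` import path for it is assumed from the alias used there.

# Minimal sketch of running AttachVolume inside a TaskFlow linear flow.
from taskflow.patterns import linear_flow as tf_lf

from nova.virt.powervm.tasks import base as tf_base  # assumed import path
from nova.virt.powervm.tasks import storage as tf_stg
from nova.virt.powervm import volume

def attach(adapter, instance, connection_info):
    flow = tf_lf.Flow("attach_volume")
    vol_drv = volume.build_volume_driver(adapter, instance, connection_info)
    flow.add(tf_stg.AttachVolume(vol_drv))
    # On failure, AttachVolume.revert resets the FeedTask and attempts a
    # cleanup detach, as defined above.
    tf_base.run(flow, instance=instance)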
nova/virt/powervm/volume/__init__.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import exception
from nova.i18n import _
from nova.virt.powervm.volume import fcvscsi


def build_volume_driver(adapter, instance, conn_info, stg_ftsk=None):
    drv_type = conn_info.get('driver_volume_type')
    if drv_type != 'fibre_channel':
        reason = _("Invalid connection type of %s") % drv_type
        raise exception.InvalidVolume(reason=reason)
    return fcvscsi.FCVscsiVolumeAdapter(adapter, instance, conn_info,
                                        stg_ftsk=stg_ftsk)
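A quick behavior sketch of the factory above: 'fibre_channel' maps to the vSCSI adapter, anything else raises InvalidVolume. The conn_info dicts here are illustrative, and the fibre_channel call is shown commented out because constructing the adapter needs a real pypowervm adapter and instance.

from nova import exception
from nova.virt.powervm import volume

fc_info = {'driver_volume_type': 'fibre_channel',
           'data': {'volume_id': 'v1', 'target_lun': 1,
                    'initiator_target_map': {'21000024F5': ['50050768']}}}
iscsi_info = {'driver_volume_type': 'iscsi', 'data': {}}

# Returns an FCVscsiVolumeAdapter (adapter/instance supplied by the caller):
# vol_drv = volume.build_volume_driver(adapter, instance, fc_info)

try:
    volume.build_volume_driver(None, None, iscsi_info)
except exception.InvalidVolume:
    pass  # "Invalid connection type of iscsi"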
nova/virt/powervm/volume/fcvscsi.py (new file, 469 lines)
@ -0,0 +1,469 @@
|
||||
# Copyright 2015, 2018 IBM Corp.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_concurrency import lockutils
|
||||
from oslo_log import log as logging
|
||||
from pypowervm import const as pvm_const
|
||||
from pypowervm.tasks import hdisk
|
||||
from pypowervm.tasks import partition as pvm_tpar
|
||||
from pypowervm.tasks import scsi_mapper as tsk_map
|
||||
from pypowervm.utils import transaction as pvm_tx
|
||||
from pypowervm.wrappers import storage as pvm_stor
|
||||
from pypowervm.wrappers import virtual_io_server as pvm_vios
|
||||
import six
|
||||
from taskflow import task
|
||||
|
||||
from nova import conf as cfg
|
||||
from nova import exception as exc
|
||||
from nova.i18n import _
|
||||
from nova.virt import block_device
|
||||
from nova.virt.powervm import vm
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
LOCAL_FEED_TASK = 'local_feed_task'
|
||||
UDID_KEY = 'target_UDID'
|
||||
|
||||
# A global variable that will cache the physical WWPNs on the system.
|
||||
_vscsi_pfc_wwpns = None
|
||||
|
||||
|
||||
@lockutils.synchronized('vscsi_wwpns')
|
||||
def wwpns(adapter):
|
||||
"""Builds the WWPNs of the adapters that will connect the ports.
|
||||
|
||||
:return: The list of WWPNs that need to be included in the zone set.
|
||||
"""
|
||||
return pvm_tpar.get_physical_wwpns(adapter, force_refresh=False)
|
||||
|
||||
|
||||
class FCVscsiVolumeAdapter(object):
|
||||
|
||||
def __init__(self, adapter, instance, connection_info, stg_ftsk=None):
|
||||
"""Initialize the PowerVMVolumeAdapter
|
||||
|
||||
:param adapter: The pypowervm adapter.
|
||||
:param instance: The nova instance that the volume should attach to.
|
||||
:param connection_info: The volume connection info generated from the
|
||||
BDM. Used to determine how to attach the
|
||||
volume to the VM.
|
||||
:param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
|
||||
I/O Operations. If provided, the Virtual I/O Server
|
||||
mapping updates will be added to the FeedTask. This
|
||||
defers the updates to some later point in time. If
|
||||
the FeedTask is not provided, the updates will be run
|
||||
immediately when the respective method is executed.
|
||||
"""
|
||||
self.adapter = adapter
|
||||
self.instance = instance
|
||||
self.connection_info = connection_info
|
||||
self.vm_uuid = vm.get_pvm_uuid(instance)
|
||||
self.reset_stg_ftsk(stg_ftsk=stg_ftsk)
|
||||
self._pfc_wwpns = None
|
||||
|
||||
@property
|
||||
def volume_id(self):
|
||||
"""Method to return the volume id.
|
||||
|
||||
Every driver must implement this method if the default impl will
|
||||
not work for their data.
|
||||
"""
|
||||
return block_device.get_volume_id(self.connection_info)
|
||||
|
||||
def reset_stg_ftsk(self, stg_ftsk=None):
|
||||
"""Resets the pypowervm transaction FeedTask to a new value.
|
||||
|
||||
The previous updates from the original FeedTask WILL NOT be migrated
|
||||
to this new FeedTask.
|
||||
|
||||
:param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
|
||||
I/O Operations. If provided, the Virtual I/O Server
|
||||
mapping updates will be added to the FeedTask. This
|
||||
defers the updates to some later point in time. If
|
||||
the FeedTask is not provided, the updates will be run
|
||||
immediately when this method is executed.
|
||||
"""
|
||||
if stg_ftsk is None:
|
||||
getter = pvm_vios.VIOS.getter(
|
||||
self.adapter, xag=[pvm_const.XAG.VIO_SMAP])
|
||||
self.stg_ftsk = pvm_tx.FeedTask(LOCAL_FEED_TASK, getter)
|
||||
else:
|
||||
self.stg_ftsk = stg_ftsk
|
||||
|
||||
def _set_udid(self, udid):
|
||||
"""This method will set the hdisk udid in the connection_info.
|
||||
|
||||
:param udid: The hdisk target_udid to be stored in system_metadata
|
||||
"""
|
||||
self.connection_info['data'][UDID_KEY] = udid
|
||||
|
||||
def _get_udid(self):
|
||||
"""This method will return the hdisk udid stored in connection_info.
|
||||
|
||||
:return: The target_udid associated with the hdisk
|
||||
"""
|
||||
try:
|
||||
return self.connection_info['data'][UDID_KEY]
|
||||
except (KeyError, ValueError):
|
||||
# It's common to lose our specific data in the BDM. The connection
|
||||
# information can be 'refreshed' by operations like live migrate
|
||||
# and resize
|
||||
LOG.info('Failed to retrieve target_UDID key from BDM for volume '
|
||||
'id %s', self.volume_id, instance=self.instance)
|
||||
return None
|
||||
|
||||
def attach_volume(self):
|
||||
"""Attaches the volume."""
|
||||
|
||||
# Check if the VM is in a state where the attach is acceptable.
|
||||
lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
|
||||
capable, reason = lpar_w.can_modify_io()
|
||||
if not capable:
|
||||
raise exc.VolumeAttachFailed(
|
||||
volume_id=self.volume_id, reason=reason)
|
||||
|
||||
# Its about to get weird. The transaction manager has a list of
|
||||
# VIOSes. We could use those, but they only have SCSI mappings (by
|
||||
# design). They do not have storage (super expensive).
|
||||
#
|
||||
# We need the storage xag when we are determining which mappings to
|
||||
# add to the system. But we don't want to tie it to the stg_ftsk. If
|
||||
# we do, every retry, every etag gather, etc... takes MUCH longer.
|
||||
#
|
||||
# So we get the VIOSes with the storage xag here, separately, to save
|
||||
# the stg_ftsk from potentially having to run it multiple times.
|
||||
attach_ftsk = pvm_tx.FeedTask(
|
||||
'attach_volume_to_vio', pvm_vios.VIOS.getter(
|
||||
self.adapter, xag=[pvm_const.XAG.VIO_STOR,
|
||||
pvm_const.XAG.VIO_SMAP]))
|
||||
|
||||
# Find valid hdisks and map to VM.
|
||||
attach_ftsk.add_functor_subtask(
|
||||
self._attach_volume_to_vio, provides='vio_modified',
|
||||
flag_update=False)
|
||||
|
||||
ret = attach_ftsk.execute()
|
||||
|
||||
# Check the number of VIOSes
|
||||
vioses_modified = 0
|
||||
for result in ret['wrapper_task_rets'].values():
|
||||
if result['vio_modified']:
|
||||
vioses_modified += 1
|
||||
|
||||
# Validate that a vios was found
|
||||
if vioses_modified == 0:
|
||||
msg = (_('Failed to discover valid hdisk on any Virtual I/O '
|
||||
'Server for volume %(volume_id)s.') %
|
||||
{'volume_id': self.volume_id})
|
||||
ex_args = {'volume_id': self.volume_id, 'reason': msg}
|
||||
raise exc.VolumeAttachFailed(**ex_args)
|
||||
|
||||
self.stg_ftsk.execute()
|
||||
|
||||
def _attach_volume_to_vio(self, vios_w):
|
||||
"""Attempts to attach a volume to a given VIO.
|
||||
|
||||
:param vios_w: The Virtual I/O Server wrapper to attach to.
|
||||
:return: True if the volume was attached. False if the volume was
|
||||
not (could be the Virtual I/O Server does not have
|
||||
connectivity to the hdisk).
|
||||
"""
|
||||
status, device_name, udid = self._discover_volume_on_vios(vios_w)
|
||||
|
||||
if hdisk.good_discovery(status, device_name):
|
||||
# Found a hdisk on this Virtual I/O Server. Add the action to
|
||||
# map it to the VM when the stg_ftsk is executed.
|
||||
            with lockutils.lock(self.volume_id):
                self._add_append_mapping(vios_w.uuid, device_name,
                                         tag=self.volume_id)

            # Save the UDID for the disk in the connection info. It is
            # used for the detach.
            self._set_udid(udid)
            LOG.debug('Added deferred task to attach device %(device_name)s '
                      'to vios %(vios_name)s.',
                      {'device_name': device_name, 'vios_name': vios_w.name},
                      instance=self.instance)

            # Valid attachment
            return True

        return False

    def extend_volume(self):
        # The compute node does not need to take any additional steps for the
        # client to see the extended volume.
        pass

    def _discover_volume_on_vios(self, vios_w):
        """Discovers an hdisk on a single vios for the volume.

        :param vios_w: VIOS wrapper to process
        :returns: Status of the volume or None
        :returns: Device name or None
        :returns: UDID or None
        """
        # Get the initiator WWPNs, targets and LUN for the given VIOS.
        vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)

        # Build the ITL map and discover the hdisks on the Virtual I/O
        # Server (if any).
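        # (An ITL is an initiator WWPN / target WWPN / LUN triple; one is
        # built for every viable path from this VIOS to the disk.)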
        itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
        if len(itls) == 0:
            LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
                      {'vios': vios_w.name, 'volume_id': self.volume_id},
                      instance=self.instance)
            return None, None, None

        status, device_name, udid = hdisk.discover_hdisk(self.adapter,
                                                         vios_w.uuid, itls)

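        # discover_hdisk asks the VIOS to scan for the disk via LUA
        # recovery; the returned LUAStatus code (checked below) reports how
        # the discovery went.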
        if hdisk.good_discovery(status, device_name):
            LOG.info('Discovered %(hdisk)s on vios %(vios)s for volume '
                     '%(volume_id)s. Status code: %(status)s.',
                     {'hdisk': device_name, 'vios': vios_w.name,
                      'volume_id': self.volume_id, 'status': status},
                     instance=self.instance)
        elif status == hdisk.LUAStatus.DEVICE_IN_USE:
            LOG.warning('Discovered device %(dev)s for volume %(volume)s '
                        'on %(vios)s is in use. Error code: %(status)s.',
                        {'dev': device_name, 'volume': self.volume_id,
                         'vios': vios_w.name, 'status': status},
                        instance=self.instance)

        return status, device_name, udid

    def _get_hdisk_itls(self, vios_w):
        """Returns the mapped ITLs for the hdisk for the given VIOS.

        A PowerVM system may have multiple Virtual I/O Servers to virtualize
        the I/O to the virtual machines. Each Virtual I/O Server may have its
        own set of initiator WWPNs, target WWPNs and LUN on which the hdisk
        is mapped. This method determines and returns the ITLs for the given
        VIOS.

        :param vios_w: A Virtual I/O Server wrapper.
        :return: List of the i_wwpns that are part of the vios_w.
        :return: List of the t_wwpns that are part of the vios_w.
        :return: Target LUN id of the hdisk for the vios_w.
        """
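        # The Cinder FC connection_info supplies initiator_target_map as a
        # dict of initiator WWPN -> list of reachable target WWPNs.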
        it_map = self.connection_info['data']['initiator_target_map']
        i_wwpns = it_map.keys()

        active_wwpns = vios_w.get_active_pfc_wwpns()
        vio_wwpns = [x for x in i_wwpns if x in active_wwpns]

        t_wwpns = []
        for it_key in vio_wwpns:
            t_wwpns.extend(it_map[it_key])
        lun = self.connection_info['data']['target_lun']

        return vio_wwpns, t_wwpns, lun

    def _add_append_mapping(self, vios_uuid, device_name, tag=None):
        """Update the stg_ftsk to append the mapping to the VIOS.

        :param vios_uuid: The UUID of the vios for the pypowervm adapter.
        :param device_name: The hdisk device name.
        :param tag: String tag to set on the physical volume.
        """
        def add_func(vios_w):
            LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s on "
                     "vios %(vios)s.",
                     {'dev': device_name, 'vios': vios_w.name},
                     instance=self.instance)
            pv = pvm_stor.PV.bld(self.adapter, device_name, tag=tag)
            v_map = tsk_map.build_vscsi_mapping(None, vios_w, self.vm_uuid, pv)
            return tsk_map.add_map(vios_w, v_map)
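        # Defer add_func until the shared stg_ftsk runs; wrapper_tasks is
        # keyed by VIOS UUID, so the mapping is only added on that VIOS.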
        self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)

    def detach_volume(self):
        """Detach the volume."""

        # Check if the VM is in a state where the detach is acceptable.
        lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
        capable, reason = lpar_w.can_modify_io()
        if not capable:
            raise exc.VolumeDetachFailed(
                volume_id=self.volume_id, reason=reason)

        # Run the detach
        try:
            # See logic in attach_volume for why this new FeedTask is here.
            detach_ftsk = pvm_tx.FeedTask(
                'detach_volume_from_vio', pvm_vios.VIOS.getter(
                    self.adapter, xag=[pvm_const.XAG.VIO_STOR,
                                       pvm_const.XAG.VIO_SMAP]))
            # Find hdisks to detach
            detach_ftsk.add_functor_subtask(
                self._detach_vol_for_vio, provides='vio_modified',
                flag_update=False)

            ret = detach_ftsk.execute()

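            # Unlike attach, a miss on every VIOS is only warned about: the
            # backing hdisk may legitimately be gone already, e.g. after an
            # evacuation.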
            # Warn if no hdisks detached.
            if not any([result['vio_modified']
                        for result in ret['wrapper_task_rets'].values()]):
                LOG.warning("Detach Volume: Failed to detach the "
                            "volume %(volume_id)s on ANY of the Virtual "
                            "I/O Servers.", {'volume_id': self.volume_id},
                            instance=self.instance)

        except Exception as e:
            LOG.exception('PowerVM error detaching volume from virtual '
                          'machine.', instance=self.instance)
            ex_args = {'volume_id': self.volume_id,
                       'reason': six.text_type(e)}
            raise exc.VolumeDetachFailed(**ex_args)
        self.stg_ftsk.execute()

    def _detach_vol_for_vio(self, vios_w):
        """Removes the volume from a specific Virtual I/O Server.

        :param vios_w: The VIOS wrapper.
        :return: True if a remove action was done against this VIOS. False
                 otherwise.
        """
        LOG.debug("Detach volume %(vol)s from vios %(vios)s",
                  dict(vol=self.volume_id, vios=vios_w.name),
                  instance=self.instance)
        device_name = None
        udid = self._get_udid()
        try:
            if udid:
                # This will only work if vios_w has the Storage XAG.
                device_name = vios_w.hdisk_from_uuid(udid)

            if not udid or not device_name:
                # We lost our bdm data. We'll need to discover it.
                status, device_name, udid = self._discover_volume_on_vios(
                    vios_w)

                # Check if the hdisk is in a bad state in the I/O Server.
                # Subsequent scrub code on future deploys will clean this up.
                if not hdisk.good_discovery(status, device_name):
                    LOG.warning(
                        "Detach Volume: The backing hdisk for volume "
                        "%(volume_id)s on Virtual I/O Server %(vios)s is "
                        "not in a valid state. This may be the result of "
                        "an evacuate.",
                        {'volume_id': self.volume_id, 'vios': vios_w.name},
                        instance=self.instance)
                    return False

        except Exception:
            LOG.exception(
                "Detach Volume: Failed to find disk on Virtual I/O "
                "Server %(vios_name)s for volume %(volume_id)s. Volume "
                "UDID: %(volume_uid)s.",
                {'vios_name': vios_w.name, 'volume_id': self.volume_id,
                 'volume_uid': udid}, instance=self.instance)
            return False

        # We have found the device name
        LOG.info("Detach Volume: Discovered the device %(hdisk)s "
                 "on Virtual I/O Server %(vios_name)s for volume "
                 "%(volume_id)s. Volume UDID: %(volume_uid)s.",
                 {'hdisk': device_name, 'vios_name': vios_w.name,
                  'volume_id': self.volume_id, 'volume_uid': udid},
                 instance=self.instance)

        # Add the action to remove the mapping when the stg_ftsk is run.
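        # The mapping is matched by the client LPAR's integer PartitionID (a
        # PowerVM quick property), which remove_maps accepts in place of the
        # LPAR UUID.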
        partition_id = vm.get_vm_qp(self.adapter, self.vm_uuid,
                                    qprop='PartitionID')

        with lockutils.lock(self.volume_id):
            self._add_remove_mapping(partition_id, vios_w.uuid,
                                     device_name)

            # Add a step to also remove the hdisk
            self._add_remove_hdisk(vios_w, device_name)

        # Found a valid element to remove
        return True

    def _add_remove_mapping(self, vm_uuid, vios_uuid, device_name):
        """Adds a subtask to remove the storage mapping.

        :param vm_uuid: The UUID or short partition ID of the VM instance.
        :param vios_uuid: The UUID of the vios for the pypowervm adapter.
        :param device_name: The hdisk device name.
        """
        def rm_func(vios_w):
            LOG.info("Removing vSCSI mapping from physical volume %(dev)s "
                     "on vios %(vios)s",
                     {'dev': device_name, 'vios': vios_w.name},
                     instance=self.instance)
            removed_maps = tsk_map.remove_maps(
                vios_w, vm_uuid,
                tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
            return removed_maps
        self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)

    def _add_remove_hdisk(self, vio_wrap, device_name):
        """Adds a post-mapping task to remove the hdisk from the VIOS.

        This removal is only done after the mapping updates have completed.

        :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
                         from.
        :param device_name: The hdisk name to remove.
        """
        def rm_hdisk():
            LOG.info("Removing hdisk %(hdisk)s from Virtual I/O Server "
                     "%(vios)s", {'hdisk': device_name, 'vios': vio_wrap.name},
                     instance=self.instance)
            try:
                # Attempt to remove the hDisk
                hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                                   vio_wrap.uuid)
            except Exception:
                # If there is a failure, log it, but don't stop the process
                LOG.exception("There was an error removing the hdisk "
                              "%(disk)s from Virtual I/O Server %(vios)s.",
                              {'disk': device_name, 'vios': vio_wrap.name},
                              instance=self.instance)

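        # rm_hdisk is registered below as a post-execute step so that the
        # mapping removal subtasks queued on stg_ftsk complete before the
        # device itself is deleted from the VIOS.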
        # Only remove the hdisk if the device does not have multiple
        # mappings.
        if not self._check_host_mappings(vio_wrap, device_name):
            name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
            self.stg_ftsk.add_post_execute(task.FunctorTask(
                rm_hdisk, name=name))
        else:
            LOG.info("hdisk %(disk)s is not removed from Virtual I/O Server "
                     "%(vios)s because it has existing storage mappings",
                     {'disk': device_name, 'vios': vio_wrap.name},
                     instance=self.instance)

    def _check_host_mappings(self, vios_wrap, device_name):
        """Checks if the given hdisk has multiple mappings.

        :param vios_wrap: The Virtual I/O Server wrapper to remove the disk
                          from.
        :param device_name: The hdisk name to remove.
        :return: True if there are multiple instances using the given hdisk.
        """
        vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
                                  if v.uuid == vios_wrap.uuid)
        mappings = tsk_map.find_maps(
            vios_scsi_mappings, None,
            tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))

        LOG.debug("%(num)d storage mapping(s) found for %(dev)s on VIOS "
                  "%(vios)s", {'num': len(mappings), 'dev': device_name,
                               'vios': vios_wrap.name}, instance=self.instance)
        # The mapping is still present as the task feed removes it later.
        return len(mappings) > 1
releasenotes/notes/powervm-vscsi-46c82559f082d4ed.yaml (new file)
@ -0,0 +1,6 @@
---
features:
  - |
    The PowerVM virt driver now supports attaching, detaching, and extending
    the size of vSCSI Fibre Channel cinder volumes.