diff --git a/doc/source/devref/usage.rst b/doc/source/devref/usage.rst
index e812f00c..5e242462 100644
--- a/doc/source/devref/usage.rst
+++ b/doc/source/devref/usage.rst
@@ -118,6 +118,10 @@ Volume Options
 |                                      | Cinder volumes should be attached to the Virtual Machine. |
 |                                      | The options are: npiv or vscsi.                            |
 +--------------------------------------+------------------------------------------------------------+
+| vscsi_vios_connections_required = 1  | (IntOpt) Indicates a minimum number of Virtual I/O Servers |
+|                                      | that are required to support a Cinder volume attach with  |
+|                                      | the vSCSI volume connector.                                |
++--------------------------------------+------------------------------------------------------------+
 | ports_per_fabric = 1                 | (IntOpt) (NPIV only) The number of physical ports that    |
 |                                      | should be connected directly to the Virtual Machine, per  |
 |                                      | fabric.                                                    |
diff --git a/nova_powervm/tests/virt/powervm/volume/test_vscsi.py b/nova_powervm/tests/virt/powervm/volume/test_vscsi.py
index c70212da..6e1d8c54 100644
--- a/nova_powervm/tests/virt/powervm/volume/test_vscsi.py
+++ b/nova_powervm/tests/virt/powervm/volume/test_vscsi.py
@@ -146,8 +146,11 @@ class TestVSCSIAdapter(BaseVSCSITest):
     @mock.patch('pypowervm.tasks.scsi_mapper.add_map')
     @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
     @mock.patch('pypowervm.tasks.hdisk.discover_hdisk')
-    def test_connect_volume_no_update(self, mock_disc_hdisk, mock_build_map,
-                                      mock_add_map):
+    @mock.patch('nova_powervm.virt.powervm.volume.vscsi.VscsiVolumeAdapter.'
+                '_validate_vios_on_connection')
+    def test_connect_volume_no_update(
+            self, mock_validate_vioses, mock_disc_hdisk, mock_build_map,
+            mock_add_map):
         """Make sure we don't do an actual update of the VIOS if not needed."""
         # The mock return values
         mock_build_map.return_value = 'fake_map'
@@ -159,6 +162,7 @@ class TestVSCSIAdapter(BaseVSCSITest):
         self.vol_drv.connect_volume()
 
         # As initialized above, remove_maps returns True to trigger update.
+        mock_validate_vioses.assert_called_with(1)
         self.assertEqual(1, mock_add_map.call_count)
         self.assertEqual(0, self.ft_fx.patchers['update'].mock.call_count)
         self.assertEqual(1, mock_disc_hdisk.call_count)
@@ -166,8 +170,11 @@ class TestVSCSIAdapter(BaseVSCSITest):
     @mock.patch('pypowervm.tasks.hdisk.build_itls')
     @mock.patch('pypowervm.tasks.hdisk.lua_recovery')
     @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping')
-    def test_connect_volume_to_initiatiors(self, mock_add_vscsi_mapping,
-                                           mock_lua_recovery, mock_build_itls):
+    @mock.patch('nova_powervm.virt.powervm.volume.vscsi.VscsiVolumeAdapter.'
+                '_validate_vios_on_connection')
+    def test_connect_volume_to_initiatiors(
+            self, mock_validate_vioses, mock_add_vscsi_mapping,
+            mock_lua_recovery, mock_build_itls):
         """Tests that the connect w/out initiators throws errors."""
         mock_lua_recovery.return_value = (
             hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid')
@@ -175,10 +182,32 @@ class TestVSCSIAdapter(BaseVSCSITest):
         mock_instance = mock.Mock()
         mock_instance.system_metadata = {}
 
+        mock_validate_vioses.side_effect = p_exc.VolumeAttachFailed(
+            volume_id='1', reason='message', instance_name='inst')
+
         mock_build_itls.return_value = []
         self.assertRaises(p_exc.VolumeAttachFailed,
                           self.vol_drv.connect_volume)
 
+        # Validate that the validate was called with no vioses.
+        mock_validate_vioses.assert_called_with(0)
+
+    def test_validate_vios_on_connection(self):
+        # Happy path!
+        self.vol_drv._validate_vios_on_connection(1)
+
+        # Raise if no VIOSes are found
+        self.assertRaises(p_exc.VolumeAttachFailed,
+                          self.vol_drv._validate_vios_on_connection, 0)
+
+        # Multi VIOS required happy path.
+        self.flags(vscsi_vios_connections_required=2, group='powervm')
+        self.vol_drv._validate_vios_on_connection(2)
+
+        # Raise if multiple VIOSes required
+        self.assertRaises(p_exc.VolumeAttachFailed,
+                          self.vol_drv._validate_vios_on_connection, 1)
+
     @mock.patch('pypowervm.tasks.hdisk.remove_hdisk')
     @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid')
     @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps')
diff --git a/nova_powervm/virt/powervm/volume/__init__.py b/nova_powervm/virt/powervm/volume/__init__.py
index fe7f6863..35d28a15 100644
--- a/nova_powervm/virt/powervm/volume/__init__.py
+++ b/nova_powervm/virt/powervm/volume/__init__.py
@@ -34,7 +34,11 @@ vol_adapter_opts = [
                default='nova_powervm.virt.powervm.volume.vscsi.'
                        'VscsiVolumeAdapter',
                help='Volume Adapter API to connect FC volumes through Virtual '
-                    'I/O Server using PowerVM vSCSI connection mechanism')
+                    'I/O Server using PowerVM vSCSI connection mechanism'),
+    cfg.IntOpt('vscsi_vios_connections_required', default=1,
+               help='Indicates a minimum number of Virtual I/O Servers that '
+                    'are required to support a Cinder volume attach with the '
+                    'vSCSI volume connector.')
 ]
 
 CONF.register_opts(vol_adapter_opts, group='powervm')
diff --git a/nova_powervm/virt/powervm/volume/vscsi.py b/nova_powervm/virt/powervm/volume/vscsi.py
index 74feebae..2feb4b01 100644
--- a/nova_powervm/virt/powervm/volume/vscsi.py
+++ b/nova_powervm/virt/powervm/volume/vscsi.py
@@ -200,16 +200,44 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
             connect_volume_to_vio, provides='vio_modified', flag_update=False)
         ret = connect_ftsk.execute()
 
-        # If no valid hdisk was found, log and exit
-        if not any([result['vio_modified']
-                    for result in ret['wrapper_task_rets'].values()]):
+        # Check the number of VIOSes
+        vioses_modified = 0
+        for result in ret['wrapper_task_rets'].values():
+            if result['vio_modified']:
+                vioses_modified += 1
+        self._validate_vios_on_connection(vioses_modified)
+
+    def _validate_vios_on_connection(self, num_vioses_found):
+        """Validates that the correct number of VIOSes were discovered.
+
+        Certain environments may have redundancy requirements. For PowerVM
+        this is achieved by having multiple Virtual I/O Servers. This method
+        will check to ensure that the operator's requirements for redundancy
+        have been met. If not, a specific error message will be raised.
+
+        :param num_vioses_found: The number of VIOSes the hdisk was found on.
+        """
+        # Is valid as long as the vios count exceeds the conf value.
+        if num_vioses_found >= CONF.powervm.vscsi_vios_connections_required:
+            return
+
+        # Should have a custom message based on zero or 'some but not enough'
+        # I/O Servers.
+        if num_vioses_found == 0:
             msg = (_('Failed to discover valid hdisk on any Virtual I/O '
                      'Server for volume %(volume_id)s.') %
                    {'volume_id': self.volume_id})
-            LOG.error(msg)
-            ex_args = {'volume_id': self.volume_id, 'reason': msg,
-                       'instance_name': self.instance.name}
-            raise p_exc.VolumeAttachFailed(**ex_args)
+        else:
+            msg = (_('Failed to discover the hdisk on the required number of '
+                     'Virtual I/O Servers. Volume %(volume_id)s required '
+                     '%(vios_req)d Virtual I/O Servers, but the disk was only '
+                     'found on %(vios_act)d Virtual I/O Servers.') %
+                   {'volume_id': self.volume_id, 'vios_act': num_vioses_found,
+                    'vios_req': CONF.powervm.vscsi_vios_connections_required})
+        LOG.error(msg)
+        ex_args = {'volume_id': self.volume_id, 'reason': msg,
+                   'instance_name': self.instance.name}
+        raise p_exc.VolumeAttachFailed(**ex_args)
 
     def _disconnect_volume(self):
         """Disconnect the volume."""
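
For context (not part of the patch): a minimal, standalone sketch of how the new
[powervm]/vscsi_vios_connections_required option drives the redundancy check. Only
the option name, group, and default are taken from the change above; the helper
function below is illustrative and is not nova-powervm code.

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opts(
        [cfg.IntOpt('vscsi_vios_connections_required', default=1)],
        group='powervm')


    def enough_vioses(num_vioses_found):
        # Same comparison the patch performs in _validate_vios_on_connection:
        # the attach is valid only if the hdisk was discovered on at least
        # the configured number of Virtual I/O Servers.
        return num_vioses_found >= CONF.powervm.vscsi_vios_connections_required


    # Default of 1: a single-VIOS discovery is sufficient.
    assert enough_vioses(1)

    # Operator requiring dual-VIOS redundancy, e.g. in nova.conf:
    #   [powervm]
    #   vscsi_vios_connections_required = 2
    CONF.set_override('vscsi_vios_connections_required', 2, group='powervm')
    assert not enough_vioses(1)   # the driver would raise VolumeAttachFailed
    assert enough_vioses(2)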