Merge "Add iscsi support"

Jenkins
2016-08-31 20:29:46 +00:00
committed by Gerrit Code Review
10 changed files with 828 additions and 304 deletions

View File

@@ -55,6 +55,11 @@ powervm_opts = [
default='localdisk',
help='The disk driver to use for PowerVM disks. '
'Valid options are: localdisk, ssp'),
cfg.StrOpt('volume_adapter',
choices=['fibre_channel', 'iscsi'], ignore_case=True,
default='fibre_channel',
help='The volume adapter to use for PowerVM volumes. '
'Valid options are: fibre_channel, iscsi'),
cfg.StrOpt('pvm_vswitch_for_novalink_io',
default='NovaLinkVEABridge',
help="Name of the PowerVM virtual switch to be used when "
@@ -119,6 +124,12 @@ vol_adapter_opts = [
'specified and at least one fabric_X_port_wwpns option '
'(where X corresponds to the fabric name) must be '
'specified.'),
cfg.StrOpt('network_attach_strategy',
choices=['iscsi'], ignore_case=True,
default='iscsi',
help='The iSCSI Volume Strategy defines how iSCSI Cinder '
'volumes should be attached to the Virtual Machine. The '
'option is: iscsi.'),
cfg.StrOpt('fc_npiv_adapter_api',
default='nova_powervm.virt.powervm.volume.npiv.'
'NPIVVolumeAdapter',
@@ -126,7 +137,7 @@ vol_adapter_opts = [
'connection mechanism.'),
cfg.StrOpt('fc_vscsi_adapter_api',
default='nova_powervm.virt.powervm.volume.vscsi.'
'VscsiVolumeAdapter',
'PVVscsiFCVolumeAdapter',
help='Volume Adapter API to connect FC volumes through Virtual '
'I/O Server using PowerVM vSCSI connection mechanism.'),
cfg.IntOpt('vscsi_vios_connections_required',
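For context, a minimal sketch (not part of this change; the helper name is hypothetical) of how the new options could be consumed through oslo.config, assuming the powervm group is registered as shown above:

from nova_powervm import conf as cfg

CONF = cfg.CONF


def selected_volume_strategy():
    # 'volume_adapter' picks the transport: 'fibre_channel' or 'iscsi'
    # (ignore_case=True, so normalize before comparing).
    if CONF.powervm.volume_adapter.lower() == 'iscsi':
        # iSCSI volumes resolve through the network_attach_strategy option.
        return CONF.powervm.network_attach_strategy.lower()
    # Fibre Channel volumes keep using the existing fc_attach_strategy.
    return CONF.powervm.fc_attach_strategy.lower()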

View File

@@ -72,17 +72,19 @@ class ComprehensiveScrub(fixtures.Fixture):
class VolumeAdapter(fixtures.Fixture):
"""Mock out the VolumeAdapter."""
def __init__(self, patch_class):
self.patch_class = patch_class
def setUp(self):
super(VolumeAdapter, self).setUp()
self.std_vol_adpt_fx = self.useFixture(
fixtures.MockPatch('nova_powervm.virt.powervm.volume.vscsi.'
'VscsiVolumeAdapter', __name__='MockVSCSI'))
fixtures.MockPatch(self.patch_class, __name__='MockVolumeAdapter'))
self.std_vol_adpt = self.std_vol_adpt_fx.mock
# We want to mock out the connection_info individually so it gives
# back a new mock on every call. That's because the vol id is
# used for task names and we can't have duplicates. Here we have
# just one mock for simplicity of the vol driver but we need
# mulitiple names.
# multiple names.
self.std_vol_adpt.return_value.connection_info.__getitem__\
.side_effect = mock.MagicMock
self.drv = self.std_vol_adpt.return_value
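As a small aside (hypothetical snippet, not part of the change), the __getitem__ trick described in the comment above works because the MagicMock class itself is the side_effect, so every key lookup manufactures a fresh mock and task names derived from those values never collide:

import mock

drv = mock.MagicMock()
drv.connection_info.__getitem__.side_effect = mock.MagicMock
# Two lookups of the same key now return two distinct mock objects.
assert drv.connection_info['data'] is not drv.connection_info['data']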

View File

@@ -50,6 +50,7 @@ from nova_powervm.tests.virt.powervm import fixtures as fx
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm import live_migration as lpm
from nova_powervm.virt.powervm import vm
from nova_powervm.virt.powervm import volume as vol_attach
LOG = logging.getLogger(__name__)
logging.basicConfig()
@@ -101,8 +102,12 @@ class TestPowerVMDriver(test.TestCase):
self._setup_lpm()
self.disk_dvr = self.drv.disk_dvr
self.vol_fix = self.useFixture(fx.VolumeAdapter())
self.vol_fix = self.useFixture(fx.VolumeAdapter(
'nova_powervm.virt.powervm.volume.vscsi.PVVscsiFCVolumeAdapter'))
self.vol_drv = self.vol_fix.drv
self.iscsi_vol_fix = self.useFixture(fx.VolumeAdapter(
'nova_powervm.virt.powervm.volume.iscsi.IscsiVolumeAdapter'))
self.iscsi_vol_drv = self.iscsi_vol_fix.drv
self.crt_lpar = self.useFixture(fixtures.MockPatch(
'nova_powervm.virt.powervm.vm.crt_lpar')).mock
@@ -156,6 +161,15 @@ class TestPowerVMDriver(test.TestCase):
self.lpm_inst.uuid = 'inst1'
self.drv.live_migrations = {'inst1': self.lpm}
def _vol_drv_maps(self):
VOLUME_DRIVER_MAPPINGS = {
'fibre_channel': vol_attach.FC_STRATEGY_MAPPING[
driver.CONF.powervm.fc_attach_strategy.lower()],
'iscsi': vol_attach.NETWORK_STRATEGY_MAPPING[
driver.CONF.powervm.network_attach_strategy.lower()],
}
return VOLUME_DRIVER_MAPPINGS
def test_driver_create(self):
"""Validates that a driver of the PowerVM type can be initialized."""
test_drv = driver.PowerVMDriver(fake.FakeVirtAPI())
@@ -168,10 +182,16 @@ class TestPowerVMDriver(test.TestCase):
def test_get_volume_connector(self):
"""Tests that a volume connector can be built."""
self.flags(volume_adapter='fibre_channel', group='powervm')
vol_connector = self.drv.get_volume_connector(mock.Mock())
self.assertIsNotNone(vol_connector['wwpns'])
self.assertIsNotNone(vol_connector['host'])
self.flags(volume_adapter='iscsi', group='powervm')
vol_connector = self.drv.get_volume_connector(mock.Mock())
self.assertIsNotNone(vol_connector['initiator'])
def test_setup_disk_adapter(self):
# Ensure we can handle upper case option and we instantiate the class
self.flags(disk_driver='LoCaLDisK', group='powervm')
@@ -839,7 +859,13 @@ class TestPowerVMDriver(test.TestCase):
ret = self.drv._is_booted_from_volume(None)
self.assertFalse(ret)
def test_get_inst_xag(self):
@mock.patch('nova_powervm.virt.powervm.driver.VOLUME_DRIVER_MAPPINGS')
def test_get_inst_xag(self, mock_mapping):
def getitem(name):
return self._vol_drv_maps()[name]
mock_mapping.__getitem__.side_effect = getitem
self.flags(volume_adapter='fibre_channel', group='powervm')
# No volumes - should be just the SCSI mapping
xag = self.drv._get_inst_xag(mock.Mock(), None)
self.assertEqual([pvm_const.XAG.VIO_SMAP], xag)
@@ -851,6 +877,7 @@ class TestPowerVMDriver(test.TestCase):
# The NPIV volume attach - requires SCSI, Storage and FC Mapping
self.flags(fc_attach_strategy='npiv', group='powervm')
mock_mapping.return_value = self._vol_drv_maps()
xag = self.drv._get_inst_xag(mock.Mock(), [mock.Mock()])
self.assertEqual({pvm_const.XAG.VIO_STOR,
pvm_const.XAG.VIO_SMAP,
@@ -858,6 +885,13 @@ class TestPowerVMDriver(test.TestCase):
# The vSCSI Volume attach - Ensure case insensitive.
self.flags(fc_attach_strategy='VSCSI', group='powervm')
mock_mapping.return_value = self._vol_drv_maps()
xag = self.drv._get_inst_xag(mock.Mock(), [mock.Mock()])
self.assertEqual([pvm_const.XAG.VIO_SMAP], xag)
# The iSCSI volume attach - only needs the SCSI mapping.
self.flags(volume_adapter='iscsi', group='powervm')
mock_mapping.return_value = self._vol_drv_maps()
xag = self.drv._get_inst_xag(mock.Mock(), [mock.Mock()])
self.assertEqual([pvm_const.XAG.VIO_SMAP], xag)

View File

@@ -0,0 +1,173 @@
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova_powervm import conf as cfg
from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol
from nova_powervm.virt.powervm.volume import iscsi
from pypowervm import const as pvm_const
from pypowervm.tasks import hdisk
from pypowervm.tests.tasks.util import load_file
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios
CONF = cfg.CONF
VIOS_FEED = 'fake_vios_feed2.txt'
class TestISCSIAdapter(test_vol.TestVolumeAdapter):
"""Tests the vSCSI Volume Connector Adapter. Single VIOS tests"""
def setUp(self):
super(TestISCSIAdapter, self).setUp()
self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
self.vios_feed_resp = load_file(VIOS_FEED)
self.feed = pvm_vios.VIOS.wrap(self.vios_feed_resp)
self.ft_fx = pvm_fx.FeedTaskFx(self.feed)
self.useFixture(self.ft_fx)
self.adpt.read.return_value = self.vios_feed_resp
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.getter')
@mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
@mock.patch('pypowervm.tasks.partition.get_mgmt_partition')
@mock.patch('pypowervm.tasks.hdisk.discover_iscsi_initiator')
def init_vol_adpt(mock_initiator, mock_mgmt_part, mock_pvm_uuid,
mock_getter):
con_info = {
'data': {
'target_iqn': 'iqn.2016-08.bar.foo:target',
'target_lun': '1',
'target_portal': '10.0.0.1',
'auth_username': 'user',
'auth_password': 'password',
'volume_id': 'f042c68a-c5a5-476a-ba34-2f6d43f4226c'
},
}
mock_inst = mock.MagicMock()
mock_pvm_uuid.return_value = '1234'
mock_initiator.return_value = 'initiator iqn'
# The getter can just return the VIOS values (to remove a read
# that would otherwise need to be mocked).
mock_getter.return_value = self.feed
return iscsi.IscsiVolumeAdapter(self.adpt, 'host_uuid', mock_inst,
con_info)
self.vol_drv = init_vol_adpt()
# setup system_metadata tests
self.devname = "/dev/fake"
self.slot_mgr = mock.Mock()
self.slot_mgr.build_map.get_vscsi_slot.return_value = 62, 'the_lua'
@mock.patch('pypowervm.tasks.hdisk.discover_iscsi')
@mock.patch('pypowervm.tasks.scsi_mapper.add_map')
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
@mock.patch('pypowervm.tasks.hdisk.lua_recovery')
@mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
def test_connect_volume(self, mock_get_vm_id, mock_lua_recovery,
mock_build_map, mock_add_map, mock_discover):
# The mock return values
mock_lua_recovery.return_value = (
hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid')
mock_get_vm_id.return_value = '2'
mock_discover.return_value = '/dev/fake'
def build_map_func(host_uuid, vios_w, lpar_uuid, pv,
lpar_slot_num=None, lua=None, target_name=None):
self.assertEqual('host_uuid', host_uuid)
self.assertIsInstance(vios_w, pvm_vios.VIOS)
self.assertEqual('1234', lpar_uuid)
self.assertIsInstance(pv, pvm_stor.PV)
self.assertEqual(62, lpar_slot_num)
self.assertEqual('the_lua', lua)
self.assertEqual('ISCSI-target', target_name)
return 'fake_map'
mock_build_map.side_effect = build_map_func
# Run the method
self.vol_drv.connect_volume(self.slot_mgr)
# As initialized above, remove_maps returns True to trigger update.
self.assertEqual(1, mock_add_map.call_count)
self.assertEqual(1, self.ft_fx.patchers['update'].mock.call_count)
self.assertEqual(1, mock_build_map.call_count)
@mock.patch('pypowervm.tasks.hdisk.remove_hdisk')
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid')
@mock.patch('pypowervm.tasks.scsi_mapper.remove_maps')
@mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
def test_disconnect_volume(self, mock_get_vm_id, mock_remove_maps,
mock_hdisk_from_uuid, mock_remove_hdisk):
# The mock return values
mock_hdisk_from_uuid.return_value = 'device_name'
mock_get_vm_id.return_value = 'partition_id'
self.vol_drv._set_devname('/dev/fake')
def validate_remove_maps(vios_w, vm_uuid, match_func):
self.assertIsInstance(vios_w, pvm_vios.VIOS)
self.assertEqual('partition_id', vm_uuid)
return 'removed'
mock_remove_maps.side_effect = validate_remove_maps
# Run the method
self.vol_drv.disconnect_volume(self.slot_mgr)
# As initialized above, remove_maps returns True to trigger update.
self.assertEqual(1, mock_remove_maps.call_count)
self.assertEqual(1, self.ft_fx.patchers['update'].mock.call_count)
def test_min_xags(self):
xags = self.vol_drv.min_xags()
self.assertEqual(1, len(xags))
self.assertIn(pvm_const.XAG.VIO_SMAP, xags)
def test_vol_type(self):
self.assertEqual('iscsi', self.vol_drv.vol_type())
def test_set_devname(self):
# Mock connection info
self.vol_drv.connection_info['data'][iscsi.DEVNAME_KEY] = None
# Set the Device Name
self.vol_drv._set_devname(self.devname)
# Verify
dev_name = self.vol_drv.connection_info['data'][iscsi.DEVNAME_KEY]
self.assertEqual(self.devname, dev_name)
def test_get_devname(self):
# Set the value to retrieve
self.vol_drv.connection_info['data'][iscsi.DEVNAME_KEY] = self.devname
retrieved_devname = self.vol_drv._get_devname()
# Check key found
self.assertEqual(self.devname, retrieved_devname)
# Check key not found
self.vol_drv.connection_info['data'].pop(iscsi.DEVNAME_KEY)
retrieved_devname = self.vol_drv._get_devname()
# Check key not found
self.assertIsNone(retrieved_devname)

View File

@@ -19,6 +19,7 @@ import mock
from nova_powervm import conf as cfg
from nova_powervm.tests.virt.powervm.volume import test_driver as test_vol
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm.volume import volume
from nova_powervm.virt.powervm.volume import vscsi
from pypowervm import const as pvm_const
@@ -78,8 +79,8 @@ class BaseVSCSITest(test_vol.TestVolumeAdapter):
# that would otherwise need to be mocked).
mock_getter.return_value = self.feed
return vscsi.VscsiVolumeAdapter(self.adpt, 'host_uuid', mock_inst,
con_info)
return vscsi.PVVscsiFCVolumeAdapter(self.adpt, 'host_uuid',
mock_inst, con_info)
self.vol_drv = init_vol_adpt()
@@ -124,7 +125,7 @@ class TestVSCSIAdapter(BaseVSCSITest):
# Bad path. udid not found
# Run the method - this should produce a warning
with self.assertLogs(vscsi.__name__, 'WARNING'):
with self.assertLogs(volume.__name__, 'WARNING'):
self.vol_drv._cleanup_volume(None)
# Good path
@@ -204,7 +205,7 @@ class TestVSCSIAdapter(BaseVSCSITest):
mock_get_vm_id.return_value = 'partition_id'
def build_map_func(host_uuid, vios_w, lpar_uuid, pv,
lpar_slot_num=None, lua=None):
lpar_slot_num=None, lua=None, target_name=None):
self.assertEqual('host_uuid', host_uuid)
self.assertIsInstance(vios_w, pvm_vios.VIOS)
self.assertEqual('1234', lpar_uuid)
@@ -226,8 +227,8 @@ class TestVSCSIAdapter(BaseVSCSITest):
@mock.patch('pypowervm.tasks.scsi_mapper.add_map')
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
@mock.patch('pypowervm.tasks.hdisk.discover_hdisk')
@mock.patch('nova_powervm.virt.powervm.volume.vscsi.VscsiVolumeAdapter.'
'_validate_vios_on_connection')
@mock.patch('nova_powervm.virt.powervm.volume.vscsi.PVVscsiFCVolumeAdapter'
'._validate_vios_on_connection')
@mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
def test_connect_volume_no_update(
self, mock_get_vm_id, mock_validate_vioses, mock_disc_hdisk,
@@ -251,8 +252,8 @@ class TestVSCSIAdapter(BaseVSCSITest):
@mock.patch('pypowervm.tasks.hdisk.build_itls')
@mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping')
@mock.patch('nova_powervm.virt.powervm.volume.vscsi.VscsiVolumeAdapter.'
'_validate_vios_on_connection')
@mock.patch('nova_powervm.virt.powervm.volume.vscsi.PVVscsiFCVolumeAdapter'
'._validate_vios_on_connection')
@mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
def test_connect_volume_to_initiators(
self, mock_get_vm_id, mock_validate_vioses, mock_add_vscsi_mapping,
@@ -547,7 +548,7 @@ class TestVSCSIAdapterMultiVIOS(BaseVSCSITest):
mock_vm_id.return_value = 'partition_id'
def build_map_func(host_uuid, vios_w, lpar_uuid, pv,
lpar_slot_num=None, lua=None):
lpar_slot_num=None, lua=None, target_name=None):
self.assertEqual('host_uuid', host_uuid)
self.assertIsInstance(vios_w, pvm_vios.VIOS)
self.assertEqual('1234', lpar_uuid)

View File

@@ -74,11 +74,13 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Defines, for all cinder volume types, which volume driver to use. Currently
# only supports Fibre Channel, which has multiple options for connections.
# The connection strategy is defined above.
# only supports Fibre Channel, which has multiple options for connections, and
# iSCSI.
VOLUME_DRIVER_MAPPINGS = {
'fibre_channel': vol_attach.FC_STRATEGY_MAPPING[
CONF.powervm.fc_attach_strategy]
CONF.powervm.fc_attach_strategy.lower()],
'iscsi': vol_attach.NETWORK_STRATEGY_MAPPING[
CONF.powervm.network_attach_strategy.lower()],
}
DISK_ADPT_NS = 'nova_powervm.virt.powervm.disk'
@@ -1132,15 +1134,14 @@ class PowerVMDriver(driver.ComputeDriver):
# The WWPNs in case of FC connection.
if vol_drv is not None:
# Override the host name.
# TODO(IBM) See if there is a way to support a FC host name that
# is independent of overall host name.
connector['host'] = vol_drv.host_name()
# Set the WWPNs
wwpn_list = vol_drv.wwpns()
if wwpn_list is not None:
connector["wwpns"] = wwpn_list
if CONF.powervm.volume_adapter.lower() == "fibre_channel":
# Set the WWPNs
wwpn_list = vol_drv.wwpns()
if wwpn_list is not None:
connector["wwpns"] = wwpn_list
connector['host'] = vol_drv.host_name()
connector['initiator'] = vol_drv.host_name()
return connector
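Roughly, the connector assembled above takes one of two shapes depending on CONF.powervm.volume_adapter. The field names follow the assertions in test_get_volume_connector; the values below are illustrative only:

# Hypothetical examples; real values come from the volume adapter.
fc_connector = {
    'wwpns': ['21000024ff649104', '21000024ff649105'],  # vol_drv.wwpns()
    'host': 'powervm-host',                              # vol_drv.host_name()
    'initiator': 'powervm-host',
}
iscsi_connector = {
    # The iSCSI adapter's host_name() returns the initiator IQN.
    'host': 'iqn.2016-08.bar.foo:initiator',
    'initiator': 'iqn.2016-08.bar.foo:initiator',
}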
def migrate_disk_and_power_off(self, context, instance, dest,
@@ -1824,10 +1825,8 @@ class PowerVMDriver(driver.ComputeDriver):
{'inst': instance.name,
'xags': ','.join(xags)})
return list(xags)
# If we have any volumes, add the volumes required mapping XAGs.
adp_type = vol_attach.FC_STRATEGY_MAPPING[
CONF.powervm.fc_attach_strategy.lower()]
adp_type = VOLUME_DRIVER_MAPPINGS[CONF.powervm.volume_adapter]
vol_cls = importutils.import_class(adp_type)
xags.update(set(vol_cls.min_xags()))
LOG.debug('Instance XAGs for VM %(inst)s is %(xags)s.',
@@ -1856,8 +1855,7 @@ class PowerVMDriver(driver.ComputeDriver):
the adapter based on the connection-type of
connection_info.
"""
adp_type = vol_attach.FC_STRATEGY_MAPPING[
CONF.powervm.fc_attach_strategy]
adp_type = VOLUME_DRIVER_MAPPINGS[CONF.powervm.volume_adapter]
vol_cls = importutils.import_class(adp_type)
if conn_info:
LOG.debug('Volume Adapter returned for connection_info=%s' %
@@ -1883,14 +1881,20 @@ class PowerVMDriver(driver.ComputeDriver):
if self._is_booted_from_volume(block_device_info) and bdms is not None:
for bdm in bdms:
if bdm.get('boot_index') == 0:
conn_info = bdm.get('connection_info')
connectivity_type = conn_info['data']['connection-type']
boot_conn_type = ('vscsi' if connectivity_type ==
'pv_vscsi' else connectivity_type)
return boot_conn_type
return self._get_connectivity_type(bdm)
else:
return boot_conn_type
def _get_connectivity_type(self, bdm):
conn_info = bdm.get('connection_info')
if 'connection-type' in conn_info['data']:
connectivity_type = conn_info['data']['connection-type']
boot_conn_type = ('vscsi' if connectivity_type == 'pv_vscsi' else
connectivity_type)
elif 'driver_volume_type' in conn_info['data']:
boot_conn_type = conn_info['data']['driver_volume_type']
return boot_conn_type
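For illustration, these are the two connection_info shapes the new helper distinguishes (dict values are hypothetical):

# Legacy PowerVM key: 'pv_vscsi' is normalized to 'vscsi'.
vscsi_bdm_conn_info = {'data': {'connection-type': 'pv_vscsi'}}    # -> 'vscsi'
# Standard Cinder key: the driver_volume_type is returned as-is.
iscsi_bdm_conn_info = {'data': {'driver_volume_type': 'iscsi'}}    # -> 'iscsi'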
class NovaEventHandler(pvm_apt.RawEventHandler):
"""Used to receive and handle events from PowerVM."""

View File

@@ -23,3 +23,6 @@ FC_STRATEGY_MAPPING = {
'npiv': CONF.powervm.fc_npiv_adapter_api,
'vscsi': CONF.powervm.fc_vscsi_adapter_api
}
NETWORK_STRATEGY_MAPPING = {
'iscsi': 'nova_powervm.virt.powervm.volume.iscsi.IscsiVolumeAdapter'
}
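A short sketch of how this mapping is consumed, mirroring the VOLUME_DRIVER_MAPPINGS and importutils usage in driver.py above (assumes NETWORK_STRATEGY_MAPPING is imported from this module):

from oslo_utils import importutils

adp_type = NETWORK_STRATEGY_MAPPING['iscsi']
# -> 'nova_powervm.virt.powervm.volume.iscsi.IscsiVolumeAdapter'
vol_cls = importutils.import_class(adp_type)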

View File

@@ -0,0 +1,221 @@
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import lockutils
from oslo_log import log as logging
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
from nova_powervm.virt.powervm.volume import driver as v_driver
from nova_powervm.virt.powervm.volume import volume as volume
from pypowervm import const as pvm_const
from pypowervm.tasks import hdisk
from pypowervm.tasks import partition
from pypowervm.utils import transaction as tx
from pypowervm.wrappers import virtual_io_server as pvm_vios
import six
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEVNAME_KEY = 'target_devname'
class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
v_driver.PowerVMVolumeAdapter):
"""The iSCSI implementation of the Volume Adapter.
This driver will connect a volume to a VM. It first uses iSCSI to connect
the volume to the I/O host (the NovaLink partition), then uses the PowerVM
vSCSI technology to host it to the VM itself.
"""
def __init__(self, adapter, host_uuid, instance, connection_info,
stg_ftsk=None):
super(IscsiVolumeAdapter, self).__init__(
adapter, host_uuid, instance, connection_info, stg_ftsk=stg_ftsk)
# iSCSI volumes are assumed to be on the novalink partition
mgmt_part = partition.get_mgmt_partition(adapter)
vios_uuid = mgmt_part.uuid
self.initiator_name = hdisk.discover_iscsi_initiator(
adapter, vios_uuid)
@classmethod
def vol_type(cls):
"""The type of volume supported by this type."""
return 'iscsi'
def host_name(self):
return self.initiator_name
@classmethod
def min_xags(cls):
"""List of pypowervm XAGs needed to support this adapter."""
return [pvm_const.XAG.VIO_SMAP]
def pre_live_migration_on_destination(self, mig_data):
"""Perform pre live migration steps for the volume on the target host.
This method performs any pre live migration that is needed.
Certain volume connectors may need to pass data from the source host
to the target. This may be required to determine how volumes connect
through the Virtual I/O Servers.
This method will be called after the pre_live_migration_on_source
method. The data from the pre_live call will be passed in via the
mig_data. This method should put its output into the dest_mig_data.
:param mig_data: Dict of migration data for the destination server.
If the volume connector needs to provide
information to the live_migration command, it
should be added to this dictionary.
"""
raise NotImplementedError()
def _connect_volume_to_vio(self, vios_w, slot_mgr):
"""Attempts to connect a volume to a given VIO.
:param vios_w: The Virtual I/O Server wrapper to connect to.
:param slot_mgr: A NovaSlotManager. Used to store/retrieve the client
slots used when a volume is attached to the VM
:return: True if the volume was connected. False if the volume was
not (could be the Virtual I/O Server does not have
connectivity to the hdisk).
"""
host_ip = self.connection_info["data"]["target_portal"]
iqn = self.connection_info["data"]["target_iqn"]
password = self.connection_info["data"]["auth_password"]
user = self.connection_info["data"]["auth_username"]
target_name = "ISCSI-" + iqn.split(":")[1]
device_name = hdisk.discover_iscsi(
self.adapter, host_ip, user, password, iqn, vios_w.uuid)
slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, device_name)
if device_name is not None:
device_name = '/dev/' + device_name
# Found a hdisk on this Virtual I/O Server. Add the action to
# map it to the VM when the stg_ftsk is executed.
with lockutils.lock(hash(self)):
self._add_append_mapping(
vios_w.uuid, device_name, lpar_slot_num=slot, lua=lua,
target_name=target_name)
# Save the devname for the disk in the connection info. It is
# used for the detach.
self._set_devname(device_name)
self._set_udid(target_name)
LOG.debug('Device attached: %s', device_name)
# Valid attachment
return True
return False
def _disconnect_volume(self, slot_mgr):
"""Disconnect the volume.
This is the actual method to implement within the subclass. Some
transaction maintenance is done by the parent class.
:param slot_mgr: A NovaSlotManager. Used to delete the client slots
used when a volume is detached from the VM
"""
def discon_vol_for_vio(vios_w):
"""Removes the volume from a specific Virtual I/O Server.
:param vios_w: The VIOS wrapper.
:return: True if a remove action was done against this VIOS. False
otherwise.
"""
LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
dict(vol=self.volume_id, uuid=vios_w.uuid))
device_name = None
try:
device_name = self._get_devname()
if not device_name:
# We lost our bdm data.
# If we have no device name, at this point
# we should not continue. Subsequent scrub code on future
# deploys will clean this up.
LOG.warning(_LW(
"Disconnect Volume: The backing hdisk for volume "
"%(volume_id)s on Virtual I/O Server %(vios)s is "
"not in a valid state. No disconnect "
"actions to be taken as volume is not healthy."),
{'volume_id': self.volume_id, 'vios': vios_w.name})
return False
except Exception as e:
LOG.warning(_LW(
"Disconnect Volume: Failed to find disk on Virtual I/O "
"Server %(vios_name)s for volume %(volume_id)s."
" Error: %(error)s"),
{'error': e, 'vios_name': vios_w.name,
'volume_id': self.volume_id})
return False
# We have found the device name
LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
"on Virtual I/O Server %(vios_name)s for volume "
"%(volume_id)s."),
{'volume_id': self.volume_id,
'vios_name': vios_w.name, 'hdisk': device_name})
# Add the action to remove the mapping when the stg_ftsk is run.
partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
with lockutils.lock(hash(self)):
self._add_remove_mapping(partition_id, vios_w.uuid,
device_name, slot_mgr)
# TODO(Taylor): Logout of iSCSI device
# Found a valid element to remove
return True
try:
# See logic in _connect_volume for why this new FeedTask is here.
discon_ftsk = tx.FeedTask(
'discon_volume_from_vio', pvm_vios.VIOS.getter(
self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
# Find hdisks to disconnect
discon_ftsk.add_functor_subtask(
discon_vol_for_vio, provides='vio_modified', flag_update=False)
ret = discon_ftsk.execute()
# Warn if no hdisks disconnected.
if not any([result['vio_modified']
for result in ret['wrapper_task_rets'].values()]):
LOG.warning(_LW("Disconnect Volume: Failed to disconnect the "
"volume %(volume_id)s on ANY of the Virtual "
"I/O Servers for instance %(inst)s."),
{'inst': self.instance.name,
'volume_id': self.volume_id})
except Exception as e:
LOG.error(_LE('Cannot detach volumes from virtual machine: %s'),
self.vm_uuid)
LOG.exception(_LE('Error: %s'), e)
ex_args = {'volume_id': self.volume_id, 'reason': six.text_type(e),
'instance_name': self.instance.name}
raise p_exc.VolumeDetachFailed(**ex_args)
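To make the naming concrete, a sketch of the connection_info this adapter consumes and the names derived from it. The dict mirrors the unit test fixture earlier in this change; the snippet itself is illustrative:

con_info = {
    'data': {
        'target_iqn': 'iqn.2016-08.bar.foo:target',
        'target_lun': '1',
        'target_portal': '10.0.0.1',
        'auth_username': 'user',
        'auth_password': 'password',
        'volume_id': 'f042c68a-c5a5-476a-ba34-2f6d43f4226c',
    },
}

iqn = con_info['data']['target_iqn']
target_name = 'ISCSI-' + iqn.split(':')[1]   # -> 'ISCSI-target'
# hdisk.discover_iscsi() returns the device name on the NovaLink partition;
# the adapter prefixes '/dev/' and stores it under DEVNAME_KEY for detach.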

View File

@@ -0,0 +1,306 @@
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from taskflow import task
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
from pypowervm import const as pvm_const
from pypowervm.tasks import client_storage as pvm_c_stor
from pypowervm.tasks import hdisk
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.utils import transaction as tx
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
UDID_KEY = 'target_UDID'
DEVNAME_KEY = 'target_devname'
class VscsiVolumeAdapter(object):
"""VscsiVolumeAdapter that connects a Cinder volume to a VM.
This volume adapter is a generic adapter for volume types that use PowerVM
vSCSI to host the volume to the VM.
"""
def _connect_volume(self, slot_mgr):
"""Connects the volume.
:param slot_mgr: A NovaSlotManager. Used to store/retrieve the client
slots used when a volume is attached to the VM.
"""
# It's about to get weird. The transaction manager has a list of
# VIOSes. We could use those, but they only have SCSI mappings (by
# design). They do not have storage (super expensive).
#
# We need the storage xag when we are determining which mappings to
# add to the system. But we don't want to tie it to the stg_ftsk. If
# we do, every retry, every etag gather, etc... takes MUCH longer.
#
# So we get the VIOSes with the storage xag here, separately, to save
# the stg_ftsk from potentially having to run it multiple times.
connect_ftsk = tx.FeedTask(
'connect_volume_to_vio', pvm_vios.VIOS.getter(
self.adapter, xag=[pvm_const.XAG.VIO_STOR,
pvm_const.XAG.VIO_SMAP]))
# Find valid hdisks and map to VM.
connect_ftsk.add_functor_subtask(
self._connect_volume_to_vio, slot_mgr, provides='vio_modified',
flag_update=False)
ret = connect_ftsk.execute()
# Check the number of VIOSes
vioses_modified = 0
for result in ret['wrapper_task_rets'].values():
if result['vio_modified']:
vioses_modified += 1
partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
# Update the slot information
def set_slot_info():
vios_wraps = self.stg_ftsk.feed
for vios_w in vios_wraps:
scsi_map = pvm_c_stor.udid_to_scsi_mapping(
vios_w, self._get_udid(), partition_id)
if not scsi_map:
continue
slot_mgr.register_vscsi_mapping(scsi_map)
self._validate_vios_on_connection(vioses_modified)
self.stg_ftsk.add_post_execute(task.FunctorTask(
set_slot_info, name='hdisk_slot_%s' % self._get_udid()))
def _validate_vios_on_connection(self, num_vioses_found):
"""Validates that the correct number of VIOSes were discovered.
Certain environments may have redundancy requirements. For PowerVM
this is achieved by having multiple Virtual I/O Servers. This method
will check to ensure that the operator's requirements for redundancy
have been met. If not, a specific error message will be raised.
:param num_vioses_found: The number of VIOSes the hdisk was found on.
"""
# Is valid as long as the vios count exceeds the conf value.
if num_vioses_found >= CONF.powervm.vscsi_vios_connections_required:
return
# Should have a custom message based on zero or 'some but not enough'
# I/O Servers.
if num_vioses_found == 0:
msg = (_('Failed to discover valid hdisk on any Virtual I/O '
'Server for volume %(volume_id)s.') %
{'volume_id': self.volume_id})
else:
msg = (_('Failed to discover the hdisk on the required number of '
'Virtual I/O Servers. Volume %(volume_id)s required '
'%(vios_req)d Virtual I/O Servers, but the disk was only '
'found on %(vios_act)d Virtual I/O Servers.') %
{'volume_id': self.volume_id, 'vios_act': num_vioses_found,
'vios_req': CONF.powervm.vscsi_vios_connections_required})
LOG.error(msg)
ex_args = {'volume_id': self.volume_id, 'reason': msg,
'instance_name': self.instance.name}
raise p_exc.VolumeAttachFailed(**ex_args)
def _add_append_mapping(self, vios_uuid, device_name, lpar_slot_num=None,
lua=None, target_name=None):
"""Update the stg_ftsk to append the mapping to the VIOS.
:param vios_uuid: The UUID of the vios for the pypowervm adapter.
:param device_name: The hdisk device name.
:param lpar_slot_num: (Optional, Default:None) If specified, the client
lpar slot number to use on the mapping. If left
as None, it will use the next available slot
number.
:param lua: (Optional. Default: None) Logical Unit Address to set on
the TargetDevice. If None, the LUA will be assigned by the
server. Should be specified for all of the VSCSIMappings
for a particular bus, or none of them.
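:param target_name: (Optional, Default: None) The name to assign to the
vSCSI target device for this mapping.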
"""
def add_func(vios_w):
LOG.info(_LI("Adding vSCSI mapping to Physical Volume %(dev)s "
"to VM %(vm)s") % {'dev': device_name,
'vm': self.vm_uuid})
pv = pvm_stor.PV.bld(self.adapter, device_name, target_name)
v_map = tsk_map.build_vscsi_mapping(
self.host_uuid, vios_w, self.vm_uuid, pv,
lpar_slot_num=lpar_slot_num, lua=lua, target_name=target_name)
return tsk_map.add_map(vios_w, v_map)
self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)
def _get_udid(self):
"""This method will return the hdisk udid stored in connection_info.
:return: The target_udid associated with the hdisk
"""
try:
return self.connection_info['data'][UDID_KEY]
except (KeyError, ValueError):
# It's common to lose our specific data in the BDM. The connection
# information can be 'refreshed' by operations like LPM and resize
LOG.info(_LI(u'Failed to retrieve device_id key from BDM for '
'volume id %s'), self.volume_id)
return None
def _set_udid(self, udid):
"""This method will set the hdisk udid in the connection_info.
:param udid: The hdisk target_udid to be stored in system_metadata
"""
self.connection_info['data'][UDID_KEY] = udid
def _get_devname(self):
"""This method will return the hdisk devname stored in connection_info.
:return: The target_devname associated with the hdisk
"""
try:
return self.connection_info['data'][DEVNAME_KEY]
except (KeyError, ValueError):
# It's common to lose our specific data in the BDM. The connection
# information can be 'refreshed' by operations like LPM and resize
LOG.info(_LI(u'Failed to retrieve devname key from BDM for '
'volume id %s'), self.volume_id)
return None
def _set_devname(self, devname):
"""This method will set the hdisk devname in the connection_info.
:param devname: The hdisk target_devname to be stored in
system_metadata
"""
self.connection_info['data'][DEVNAME_KEY] = devname
def _add_remove_mapping(self, vm_uuid, vios_uuid, device_name, slot_mgr):
"""Adds a transaction to remove the storage mapping.
:param vm_uuid: The UUID of the VM instance
:param vios_uuid: The UUID of the vios for the pypowervm adapter.
:param device_name: The hdisk device name.
:param slot_mgr: A NovaSlotManager. Used to delete the client slots
used when a volume is detached from the VM.
"""
def rm_func(vios_w):
LOG.info(_LI("Removing vSCSI mapping from Physical Volume %(dev)s "
"to VM %(vm)s") % {'dev': device_name, 'vm': vm_uuid})
removed_maps = tsk_map.remove_maps(
vios_w, vm_uuid,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
for rm_map in removed_maps:
slot_mgr.drop_vscsi_mapping(rm_map)
return removed_maps
self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)
def _add_remove_hdisk(self, vio_wrap, device_name,
stg_ftsk=None):
"""Adds a post-mapping task to remove the hdisk from the VIOS.
This removal is only done after the mapping updates have completed.
This method is also used during migration to remove hdisks that remain
on the source host after the VM is migrated to the destination.
:param vio_wrap: The Virtual I/O Server wrapper to remove the disk
from.
:param device_name: The hdisk name to remove.
:param stg_ftsk: The feed task to add to. If None, self.stg_ftsk is used.
"""
def rm_hdisk():
LOG.info(_LI("Running remove for hdisk: '%s'") % device_name)
try:
# Attempt to remove the hDisk
hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
vio_wrap.uuid)
except Exception as e:
# If there is a failure, log it, but don't stop the process
LOG.warning(_LW("There was an error removing the hdisk "
"%(disk)s from the Virtual I/O Server."),
{'disk': device_name})
LOG.warning(e)
# Check that there are not multiple mappings for the device
if not self._check_host_mappings(vio_wrap, device_name):
name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
stg_ftsk = stg_ftsk or self.stg_ftsk
stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
else:
LOG.info(_LI("hdisk %(disk)s is not removed because it has "
"existing storage mappings"), {'disk': device_name})
def _check_host_mappings(self, vios_wrap, device_name):
"""Checks if the given hdisk has multiple mappings
:param vios_wrap: The Virtual I/O Server wrapper to check the disk on.
:param device_name: The hdisk name to check.
:return: True if there are multiple instances using the given hdisk
"""
vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
if v.uuid == vios_wrap.uuid)
mappings = tsk_map.find_maps(
vios_scsi_mappings, None,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
LOG.info(_LI("%(num)d Storage Mappings found for %(dev)s"),
{'num': len(mappings), 'dev': device_name})
# the mapping is still present as the task feed removes
# the mapping later
return len(mappings) > 1
def _cleanup_volume(self, udid=None, devname=None):
"""Cleanup the hdisk associated with this udid."""
if not udid and not devname:
LOG.warning(_LW('Could not remove hdisk for volume: %s')
% self.volume_id)
return
LOG.info(_LI('Removing hdisk for udid: %s') % udid)
def find_hdisk_to_remove(vios_w):
if devname is None:
device_name = vios_w.hdisk_from_uuid(udid)
else:
device_name = devname
if device_name is None:
return
LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'),
{'hdisk': device_name, 'vios': vios_w.name})
self._add_remove_hdisk(vios_w, device_name,
stg_ftsk=rmv_hdisk_ftsk)
# Create a feed task to get the vios, find the hdisk and remove it.
rmv_hdisk_ftsk = tx.FeedTask(
'find_hdisk_to_remove', pvm_vios.VIOS.getter(
self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
# Find vios hdisks for this udid to remove.
rmv_hdisk_ftsk.add_functor_subtask(
find_hdisk_to_remove, flag_update=False)
rmv_hdisk_ftsk.execute()
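A minimal, hypothetical sketch of the contract this shared class defines: a concrete adapter (like the iSCSI and vSCSI FC adapters in this change, which also mix in a nova_powervm.virt.powervm.volume.driver base) only needs to supply _connect_volume_to_vio; the FeedTask orchestration, slot registration and redundancy validation in _connect_volume above are inherited. Class and device names here are made up:

class ExampleVscsiBackedAdapter(VscsiVolumeAdapter):
    """Illustrative only; not part of this change."""

    def _connect_volume_to_vio(self, vios_w, slot_mgr):
        # Backend-specific hdisk discovery would happen here.
        device_name = 'hdisk99'
        slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, device_name)
        self._add_append_mapping(vios_w.uuid, device_name,
                                 lpar_slot_num=slot, lua=lua)
        self._set_udid('example-udid')
        return True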

View File

@@ -16,28 +16,23 @@
from oslo_concurrency import lockutils
from oslo_log import log as logging
from taskflow import task
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
from nova_powervm.virt.powervm.volume import driver as v_driver
from nova_powervm.virt.powervm.volume import volume as volume
from pypowervm import const as pvm_const
from pypowervm.tasks import client_storage as pvm_c_stor
from pypowervm.tasks import hdisk
from pypowervm.tasks import partition as pvm_tpar
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.utils import transaction as tx
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios
import six
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -47,9 +42,13 @@ UDID_KEY = 'target_UDID'
_vscsi_pfc_wwpns = None
class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
v_driver.FibreChannelVolumeAdapter):
"""The vSCSI implementation of the Volume Adapter.
For physical volumes, hosted to the VIOS through Fibre Channel, that
connect to the VMs with vSCSI.
vSCSI is the internal mechanism to link a given hdisk on the Virtual
I/O Server to a Virtual Machine. This volume driver will take the
information from the driver and link it to a given virtual machine.
@@ -70,7 +69,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
the FeedTask is not provided, the updates will be run
immediately when the respective method is executed.
"""
super(VscsiVolumeAdapter, self).__init__(
super(PVVscsiFCVolumeAdapter, self).__init__(
adapter, host_uuid, instance, connection_info, stg_ftsk=stg_ftsk)
self._pfc_wwpns = None
@@ -128,34 +127,6 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
instance_name=self.instance.name)
raise p_exc.VolumePreMigrationFailed(**ex_args)
def _cleanup_volume(self, udid):
"""Cleanup the hdisk associated with this udid."""
if not udid:
LOG.warning(_LW('Could not remove hdisk for volume: %s')
% self.volume_id)
return
LOG.info(_LI('Removing hdisk for udid: %s') % udid)
def find_hdisk_to_remove(vios_w):
device_name = vios_w.hdisk_from_uuid(udid)
if device_name is None:
return
LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'),
{'hdisk': device_name, 'vios': vios_w.name})
self._add_remove_hdisk(vios_w, device_name,
stg_ftsk=rmv_hdisk_ftsk)
# Create a feed task to get the vios, find the hdisk and remove it.
rmv_hdisk_ftsk = tx.FeedTask(
'find_hdisk_to_remove', pvm_vios.VIOS.getter(
self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
# Find vios hdisks for this udid to remove.
rmv_hdisk_ftsk.add_functor_subtask(
find_hdisk_to_remove, flag_update=False)
rmv_hdisk_ftsk.execute()
def post_live_migration_at_source(self, migrate_data):
"""Performs post live migration for the volume on the source host.
@@ -230,124 +201,45 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
return status, device_name, udid
def _connect_volume(self, slot_mgr):
"""Connects the volume.
def _connect_volume_to_vio(self, vios_w, slot_mgr):
"""Attempts to connect a volume to a given VIO.
:param slot_mgr: A NovaSlotManager. Used to store/retrieve the client
slots used when a volume is attached to the VM
:param vios_w: The Virtual I/O Server wrapper to connect to.
:param slot_mgr: A NovaSlotManager. Used to delete the client slots
used when a volume is detached from the VM
:return: True if the volume was connected. False if the volume was
not (could be the Virtual I/O Server does not have
connectivity to the hdisk).
"""
status, device_name, udid = self._discover_volume_on_vios(
vios_w, self.volume_id)
def connect_volume_to_vio(vios_w):
"""Attempts to connect a volume to a given VIO.
:param vios_w: The Virtual I/O Server wrapper to connect to.
:return: True if the volume was connected. False if the volume was
not (could be the Virtual I/O Server does not have
connectivity to the hdisk).
"""
status, device_name, udid = self._discover_volume_on_vios(
vios_w, self.volume_id)
# Get the slot and LUA to assign.
slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, udid)
if slot_mgr.is_rebuild and not slot:
LOG.debug('Detected a device with UDID %s on VIOS %s on the '
'rebuild that did not exist on the source. '
'Ignoring.', udid, vios_w.uuid)
return False
if hdisk.good_discovery(status, device_name):
# Found a hdisk on this Virtual I/O Server. Add the action to
# map it to the VM when the stg_ftsk is executed.
with lockutils.lock(hash(self)):
self._add_append_mapping(vios_w.uuid, device_name,
lpar_slot_num=slot, lua=lua)
# Save the UDID for the disk in the connection info. It is
# used for the detach.
self._set_udid(udid)
LOG.debug('Device attached: %s', device_name)
# Valid attachment
return True
# Get the slot and LUA to assign.
slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, udid)
if slot_mgr.is_rebuild and not slot:
LOG.debug('Detected a device with UDID %s on VIOS %s on the '
'rebuild that did not exist on the source. '
'Ignoring.', udid, vios_w.uuid)
return False
# Its about to get weird. The transaction manager has a list of
# VIOSes. We could use those, but they only have SCSI mappings (by
# design). They do not have storage (super expensive).
#
# We need the storage xag when we are determining which mappings to
# add to the system. But we don't want to tie it to the stg_ftsk. If
# we do, every retry, every etag gather, etc... takes MUCH longer.
#
# So we get the VIOSes with the storage xag here, separately, to save
# the stg_ftsk from potentially having to run it multiple times.
connect_ftsk = tx.FeedTask(
'connect_volume_to_vio', pvm_vios.VIOS.getter(
self.adapter, xag=[pvm_const.XAG.VIO_STOR,
pvm_const.XAG.VIO_SMAP]))
if hdisk.good_discovery(status, device_name):
# Found a hdisk on this Virtual I/O Server. Add the action to
# map it to the VM when the stg_ftsk is executed.
with lockutils.lock(hash(self)):
self._add_append_mapping(vios_w.uuid, device_name,
lpar_slot_num=slot, lua=lua)
# Find valid hdisks and map to VM.
connect_ftsk.add_functor_subtask(
connect_volume_to_vio, provides='vio_modified', flag_update=False)
# Save the UDID for the disk in the connection info. It is
# used for the detach.
self._set_udid(udid)
LOG.debug('Device attached: %s', device_name)
ret = connect_ftsk.execute()
# Valid attachment
return True
# Check the number of VIOSes
vioses_modified = 0
for result in ret['wrapper_task_rets'].values():
if result['vio_modified']:
vioses_modified += 1
partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
# Update the slot information
def set_slot_info():
vios_wraps = self.stg_ftsk.feed
for vios_w in vios_wraps:
scsi_map = pvm_c_stor.udid_to_scsi_mapping(
vios_w, self._get_udid(), partition_id)
if not scsi_map:
continue
slot_mgr.register_vscsi_mapping(scsi_map)
self._validate_vios_on_connection(vioses_modified)
self.stg_ftsk.add_post_execute(task.FunctorTask(
set_slot_info, name='hdisk_slot_%s' % self._get_udid()))
def _validate_vios_on_connection(self, num_vioses_found):
"""Validates that the correct number of VIOSes were discovered.
Certain environments may have redundancy requirements. For PowerVM
this is achieved by having multiple Virtual I/O Servers. This method
will check to ensure that the operator's requirements for redundancy
have been met. If not, a specific error message will be raised.
:param num_vioses_found: The number of VIOSes the hdisk was found on.
"""
# Is valid as long as the vios count exceeds the conf value.
if num_vioses_found >= CONF.powervm.vscsi_vios_connections_required:
return
# Should have a custom message based on zero or 'some but not enough'
# I/O Servers.
if num_vioses_found == 0:
msg = (_('Failed to discover valid hdisk on any Virtual I/O '
'Server for volume %(volume_id)s.') %
{'volume_id': self.volume_id})
else:
msg = (_('Failed to discover the hdisk on the required number of '
'Virtual I/O Servers. Volume %(volume_id)s required '
'%(vios_req)d Virtual I/O Servers, but the disk was only '
'found on %(vios_act)d Virtual I/O Servers.') %
{'volume_id': self.volume_id, 'vios_act': num_vioses_found,
'vios_req': CONF.powervm.vscsi_vios_connections_required})
LOG.error(msg)
ex_args = {'volume_id': self.volume_id, 'reason': msg,
'instance_name': self.instance.name}
raise p_exc.VolumeAttachFailed(**ex_args)
return False
def _disconnect_volume(self, slot_mgr):
"""Disconnect the volume.
@@ -446,62 +338,6 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
'instance_name': self.instance.name}
raise p_exc.VolumeDetachFailed(**ex_args)
def _check_host_mappings(self, vios_wrap, device_name):
"""Checks if the given hdisk has multiple mappings
:param vio_wrap: The Virtual I/O Server wrapper to remove the disk
from.
:param device_name: The hdisk name to remove.
:return: True is there are multiple instances using the given hdisk
"""
vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
if v.uuid == vios_wrap.uuid)
mappings = tsk_map.find_maps(
vios_scsi_mappings, None,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
LOG.info(_LI("%(num)d Storage Mappings found for %(dev)s"),
{'num': len(mappings), 'dev': device_name})
# the mapping is still present as the task feed removes
# the mapping later
return len(mappings) > 1
def _add_remove_hdisk(self, vio_wrap, device_name,
stg_ftsk=None):
"""Adds a post-mapping task to remove the hdisk from the VIOS.
This removal is only done after the mapping updates have completed.
This method is also used during migration to remove hdisks that remain
on the source host after the VM is migrated to the destination.
:param vio_wrap: The Virtual I/O Server wrapper to remove the disk
from.
:param device_name: The hdisk name to remove.
:param stg_ftsk: The feed task to add to. If None, then self.stg_ftsk
"""
def rm_hdisk():
LOG.info(_LI("Running remove for hdisk: '%s'") % device_name)
try:
# Attempt to remove the hDisk
hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
vio_wrap.uuid)
except Exception as e:
# If there is a failure, log it, but don't stop the process
LOG.warning(_LW("There was an error removing the hdisk "
"%(disk)s from the Virtual I/O Server."),
{'disk': device_name})
LOG.warning(e)
# Check if there are not multiple mapping for the device
if not self._check_host_mappings(vio_wrap, device_name):
name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
stg_ftsk = stg_ftsk or self.stg_ftsk
stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
else:
LOG.info(_LI("hdisk %(disk)s is not removed because it has "
"existing storage mappings"), {'disk': device_name})
@lockutils.synchronized('vscsi_wwpns')
def wwpns(self):
"""Builds the WWPNs of the adapters that will connect the ports.
@@ -521,59 +357,6 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
"""
return CONF.host
def _add_remove_mapping(self, vm_uuid, vios_uuid, device_name, slot_mgr):
"""Adds a transaction to remove the storage mapping.
:param vm_uuid: The UUID of the VM instance
:param vios_uuid: The UUID of the vios for the pypowervm adapter.
:param device_name: The The hdisk device name.
:param slot_mgr: A NovaSlotManager. Used to delete the client slots
used when a volume is detached from the VM.
"""
def rm_func(vios_w):
LOG.info(_LI("Removing vSCSI mapping from Physical Volume %(dev)s "
"to VM %(vm)s") % {'dev': device_name, 'vm': vm_uuid})
removed_maps = tsk_map.remove_maps(
vios_w, vm_uuid,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
for rm_map in removed_maps:
slot_mgr.drop_vscsi_mapping(rm_map)
return removed_maps
self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)
def _add_append_mapping(self, vios_uuid, device_name, lpar_slot_num=None,
lua=None):
"""Update the stg_ftsk to append the mapping to the VIOS.
:param vios_uuid: The UUID of the vios for the pypowervm adapter.
:param device_name: The The hdisk device name.
:param lpar_slot_num: (Optional, Default:None) If specified, the client
lpar slot number to use on the mapping. If left
as None, it will use the next available slot
number.
:param lua: (Optional. Default: None) Logical Unit Address to set on
the TargetDevice. If None, the LUA will be assigned by the
server. Should be specified for all of the VSCSIMappings
for a particular bus, or none of them.
"""
def add_func(vios_w):
LOG.info(_LI("Adding vSCSI mapping to Physical Volume %(dev)s "
"to VM %(vm)s") % {'dev': device_name,
'vm': self.vm_uuid})
pv = pvm_stor.PV.bld(self.adapter, device_name)
v_map = tsk_map.build_vscsi_mapping(
self.host_uuid, vios_w, self.vm_uuid, pv,
lpar_slot_num=lpar_slot_num, lua=lua)
return tsk_map.add_map(vios_w, v_map)
self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)
def _set_udid(self, udid):
"""This method will set the hdisk udid in the connection_info.
:param udid: The hdisk target_udid to be stored in system_metadata
"""
self.connection_info['data'][UDID_KEY] = udid
def _get_hdisk_itls(self, vios_w):
"""Returns the mapped ITLs for the hdisk for the given VIOS.
@@ -599,17 +382,3 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
lun = self.connection_info['data']['target_lun']
return vio_wwpns, t_wwpns, lun
def _get_udid(self):
"""This method will return the hdisk udid stored in connection_info.
:return: The target_udid associated with the hdisk
"""
try:
return self.connection_info['data'][UDID_KEY]
except (KeyError, ValueError):
# It's common to lose our specific data in the BDM. The connection
# information can be 'refreshed' by operations like LPM and resize
LOG.info(_LI(u'Failed to retrieve device_id key from BDM for '
'volume id %s'), self.volume_id)
return None