Extendable Nova PowerVM Volume adapters
Refactored the Nova PowerVM volume adapters to introduce an init routine that consolidates the attributes that previously had to be passed from call to call; this simplifies the TaskFlow task parameters.

Change-Id: I62a9deb0de3df177d7e8b6f39445dd17cdb7b327
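In rough terms, the refactor replaces per-call context passing with construction-time state. A sketch of the usage change (illustrative only; names follow the signatures in the diff below):

    # Before: adapters were long-lived and stateless, so every call
    # carried the full context.
    vol_drv = vscsi.VscsiVolumeAdapter()
    vol_drv.connect_volume(adapter, host_uuid, vm_uuid, instance,
                           connection_info)

    # After: the context is captured once in __init__ and the
    # operational methods take no arguments.
    vol_drv = vscsi.VscsiVolumeAdapter(adapter, host_uuid, instance,
                                       connection_info)
    vol_drv.connect_volume()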
@@ -24,7 +24,6 @@ from nova_powervm.virt.powervm import driver
 from nova.virt import fake
 from pypowervm.tests.wrappers.util import pvmhttp

 MS_HTTPRESP_FILE = "managedsystem.txt"
@@ -83,6 +82,14 @@ class VolumeAdapter(fixtures.Fixture):
         self._std_vol_adpt = mock.patch('nova_powervm.virt.powervm.volume.'
                                         'vscsi.VscsiVolumeAdapter')
         self.std_vol_adpt = self._std_vol_adpt.start()
+        # We want to mock out the connection_info individually so it gives
+        # back a new mock on every call. That's because the vol id is
+        # used for task names and we can't have duplicates. Here we have
+        # just one mock for simplicity of the vol driver but we need
+        # multiple names.
+        self.std_vol_adpt.return_value.connection_info.__getitem__\
+            .side_effect = mock.MagicMock
+        self.drv = self.std_vol_adpt.return_value
         self.addCleanup(self._std_vol_adpt.stop)
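The __getitem__ side_effect above makes every subscript of the mocked connection_info return a fresh mock, so task names derived from the volume id never collide across tasks. A standalone sketch of the behavior (not part of the commit):

    import mock

    conn_info = mock.MagicMock()
    conn_info.__getitem__.side_effect = mock.MagicMock
    # Each lookup now yields a distinct object, so names built from it
    # (e.g. 'connect_vol_<id>') are unique per call.
    assert conn_info['data'] is not conn_info['data']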
@@ -114,9 +121,5 @@ class PowerVMComputeDriver(fixtures.Fixture):
         self._init_host()
         self.drv.image_api = mock.Mock()

         # Set up the mock volume and disk drivers.
-        vol_adpt = self.useFixture(VolumeAdapter())
-        self.drv.vol_drvs['fibre_channel'] = vol_adpt.std_vol_adpt
-
         disk_adpt = self.useFixture(DiskAdapter())
         self.drv.disk_dvr = disk_adpt.std_disk_adpt
@@ -76,8 +76,9 @@ class TestPowerVMDriver(test.TestCase):
         self.drv = self.drv_fix.drv
         self.apt = self.drv_fix.pypvm.apt

-        self.fc_vol_drv = self.drv.vol_drvs['fibre_channel']
         self.disk_dvr = self.drv.disk_dvr
+        self.vol_fix = self.useFixture(fx.VolumeAdapter())
+        self.vol_drv = self.vol_fix.drv

         self.crt_lpar_p = mock.patch('nova_powervm.virt.powervm.vm.crt_lpar')
         self.crt_lpar = self.crt_lpar_p.start()
@@ -94,7 +95,9 @@ class TestPowerVMDriver(test.TestCase):
         test_drv = driver.PowerVMDriver(fake.FakeVirtAPI())
         self.assertIsNotNone(test_drv)

-    def test_get_volume_connector(self):
+    @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
+    def test_get_volume_connector(self, mock_getuuid):
+        mock_getuuid.return_value = '1234'
         vol_connector = self.drv.get_volume_connector(None)
         self.assertIsNotNone(vol_connector['wwpns'])
         self.assertIsNotNone(vol_connector['host'])
@@ -247,7 +250,7 @@ class TestPowerVMDriver(test.TestCase):
         self.assertTrue(mock_pwron.called)

         # Check that the connect volume was called
-        self.assertEqual(2, self.fc_vol_drv.connect_volume.call_count)
+        self.assertEqual(2, self.vol_drv.connect_volume.call_count)

         # Make sure the save was invoked
         self.assertEqual(2, mock_save.call_count)
@@ -307,7 +310,7 @@ class TestPowerVMDriver(test.TestCase):
         self.assertTrue(mock_pwron.called)

         # Check that the connect volume was called
-        self.assertEqual(2, self.fc_vol_drv.connect_volume.call_count)
+        self.assertEqual(2, self.vol_drv.connect_volume.call_count)

     @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
     @mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg'
@@ -353,7 +356,7 @@ class TestPowerVMDriver(test.TestCase):
         self.assertTrue(mock_pwron.called)

         # Check that the connect volume was called
-        self.assertEqual(2, self.fc_vol_drv.connect_volume.call_count)
+        self.assertEqual(2, self.vol_drv.connect_volume.call_count)

         # Make sure the BDM save was invoked twice.
         self.assertEqual(2, mock_save.call_count)
@@ -389,13 +392,13 @@ class TestPowerVMDriver(test.TestCase):
         # Create LPAR was called
         self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper,
                                          inst, my_flavor)
-        self.assertEqual(2, self.fc_vol_drv.connect_volume.call_count)
+        self.assertEqual(2, self.vol_drv.connect_volume.call_count)

         # Power on was called
         self.assertTrue(mock_pwron.called)

         # Validate the rollbacks were called
-        self.assertEqual(2, self.fc_vol_drv.disconnect_volume.call_count)
+        self.assertEqual(2, self.vol_drv.disconnect_volume.call_count)

     @mock.patch('nova.block_device.get_root_bdm')
     @mock.patch('nova.virt.driver.block_device_info_get_mapping')
@@ -448,8 +451,7 @@ class TestPowerVMDriver(test.TestCase):
         self.assertTrue(mock_dlt_vopt.called)

         # Validate that the volume detach was called
-        self.assertEqual(2, self.fc_vol_drv.disconnect_volume.call_count)
-
+        self.assertEqual(2, self.vol_drv.disconnect_volume.call_count)
         # Delete LPAR was called
         mock_dlt.assert_called_with(self.apt, mock.ANY)
@@ -463,7 +465,7 @@ class TestPowerVMDriver(test.TestCase):
         def reset_mocks():
             # Reset the mocks
             for mk in [mock_pwroff, mock_dlt, mock_dlt_vopt,
-                       self.fc_vol_drv.disconnect_volume, mock_dlt,
+                       self.vol_drv, mock_dlt,
                        mock_boot_from_vol]:
                 mk.reset_mock()
@@ -475,7 +477,7 @@ class TestPowerVMDriver(test.TestCase):
         self.assertFalse(mock_dlt_vopt.called)

         # Validate that the volume detach was not called
-        self.assertFalse(self.fc_vol_drv.disconnect_volume.called)
+        self.assertFalse(self.vol_drv.disconnect_volume.called)

         # Delete LPAR was not called
         self.assertFalse(mock_dlt.called)
@@ -531,16 +533,13 @@ class TestPowerVMDriver(test.TestCase):
                              mock.Mock(), block_device_info=mock_bdms)
         assert_not_called()

-    @mock.patch('nova_powervm.virt.powervm.volume.vscsi.VscsiVolumeAdapter.'
-                'connect_volume')
     @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper')
-    def test_attach_volume(self, mock_inst_wrap, mock_conn_volume):
+    def test_attach_volume(self, mock_inst_wrap):
         """Validates the basic PowerVM destroy."""
         # Set up the mocks to the tasks.
         inst = objects.Instance(**powervm.TEST_INSTANCE)
         inst.task_state = None

         # BDMs
         mock_bdm = self._fake_bdms()['block_device_mapping'][0]
@@ -549,12 +548,10 @@ class TestPowerVMDriver(test.TestCase):
                                inst, mock.Mock())

         # Verify the connect volume was invoked
-        self.assertEqual(1, mock_conn_volume.call_count)
+        self.assertEqual(1, self.vol_drv.connect_volume.call_count)

-    @mock.patch('nova_powervm.virt.powervm.volume.vscsi.VscsiVolumeAdapter.'
-                'disconnect_volume')
     @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
-    def test_detach_volume(self, mock_pvmuuid, mock_disconn_volume):
+    def test_detach_volume(self, mock_pvmuuid):
         """Validates the basic PowerVM destroy."""
         # Set up the mocks to the tasks.
@@ -563,13 +560,12 @@ class TestPowerVMDriver(test.TestCase):

         # BDMs
         mock_bdm = self._fake_bdms()['block_device_mapping'][0]

         # Invoke the method.
         self.drv.detach_volume(mock_bdm.get('connection_info'), inst,
                                mock.Mock())

         # Verify the disconnect volume was invoked
-        self.assertEqual(1, mock_disconn_volume.call_count)
+        self.assertEqual(1, self.vol_drv.disconnect_volume.call_count)

     @mock.patch('nova_powervm.virt.powervm.vm.dlt_lpar')
     @mock.patch('nova_powervm.virt.powervm.vm.power_off')
@@ -604,13 +600,13 @@ class TestPowerVMDriver(test.TestCase):
         self.assertTrue(mock_dlt_vopt.called)

         # Validate that the volume detach was called
-        self.assertEqual(2, self.fc_vol_drv.disconnect_volume.call_count)
+        self.assertEqual(2, self.vol_drv.disconnect_volume.call_count)

         # Delete LPAR was called
         mock_dlt.assert_called_with(self.apt, mock.ANY)

         # Validate the rollbacks were called.
-        self.assertEqual(2, self.fc_vol_drv.connect_volume.call_count)
+        self.assertEqual(2, self.vol_drv.connect_volume.call_count)

     @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
     @mock.patch('nova_powervm.virt.powervm.vm.power_off')
@@ -897,9 +893,9 @@ class TestPowerVMDriver(test.TestCase):
         def _fake_bdm(volume_id, target_lun):
             connection_info = {'driver_volume_type': 'fibre_channel',
                                'data': {'volume_id': volume_id,
-                                        'target_lun': target_lun
-                                        }
-                               }
+                                        'target_lun': target_lun,
+                                        'initiator_target_map':
+                                        {'21000024F5': ['50050768']}}}
             mapping_dict = {'source_type': 'volume', 'volume_id': volume_id,
                             'destination_type': 'volume',
                             'connection_info':
@@ -59,13 +59,21 @@ class TestNPIVAdapter(test.TestCase):
         self.mock_fabric_ports = self.mock_fabric_ports_p.start()
         self.mock_fabric_ports.return_value = [self.wwpn1, self.wwpn2]

-        # The volume driver, that uses the mocking
-        self.vol_drv = npiv.NPIVVolumeAdapter()
-
         # Fixtures
         self.adpt_fix = self.useFixture(fx.PyPowerVM())
         self.adpt = self.adpt_fix.apt

+        @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
+        def init_vol_adpt(mock_pvm_uuid):
+            con_info = {'data': {'initiator_target_map': {'i1': ['t1'],
+                                                          'i2': ['t2', 't3']},
+                        'target_lun': '1', 'volume_id': 'id'}}
+            mock_inst = mock.MagicMock()
+            mock_pvm_uuid.return_value = '1234'
+            return npiv.NPIVVolumeAdapter(self.adpt, 'host_uuid',
+                                          mock_inst, con_info)
+        self.vol_drv = init_vol_adpt()

     def tearDown(self):
         super(TestNPIVAdapter, self).tearDown()
@@ -77,12 +85,10 @@ class TestNPIVAdapter(test.TestCase):
     @mock.patch('pypowervm.tasks.vfc_mapper.remove_npiv_port_mappings')
     def test_connect_volume(self, mock_remove_p_maps, mock_add_p_maps):
         # Invoke
-        inst = mock.MagicMock()
         meta_key = self.vol_drv._sys_fabric_state_key('A')
+        self.vol_drv.instance.system_metadata = {meta_key: npiv.FS_MGMT_MAPPED}
         # Test connect volume when the fabric is mapped with mgmt partition
-        inst.system_metadata = {meta_key: npiv.FS_MGMT_MAPPED}
-        self.vol_drv.connect_volume(self.adpt, 'host_uuid', 'vm_uuid',
-                                    inst, mock.MagicMock())
+        self.vol_drv.connect_volume()

         # Verify
         # Mgmt mapping should be removed
@@ -90,7 +96,7 @@ class TestNPIVAdapter(test.TestCase):
         self.assertEqual(1, mock_add_p_maps.call_count)
         self.assertEqual(1, mock_remove_p_maps.call_count)
         # Check the fabric state should be mapped to instance
-        fc_state = self.vol_drv._get_fabric_state(inst, 'A')
+        fc_state = self.vol_drv._get_fabric_state('A')
         self.assertEqual(npiv.FS_INST_MAPPED, fc_state)

     @mock.patch('pypowervm.tasks.vfc_mapper.add_npiv_port_mappings')
@@ -98,13 +104,11 @@ class TestNPIVAdapter(test.TestCase):
     def test_connect_volume_inst_mapped(self, mock_remove_p_maps,
                                         mock_add_p_maps):
         # Invoke
-        inst = mock.MagicMock()
         meta_key = self.vol_drv._sys_fabric_state_key('A')
         # Test subsequent connect volume calls when the fabric
         # is mapped with inst partition
-        inst.system_metadata = {meta_key: npiv.FS_INST_MAPPED}
-        self.vol_drv.connect_volume(self.adpt, 'host_uuid', 'vm_uuid',
-                                    inst, mock.MagicMock())
+        self.vol_drv.instance.system_metadata = {meta_key: npiv.FS_INST_MAPPED}
+        self.vol_drv.connect_volume()

         # Verify
         # Remove mapping should not be called
@@ -112,7 +116,7 @@ class TestNPIVAdapter(test.TestCase):
         self.assertEqual(1, mock_add_p_maps.call_count)
         self.assertEqual(0, mock_remove_p_maps.call_count)
         # Check the fabric state should be mapped to instance
-        fc_state = self.vol_drv._get_fabric_state(inst, 'A')
+        fc_state = self.vol_drv._get_fabric_state('A')
         self.assertEqual(npiv.FS_INST_MAPPED, fc_state)

     @mock.patch('pypowervm.tasks.vfc_mapper.add_npiv_port_mappings')
@@ -120,12 +124,10 @@ class TestNPIVAdapter(test.TestCase):
     def test_connect_volume_fc_unmap(self, mock_remove_p_maps,
                                      mock_add_p_maps):
         # Invoke
-        inst = mock.MagicMock()
         meta_key = self.vol_drv._sys_fabric_state_key('A')
         # TestCase when there is no mapping
-        inst.system_metadata = {meta_key: npiv.FS_UNMAPPED}
-        self.vol_drv.connect_volume(self.adpt, 'host_uuid', 'vm_uuid',
-                                    inst, mock.MagicMock())
+        self.vol_drv.instance.system_metadata = {meta_key: npiv.FS_UNMAPPED}
+        self.vol_drv.connect_volume()

         # Verify
         # Remove mapping should not be called
@@ -133,18 +135,16 @@ class TestNPIVAdapter(test.TestCase):
         self.assertEqual(1, mock_add_p_maps.call_count)
         self.assertEqual(0, mock_remove_p_maps.call_count)
         # Check the fabric state should be mapped to instance
-        fc_state = self.vol_drv._get_fabric_state(inst, 'A')
+        fc_state = self.vol_drv._get_fabric_state('A')
         self.assertEqual(npiv.FS_INST_MAPPED, fc_state)

     @mock.patch('pypowervm.tasks.vfc_mapper.remove_npiv_port_mappings')
     def test_disconnect_volume(self, mock_remove_p_maps):
         # Mock Data
-        inst = mock.MagicMock()
-        inst.task_state = 'deleting'
+        self.vol_drv.instance.task_state = 'deleting'

         # Invoke
-        self.vol_drv.disconnect_volume(self.adpt, 'host_uuid', 'vm_uuid',
-                                       inst, mock.MagicMock())
+        self.vol_drv.disconnect_volume()

         # Verify
         self.assertEqual(1, mock_remove_p_maps.call_count)
@@ -152,31 +152,26 @@ class TestNPIVAdapter(test.TestCase):
     @mock.patch('pypowervm.tasks.vfc_mapper.remove_npiv_port_mappings')
     def test_disconnect_volume_no_op(self, mock_remove_p_maps):
         """Tests that when the task state is not set, connections are left."""
-        # Mock Data
-        inst = mock.MagicMock()
-        inst.task_state = None
-
         # Invoke
-        self.vol_drv.disconnect_volume(self.adpt, 'host_uuid', 'vm_uuid',
-                                       inst, mock.MagicMock())
+        self.vol_drv.disconnect_volume()

         # Verify
         self.assertEqual(0, mock_remove_p_maps.call_count)

     def test_disconnect_volume_no_op_other_state(self):
         """Tests that the deletion doesn't go through on certain states."""
-        inst = mock.MagicMock()
-        inst.task_state = task_states.RESUMING
-        self.vol_drv.disconnect_volume(self.adpt, 'host_uuid', 'vm_uuid',
-                                       inst, mock.ANY)
+        self.vol_drv.instance.task_state = task_states.RESUMING
+
+        # Invoke
+        self.vol_drv.disconnect_volume()
         self.assertEqual(0, self.adpt.read.call_count)

     @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.wrap')
     def test_connect_volume_no_map(self, mock_vio_wrap):
         """Tests that if the VFC Mapping exists, another is not added."""
         # Mock Data
-        con_info = {'data': {'initiator_target_map': {'a': None,
-                                                      'b': None}}}
+        self.vol_drv.connection_info = {'data': {'initiator_target_map':
+                                                 {'a': None, 'b': None}}}

         mock_mapping = mock.MagicMock()
         mock_mapping.client_adapter.wwpns = {'a', 'b'}
@@ -187,8 +182,7 @@ class TestNPIVAdapter(test.TestCase):
         mock_vio_wrap.return_value = mock_vios

         # Invoke
-        self.vol_drv.connect_volume(self.adpt, 'host_uuid', 'vm_uuid',
-                                    mock.MagicMock(), con_info)
+        self.vol_drv.connect_volume()

         # Verify
         self.assertEqual(0, self.adpt.update.call_count)
@@ -198,9 +192,6 @@ class TestNPIVAdapter(test.TestCase):
     def test_wwpns(self, mock_add_port, mock_mgmt_part):
         """Tests that new WWPNs get generated properly."""
         # Mock Data
-        inst = mock.Mock()
-        meta_key = self.vol_drv._sys_meta_fabric_key('A')
-        inst.system_metadata = {meta_key: None}
         mock_add_port.return_value = [('21000024FF649104', 'AA BB'),
                                       ('21000024FF649105', 'CC DD')]
         mock_vios = mock.MagicMock()
@@ -208,13 +199,16 @@ class TestNPIVAdapter(test.TestCase):
         mock_mgmt_part.return_value = mock_vios
         self.adpt.read.return_value = self.vios_feed_resp

+        meta_key = self.vol_drv._sys_meta_fabric_key('A')
+        self.vol_drv.instance.system_metadata = {meta_key: None}
+
         # Invoke
-        wwpns = self.vol_drv.wwpns(self.adpt, 'host_uuid', inst)
+        wwpns = self.vol_drv.wwpns()

         # Check
         self.assertListEqual(['AA', 'BB', 'CC', 'DD'], wwpns)
         self.assertEqual('21000024FF649104,AA,BB,21000024FF649105,CC,DD',
-                         inst.system_metadata[meta_key])
+                         self.vol_drv.instance.system_metadata[meta_key])
         xags = [pvm_vios.VIOS.xags.FC_MAPPING, pvm_vios.VIOS.xags.STORAGE]
         self.adpt.read.assert_called_once_with('VirtualIOServer', xag=xags)
         self.assertEqual(1, mock_add_port.call_count)
@@ -222,23 +216,19 @@ class TestNPIVAdapter(test.TestCase):
         # Check when mgmt_uuid is None
         mock_add_port.reset_mock()
         mock_vios.uuid = None
-        wwpns = self.vol_drv.wwpns(self.adpt, 'host_uuid', inst)
+        self.vol_drv.wwpns()
         self.assertEqual(0, mock_add_port.call_count)
         self.assertEqual('mgmt_mapped',
-                         self.vol_drv._get_fabric_state(inst, 'A'))
+                         self.vol_drv._get_fabric_state('A'))

     @mock.patch('nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter.'
                 '_get_fabric_state')
     def test_wwpns_on_sys_meta(self, mock_fabric_state):
         """Tests that previously stored WWPNs are returned."""
         # Mock
-        inst = mock.MagicMock()
-        inst.system_metadata = {self.vol_drv._sys_meta_fabric_key('A'):
-                                'phys1,a,b,phys2,c,d'}
         mock_fabric_state.return_value = npiv.FS_INST_MAPPED
+        self.vol_drv.instance.system_metadata = {
+            self.vol_drv._sys_meta_fabric_key('A'): 'phys1,a,b,phys2,c,d'}

-        # Invoke
-        wwpns = self.vol_drv.wwpns(mock.ANY, 'host_uuid', inst)
-
-        # Verify
-        self.assertListEqual(['a', 'b', 'c', 'd'], wwpns)
+        # Invoke and Verify
+        self.assertListEqual(['a', 'b', 'c', 'd'], self.vol_drv.wwpns())
@@ -37,12 +37,22 @@ I_WWPN_1 = '21000024FF649104'

 class TestVSCSIAdapter(test.TestCase):
     """Tests the vSCSI Volume Connector Adapter."""

     def setUp(self):
         super(TestVSCSIAdapter, self).setUp()
         self.pypvm_fix = self.useFixture(fx.PyPowerVM())
         self.adpt = self.pypvm_fix.apt

+        @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
+        def init_vol_adpt(mock_pvm_uuid):
+            con_info = {'data': {'initiator_target_map': {I_WWPN_1: ['t1'],
+                                                          I_WWPN_1: ['t2',
+                                                                     't3']},
+                        'target_lun': '1', 'volume_id': 'id'}}
+            mock_inst = mock.MagicMock()
+            mock_pvm_uuid.return_value = '1234'
+            return vscsi.VscsiVolumeAdapter(self.adpt, 'host_uuid',
+                                            mock_inst, con_info)
+        self.vol_drv = init_vol_adpt()
         # Find directory for response file(s)
         data_dir = os.path.dirname(os.path.abspath(__file__))
         data_dir = os.path.join(data_dir, '../data')
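The setUp above uses a small trick: decorating a throwaway inner function with @mock.patch scopes the patch to the single call that constructs the adapter, which is exactly when __init__ resolves the VM UUID. A sketch of the pattern in isolation (hypothetical names):

    import mock

    @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
    def build_adapter(mock_pvm_uuid):
        # get_pvm_uuid is patched only while this function runs, which
        # covers PowerVMVolumeAdapter.__init__.
        mock_pvm_uuid.return_value = '1234'
        return vscsi.VscsiVolumeAdapter(adpt, 'host_uuid',
                                        mock.MagicMock(), con_info)

    vol_drv = build_adapter()  # the decorator injects the mock argument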
@@ -63,25 +73,18 @@ class TestVSCSIAdapter(test.TestCase):
     @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping')
     def test_connect_volume(self, mock_add_vscsi_mapping,
                             mock_discover_hdisk, mock_build_itls):
-        con_info = {'data': {'initiator_target_map': {I_WWPN_1: ['t1'],
-                                                      I_WWPN_1: ['t2', 't3']},
-                             'target_lun': '1', 'volume_id': 'id'}}
         mock_discover_hdisk.return_value = (
             hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid')

         self.adpt.read.return_value = self.vios_feed_resp
-        mock_instance = mock.Mock()
-        mock_instance.system_metadata = {}

         mock_build_itls.return_value = [mock.MagicMock()]

-        vscsi.VscsiVolumeAdapter().connect_volume(self.adpt, 'host_uuid',
-                                                  'vm_uuid', mock_instance,
-                                                  con_info)
+        self.vol_drv.connect_volume()

         # Single mapping
         self.assertEqual(1, mock_add_vscsi_mapping.call_count)
         mock_add_vscsi_mapping.assert_called_with(
-            'host_uuid', '3443DB77-AED1-47ED-9AA5-3DB9C6CF7089', 'vm_uuid',
+            'host_uuid', '3443DB77-AED1-47ED-9AA5-3DB9C6CF7089', '1234',
             mock.ANY)

     @mock.patch('pypowervm.tasks.hdisk.build_itls')
@@ -91,9 +94,6 @@ class TestVSCSIAdapter(test.TestCase):
                                            mock_discover_hdisk,
                                            mock_build_itls):
         """Tests that the connect w/out initiators throws errors."""
-        con_info = {'data': {'initiator_target_map': {I_WWPN_1: ['t1'],
-                                                      I_WWPN_1: ['t2', 't3']},
-                             'target_lun': '1', 'volume_id': 'id'}}
         mock_discover_hdisk.return_value = (
             hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid')
@@ -102,11 +102,8 @@ class TestVSCSIAdapter(test.TestCase):
         mock_instance.system_metadata = {}

         mock_build_itls.return_value = []

         self.assertRaises(pexc.VolumeAttachFailed,
-                          vscsi.VscsiVolumeAdapter().connect_volume,
-                          self.adpt, 'host_uuid', 'vm_uuid', mock_instance,
-                          con_info)
+                          self.vol_drv.connect_volume)

     @mock.patch('pypowervm.tasks.hdisk.remove_hdisk')
     @mock.patch('pypowervm.tasks.scsi_mapper.remove_pv_mapping')
@@ -115,18 +112,6 @@ class TestVSCSIAdapter(test.TestCase):
     def test_disconnect_volume(self, mock_hdisk_from_uuid,
                                mock_get_vm_id, mock_remove_pv_mapping,
                                mock_remove_hdisk):
-        vol_drv = vscsi.VscsiVolumeAdapter()
-
-        # Mock up the connection info
-        vios_uuid = '3443DB77-AED1-47ED-9AA5-3DB9C6CF7089'
-        volid_meta_key = vol_drv._build_udid_key(vios_uuid, 'id')
-        con_info = {'data': {'initiator_target_map': {'i1': ['t1'],
-                                                      'i2': ['t2', 't3']},
-                             volid_meta_key: self.udid,
-                             'target_lun': '1', 'volume_id': 'id'}}
-
-        # Build the mock instance
-        instance = mock.Mock()

         # Set test scenario
         self.adpt.read.return_value = self.vios_feed_resp
@@ -134,8 +119,8 @@ class TestVSCSIAdapter(test.TestCase):
         mock_get_vm_id.return_value = 'partion_id'

         # Run the test
-        vol_drv.disconnect_volume(self.adpt, 'host_uuid', 'vm_uuid', instance,
-                                  con_info)
+        self.vol_drv.disconnect_volume()

         self.assertEqual(1, mock_remove_pv_mapping.call_count)
         self.assertEqual(1, mock_remove_hdisk.call_count)
@@ -143,59 +128,54 @@ class TestVSCSIAdapter(test.TestCase):
     def test_wwpns(self, mock_vio_wwpns):
         mock_vio_wwpns.return_value = ['aa', 'bb']

-        vol_drv = vscsi.VscsiVolumeAdapter()
-        wwpns = vol_drv.wwpns(mock.ANY, 'host_uuid', mock.ANY)
+        wwpns = self.vol_drv.wwpns()

         self.assertListEqual(['aa', 'bb'], wwpns)

     def test_set_udid(self):
-        vol_adpt = vscsi.VscsiVolumeAdapter()
-
         # Mock connection info
-        connection_info = {'data': {}}
-        udid_key = vol_adpt._build_udid_key(self.vios_uuid, self.volume_id)
+        udid_key = vscsi._build_udid_key(self.vios_uuid, self.volume_id)
+        self.vol_drv.connection_info['data'][udid_key] = self.udid

         # Set the UDID
-        vol_adpt._set_udid(connection_info, self.vios_uuid, self.volume_id,
-                           self.udid)
+        self.vol_drv._set_udid(self.vios_uuid, self.volume_id,
+                               self.udid)

         # Verify
-        self.assertEqual(self.udid, connection_info['data'][udid_key])
+        self.assertEqual(self.udid,
+                         self.vol_drv.connection_info['data'][udid_key])

     def test_get_udid(self):
-        vol_adpt = vscsi.VscsiVolumeAdapter()
-
         # Mock connection info
-        connection_info = {'data': {}}
-        udid_key = vol_adpt._build_udid_key(self.vios_uuid, self.volume_id)
-        connection_info['data'][udid_key] = self.udid
+        udid_key = vscsi._build_udid_key(self.vios_uuid, self.volume_id)
+        self.vol_drv.connection_info['data'][udid_key] = self.udid

         # Set the key to retrieve
-        retrieved_udid = vol_adpt._get_udid(connection_info, self.vios_uuid,
-                                            self.volume_id)
+        retrieved_udid = self.vol_drv._get_udid(self.vios_uuid, self.volume_id)

         # Check key found
         self.assertEqual(self.udid, retrieved_udid)

         # Check key not found
-        retrieved_udid = (vscsi.VscsiVolumeAdapter().
-                          _get_udid(connection_info, self.vios_uuid,
-                                    'non_existent_key'))
+        retrieved_udid = (self.vol_drv._get_udid(self.vios_uuid,
+                                                 'non_existent_key'))

         # Check key not found
         self.assertIsNone(retrieved_udid)

     def test_wwpns_on_vios(self):
         """Validates the _wwpns_on_vios method."""
-        vol_adpt = vscsi.VscsiVolumeAdapter()
-
         mock_vios = mock.MagicMock()
         mock_vios.get_active_pfc_wwpns.return_value = ['A', 'B', 'C']

         self.assertListEqual(
-            ['A'], vol_adpt._wwpns_on_vios(['A', 'D'], mock_vios))
+            ['A'], self.vol_drv._wwpns_on_vios(['A', 'D'], mock_vios))

         self.assertListEqual(
-            ['A', 'C'], vol_adpt._wwpns_on_vios(['A', 'C', 'D'], mock_vios))
+            ['A', 'C'],
+            self.vol_drv._wwpns_on_vios(['A', 'C', 'D'], mock_vios))

         self.assertListEqual(
-            [], vol_adpt._wwpns_on_vios(['D'], mock_vios))
+            [], self.vol_drv._wwpns_on_vios(['D'], mock_vios))
@@ -40,11 +40,6 @@ pvm_opts = [
                default='/tmp/cfgdrv/',
                help='The location where the config drive ISO files should be '
                     'built.'),
-    cfg.StrOpt('fc_attach_strategy',
-               default='vscsi',
-               help='The Fibre Channel Volume Strategy defines how FC Cinder '
-                    'volumes should be attached to the Virtual Machine. The '
-                    'options are: npiv or vscsi.'),
     cfg.StrOpt('disk_driver',
                default='localdisk',
                help='The disk driver to use for PowerVM disks. '
@@ -97,9 +97,6 @@ class PowerVMDriver(driver.ComputeDriver):
         self._get_disk_adapter()
         self.image_api = image.API()

-        # Initialize the volume drivers
-        self.vol_drvs = _inst_dict(VOLUME_DRIVER_MAPPINGS)
-
         # Init Host CPU Statistics
         self.host_cpu_stats = pvm_host.HostCPUStats(self.adapter,
                                                     self.host_uuid)
@@ -242,14 +239,11 @@ class PowerVMDriver(driver.ComputeDriver):
         if bdms is not None:
             for bdm in bdms:
                 conn_info = bdm.get('connection_info')
-                drv_type = conn_info.get('driver_volume_type')
-                vol_drv = self.vol_drvs.get(drv_type)
-
+                vol_drv = self._get_inst_vol_adpt(context, instance,
+                                                  conn_info=conn_info)
                 # First connect the volume. This will update the
                 # connection_info.
-                flow_stor.add(tf_stg.ConnectVolume(
-                    self.adapter, vol_drv, instance, conn_info,
-                    self.host_uuid))
+                flow_stor.add(tf_stg.ConnectVolume(vol_drv))

                 # Save the BDM so that the updated connection info is
                 # persisted.
@@ -330,12 +324,9 @@ class PowerVMDriver(driver.ComputeDriver):
         if bdms is not None:
             for bdm in bdms:
                 conn_info = bdm.get('connection_info')
-                drv_type = conn_info.get('driver_volume_type')
-                vol_drv = self.vol_drvs.get(drv_type)
-                flow.add(tf_stg.DisconnectVolume(self.adapter, vol_drv,
-                                                 instance, conn_info,
-                                                 self.host_uuid,
-                                                 pvm_inst_uuid))
+                vol_drv = self._get_inst_vol_adpt(context, instance,
+                                                  conn_info=conn_info)
+                flow.add(tf_stg.DisconnectVolume(vol_drv))

         # Only attach the disk adapters if this is not a boot from volume.
         if not self._is_booted_from_volume(block_device_info):
@@ -397,10 +388,9 @@ class PowerVMDriver(driver.ComputeDriver):

         # Determine if there are volumes to connect. If so, add a connection
         # for each type.
-        drv_type = connection_info.get('driver_volume_type')
-        vol_drv = self.vol_drvs.get(drv_type)
-        flow.add(tf_stg.ConnectVolume(self.adapter, vol_drv, instance,
-                                      connection_info, self.host_uuid))
+        vol_drv = self._get_inst_vol_adpt(context, instance,
+                                          conn_info=connection_info)
+        flow.add(tf_stg.ConnectVolume(vol_drv))

         # Build the engine & run!
         engine = tf_eng.load(flow)
@@ -416,12 +406,10 @@ class PowerVMDriver(driver.ComputeDriver):

         # Determine if there are volumes to connect. If so, add a connection
         # for each type.
-        drv_type = connection_info.get('driver_volume_type')
-        vol_drv = self.vol_drvs.get(drv_type)
-        pvm_inst_uuid = vm.get_pvm_uuid(instance)
-        flow.add(tf_stg.DisconnectVolume(self.adapter, vol_drv, instance,
-                                         connection_info, self.host_uuid,
-                                         pvm_inst_uuid))
+        vol_drv = self._get_inst_vol_adpt(ctx.get_admin_context(), instance,
+                                          conn_info=connection_info)
+
+        flow.add(tf_stg.DisconnectVolume(vol_drv))

         # Build the engine & run!
         engine = tf_eng.load(flow)
@@ -736,19 +724,18 @@ class PowerVMDriver(driver.ComputeDriver):
         connector = {'host': CONF.host}

-        # The WWPNs in case of FC connection.
-        if self.vol_drvs['fibre_channel'] is not None:
+        vol_drv = self._get_inst_vol_adpt(ctx.get_admin_context(),
+                                          instance)
+
+        # The WWPNs in case of FC connection.
+        if vol_drv is not None:
             # Override the host name.
             # TODO(IBM) See if there is a way to support a FC host name that
             # is independent of overall host name.
-            connector['host'] = self.vol_drvs['fibre_channel'].host_name(
-                self.adapter, self.host_uuid, instance)
+            connector['host'] = vol_drv.host_name()

             # TODO(IBM) WWPNs should be resolved from instance if previously
             # invoked (ex. Destroy)
             # Set the WWPNs
-            wwpn_list = self.vol_drvs['fibre_channel'].wwpns(self.adapter,
-                                                             self.host_uuid,
-                                                             instance)
+            wwpn_list = vol_drv.wwpns()
             if wwpn_list is not None:
                 connector["wwpns"] = wwpn_list
         return connector
@@ -1050,6 +1037,33 @@ class PowerVMDriver(driver.ComputeDriver):
         host = CONF.vnc.vncserver_proxyclient_address
         return console_type.ConsoleVNC(host=host, port=port)

+    def _get_inst_vol_adpt(self, context, instance, conn_info=None):
+        """Returns the appropriate volume driver based on connection type.
+
+        Checks the connection info for the connection type and returns the
+        connector; if no connection info is provided, returns the default
+        connector.
+        :param context: security context
+        :param instance: Nova instance for which the volume adapter is needed.
+        :param conn_info: BDM connection information of the instance to
+                          get the volume adapter type (vSCSI/NPIV) requested.
+        :returns: Returns the volume adapter. If conn_info is not passed,
+                  returns the volume adapter based on the CONF
+                  fc_attach_strategy property (npiv/vscsi). Otherwise returns
+                  the adapter based on the connection type of
+                  connection_info.
+        """
+        adp_type = vol_attach.FC_STRATEGY_MAPPING[
+            CONF.powervm.fc_attach_strategy]
+        vol_cls = importutils.import_class(adp_type)
+        if conn_info:
+            LOG.debug('Volume Adapter returned for connection_info=%s' %
+                      conn_info)
+        LOG.debug('Volume Adapter class %(cls)s for instance %(inst)s' %
+                  {'cls': vol_cls, 'inst': instance})
+        return vol_cls(self.adapter, self.host_uuid,
+                       instance, conn_info)
+

 def _inst_dict(input_dict):
     """Builds a dictionary with instances as values based on the input classes.
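A sketch of what _get_inst_vol_adpt resolves at runtime (assuming the default config values added elsewhere in this change):

    adp_type = vol_attach.FC_STRATEGY_MAPPING['vscsi']
    # -> 'nova_powervm.virt.powervm.volume.vscsi.VscsiVolumeAdapter'
    vol_cls = importutils.import_class(adp_type)
    # Each call builds a fresh adapter bound to one instance and one
    # volume's connection_info.
    vol_drv = vol_cls(self.adapter, self.host_uuid, instance, conn_info)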
@@ -32,38 +32,26 @@ LOG = logging.getLogger(__name__)
 class ConnectVolume(task.Task):
     """The task to connect a volume to an instance."""

-    def __init__(self, adapter, vol_drv, instance, connection_info, host_uuid):
+    def __init__(self, vol_drv):
         """Create the task.

-        Requires LPAR info through requirement of lpar_wrap.
-
-        :param adapter: The pypowervm adapter.
         :param vol_drv: The volume driver (see volume folder). Ties the
                         storage to a connection type (ex. vSCSI or NPIV).
-        :param instance: The nova instance.
-        :param connection_info: The connection info from the block device
-                                mapping.
-        :param host_uuid: The pypowervm UUID of the host.
         """
-        self.adapter = adapter
         self.vol_drv = vol_drv
-        self.instance = instance
-        self.connection_info = connection_info
-        self.vol_id = self.connection_info['data']['volume_id']
-        self.host_uuid = host_uuid
+        self.vol_id = self.vol_drv.connection_info['data']['volume_id']

         super(ConnectVolume, self).__init__(name='connect_vol_%s' %
-                                            self.vol_id,
-                                            requires=['lpar_wrap'])
+                                            self.vol_id)

-    def execute(self, lpar_wrap):
+    def execute(self):
         LOG.info(_LI('Connecting volume %(vol)s to instance %(inst)s'),
-                 {'vol': self.vol_id, 'inst': self.instance.name})
-        return self.vol_drv.connect_volume(self.adapter, self.host_uuid,
-                                           lpar_wrap.uuid, self.instance,
-                                           self.connection_info)
+                 {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
+        return self.vol_drv.connect_volume()

-    def revert(self, lpar_wrap, result, flow_failures):
+    def revert(self, result, flow_failures):
         # The parameters have to match the execute method, plus the response +
         # failures even if only a subset are used.
         if result is None or isinstance(result, task_fail.Failure):
@@ -72,48 +60,32 @@ class ConnectVolume(task.Task):

         LOG.warn(_LW('Volume %(vol)s for instance %(inst)s to be '
                      'disconnected'),
-                 {'vol': self.vol_id, 'inst': self.instance.name})
+                 {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})

-        return self.vol_drv.disconnect_volume(self.adapter, self.host_uuid,
-                                              lpar_wrap.uuid, self.instance,
-                                              self.connection_info)
+        return self.vol_drv.disconnect_volume()


 class DisconnectVolume(task.Task):
     """The task to disconnect a volume from an instance."""

-    def __init__(self, adapter, vol_drv, instance, connection_info,
-                 host_uuid, vm_uuid):
+    def __init__(self, vol_drv):
         """Create the task.

-        Requires LPAR info through requirement of lpar_wrap.
-
-        :param adapter: The pypowervm adapter.
         :param vol_drv: The volume driver (see volume folder). Ties the
                         storage to a connection type (ex. vSCSI or NPIV).
-        :param instance: The nova instance.
-        :param connection_info: The connection info from the block device
-                                mapping.
-        :param host_uuid: The pypowervm UUID of the host.
-        :param vm_uuid: The pypowervm UUID of the VM.
         """
-        self.adapter = adapter
         self.vol_drv = vol_drv
-        self.instance = instance
-        self.connection_info = connection_info
-        self.vol_id = self.connection_info['data']['volume_id']
-        self.host_uuid = host_uuid
-        self.vm_uuid = vm_uuid
-
+        self.vol_id = self.vol_drv.connection_info['data']['volume_id']
         super(DisconnectVolume, self).__init__(name='disconnect_vol_%s' %
                                                self.vol_id)

     def execute(self):
         LOG.info(_LI('Disconnecting volume %(vol)s from instance %(inst)s'),
-                 {'vol': self.vol_id, 'inst': self.instance.name})
-        return self.vol_drv.disconnect_volume(self.adapter, self.host_uuid,
-                                              self.vm_uuid, self.instance,
-                                              self.connection_info)
+                 {'vol': self.vol_id, 'inst': self.vol_drv.instance})
+        return self.vol_drv.disconnect_volume()

     def revert(self, result, flow_failures):
         # The parameters have to match the execute method, plus the response +
@@ -124,10 +96,8 @@ class DisconnectVolume(task.Task):

         LOG.warn(_LW('Volume %(vol)s for instance %(inst)s to be '
                      're-connected'),
-                 {'vol': self.vol_id, 'inst': self.instance.name})
-        return self.vol_drv.connect_volume(self.adapter, self.host_uuid,
-                                           self.vm_uuid, self.instance,
-                                           self.connection_info)
+                 {'vol': self.vol_id, 'inst': self.vol_drv.instance})
+        return self.vol_drv.connect_volume()


 class CreateDiskForImg(task.Task):
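A minimal sketch of how the slimmed-down tasks are now driven (hypothetical flow, mirroring the driver hunks above):

    import taskflow.engines as tf_eng
    from taskflow.patterns import linear_flow as tf_lf

    # The task needs only the pre-initialized volume driver; the
    # adapter, host UUID, instance and connection_info travel inside it.
    flow = tf_lf.Flow('attach_volume')
    flow.add(tf_stg.ConnectVolume(vol_drv))
    tf_eng.load(flow).run()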
@@ -15,7 +15,30 @@
 # under the License.

 # Defines the various volume connectors that can be used.
+from oslo_config import cfg
+
+CONF = cfg.CONF
+
+
+vol_adapter_opts = [
+    cfg.StrOpt('fc_attach_strategy',
+               default='vscsi',
+               help='The Fibre Channel Volume Strategy defines how FC Cinder '
+                    'volumes should be attached to the Virtual Machine. The '
+                    'options are: npiv or vscsi.'),
+    cfg.StrOpt('fc_npiv_adapter_api',
+               default='nova_powervm.virt.powervm.volume.npiv.'
+                       'NPIVVolumeAdapter',
+               help='Volume Adapter API to connect FC volumes using NPIV '
+                    'connection mechanism'),
+    cfg.StrOpt('fc_vscsi_adapter_api',
+               default='nova_powervm.virt.powervm.volume.vscsi.'
+                       'VscsiVolumeAdapter',
+               help='Volume Adapter API to connect FC volumes through Virtual '
+                    'I/O Server using PowerVM vSCSI connection mechanism')
+]
+CONF.register_opts(vol_adapter_opts, group='powervm')

 FC_STRATEGY_MAPPING = {
-    'npiv': 'nova_powervm.virt.powervm.volume.npiv.NPIVVolumeAdapter',
-    'vscsi': 'nova_powervm.virt.powervm.volume.vscsi.VscsiVolumeAdapter'
+    'npiv': CONF.powervm.fc_npiv_adapter_api,
+    'vscsi': CONF.powervm.fc_vscsi_adapter_api
 }
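A deployment can now swap adapter implementations through configuration alone. Illustrative override (values are hypothetical):

    # In nova.conf:
    #   [powervm]
    #   fc_attach_strategy = npiv
    # With that setting, the lookup below yields the class path from
    # fc_npiv_adapter_api, so a custom adapter can be plugged in
    # without code changes.
    vol_cls_path = FC_STRATEGY_MAPPING[CONF.powervm.fc_attach_strategy]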
@@ -14,7 +14,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import abc
+from nova_powervm.virt.powervm import vm
+import six
+

+@six.add_metaclass(abc.ABCMeta)
 class PowerVMVolumeAdapter(object):
     """The volume adapter connects a Cinder volume to a VM.
@@ -25,67 +30,34 @@ class PowerVMVolumeAdapter(object):

     This is built similarly to the LibvirtBaseVolumeDriver.
     """

-    def connect_volume(self, adapter, host_uuid, vm_uuid, instance,
-                       connection_info):
-        """Connects the volume.
+    def __init__(self, adapter, host_uuid, instance, connection_info):
+        """Initialize the PowerVMVolumeAdapter

         :param adapter: The pypowervm adapter.
         :param host_uuid: The pypowervm UUID of the host.
-        :param vm_uuid: The powervm UUID of the VM.
         :param instance: The nova instance that the volume should connect to.
-        :param connection_info: Comes from the BDM. Example connection_info:
-                {
-                'driver_volume_type':'fibre_channel',
-                'serial':u'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                'data':{
-                   'initiator_target_map':{
-                       '21000024FF649105':['500507680210E522'],
-                       '21000024FF649104':['500507680210E522'],
-                       '21000024FF649107':['500507680210E522'],
-                       '21000024FF649106':['500507680210E522']
-                    },
-                   'target_discovered':False,
-                   'qos_specs':None,
-                   'volume_id':'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                   'target_lun':0,
-                   'access_mode':'rw',
-                   'target_wwn':'500507680210E522'
-                }
+        :param connection_info: The volume connection info generated from the
+                                BDM. Used to determine how to connect the
+                                volume to the VM.
         """
+        self.adapter = adapter
+        self.host_uuid = host_uuid
+        self.instance = instance
+        self.connection_info = connection_info
+        self.vm_uuid = vm.get_pvm_uuid(instance)
+
+    def connect_volume(self):
+        """Connects the volume.
+        """
         raise NotImplementedError()

-    def disconnect_volume(self, adapter, host_uuid, vm_uuid, instance,
-                          connection_info):
+    def disconnect_volume(self):
         """Disconnect the volume.
-
-        :param adapter: The pypowervm adapter.
-        :param host_uuid: The pypowervm UUID of the host.
-        :param vm_uuid: The powervm UUID of the VM.
-        :param instance: The nova instance that the volume should disconnect
-                         from.
-        :param connection_info: Comes from the BDM. Example connection_info:
-                {
-                'driver_volume_type':'fibre_channel',
-                'serial':u'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                'data':{
-                   'initiator_target_map':{
-                       '21000024FF649105':['500507680210E522'],
-                       '21000024FF649104':['500507680210E522'],
-                       '21000024FF649107':['500507680210E522'],
-                       '21000024FF649106':['500507680210E522']
-                    },
-                   'target_discovered':False,
-                   'qos_specs':None,
-                   'volume_id':'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                   'target_lun':0,
-                   'access_mode':'rw',
-                   'target_wwn':'500507680210E522'
-                }
         """
         raise NotImplementedError()


 @six.add_metaclass(abc.ABCMeta)
 class FibreChannelVolumeAdapter(PowerVMVolumeAdapter):
     """Defines a Fibre Channel specific volume adapter.
@@ -94,22 +66,16 @@ class FibreChannelVolumeAdapter(PowerVMVolumeAdapter):
     sub classes can support them.
     """

-    def wwpns(self, adapter, host_uuid, instance):
+    def wwpns(self):
         """Builds the WWPNs of the adapters that will connect the ports.

-        :param adapter: The pypowervm API adapter.
-        :param host_uuid: The UUID of the host for the pypowervm adapter.
-        :param instance: The nova instance.
         :returns: The list of WWPNs that need to be included in the zone set.
         """
         raise NotImplementedError()

-    def host_name(self, adapter, host_uuid, instance):
+    def host_name(self):
         """Derives the host name that should be used for the storage device.

-        :param adapter: The pypowervm API adapter.
-        :param host_uuid: The UUID of the host for the pypowervm adapter.
-        :param instance: The nova instance.
         :returns: The host name.
         """
         raise NotImplementedError()
@@ -48,106 +48,51 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
     will have its own WWPNs and own Virtual FC adapter. The Virtual I/O
     Server only passes through communication directly to the VM itself.
     """
+    def connect_volume(self):
+        """Connects the volume."""

-    def connect_volume(self, adapter, host_uuid, vm_uuid, instance,
-                       connection_info):
-        """Connects the volume.
-
-        :param adapter: The pypowervm adapter.
-        :param host_uuid: The pypowervm UUID of the host.
-        :param vios_uuid: The pypowervm UUID of the VIOS.
-        :param vm_uuid: The powervm UUID of the VM.
-        :param instance: The nova instance that the volume should connect to.
-        :param connection_info: Comes from the BDM. Example connection_info:
-                {
-                'driver_volume_type':'fibre_channel',
-                'serial':u'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                'data':{
-                   'initiator_target_map':{
-                       '21000024FF649105':['500507680210E522'],
-                       '21000024FF649104':['500507680210E522'],
-                       '21000024FF649107':['500507680210E522'],
-                       '21000024FF649106':['500507680210E522']
-                    },
-                   'target_discovered':False,
-                   'qos_specs':None,
-                   'volume_id':'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                   'target_lun':0,
-                   'access_mode':'rw',
-                   'target_wwn':'500507680210E522'
-                }
-        """
         # We need to gather each fabric's port mappings
         npiv_port_mappings = []
         for fabric in self._fabric_names():
-            npiv_port_mappings = self._get_fabric_meta(instance, fabric)
-            self._remove_npiv_mgmt_mappings(adapter, fabric, host_uuid,
-                                            instance, npiv_port_mappings)
+            npiv_port_mappings = self._get_fabric_meta(fabric)
+            self._remove_npiv_mgmt_mappings(fabric, npiv_port_mappings)
             # This method should no-op if the mappings are already attached to
             # the instance...so it won't duplicate the settings every time an
            # attach volume is called.
-            LOG.info(_LI("Adding NPIV mapping for instance %s"), instance.name)
-            pvm_vfcm.add_npiv_port_mappings(adapter, host_uuid, vm_uuid,
-                                            npiv_port_mappings)
+            LOG.info(_LI("Adding NPIV mapping for instance %s"),
+                     self.instance.name)
+            pvm_vfcm.add_npiv_port_mappings(self.adapter, self.host_uuid,
+                                            self.vm_uuid, npiv_port_mappings)

-            self._set_fabric_state(instance, fabric, FS_INST_MAPPED)
+            self._set_fabric_state(fabric, FS_INST_MAPPED)

-    def disconnect_volume(self, adapter, host_uuid, vm_uuid, instance,
-                          connection_info):
-        """Disconnect the volume.
+    def disconnect_volume(self):
+        """Disconnect the volume."""

-        :param adapter: The pypowervm adapter.
-        :param host_uuid: The pypowervm UUID of the host.
-        :param vm_uuid: The powervm UUID of the VM.
-        :param instance: The nova instance that the volume should disconnect
-                         from.
-        :param connection_info: Comes from the BDM. Example connection_info:
-                {
-                'driver_volume_type':'fibre_channel',
-                'serial':u'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                'data':{
-                   'initiator_target_map':{
-                       '21000024FF649105':['500507680210E522'],
-                       '21000024FF649104':['500507680210E522'],
-                       '21000024FF649107':['500507680210E522'],
-                       '21000024FF649106':['500507680210E522']
-                    },
-                   'target_discovered':False,
-                   'qos_specs':None,
-                   'volume_id':'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                   'target_lun':0,
-                   'access_mode':'rw',
-                   'target_wwn':'500507680210E522'
-                }
-        """
         # We should only delete the NPIV mappings if we are running through a
         # VM deletion. VM deletion occurs when the task state is deleting.
         # However, it can also occur during a 'roll-back' of the spawn.
         # Disconnect of the volumes will only be called during a roll back
         # of the spawn.
-        if instance.task_state not in TASK_STATES_FOR_DISCONNECT:
+        if self.instance.task_state not in TASK_STATES_FOR_DISCONNECT:
            # NPIV should only remove the VFC mapping upon a destroy of the VM
            return

         # We need to gather each fabric's port mappings
         npiv_port_mappings = []
         for fabric in self._fabric_names():
-            npiv_port_mappings.extend(self._get_fabric_meta(instance, fabric))
+            npiv_port_mappings.extend(self._get_fabric_meta(fabric))

         # Now that we've collapsed all of the varying fabrics' port mappings
         # into one list, we can call down into pypowervm to remove them in one
         # action.
-        LOG.info(_LI("Removing NPIV mapping for instance %s"), instance.name)
-        pvm_vfcm.remove_npiv_port_mappings(adapter, host_uuid,
+        LOG.info(_LI("Removing NPIV mapping for instance %s"),
+                 self.instance.name)
+        pvm_vfcm.remove_npiv_port_mappings(self.adapter, self.host_uuid,
                                            npiv_port_mappings)

-    def wwpns(self, adapter, host_uuid, instance):
+    def wwpns(self):
         """Builds the WWPNs of the adapters that will connect the ports.

-        :param adapter: The pypowervm API adapter.
-        :param host_uuid: The UUID of the host for the pypowervm adapter.
-        :param instance: The nova instance.
         :returns: The list of WWPNs that need to be included in the zone set.
         """
         vios_wraps, mgmt_uuid = None, None
         resp_wwpns = []
@@ -161,13 +106,13 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # If a mapping already exists, we can instead just pull the data off
         # of the system metadata from the nova instance.
         for fabric in self._fabric_names():
-            fc_state = self._get_fabric_state(instance, fabric)
+            fc_state = self._get_fabric_state(fabric)
             LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
                          "instance %(inst)s") %
-                     {'st': fc_state, 'inst': instance.name})
+                     {'st': fc_state, 'inst': self.instance.name})

             if (fc_state == FS_UNMAPPED and
-                    instance.task_state not in [task_states.DELETING]):
+                    self.instance.task_state not in [task_states.DELETING]):

                 # At this point we've determined that we need to do a mapping.
                 # So we go and obtain the mgmt uuid and the VIOS wrappers.
@@ -175,11 +120,11 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                 # that we do not keep invoking these expensive calls
                 # unnecessarily.
                 if mgmt_uuid is None:
-                    mgmt_uuid = mgmt.get_mgmt_partition(adapter).uuid
+                    mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

                     # The VIOS wrappers are also not set at this point. Seed
                     # them as well. Will get reused on subsequent loops.
-                    vios_resp = adapter.read(
+                    vios_resp = self.adapter.read(
                         pvm_vios.VIOS.schema_type,
                         xag=[pvm_vios.VIOS.xags.FC_MAPPING,
                              pvm_vios.VIOS.xags.STORAGE])
@@ -200,20 +145,20 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                 # Check if the fabrics are unmapped then we need to map it
                 # temporarily with the management partition.
                 LOG.info(_LI("Adding NPIV Mapping with mgmt partition for "
-                             "instance %s") % instance.name)
+                             "instance %s") % self.instance.name)
                 port_maps = pvm_vfcm.add_npiv_port_mappings(
-                    adapter, host_uuid, mgmt_uuid, port_maps)
+                    self.adapter, self.host_uuid, mgmt_uuid, port_maps)

                 # Set the fabric meta (which indicates on the instance how
                 # the fabric is mapped to the physical port) and the fabric
                 # state.
-                self._set_fabric_meta(instance, fabric, port_maps)
-                self._set_fabric_state(instance, fabric, FS_MGMT_MAPPED)
+                self._set_fabric_meta(fabric, port_maps)
+                self._set_fabric_state(fabric, FS_MGMT_MAPPED)
             else:
                 # This specific fabric had been previously set. Just pull
                 # from the meta (as it is likely already mapped to the
                 # instance)
-                port_maps = self._get_fabric_meta(instance, fabric)
+                port_maps = self._get_fabric_meta(fabric)

             # Port map is set by either conditional, but may be set to None.
             # If not None, then add the WWPNs to the response.
@@ -224,8 +169,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # The return object needs to be a list for the volume connector.
         return resp_wwpns

-    def _remove_npiv_mgmt_mappings(self, adapter, fabric, host_uuid, instance,
-                                   npiv_port_map):
+    def _remove_npiv_mgmt_mappings(self, fabric, npiv_port_map):
         """Remove the fabric from the management partition if necessary.

         Check if the Fabric is mapped to the management partition, if yes then
@@ -235,35 +179,27 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         from the management partition so that they can be remapped to the
         actual client VM.

-        :param adapter: The pypowervm adapter.
         :param fabric: fabric name
-        :param host_uuid: The pypowervm UUID of the host.
-        :param instance: The nova instance that the volume should disconnect
-                         from.
-        :param npiv_port_map: NPIV port mappings needs to be removed.
+        :param npiv_port_map: NPIV Port mappings which needs to be removed.
         """

-        if self._get_fabric_state(instance, fabric) == FS_MGMT_MAPPED:
+        if self._get_fabric_state(fabric) == FS_MGMT_MAPPED:
             LOG.info(_LI("Removing NPIV mapping for mgmt partition "
-                         "for instance=%s") % instance.name)
-            pvm_vfcm.remove_npiv_port_mappings(adapter, host_uuid,
+                         "for instance=%s") % self.instance.name)
+            pvm_vfcm.remove_npiv_port_mappings(self.adapter, self.host_uuid,
                                                npiv_port_map)
-            self._set_fabric_state(instance, fabric, FS_UNMAPPED)
+            self._set_fabric_state(fabric, FS_UNMAPPED)
         return

-    def host_name(self, adapter, host_uuid, instance):
+    def host_name(self):
         """Derives the host name that should be used for the storage device.

-        :param adapter: The pypowervm API adapter.
-        :param host_uuid: The UUID of the host for the pypowervm adapter.
-        :param instance: The nova instance.
         :returns: The host name.
         """
-        return instance.name
+        return self.instance.name

-    def _set_fabric_state(self, instance, fabric, state):
+    def _set_fabric_state(self, fabric, state):
         """Sets the fabric state into the instance's system metadata.
-        :param instance: The nova instance
         :param fabric: The name of the fabric
         :param state: state of the fabric which needs to be set
          Possible Valid States:-
@@ -274,12 +210,11 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):

         meta_key = self._sys_fabric_state_key(fabric)
         LOG.info(_LI("Setting Fabric state=%(st)s for instance=%(inst)s") %
-                 {'st': state, 'inst': instance.name})
-        instance.system_metadata[meta_key] = state
+                 {'st': state, 'inst': self.instance.name})
+        self.instance.system_metadata[meta_key] = state

-    def _get_fabric_state(self, instance, fabric):
+    def _get_fabric_state(self, fabric):
         """Gets the fabric state from the instance's system metadata.
-        :param instance: The nova instance
         :param fabric: The name of the fabric
         :Returns state: state of the fabric which needs to be set
          Possible Valid States:-
@@ -288,16 +223,16 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         FS_INST_MAPPED: Fabric is mapped with the nova instance.
         """
         meta_key = self._sys_fabric_state_key(fabric)
-        if instance.system_metadata.get(meta_key) is None:
-            instance.system_metadata[meta_key] = FS_UNMAPPED
+        if self.instance.system_metadata.get(meta_key) is None:
+            self.instance.system_metadata[meta_key] = FS_UNMAPPED

-        return instance.system_metadata[meta_key]
+        return self.instance.system_metadata[meta_key]

     def _sys_fabric_state_key(self, fabric):
         """Returns the nova system metadata key for a given fabric."""
         return FABRIC_STATE_METADATA_KEY + '_' + fabric

-    def _set_fabric_meta(self, instance, fabric, port_map):
+    def _set_fabric_meta(self, fabric, port_map):
         """Sets the port map into the instance's system metadata.

         The system metadata will store a per-fabric port map that links the
@@ -305,7 +240,6 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         nature between the wwpns call (get_volume_connector) and the
         connect_volume (spawn).

-        :param instance: The nova instance.
         :param fabric: The name of the fabric.
         :param port_map: The port map (as defined via the derive_npiv_map
                          pypowervm method).
@@ -322,23 +256,22 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         meta_value = ",".join(meta_elems)
         meta_key = self._sys_meta_fabric_key(fabric)

-        instance.system_metadata[meta_key] = meta_value
+        self.instance.system_metadata[meta_key] = meta_value

-    def _get_fabric_meta(self, instance, fabric):
+    def _get_fabric_meta(self, fabric):
         """Gets the port map from the instance's system metadata.

         See _set_fabric_meta.

-        :param instance: The nova instance.
         :param fabric: The name of the fabric.
         :return: The port map (as defined via the derive_npiv_map pypowervm
                  method).
         """
         meta_key = self._sys_meta_fabric_key(fabric)
-        if instance.system_metadata.get(meta_key) is None:
+        if self.instance.system_metadata.get(meta_key) is None:
             return None

-        meta_value = instance.system_metadata[meta_key]
+        meta_value = self.instance.system_metadata[meta_key]
         wwpns = meta_value.split(",")

         # Rebuild the WWPNs into the natural structure.
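For illustration, the fabric state and port map round-trip through the instance's system_metadata, keyed per fabric. A sketch (assumes FABRIC_STATE_METADATA_KEY == 'fabric_state'; the constant's value is defined outside this diff):

    # _sys_fabric_state_key('A') -> 'fabric_state_A'
    self._set_fabric_state('A', FS_MGMT_MAPPED)
    assert (self.instance.system_metadata['fabric_state_A']
            == FS_MGMT_MAPPED)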
@@ -37,6 +37,16 @@ LOG = logging.getLogger(__name__)
 _XAGS = [pvm_vios.VIOS.xags.STORAGE]
 
 
+def _build_udid_key(vios_uuid, volume_id):
+    """This method will build the udid dictionary key.
+
+    :param vios_uuid: The UUID of the vios for the pypowervm adapter.
+    :param volume_id: The lun volume id
+    :returns: The udid dictionary key
+    """
+    return vios_uuid + volume_id
+
+
 class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
     """The vSCSI implementation of the Volume Adapter.
 
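Since the udid key builder is now a module-level function, the key is just the two ids concatenated. A quick illustration; the VIOS UUID is made up, and the volume id reuses the one from the docstring example further down:

    def _build_udid_key(vios_uuid, volume_id):
        return vios_uuid + volume_id

    key = _build_udid_key('3443DB77-AED1-47ED-9AA5-3DB9C6CF7089',
                          '10d9934e-b031-48ff-9f02-2ac533e331c8')
    # The key is simply the concatenation of the two strings.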
@@ -45,49 +55,33 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
     information from the driver and link it to a given virtual machine.
     """
 
-    def __init__(self):
-        super(VscsiVolumeAdapter, self).__init__()
-        self._pfc_wwpns = None
-
-    def connect_volume(self, adapter, host_uuid, vm_uuid, instance,
-                       connection_info):
-        """Connects the volume.
+    def __init__(self, adapter, host_uuid, instance, connection_info):
+        """Initializes the vSCSI Volume Adapter.
 
         :param adapter: The pypowervm adapter.
         :param host_uuid: The pypowervm UUID of the host.
-        :param vm_uuid: The powervm UUID of the VM.
         :param instance: The nova instance that the volume should connect to.
-        :param connection_info: Comes from the BDM. Example connection_info:
-                {
-                'driver_volume_type':'fibre_channel',
-                'serial':u'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                'data':{
-                   'initiator_target_map':{
-                       '21000024FF649105':['500507680210E522'],
-                       '21000024FF649104':['500507680210E522'],
-                       '21000024FF649107':['500507680210E522'],
-                       '21000024FF649106':['500507680210E522']
-                    },
-                   'target_discovered':False,
-                   'qos_specs':None,
-                   'volume_id':'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                   'target_lun':0,
-                   'access_mode':'rw',
-                   'target_wwn':'500507680210E522'
-                }
+        :param connection_info: Comes from the BDM.
         """
+        super(VscsiVolumeAdapter, self).__init__(adapter, host_uuid, instance,
+                                                 connection_info)
+        self._pfc_wwpns = None
+
+    def connect_volume(self):
+        """Connects the volume."""
 
         # Get the initiators
-        it_map = connection_info['data']['initiator_target_map']
-        volume_id = connection_info['data']['volume_id']
-        lun = connection_info['data']['target_lun']
+        it_map = self.connection_info['data']['initiator_target_map']
+        volume_id = self.connection_info['data']['volume_id']
+        lun = self.connection_info['data']['target_lun']
         hdisk_found = False
         device_name = None
 
         i_wwpns = it_map.keys()
 
         # Get VIOS feed
-        vios_feed = vios.get_active_vioses(adapter, host_uuid, xag=_XAGS)
+        vios_feed = vios.get_active_vioses(self.adapter, self.host_uuid,
+                                           xag=_XAGS)
 
         # Iterate through host vios list to find valid hdisks and map to VM.
         for vio_wrap in vios_feed:
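With the new init routine, the driver constructs a volume adapter once per volume and the task flow invokes parameterless methods on it. A minimal stand-in that shows the shape of that pattern; this is an illustration of the design, not the real adapter:

    class FakeVolumeAdapter(object):
        """Toy adapter: __init__ consolidates what the methods later need."""

        def __init__(self, adapter, host_uuid, instance, connection_info):
            self.adapter = adapter
            self.host_uuid = host_uuid
            self.instance = instance
            self.connection_info = connection_info

        def connect_volume(self):
            # Everything required is already on self.
            return self.connection_info['data']['volume_id']

    vol_drv = FakeVolumeAdapter('apt', 'host-uuid', 'inst',
                                {'data': {'volume_id': 'vol-1'}})
    assert vol_drv.connect_volume() == 'vol-1'   # no per-call plumbing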
@@ -108,7 +102,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                 continue
 
             status, device_name, udid = hdisk.discover_hdisk(
-                adapter, vio_wrap.uuid, itls)
+                self.adapter, vio_wrap.uuid, itls)
             if device_name is not None and status in [
                     hdisk.LUAStatus.DEVICE_AVAILABLE,
                     hdisk.LUAStatus.FOUND_ITL_ERR]:
@@ -119,12 +113,11 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 
                 # Found a hdisk on this Virtual I/O Server. Add a vSCSI
                 # mapping to the Virtual Machine so that it can use the hdisk.
-                self._add_mapping(adapter, host_uuid, vm_uuid, vio_wrap.uuid,
-                                  device_name)
+                self._add_mapping(vio_wrap.uuid, device_name)
 
                 # Save the UDID for the disk in the connection info. It is
                 # used for the detach.
-                self._set_udid(connection_info, vio_wrap.uuid, volume_id, udid)
+                self._set_udid(vio_wrap.uuid, volume_id, udid)
                 LOG.info(_LI('Device attached: %s'), device_name)
                 hdisk_found = True
             elif status == hdisk.LUAStatus.DEVICE_IN_USE:
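Only two discovery outcomes lead to a mapping: an available device, or one found with ITL errors; an in-use device is skipped. A toy version of that gate; the status names mirror hdisk.LUAStatus in the diff, but the values here are placeholders:

    DEVICE_AVAILABLE = 'avail'      # placeholder values; the real ones
    FOUND_ITL_ERR = 'itl_err'       # live in pypowervm's hdisk module
    DEVICE_IN_USE = 'in_use'

    def gate(status, device_name):
        if device_name is not None and status in (DEVICE_AVAILABLE,
                                                  FOUND_ITL_ERR):
            return 'map'        # add the vSCSI mapping and save the UDID
        elif status == DEVICE_IN_USE:
            return 'skip'       # hdisk already attached elsewhere
        return 'not_found'

    assert gate(DEVICE_AVAILABLE, 'hdisk4') == 'map'
    assert gate(DEVICE_IN_USE, 'hdisk4') == 'skip'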
@@ -142,50 +135,25 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         if device_name is None:
             device_name = 'None'
         ex_args = {'backing_dev': device_name, 'reason': msg,
-                   'instance_name': instance.name}
+                   'instance_name': self.instance.name}
         raise pexc.VolumeAttachFailed(**ex_args)
 
-    def disconnect_volume(self, adapter, host_uuid, vm_uuid, instance,
-                          connection_info):
-        """Disconnect the volume.
-
-        :param adapter: The pypowervm adapter.
-        :param host_uuid: The pypowervm UUID of the host.
-        :param vm_uuid: The powervm UUID of the VM.
-        :param instance: The nova instance that the volume should disconnect
-                         from.
-        :param connection_info: Comes from the BDM. Example connection_info:
-                {
-                'driver_volume_type':'fibre_channel',
-                'serial':u'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                'data':{
-                   'initiator_target_map':{
-                       '21000024FF649105':['500507680210E522'],
-                       '21000024FF649104':['500507680210E522'],
-                       '21000024FF649107':['500507680210E522'],
-                       '21000024FF649106':['500507680210E522']
-                    },
-                   'target_discovered':False,
-                   'qos_specs':None,
-                   'volume_id':'10d9934e-b031-48ff-9f02-2ac533e331c8',
-                   'target_lun':0,
-                   'access_mode':'rw',
-                   'target_wwn':'500507680210E522'
-                }
-        """
-        volume_id = connection_info['data']['volume_id']
+    def disconnect_volume(self):
+        """Disconnect the volume."""
+
+        volume_id = self.connection_info['data']['volume_id']
         device_name = None
         volume_udid = None
         try:
             # Get VIOS feed
-            vios_feed = vios.get_active_vioses(adapter, host_uuid,
+            vios_feed = vios.get_active_vioses(self.adapter, self.host_uuid,
                                                xag=_XAGS)
 
             # Iterate through host vios list to find hdisks to disconnect.
             for vio_wrap in vios_feed:
                 LOG.debug("vios uuid %s", vio_wrap.uuid)
                 try:
-                    volume_udid = self._get_udid(
-                        connection_info, vio_wrap.uuid, volume_id)
+                    volume_udid = self._get_udid(vio_wrap.uuid, volume_id)
                     device_name = vio_wrap.hdisk_from_uuid(volume_udid)
 
                     if not device_name:
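During disconnect, each VIOS is checked for the saved UDID, and the backing hdisk name is resolved from it. A sketch of that lookup with a fake wrapper; hdisk_from_uuid is the real VIOS wrapper method used in the diff, the rest is made up:

    class FakeVios(object):
        """Stand-in for a pypowervm VIOS wrapper."""
        uuid = 'vios-1'

        def hdisk_from_uuid(self, udid):
            return {'udid-123': 'hdisk4'}.get(udid)

    conn_data = {'vios-1' + 'vol-1': 'udid-123'}   # key per _build_udid_key
    vio_wrap = FakeVios()
    volume_udid = conn_data.get(vio_wrap.uuid + 'vol-1')
    device_name = vio_wrap.hdisk_from_uuid(volume_udid)
    assert device_name == 'hdisk4'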
@@ -214,13 +182,13 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                                  "%(volume_id)s. volume_uid: %(volume_uid)s."),
                              {'volume_uid': volume_udid, 'volume_id': volume_id,
                               'vios_name': vio_wrap.name, 'hdisk': device_name})
-                    partition_id = vm.get_vm_id(adapter, vm_uuid)
-                    tsk_map.remove_pv_mapping(adapter, vio_wrap.uuid,
+                    partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
+                    tsk_map.remove_pv_mapping(self.adapter, vio_wrap.uuid,
                                               partition_id, device_name)
 
                     try:
                         # Attempt to remove the hDisk
-                        hdisk.remove_hdisk(adapter, CONF.host, device_name,
+                        hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                                            vio_wrap.uuid)
                     except Exception as e:
                         # If there is a failure, log it, but don't stop the process
@@ -231,84 +199,49 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 
         except Exception as e:
             LOG.error(_LE('Cannot detach volumes from virtual machine: %s'),
-                      vm_uuid)
+                      self.vm_uuid)
             LOG.exception(_LE(u'Error: %s'), e)
             ex_args = {'backing_dev': device_name,
-                       'instance_name': instance.name,
+                       'instance_name': self.instance.name,
                        'reason': six.text_type(e)}
             raise pexc.VolumeDetachFailed(**ex_args)
 
-    def wwpns(self, adapter, host_uuid, instance):
+    def wwpns(self):
         """Builds the WWPNs of the adapters that will connect the ports.
 
-        :param adapter: The pypowervm API adapter.
-        :param host_uuid: The UUID of the host for the pypowervm adapter.
-        :param instance: The nova instance.
         :returns: The list of WWPNs that need to be included in the zone set.
         """
         if self._pfc_wwpns is None:
-            self._pfc_wwpns = vios.get_physical_wwpns(adapter, host_uuid)
+            self._pfc_wwpns = vios.get_physical_wwpns(self.adapter,
+                                                      self.host_uuid)
         return self._pfc_wwpns
 
-    def host_name(self, adapter, host_uuid, instance):
+    def host_name(self):
         """Derives the host name that should be used for the storage device.
 
-        :param adapter: The pypowervm API adapter.
-        :param host_uuid: The UUID of the host for the pypowervm adapter.
-        :param instance: The nova instance.
         :returns: The host name.
         """
         return CONF.host
 
-    def _add_mapping(self, adapter, host_uuid, vm_uuid, vios_uuid,
-                     device_name):
+    def _add_mapping(self, vios_uuid, device_name):
         """This method builds the vscsi map and adds the mapping to
         the given VIOS.
 
-        :param adapter: The pypowervm API adapter.
-        :param host_uuid: The UUID of the target host
-        :param vm_uuid: The UUID of the VM instance
         :param vios_uuid: The UUID of the vios for the pypowervm adapter.
         :param device_name: The hdisk device name
         """
-        pv = pvm_stor.PV.bld(adapter, device_name)
-        tsk_map.add_vscsi_mapping(host_uuid, vios_uuid, vm_uuid, pv)
+        pv = pvm_stor.PV.bld(self.adapter, device_name)
+        tsk_map.add_vscsi_mapping(self.host_uuid, vios_uuid, self.vm_uuid, pv)
 
-    def _get_udid(self, connection_info, vios_uuid, volume_id):
-        """This method will return the hdisk udid stored in connection_info.
-
-        :param connection_info: The connection_info from the BDM.
-        :param vios_uuid: The UUID of the vios for the pypowervm adapter.
-        :param volume_id: The lun volume id
-        :returns: The target_udid associated with the hdisk
-        """
-        try:
-            udid_key = self._build_udid_key(vios_uuid, volume_id)
-            return connection_info['data'][udid_key]
-        except (KeyError, ValueError):
-            LOG.warn(_LW(u'Failed to retrieve device_id key from BDM for '
-                         'volume id %s'), volume_id)
-            return None
-
-    def _set_udid(self, connection_info, vios_uuid, volume_id, udid):
+    def _set_udid(self, vios_uuid, volume_id, udid):
         """This method will set the hdisk udid in the connection_info.
 
-        :param connection_info: The connection_info from the BDM.
         :param vios_uuid: The UUID of the vios for the pypowervm adapter.
         :param volume_id: The lun volume id
         :param udid: The hdisk target_udid to be stored in system_metadata
         """
-        udid_key = self._build_udid_key(vios_uuid, volume_id)
-        connection_info['data'][udid_key] = udid
-
-    def _build_udid_key(self, vios_uuid, volume_id):
-        """This method will build the udid dictionary key.
-
-        :param vios_uuid: The UUID of the vios for the pypowervm adapter.
-        :param volume_id: The lun volume id
-        :returns: The udid dictionary key
-        """
-        return vios_uuid + volume_id
+        udid_key = _build_udid_key(vios_uuid, volume_id)
+        self.connection_info['data'][udid_key] = udid
 
     def _wwpns_on_vios(self, i_wwpns, vios_w):
         """Returns the subset of wwpns from i_wwpns that the VIOS owns.
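wwpns() keeps its lazy cache through the refactor: the physical WWPNs are fetched once and memoized on the adapter. A runnable sketch of that caching shape, with a fake fetch standing in for vios.get_physical_wwpns:

    class WwpnCache(object):
        def __init__(self):
            self._pfc_wwpns = None
            self.fetches = 0

        def wwpns(self):
            if self._pfc_wwpns is None:
                self.fetches += 1
                self._pfc_wwpns = ['21000024FF649104']  # fake fetch result
            return self._pfc_wwpns

    cache = WwpnCache()
    cache.wwpns()
    cache.wwpns()
    assert cache.fetches == 1   # fetched once, then served from the cache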
@@ -324,3 +257,18 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         """
         active_wwpns = vios_w.get_active_pfc_wwpns()
         return [x for x in i_wwpns if x in active_wwpns]
+
+    def _get_udid(self, vios_uuid, volume_id):
+        """This method will return the hdisk udid stored in connection_info.
+
+        :param vios_uuid: The UUID of the vios for the pypowervm adapter.
+        :param volume_id: The lun volume id
+        :returns: The target_udid associated with the hdisk
+        """
+        try:
+            udid_key = _build_udid_key(vios_uuid, volume_id)
+            return self.connection_info['data'][udid_key]
+        except (KeyError, ValueError):
+            LOG.warn(_LW(u'Failed to retrieve device_id key from BDM for '
+                         'volume id %s'), volume_id)
+            return None
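The UDID bookkeeping is symmetric: _set_udid stores the hdisk UDID in connection_info['data'] under the concatenated key, and _get_udid reads it back, returning None on a miss. A self-contained round trip with made-up values:

    def _build_udid_key(vios_uuid, volume_id):
        return vios_uuid + volume_id

    connection_info = {'data': {}}
    key = _build_udid_key('vios-1', 'vol-1')
    connection_info['data'][key] = 'udid-123'                # _set_udid
    assert connection_info['data'].get(key) == 'udid-123'    # _get_udid hit
    assert connection_info['data'].get('missing') is None    # miss -> None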