NEC driver: Optimal path for non-disruptive backup
This patch enables optimal path for non-disruptive backup for the NEC volume driver. Change-Id: Ied9f3353be02a1b80081ab0d41f45f1a7c563468 Implements: blueprint nec-nondisruptive-backup
This commit is contained in:
parent
453df126ab
commit
55e8befc4c
@ -94,8 +94,8 @@ xml_out = '''
|
||||
<OBJECT name="Logical Disk">
|
||||
<SECTION name="LD Detail Information">
|
||||
<UNIT name="LDN(h)">0006</UNIT>
|
||||
<UNIT name="OS Type"> </UNIT>
|
||||
<UNIT name="LD Name">20000009910200140006</UNIT>
|
||||
<UNIT name="OS Type">LX</UNIT>
|
||||
<UNIT name="LD Name">287RbQoP7VdwR1WsPC2fZT_l</UNIT>
|
||||
<UNIT name="LD Capacity">10737418240</UNIT>
|
||||
<UNIT name="Pool No.(h)">0000</UNIT>
|
||||
<UNIT name="Purpose">---</UNIT>
|
||||
@ -385,7 +385,7 @@ def patch_get_conf_properties(self, conf=None):
|
||||
'pool_pools': [0, 1],
|
||||
'pool_backup_pools': [2, 3],
|
||||
'pool_actual_free_capacity': 50000000000,
|
||||
'ldset_name': 'LX:OpenStack0',
|
||||
'ldset_name': '',
|
||||
'ldset_controller_node_name': 'LX:node0',
|
||||
'ld_name_format': 'LX:%s',
|
||||
'ld_backupname_format': 'LX:%s_back',
|
||||
@ -717,7 +717,7 @@ class VolumeCreateTest(volume_helper.MStorageDSVDriver, unittest.TestCase):
|
||||
self.vol.status = 'available'
|
||||
with self.assertRaisesRegexp(exception.NotFound,
|
||||
'Logical Disk `LX:37mA82`'
|
||||
' does not exist.'):
|
||||
' could not be found.'):
|
||||
self._validate_migrate_volume(self.vol, self.xml)
|
||||
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
|
||||
@ -760,7 +760,7 @@ class BindLDTest(volume_helper.MStorageDSVDriver, unittest.TestCase):
|
||||
self.max_ld_count) = self.configs(self.xml)
|
||||
mock_bindld = mock.Mock()
|
||||
self._bind_ld = mock_bindld
|
||||
self._bind_ld.return_value = (0, 0, 0)
|
||||
self._bind_ld.return_value = 0, 0, 0
|
||||
|
||||
def test_bindld_CreateVolume(self):
|
||||
self.vol.id = "AAAAAAAA"
|
||||
@ -820,7 +820,7 @@ class BindLDTest_Snap(volume_helper.MStorageDSVDriver, unittest.TestCase):
|
||||
self.max_ld_count) = self.configs(self.xml)
|
||||
mock_bindld = mock.Mock()
|
||||
self._bind_ld = mock_bindld
|
||||
self._bind_ld.return_value = (0, 0, 0)
|
||||
self._bind_ld.return_value = 0, 0, 0
|
||||
mock_bindsnap = mock.Mock()
|
||||
self._create_snapshot = mock_bindsnap
|
||||
|
||||
@ -901,7 +901,7 @@ class ExportTest(volume_helper.MStorageDSVDriver, unittest.TestCase):
|
||||
self.vol.size = 10
|
||||
self.vol.status = None
|
||||
self.vol.migration_status = None
|
||||
connector = {'wwpns': ["1000-0090-FAA0-723A", "1000-0090-FAA0-723B"]}
|
||||
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
|
||||
self.fc_do_export(None, self.vol, connector)
|
||||
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
|
||||
@ -960,7 +960,7 @@ class ExportTest(volume_helper.MStorageDSVDriver, unittest.TestCase):
|
||||
def test_fc_initialize_connection(self):
|
||||
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
|
||||
self.vol.migration_status = None
|
||||
connector = {'wwpns': ["1000-0090-FAA0-723A", "1000-0090-FAA0-723B"]}
|
||||
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
|
||||
info = self._fc_initialize_connection(self.vol, connector)
|
||||
self.assertEqual('fibre_channel', info['driver_volume_type'])
|
||||
self.assertEqual('2100000991020012', info['data']['target_wwn'][0])
|
||||
@ -969,28 +969,28 @@ class ExportTest(volume_helper.MStorageDSVDriver, unittest.TestCase):
|
||||
self.assertEqual('2A00000991020012', info['data']['target_wwn'][3])
|
||||
self.assertEqual(
|
||||
'2100000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723A'][0])
|
||||
info['data']['initiator_target_map']['10000090FAA0786A'][0])
|
||||
self.assertEqual(
|
||||
'2100000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723B'][0])
|
||||
info['data']['initiator_target_map']['10000090FAA0786B'][0])
|
||||
self.assertEqual(
|
||||
'2200000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723A'][1])
|
||||
info['data']['initiator_target_map']['10000090FAA0786A'][1])
|
||||
self.assertEqual(
|
||||
'2200000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723B'][1])
|
||||
info['data']['initiator_target_map']['10000090FAA0786B'][1])
|
||||
self.assertEqual(
|
||||
'2900000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723A'][2])
|
||||
info['data']['initiator_target_map']['10000090FAA0786A'][2])
|
||||
self.assertEqual(
|
||||
'2900000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723B'][2])
|
||||
info['data']['initiator_target_map']['10000090FAA0786B'][2])
|
||||
self.assertEqual(
|
||||
'2A00000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723A'][3])
|
||||
info['data']['initiator_target_map']['10000090FAA0786A'][3])
|
||||
self.assertEqual(
|
||||
'2A00000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723B'][3])
|
||||
info['data']['initiator_target_map']['10000090FAA0786B'][3])
|
||||
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
|
||||
patch_execute)
|
||||
@ -998,7 +998,7 @@ class ExportTest(volume_helper.MStorageDSVDriver, unittest.TestCase):
|
||||
patch_view_all)
|
||||
def test_fc_terminate_connection(self):
|
||||
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
|
||||
connector = {'wwpns': ["1000-0090-FAA0-723A", "1000-0090-FAA0-723B"]}
|
||||
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
|
||||
info = self._fc_terminate_connection(self.vol, connector)
|
||||
self.assertEqual('fibre_channel', info['driver_volume_type'])
|
||||
self.assertEqual('2100000991020012', info['data']['target_wwn'][0])
|
||||
@ -1007,28 +1007,28 @@ class ExportTest(volume_helper.MStorageDSVDriver, unittest.TestCase):
|
||||
self.assertEqual('2A00000991020012', info['data']['target_wwn'][3])
|
||||
self.assertEqual(
|
||||
'2100000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723A'][0])
|
||||
info['data']['initiator_target_map']['10000090FAA0786A'][0])
|
||||
self.assertEqual(
|
||||
'2100000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723B'][0])
|
||||
info['data']['initiator_target_map']['10000090FAA0786B'][0])
|
||||
self.assertEqual(
|
||||
'2200000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723A'][1])
|
||||
info['data']['initiator_target_map']['10000090FAA0786A'][1])
|
||||
self.assertEqual(
|
||||
'2200000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723B'][1])
|
||||
info['data']['initiator_target_map']['10000090FAA0786B'][1])
|
||||
self.assertEqual(
|
||||
'2900000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723A'][2])
|
||||
info['data']['initiator_target_map']['10000090FAA0786A'][2])
|
||||
self.assertEqual(
|
||||
'2900000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723B'][2])
|
||||
info['data']['initiator_target_map']['10000090FAA0786B'][2])
|
||||
self.assertEqual(
|
||||
'2A00000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723A'][3])
|
||||
info['data']['initiator_target_map']['10000090FAA0786A'][3])
|
||||
self.assertEqual(
|
||||
'2A00000991020012',
|
||||
info['data']['initiator_target_map']['1000-0090-FAA0-723B'][3])
|
||||
info['data']['initiator_target_map']['10000090FAA0786B'][3])
|
||||
|
||||
|
||||
class DeleteDSVVolume_test(volume_helper.MStorageDSVDriver,
|
||||
@ -1063,3 +1063,115 @@ class DeleteDSVVolume_test(volume_helper.MStorageDSVDriver,
|
||||
self._cli.query_BV_SV_status.return_value = 'snap/active'
|
||||
ret = self.delete_snapshot(self.vol)
|
||||
self.assertIsNone(ret)
|
||||
|
||||
|
||||
class NonDisruptiveBackup_test(volume_helper.MStorageDSVDriver,
|
||||
unittest.TestCase):
|
||||
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
|
||||
'get_conf_properties', patch_get_conf_properties)
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
|
||||
'view_all', patch_view_all)
|
||||
def setUp(self):
|
||||
self.do_setup(None)
|
||||
self.vol = DummyVolume()
|
||||
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
|
||||
self.volvolume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
|
||||
self.volsize = 10
|
||||
self.volstatus = None
|
||||
self.volmigration_status = None
|
||||
self._properties = self.get_conf_properties()
|
||||
self._cli = self._properties['cli']
|
||||
self.xml = self._cli.view_all()
|
||||
(self.pools,
|
||||
self.lds,
|
||||
self.ldsets,
|
||||
self.used_ldns,
|
||||
self.hostports,
|
||||
self.max_ld_count) = self.configs(self.xml)
|
||||
|
||||
def test_validate_ld_exist(self):
|
||||
ldname = self._validate_ld_exist(
|
||||
self.lds, self.vol.id, self._properties['ld_name_format'])
|
||||
self.assertEqual('LX:287RbQoP7VdwR1WsPC2fZT', ldname)
|
||||
self.vol.id = "00000000-0000-0000-0000-6b6d96553b4b"
|
||||
with self.assertRaisesRegexp(exception.NotFound,
|
||||
'Logical Disk `LX:XXXXXXXX`'
|
||||
' could not be found.'):
|
||||
self._validate_ld_exist(
|
||||
self.lds, self.vol.id, self._properties['ld_name_format'])
|
||||
|
||||
def test_validate_iscsildset_exist(self):
|
||||
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
|
||||
ldset = self._validate_iscsildset_exist(self.ldsets, connector)
|
||||
self.assertEqual('LX:OpenStack0', ldset['ldsetname'])
|
||||
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255XX"}
|
||||
with self.assertRaisesRegexp(exception.NotFound,
|
||||
'Appropriate Logical Disk Set'
|
||||
' could not be found.'):
|
||||
self._validate_iscsildset_exist(self.ldsets, connector)
|
||||
|
||||
def test_validate_fcldset_exist(self):
|
||||
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
|
||||
ldset = self._validate_fcldset_exist(self.ldsets, connector)
|
||||
self.assertEqual('LX:OpenStack1', ldset['ldsetname'])
|
||||
connector = {'wwpns': ["10000090FAA0786X", "10000090FAA0786Y"]}
|
||||
with self.assertRaisesRegexp(exception.NotFound,
|
||||
'Appropriate Logical Disk Set'
|
||||
' could not be found.'):
|
||||
self._validate_fcldset_exist(self.ldsets, connector)
|
||||
|
||||
def test_enumerate_iscsi_portals(self):
|
||||
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
|
||||
ldset = self._validate_iscsildset_exist(self.ldsets, connector)
|
||||
self.assertEqual('LX:OpenStack0', ldset['ldsetname'])
|
||||
portal = self._enumerate_iscsi_portals(self.hostports, ldset)
|
||||
self.assertEqual('192.168.1.90:3260', portal[0])
|
||||
self.assertEqual('192.168.1.91:3260', portal[1])
|
||||
self.assertEqual('192.168.2.92:3260', portal[2])
|
||||
self.assertEqual('192.168.2.93:3260', portal[3])
|
||||
|
||||
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
|
||||
'get_conf_properties', patch_get_conf_properties)
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
|
||||
'view_all', patch_view_all)
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
|
||||
patch_execute)
|
||||
def test_initialize_connection_snapshot(self):
|
||||
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
|
||||
loc = "127.0.0.1:3260:1 iqn.2010-10.org.openstack:volume-00000001 88"
|
||||
self.vol.provider_location = loc
|
||||
ret = self.iscsi_initialize_connection_snapshot(self.vol, connector)
|
||||
self.assertIsNotNone(ret)
|
||||
self.assertEqual('iscsi', ret['driver_volume_type'])
|
||||
|
||||
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
|
||||
ret = self.fc_initialize_connection_snapshot(self.vol, connector)
|
||||
self.assertIsNotNone(ret)
|
||||
self.assertEqual('fibre_channel', ret['driver_volume_type'])
|
||||
|
||||
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
|
||||
'get_conf_properties', patch_get_conf_properties)
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
|
||||
'view_all', patch_view_all)
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
|
||||
patch_execute)
|
||||
def test_terminate_connection_snapshot(self):
|
||||
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
|
||||
self.iscsi_terminate_connection_snapshot(self.vol, connector)
|
||||
|
||||
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
|
||||
ret = self.fc_terminate_connection_snapshot(self.vol, connector)
|
||||
self.assertEqual('fibre_channel', ret['driver_volume_type'])
|
||||
|
||||
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
|
||||
'get_conf_properties', patch_get_conf_properties)
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
|
||||
'view_all', patch_view_all)
|
||||
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
|
||||
patch_execute)
|
||||
def test_remove_export_snapshot(self):
|
||||
self.remove_export_snapshot(None, self.vol)
|
||||
|
||||
def test_backup_use_temp_snapshot(self):
|
||||
ret = self.backup_use_temp_snapshot()
|
||||
self.assertTrue(ret)
|
||||
|
@ -582,6 +582,35 @@ class MStorageISMCLI(object):
|
||||
if force_delete:
|
||||
self.unbind(ldname)
|
||||
|
||||
def lvbind(self, bvname, lvname, lvnumber):
|
||||
"""Link Volume create."""
|
||||
cmd = ('iSMcfg lvbind -bvname %(bvname)s '
|
||||
'-lvn %(lvnumber)d -lvname %(lvname)s'
|
||||
% {'bvname': bvname,
|
||||
'lvnumber': lvnumber,
|
||||
'lvname': lvname})
|
||||
self._execute(cmd)
|
||||
|
||||
def lvunbind(self, lvname):
|
||||
"""Link Volume delete."""
|
||||
cmd = ('iSMcfg lvunbind -ldname %(lvname)s'
|
||||
% {'lvname': lvname})
|
||||
self._execute(cmd)
|
||||
|
||||
def lvlink(self, svname, lvname):
|
||||
"""Link to snapshot volume."""
|
||||
cmd = ('iSMsc_link -lv %(lvname)s -lvflg ld '
|
||||
'-sv %(svname)s -svflg ld -lvacc ro'
|
||||
% {'lvname': lvname,
|
||||
'svname': svname})
|
||||
self._execute(cmd)
|
||||
|
||||
def lvunlink(self, lvname):
|
||||
"""Unlink from snapshot volume."""
|
||||
cmd = ('iSMsc_unlink -lv %(lvname)s -lvflg ld'
|
||||
% {'lvname': lvname})
|
||||
self._execute(cmd)
|
||||
|
||||
|
||||
class UnpairWait(object):
|
||||
error_updates = {'status': 'error',
|
||||
|
@ -47,6 +47,19 @@ class MStorageISCSIDriver(volume_helper.MStorageDSVDriver,
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
return self.iscsi_terminate_connection(volume, connector)
|
||||
|
||||
def create_export_snapshot(self, context, snapshot, connector):
|
||||
return self.iscsi_do_export_snapshot(context, snapshot, connector)
|
||||
|
||||
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
|
||||
return self.iscsi_initialize_connection_snapshot(snapshot,
|
||||
connector,
|
||||
**kwargs)
|
||||
|
||||
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
|
||||
return self.iscsi_terminate_connection_snapshot(snapshot,
|
||||
connector,
|
||||
**kwargs)
|
||||
|
||||
|
||||
@interface.volumedriver
|
||||
class MStorageFCDriver(volume_helper.MStorageDSVDriver,
|
||||
@ -74,3 +87,16 @@ class MStorageFCDriver(volume_helper.MStorageDSVDriver,
|
||||
@fczm_utils.remove_fc_zone
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
return self.fc_terminate_connection(volume, connector)
|
||||
|
||||
def create_export_snapshot(self, context, snapshot, connector):
|
||||
return self.fc_do_export_snapshot(context, snapshot, connector)
|
||||
|
||||
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
|
||||
return self.fc_initialize_connection_snapshot(snapshot,
|
||||
connector,
|
||||
**kwargs)
|
||||
|
||||
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
|
||||
return self.fc_terminate_connection_snapshot(snapshot,
|
||||
connector,
|
||||
**kwargs)
|
||||
|
@ -150,7 +150,7 @@ def convert_to_id(value62):
|
||||
class MStorageVolumeCommon(object):
|
||||
"""M-Series Storage volume common class."""
|
||||
|
||||
VERSION = '1.8.2'
|
||||
VERSION = '1.9.1'
|
||||
WIKI_NAME = 'NEC_Cinder_CI'
|
||||
|
||||
def do_setup(self, context):
|
||||
@ -876,7 +876,7 @@ class MStorageVolumeCommon(object):
|
||||
specs['upperreport'] = None
|
||||
LOG.debug('qos parameter not found.')
|
||||
else:
|
||||
if ('upperlimit' in specs) and (specs['upperlimit'] is not None):
|
||||
if 'upperlimit' in specs and specs['upperlimit'] is not None:
|
||||
if self.validates_number(specs['upperlimit']) is True:
|
||||
upper_limit = int(specs['upperlimit'], 10)
|
||||
if ((upper_limit != 0) and
|
||||
@ -889,7 +889,7 @@ class MStorageVolumeCommon(object):
|
||||
else:
|
||||
specs['upperlimit'] = None
|
||||
|
||||
if ('lowerlimit' in specs) and (specs['lowerlimit'] is not None):
|
||||
if 'lowerlimit' in specs and specs['lowerlimit'] is not None:
|
||||
if self.validates_number(specs['lowerlimit']) is True:
|
||||
lower_limit = int(specs['lowerlimit'], 10)
|
||||
if (lower_limit != 0 and (lower_limit < 10 or
|
||||
|
@ -85,7 +85,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
for pool in nominated_pools:
|
||||
nld = len(pool['ld_list'])
|
||||
if (nld < self._numofld_per_pool and
|
||||
((selected_pool == -1) or (min_ldn > nld))):
|
||||
(selected_pool == -1 or min_ldn > nld)):
|
||||
selected_pool = pool['pool_num']
|
||||
min_ldn = nld
|
||||
if selected_pool < 0:
|
||||
@ -135,7 +135,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
size = option * units.Gi
|
||||
pools = [pool for (pn, pool) in pools.items()
|
||||
if pool['free'] >= size and
|
||||
(pn in self._properties['pool_backup_pools'])]
|
||||
pn in self._properties['pool_backup_pools']]
|
||||
return self._return_poolnumber(pools)
|
||||
|
||||
def _select_volddr_poolnumber(self, volume, pools, xml, option):
|
||||
@ -143,7 +143,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
size = option * units.Gi
|
||||
pools = [pool for (pn, pool) in pools.items()
|
||||
if pool['free'] >= size and
|
||||
(pn in self._properties['pool_pools'])]
|
||||
pn in self._properties['pool_pools']]
|
||||
return self._return_poolnumber(pools)
|
||||
|
||||
def _bind_ld(self, volume, capacity, validator,
|
||||
@ -214,6 +214,78 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
'ldn': selected_ldn, 'pool': selected_pool})
|
||||
return ldname, selected_ldn, selected_pool
|
||||
|
||||
def _validate_ld_exist(self, lds, vol_id, name_format):
|
||||
ldname = self.get_ldname(vol_id, name_format)
|
||||
if ldname not in lds:
|
||||
msg = _('Logical Disk `%s` could not be found.') % ldname
|
||||
LOG.error(msg)
|
||||
raise exception.NotFound(msg)
|
||||
return ldname
|
||||
|
||||
def _validate_iscsildset_exist(self, ldsets, connector, metadata=None):
|
||||
ldset = self.get_ldset(ldsets, metadata)
|
||||
if ldset is None:
|
||||
for tldset in six.itervalues(ldsets):
|
||||
if 'initiator_list' not in tldset:
|
||||
continue
|
||||
n = tldset['initiator_list'].count(connector['initiator'])
|
||||
if n > 0:
|
||||
ldset = tldset
|
||||
break
|
||||
if ldset is None:
|
||||
msg = _('Appropriate Logical Disk Set could not be found.')
|
||||
raise exception.NotFound(msg)
|
||||
if len(ldset['portal_list']) < 1:
|
||||
msg = (_('Logical Disk Set `%s` has no portal.') %
|
||||
ldset['ldsetname'])
|
||||
raise exception.NotFound(msg)
|
||||
return ldset
|
||||
|
||||
def _validate_fcldset_exist(self, ldsets, connector, metadata=None):
|
||||
ldset = self.get_ldset(ldsets, metadata)
|
||||
if ldset is None:
|
||||
for conect in connector['wwpns']:
|
||||
length = len(conect)
|
||||
findwwpn = '-'.join([conect[i:i + 4]
|
||||
for i in range(0, length, 4)])
|
||||
findwwpn = findwwpn.upper()
|
||||
for tldset in six.itervalues(ldsets):
|
||||
if 'wwpn' in tldset and findwwpn in tldset['wwpn']:
|
||||
ldset = tldset
|
||||
break
|
||||
if ldset is not None:
|
||||
break
|
||||
if ldset is None:
|
||||
msg = _('Appropriate Logical Disk Set could not be found.')
|
||||
raise exception.NotFound(msg)
|
||||
return ldset
|
||||
|
||||
def _enumerate_iscsi_portals(self, hostports, ldset, prefered_director=0):
|
||||
nominated = []
|
||||
for director in [prefered_director, 1 - prefered_director]:
|
||||
if director not in hostports:
|
||||
continue
|
||||
dirportal = []
|
||||
for port in hostports[director]:
|
||||
if not port['protocol'].lower() == 'iscsi':
|
||||
continue
|
||||
for portal in ldset['portal_list']:
|
||||
if portal.startswith(port['ip'] + ':'):
|
||||
dirportal.append(portal)
|
||||
break
|
||||
if (self._properties['portal_number'] > 0 and
|
||||
len(dirportal) > self._properties['portal_number']):
|
||||
nominated.extend(random.sample(
|
||||
dirportal, self._properties['portal_number']))
|
||||
else:
|
||||
nominated.extend(dirportal)
|
||||
|
||||
if len(nominated) == 0:
|
||||
raise exception.NotFound(
|
||||
_('No portal matches to any host ports.'))
|
||||
|
||||
return nominated
|
||||
|
||||
def create_volume(self, volume):
|
||||
msgparm = ('Volume ID = %(id)s, Size = %(size)dGB'
|
||||
% {'id': volume.id, 'size': volume.size})
|
||||
@ -320,16 +392,9 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
|
||||
self.configs(xml))
|
||||
|
||||
ldname = self.get_ldname(volume.id,
|
||||
self._properties['ld_name_format'])
|
||||
|
||||
# get volume.
|
||||
if ldname not in lds:
|
||||
msg = (_('Logical Disk has unbound already '
|
||||
'(name=%(name)s, id=%(id)s).') %
|
||||
{'name': ldname, 'id': volume.id})
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
ldname = self._validate_ld_exist(
|
||||
lds, volume.id, self._properties['ld_name_format'])
|
||||
ld = lds[ldname]
|
||||
ldn = ld['ldn']
|
||||
|
||||
@ -455,8 +520,10 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
"""Validate source volume information."""
|
||||
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
|
||||
self.configs(xml))
|
||||
ldname = self.get_ldname(volume.id,
|
||||
self._properties['ld_name_format'])
|
||||
|
||||
# get ld object
|
||||
ldname = self._validate_ld_exist(
|
||||
lds, volume.id, self._properties['ld_name_format'])
|
||||
|
||||
# check volume status.
|
||||
if volume.status != 'available':
|
||||
@ -464,11 +531,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
# get ld object and check rpl attribute.
|
||||
if ldname not in lds:
|
||||
msg = _('Logical Disk `%s` does not exist.') % ldname
|
||||
LOG.error(msg)
|
||||
raise exception.NotFound(msg)
|
||||
# check rpl attribute.
|
||||
ld = lds[ldname]
|
||||
if ld['Purpose'] != '---':
|
||||
msg = (_('Specified Logical Disk %(ld)s '
|
||||
@ -574,6 +637,9 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
def check_for_export(self, context, volume_id):
|
||||
pass
|
||||
|
||||
def backup_use_temp_snapshot(self):
|
||||
return True
|
||||
|
||||
def iscsi_do_export(self, _ctx, volume, connector, ensure=False):
|
||||
msgparm = ('Volume ID = %(id)s, '
|
||||
'Initiator Name = %(initiator)s'
|
||||
@ -590,6 +656,9 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
{'msgparm': msgparm, 'exception': e})
|
||||
|
||||
def _iscsi_do_export(self, _ctx, volume, connector, ensure):
|
||||
LOG.debug('_iscsi_do_export'
|
||||
'(Volume ID = %(id)s, connector = %(connector)s) Start.',
|
||||
{'id': volume.id, 'connector': connector})
|
||||
while True:
|
||||
xml = self._cli.view_all(self._properties['ismview_path'])
|
||||
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
|
||||
@ -607,32 +676,21 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
LOG.debug('image to volume or volume to image:%s',
|
||||
volume.status)
|
||||
# migrate.
|
||||
elif (volume.migration_status is not None and
|
||||
elif (hasattr(volume, 'migration_status') and
|
||||
volume.migration_status is not None and
|
||||
self._properties['ldset_controller_node_name'] != ''):
|
||||
metadata['ldset'] = (
|
||||
self._properties['ldset_controller_node_name'])
|
||||
LOG.debug('migrate:%s', volume.migration_status)
|
||||
|
||||
ldset = self.get_ldset(ldsets, metadata)
|
||||
if ldset is None:
|
||||
for tldset in six.itervalues(ldsets):
|
||||
n = tldset['initiator_list'].count(connector['initiator'])
|
||||
if ('initiator_list' in tldset and n > 0):
|
||||
ldset = tldset
|
||||
LOG.debug('ldset=%s.', ldset)
|
||||
break
|
||||
if ldset is None:
|
||||
msg = _('Appropriate Logical Disk Set could not be found.')
|
||||
raise exception.NotFound(msg)
|
||||
ldset = self._validate_iscsildset_exist(
|
||||
ldsets, connector, metadata)
|
||||
|
||||
if len(ldset['portal_list']) < 1:
|
||||
msg = (_('Logical Disk Set `%s` has no portal.') %
|
||||
ldset['ldsetname'])
|
||||
raise exception.NotFound(msg)
|
||||
|
||||
LOG.debug('migration_status:%s', volume.migration_status)
|
||||
migstat = volume.migration_status
|
||||
if migstat is not None and 'target:' in migstat:
|
||||
if (hasattr(volume, 'migration_status') and
|
||||
volume.migration_status is not None and
|
||||
'target:' in volume.migration_status):
|
||||
LOG.debug('migration_status:%s', volume.migration_status)
|
||||
migstat = volume.migration_status
|
||||
index = migstat.find('target:')
|
||||
if index != -1:
|
||||
migstat = migstat[len('target:'):]
|
||||
@ -675,29 +733,8 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
|
||||
# enumerate portals for iscsi multipath.
|
||||
prefered_director = ld['pool_num'] % 2
|
||||
nominated = []
|
||||
for director in [prefered_director, 1 - prefered_director]:
|
||||
if director not in hostports:
|
||||
continue
|
||||
dirportal = []
|
||||
for port in hostports[director]:
|
||||
if not port['protocol'] == 'iSCSI':
|
||||
continue
|
||||
for portal in ldset['portal_list']:
|
||||
if portal.startswith(port['ip'] + ':'):
|
||||
dirportal.append(portal)
|
||||
break
|
||||
if ((self._properties['portal_number'] > 0) and
|
||||
(len(dirportal) > self._properties['portal_number'])):
|
||||
nominated.extend(random.sample(
|
||||
dirportal, self._properties['portal_number']))
|
||||
else:
|
||||
nominated.extend(dirportal)
|
||||
|
||||
if len(nominated) == 0:
|
||||
raise exception.NotFound(
|
||||
_('Any portal not match to any host ports.'))
|
||||
|
||||
nominated = self._enumerate_iscsi_portals(hostports, ldset,
|
||||
prefered_director)
|
||||
location = ('%(list)s,1 %(iqn)s %(lun)d'
|
||||
% {'list': ';'.join(nominated),
|
||||
'iqn': ldset['lds'][ld['ldn']]['iqn'],
|
||||
@ -725,6 +762,9 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
{'msgparm': msgparm, 'exception': e})
|
||||
|
||||
def _fc_do_export(self, _ctx, volume, connector, ensure):
|
||||
LOG.debug('_fc_do_export'
|
||||
'(Volume ID = %(id)s, connector = %(connector)s) Start.',
|
||||
{'id': volume.id, 'connector': connector})
|
||||
while True:
|
||||
xml = self._cli.view_all(self._properties['ismview_path'])
|
||||
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
|
||||
@ -742,30 +782,15 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
LOG.debug('image to volume or volume to image:%s',
|
||||
volume.status)
|
||||
# migrate.
|
||||
elif (volume.migration_status is not None and
|
||||
elif (hasattr(volume, 'migration_status') and
|
||||
volume.migration_status is not None and
|
||||
self._properties['ldset_controller_node_name'] != ''
|
||||
):
|
||||
metadata['ldset'] = (
|
||||
self._properties['ldset_controller_node_name'])
|
||||
LOG.debug('migrate:%s', volume.migration_status)
|
||||
|
||||
ldset = self.get_ldset(ldsets, metadata)
|
||||
if ldset is None:
|
||||
for conect in connector['wwpns']:
|
||||
length = len(conect)
|
||||
findwwpn = '-'.join([conect[i:i + 4]
|
||||
for i in range(0, length, 4)])
|
||||
findwwpn = findwwpn.upper()
|
||||
for tldset in six.itervalues(ldsets):
|
||||
if 'wwpn' in tldset and findwwpn in tldset['wwpn']:
|
||||
ldset = tldset
|
||||
LOG.debug('ldset=%s.', ldset)
|
||||
break
|
||||
if ldset is not None:
|
||||
break
|
||||
if ldset is None:
|
||||
msg = _('Logical Disk Set could not be found.')
|
||||
raise exception.NotFound(msg)
|
||||
ldset = self._validate_fcldset_exist(ldsets, connector, metadata)
|
||||
|
||||
# get free lun.
|
||||
luns = []
|
||||
@ -779,9 +804,11 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
break
|
||||
target_lun += 1
|
||||
|
||||
LOG.debug('migration_status:%s', volume.migration_status)
|
||||
migstat = volume.migration_status
|
||||
if migstat is not None and 'target:' in migstat:
|
||||
if (hasattr(volume, 'migration_status') and
|
||||
volume.migration_status is not None and
|
||||
'target:' in volume.migration_status):
|
||||
LOG.debug('migration_status:%s', volume.migration_status)
|
||||
migstat = volume.migration_status
|
||||
index = migstat.find('target:')
|
||||
if index != -1:
|
||||
migstat = migstat[len('target:'):]
|
||||
@ -828,6 +855,127 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
{'ensure': 'ensure_' if ensure else '',
|
||||
'ld': ldname})
|
||||
|
||||
def iscsi_do_export_snapshot(self, context, snapshot, connector):
|
||||
"""Exports the snapshot."""
|
||||
msgparm = 'Snapshot ID = %s' % snapshot.id
|
||||
try:
|
||||
ret = self._iscsi_do_export_snapshot(
|
||||
context, snapshot, connector,
|
||||
self._properties['diskarray_name'])
|
||||
LOG.info('Create Export Snapshot (%s)', msgparm)
|
||||
return ret
|
||||
except exception.CinderException as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.warning('Failed to Create Export Snapshot '
|
||||
'(%(msgparm)s) (%(exception)s)',
|
||||
{'msgparm': msgparm, 'exception': e})
|
||||
|
||||
@coordination.synchronized('mstorage_bind_execute_{diskarray_name}')
|
||||
def _iscsi_do_export_snapshot(self, context, snapshot, connector,
|
||||
diskarray_name):
|
||||
LOG.debug('_iscsi_do_export_snapshot(Snapshot ID = %s) Start.',
|
||||
snapshot.id)
|
||||
xml = self._cli.view_all(self._properties['ismview_path'])
|
||||
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
|
||||
self.configs(xml))
|
||||
|
||||
LOG.debug('validate data.')
|
||||
svname = self._validate_ld_exist(
|
||||
lds, snapshot.id, self._properties['ld_name_format'])
|
||||
bvname = self._validate_ld_exist(
|
||||
lds, snapshot.volume_id, self._properties['ld_name_format'])
|
||||
lvname = svname + '_l'
|
||||
ldset = self._validate_iscsildset_exist(ldsets, connector)
|
||||
svstatus = self._cli.query_BV_SV_status(bvname[3:], svname[3:])
|
||||
if svstatus != 'snap/active':
|
||||
msg = _('Logical Disk (%s) is invalid snapshot.') % svname
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
lvldn = self._select_ldnumber(used_ldns, max_ld_count)
|
||||
|
||||
LOG.debug('configure backend.')
|
||||
self._cli.lvbind(bvname, lvname[3:], lvldn)
|
||||
self._cli.lvlink(svname[3:], lvname[3:])
|
||||
self._cli.addldsetld(ldset['ldsetname'], lvname)
|
||||
|
||||
xml = self._cli.view_all(self._properties['ismview_path'])
|
||||
pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
|
||||
self.configs(xml))
|
||||
ld = lds[lvname]
|
||||
ldset = self._validate_iscsildset_exist(ldsets, connector)
|
||||
|
||||
LOG.debug('enumerate portals for iscsi multipath.')
|
||||
prefered_director = ld['pool_num'] % 2
|
||||
nominated = self._enumerate_iscsi_portals(hostports, ldset,
|
||||
prefered_director)
|
||||
location = ('%(list)s,1 %(iqn)s %(lun)d'
|
||||
% {'list': ';'.join(nominated),
|
||||
'iqn': ldset['lds'][ld['ldn']]['iqn'],
|
||||
'lun': ldset['lds'][ld['ldn']]['lun']})
|
||||
|
||||
LOG.debug('create_export_snapshot location:(%s)', location)
|
||||
return {'provider_location': location}
|
||||
|
||||
def fc_do_export_snapshot(self, context, snapshot, connector,
                          ensure=False):
    """Export a snapshot over FC, logging the outcome.

    Thin logging wrapper: delegates to ``_fc_do_export_snapshot`` (which
    is serialized per disk array) and logs success, or logs a warning
    and re-raises the original CinderException unchanged.

    :param context: operation context (passed through to the worker)
    :param snapshot: snapshot object to export
    :param connector: connector dict; 'wwpns' is used for logging
    :param ensure: passed through to the worker; selects the
                   "ensure_export" log wording there
    :returns: whatever the worker returns
    """
    # Fixed: the label previously said 'Volume ID' although this method
    # logs a snapshot id (copy-paste from the volume-path wrapper).
    msgparm = ('Snapshot ID = %(id)s, '
               'Initiator WWPNs = %(wwpns)s'
               % {'id': snapshot.id,
                  'wwpns': connector['wwpns']})
    try:
        ret = self._fc_do_export_snapshot(
            context, snapshot, connector, ensure,
            self._properties['diskarray_name'])
        LOG.info('Created FC Export snapshot(%s)', msgparm)
        return ret
    except exception.CinderException as e:
        with excutils.save_and_reraise_exception():
            LOG.warning('Failed to Create FC Export snapshot'
                        '(%(msgparm)s) (%(exception)s)',
                        {'msgparm': msgparm, 'exception': e})
@coordination.synchronized('mstorage_bind_execute_{diskarray_name}')
def _fc_do_export_snapshot(self, context, snapshot, connector, ensure,
                           diskarray_name):
    """Create an FC export for a snapshot.

    Binds a link volume (LV) over the base volume (BV), links it to the
    snapshot volume (SV), and adds the LV to the connector's FC LD set
    at the lowest free LUN.  Execution is serialized per disk array by
    the ``coordination.synchronized`` decorator; ``diskarray_name`` is
    used only as the lock key.

    :param context: operation context (unused in the body)
    :param snapshot: snapshot to export; ``snapshot.volume_id`` names
                     the backing base volume
    :param connector: FC connector dict
    :param ensure: True when called from an ensure-export path; only
                   affects the final debug message
    :param diskarray_name: lock key for per-array serialization
    :raises exception.VolumeBackendAPIException: if the SV is not an
        active snapshot ('snap/active') of the BV
    """
    LOG.debug('_fc_do_export_snapshot(Snapshot ID = %s) Start.',
              snapshot.id)
    # Read the current array configuration from the iSM view file.
    xml = self._cli.view_all(self._properties['ismview_path'])
    pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
        self.configs(xml))

    LOG.debug('validate data.')
    # SV = snapshot volume, BV = base volume; both must already exist.
    svname = self._validate_ld_exist(
        lds, snapshot.id, self._properties['ld_name_format'])
    bvname = self._validate_ld_exist(
        lds, snapshot.volume_id, self._properties['ld_name_format'])
    # The link volume name is derived from the SV name.
    lvname = svname + '_l'
    ldset = self._validate_fcldset_exist(ldsets, connector)
    # LD names carry a 3-character prefix (e.g. 'LX:'); the CLI takes
    # the bare name, hence the [3:] slices.
    svstatus = self._cli.query_BV_SV_status(bvname[3:], svname[3:])
    if svstatus != 'snap/active':
        msg = _('Logical Disk (%s) is invalid snapshot.') % svname
        raise exception.VolumeBackendAPIException(data=msg)
    # Pick a free logical-disk number for the new LV.
    lvldn = self._select_ldnumber(used_ldns, max_ld_count)

    LOG.debug('configure backend.')
    self._cli.lvbind(bvname, lvname[3:], lvldn)
    self._cli.lvlink(svname[3:], lvname[3:])

    # Find the smallest LUN not already used in this LD set: walk the
    # sorted in-use LUNs and stop at the first gap.
    luns = []
    ldsetlds = ldset['lds']
    for ld in six.itervalues(ldsetlds):
        luns.append(ld['lun'])
    target_lun = 0
    for lun in sorted(luns):
        if target_lun < lun:
            break
        target_lun += 1

    self._cli.addldsetld(ldset['ldsetname'], lvname, target_lun)
    LOG.debug('Add LD `%(ld)s` to LD Set `%(ldset)s`.',
              {'ld': lvname, 'ldset': ldset['ldsetname']})
    LOG.debug('%(ensure)sexport LD `%(ld)s`.',
              {'ensure': 'ensure_' if ensure else '',
               'ld': lvname})
def remove_export(self, context, volume):
|
||||
msgparm = 'Volume ID = %s' % volume.id
|
||||
try:
|
||||
@ -859,7 +1007,8 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
LOG.debug('image to volume or volume to image:%s',
|
||||
volume.status)
|
||||
# migrate.
|
||||
elif (volume.migration_status is not None and
|
||||
elif (hasattr(volume, 'migration_status') and
|
||||
volume.migration_status is not None and
|
||||
self._properties['ldset_controller_node_name'] != ''
|
||||
):
|
||||
metadata['ldset'] = (
|
||||
@ -868,9 +1017,11 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
|
||||
ldset = self.get_ldset(ldsets, metadata)
|
||||
|
||||
LOG.debug('migration_status:%s', volume.migration_status)
|
||||
migstat = volume.migration_status
|
||||
if migstat is not None and 'target:' in migstat:
|
||||
if (hasattr(volume, 'migration_status') and
|
||||
volume.migration_status is not None and
|
||||
'target:' in volume.migration_status):
|
||||
LOG.debug('migration_status:%s', volume.migration_status)
|
||||
migstat = volume.migration_status
|
||||
index = migstat.find('target:')
|
||||
if index != -1:
|
||||
migstat = migstat[len('target:'):]
|
||||
@ -928,6 +1079,69 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
|
||||
LOG.debug('_remove_export(Volume ID = %s) End.', volume.id)
|
||||
|
||||
def remove_export_snapshot(self, context, snapshot):
    """Removes an export for a snapshot.

    Delegates to the worker and logs the outcome; any CinderException
    raised by the worker is logged as a warning and re-raised unchanged.
    """
    detail = 'Snapshot ID = %s' % snapshot.id
    try:
        self._remove_export_snapshot(context, snapshot)
    except exception.CinderException as e:
        with excutils.save_and_reraise_exception():
            LOG.warning('Failed to Remove Export Snapshot'
                        '(%(msgparm)s) (%(exception)s)',
                        {'msgparm': detail, 'exception': e})
    else:
        LOG.info('Removed Export Snapshot(%s)', detail)
def _remove_export_snapshot(self, context, snapshot):
    """Tear down a snapshot export created by the do_export paths.

    Removes the link volume (LV) from every LD set that exposes it,
    then unlinks and unbinds the LV.  Each early-return branch covers a
    state where the export was already (partially) removed, so the
    operation is effectively idempotent.

    :param context: operation context (unused in the body)
    :param snapshot: snapshot whose export is to be removed
    :raises exception.VolumeBackendAPIException: if the LD-set removal
        CLI call reports failure
    """
    LOG.debug('_remove_export_snapshot(Snapshot ID = %s) Start.',
              snapshot.id)
    # Read the current array configuration from the iSM view file.
    xml = self._cli.view_all(self._properties['ismview_path'])
    pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
        self.configs(xml))

    LOG.debug('validate data.')
    svname = self._validate_ld_exist(
        lds, snapshot.id, self._properties['ld_name_format'])
    # The link volume name is derived from the snapshot volume name.
    lvname = svname + '_l'
    if lvname not in lds:
        LOG.debug('Logical Disk `%s` is already unexported.', lvname)
        return

    ld = lds[lvname]
    ldsetlist = []
    # Defensive: guard against a None entry in the parsed LD table.
    if ld is None:
        msg = _('Exported snapshot could not be found.')
        raise exception.VolumeBackendAPIException(data=msg)
    # Collect every LD set that currently exposes this LV.
    for tldset in six.itervalues(ldsets):
        if ld['ldn'] in tldset['lds']:
            ldsetlist.append(tldset)
    if not ldsetlist:
        LOG.debug('Specified Logical Disk is already removed.')
        return

    LOG.debug('configure backend.')
    # Fixed local name: was misspelled 'tagetldset'.
    for targetldset in ldsetlist:
        retnum, errnum = self._cli.delldsetld(targetldset['ldsetname'],
                                              lvname)
        if retnum is not True:
            msg = (_('Failed to remove export Logical Disk from '
                     'Logical Disk Set (%s)') % errnum)
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.debug('LD `%(ld)s` deleted from LD Set `%(ldset)s`.',
                  {'ld': lvname, 'ldset': targetldset['ldsetname']})

    # Best-effort cleanup: unlink/unbind failures are logged and
    # ignored so a partially removed export does not block the caller.
    try:
        self._cli.lvunlink(lvname[3:])
    except Exception:
        LOG.debug('LV unlink error.')

    try:
        self._cli.lvunbind(lvname)
    except Exception:
        LOG.debug('LV unbind error.')

    LOG.debug('_remove_export_snapshot(Snapshot ID = %s) End.',
              snapshot.id)
def iscsi_initialize_connection(self, volume, connector):
|
||||
msgparm = ('Volume ID = %(id)s, Connector = %(connector)s'
|
||||
% {'id': volume.id, 'connector': connector})
|
||||
@ -991,24 +1205,43 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
'info': info})
|
||||
return info
|
||||
|
||||
def iscsi_terminate_connection(self, volume, connector):
|
||||
msgparm = ('Volume ID = %(id)s, Connector = %(connector)s'
|
||||
% {'id': volume.id, 'connector': connector})
|
||||
def iscsi_initialize_connection_snapshot(self, snapshot, connector,
|
||||
**kwargs):
|
||||
"""Allow connection to connector and return connection info.
|
||||
|
||||
:param snapshot: The snapshot to be attached
|
||||
:param connector: Dictionary containing information about what
|
||||
is being connected to.
|
||||
:returns conn_info: A dictionary of connection information. This
|
||||
can optionally include a "initiator_updates"
|
||||
field.
|
||||
"""
|
||||
msgparm = ('Snapshot ID = %(id)s, Connector = %(connector)s'
|
||||
% {'id': snapshot.id, 'connector': connector})
|
||||
|
||||
try:
|
||||
ret = self._iscsi_terminate_connection(volume, connector)
|
||||
LOG.info('Terminated iSCSI Connection (%s)', msgparm)
|
||||
ret = self._iscsi_initialize_connection(snapshot, connector)
|
||||
LOG.info('Initialized iSCSI Connection snapshot(%s)', msgparm)
|
||||
return ret
|
||||
except exception.CinderException as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.warning('Failed to Terminate iSCSI Connection '
|
||||
LOG.warning('Failed to Initialize iSCSI Connection snapshot'
|
||||
'(%(msgparm)s) (%(exception)s)',
|
||||
{'msgparm': msgparm, 'exception': e})
|
||||
return ret
|
||||
|
||||
def _iscsi_terminate_connection(self, volume, connector):
|
||||
LOG.debug('execute _iscsi_terminate_connection'
|
||||
'(Volume ID = %(id)s, connector = %(connector)s).',
|
||||
{'id': volume.id, 'connector': connector})
|
||||
def iscsi_terminate_connection(self, volume, connector):
|
||||
msgparm = ('Volume ID = %(id)s, Connector = %(connector)s'
|
||||
% {'id': volume.id, 'connector': connector})
|
||||
LOG.info('Terminated iSCSI Connection (%s)', msgparm)
|
||||
|
||||
def iscsi_terminate_connection_snapshot(self, snapshot, connector,
                                        **kwargs):
    """Disallow connection from connector.

    Removes the snapshot's export (which detaches the link volume from
    the LD set) and logs the termination.
    """
    # Fixed: the label previously said 'Volume ID' although this method
    # formats a snapshot id (copy-paste from the volume-path wrapper).
    msgparm = ('Snapshot ID = %(id)s, Connector = %(connector)s'
               % {'id': snapshot.id, 'connector': connector})
    self.remove_export_snapshot(None, snapshot)
    LOG.info('Terminated iSCSI Connection snapshot(%s)', msgparm)
def fc_initialize_connection(self, volume, connector):
|
||||
msgparm = ('Volume ID = %(id)s, Connector = %(connector)s'
|
||||
@ -1068,14 +1301,16 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
fc_ports = []
|
||||
for director, hostport in hostports.items():
|
||||
for port in hostport:
|
||||
if port['protocol'] == 'FC':
|
||||
if port['protocol'].lower() == 'fc':
|
||||
fc_ports.append(port)
|
||||
target_wwns, init_targ_map = (
|
||||
self._build_initiator_target_map(connector, fc_ports))
|
||||
|
||||
LOG.debug('migration_status:%s', volume.migration_status)
|
||||
migstat = volume.migration_status
|
||||
if migstat is not None and 'target:' in migstat:
|
||||
if (hasattr(volume, 'migration_status') and
|
||||
volume.migration_status is not None and
|
||||
'target:' in volume.migration_status):
|
||||
LOG.debug('migration_status:%s', volume.migration_status)
|
||||
migstat = volume.migration_status
|
||||
index = migstat.find('target:')
|
||||
if index != -1:
|
||||
migstat = migstat[len('target:'):]
|
||||
@ -1094,7 +1329,11 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
{'ld': ldname, 'id': volume.id})
|
||||
LOG.error(msg)
|
||||
raise exception.NotFound(msg)
|
||||
ldn = lds[ldname]['ldn']
|
||||
lvname = ldname + '_l'
|
||||
if lvname in lds:
|
||||
ldn = lds[lvname]['ldn']
|
||||
else:
|
||||
ldn = lds[ldname]['ldn']
|
||||
|
||||
lun = None
|
||||
for ldset in six.itervalues(ldsets):
|
||||
@ -1116,6 +1355,20 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
'info': info})
|
||||
return info
|
||||
|
||||
def fc_initialize_connection_snapshot(self, snapshot, connector):
    """Allow an FC connection to a snapshot and return connection info.

    Thin logging wrapper: delegates to ``_fc_initialize_connection``
    with the snapshot standing in for the volume, and logs success or
    logs a warning and re-raises the original CinderException.
    """
    # Fixed: the label previously said 'Volume ID' although this method
    # formats a snapshot id (copy-paste from the volume-path wrapper).
    msgparm = ('Snapshot ID = %(id)s, Connector = %(connector)s'
               % {'id': snapshot.id, 'connector': connector})

    try:
        ret = self._fc_initialize_connection(snapshot, connector)
        LOG.info('Initialized FC Connection snapshot(%s)', msgparm)
        return ret
    except exception.CinderException as e:
        with excutils.save_and_reraise_exception():
            LOG.warning('Failed to Initialize FC Connection snapshot'
                        '(%(msgparm)s) (%(exception)s)',
                        {'msgparm': msgparm, 'exception': e})
def fc_terminate_connection(self, volume, connector):
|
||||
msgparm = ('Volume ID = %(id)s, Connector = %(connector)s'
|
||||
% {'id': volume.id, 'connector': connector})
|
||||
@ -1144,7 +1397,7 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
fc_ports = []
|
||||
for director, hostport in hostports.items():
|
||||
for port in hostport:
|
||||
if port['protocol'] == 'FC':
|
||||
if port['protocol'].lower() == 'fc':
|
||||
fc_ports.append(port)
|
||||
target_wwns, init_targ_map = (
|
||||
self._build_initiator_target_map(connector, fc_ports))
|
||||
@ -1160,6 +1413,20 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
|
||||
'info': info})
|
||||
return info
|
||||
|
||||
def fc_terminate_connection_snapshot(self, snapshot, connector, **kwargs):
    """Terminate an FC connection to a snapshot and remove its export.

    Delegates to ``_fc_terminate_connection``, then removes the
    snapshot export; any CinderException is logged as a warning and
    re-raised unchanged.
    """
    # Fixed: the label previously said 'Volume ID' although this method
    # formats a snapshot id (copy-paste from the volume-path wrapper).
    msgparm = ('Snapshot ID = %(id)s, Connector = %(connector)s'
               % {'id': snapshot.id, 'connector': connector})
    try:
        ret = self._fc_terminate_connection(snapshot, connector)
        LOG.info('Terminated FC Connection snapshot(%s)', msgparm)
        self.remove_export_snapshot(None, snapshot)
        return ret
    except exception.CinderException as e:
        with excutils.save_and_reraise_exception():
            LOG.warning('Failed to Terminate FC Connection snapshot'
                        '(%(msgparm)s) (%(exception)s)',
                        {'msgparm': msgparm, 'exception': e})
def _build_initiator_target_map(self, connector, fc_ports):
|
||||
target_wwns = []
|
||||
for port in fc_ports:
|
||||
@ -1344,12 +1611,8 @@ class MStorageDSVDriver(MStorageDriver):
|
||||
raise exception.ParameterNotFound(param='backup_pools')
|
||||
|
||||
# get BV name.
|
||||
ldname = self.get_ldname(snapshot.volume_id,
|
||||
self._properties['ld_name_format'])
|
||||
if ldname not in lds:
|
||||
msg = _('Logical Disk `%s` has unbound already.') % ldname
|
||||
LOG.error(msg)
|
||||
raise exception.NotFound(msg)
|
||||
ldname = self._validate_ld_exist(
|
||||
lds, snapshot.volume_id, self._properties['ld_name_format'])
|
||||
|
||||
selected_pool = self._select_dsv_poolnumber(snapshot, pools, None)
|
||||
snapshotname = self._convert_id2snapname(snapshot)
|
||||
|
@ -0,0 +1,4 @@
|
||||
---
features:
  - Enable backup snapshot optimal path by implementing attach
    and detach snapshot in the NEC driver.
|
Loading…
x
Reference in New Issue
Block a user