Drivers: remove unused code
Remove code from drivers that is unused.

Change-Id: I53641e20da74fec15f3a3235dbb179d2a4a6c647
@@ -13666,9 +13666,7 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
         self.driver.delete_group(self.ctxt, group, vols)
 
-    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
-                       'switch_rccg')
-    def test_storwize_failover_group_without_action(self, switchrccg):
+    def test_storwize_failover_group_without_action(self):
         self.driver.configuration.set_override('replication_device',
                                                [self.rep_target])
         self.driver.do_setup(self.ctxt)
@@ -13684,14 +13682,12 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
         self.assertEqual(
             {'replication_status': fields.ReplicationStatus.FAILED_OVER},
             model_update)
-        self.assertFalse(switchrccg.called)
 
         self.sim._rcconsistgrp_list[rccg_name]['primary'] = 'master'
         model_update = self.driver._rep_grp_failback(self.ctxt, group)
         self.assertEqual(
             {'replication_status': fields.ReplicationStatus.ENABLED},
             model_update)
-        self.assertFalse(switchrccg.called)
 
         self.driver.delete_group(self.ctxt, group, [])
 
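A note on the test-signature changes above and below: stacked @mock.patch decorators are applied bottom-up, so the bottom-most decorator supplies the first mock argument after self. Removing the switch_rccg patch (and, in the SolidFire test below, the _get_remote_info_by_id patch) therefore also removes the first mock parameter from the test function. A minimal sketch of the rule, with hypothetical names:

    from unittest import mock


    class Calc(object):
        def add(self, a, b):
            return a + b

        def sub(self, a, b):
            return a - b


    @mock.patch.object(Calc, 'add')   # applied second -> second mock argument
    @mock.patch.object(Calc, 'sub')   # bottom decorator -> first mock argument
    def check(mock_sub, mock_add):
        # Both attributes are replaced with MagicMocks while check() runs.
        assert isinstance(mock_sub, mock.MagicMock)
        assert isinstance(mock_add, mock.MagicMock)


    check()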
@@ -3320,16 +3320,13 @@ class SolidFireVolumeTestCase(test.TestCase):
     @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
     @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs')
     @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status')
     @mock.patch.object(solidfire.SolidFireDriver, '_get_cluster_info')
     @mock.patch.object(solidfire.SolidFireDriver, '_map_sf_volumes')
     @mock.patch.object(solidfire.SolidFireDriver, '_failover_volume')
     @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account')
-    @mock.patch.object(solidfire.SolidFireDriver, '_get_remote_info_by_id')
-    def test_failover(self, mock_get_remote_info_by_id,
+    def test_failover(self,
                       mock_get_create_account,
                       mock_failover_volume,
                       mock_map_sf_volumes,
                       mock_get_cluster_info,
                       mock_update_cluster_status,
                       mock_set_cluster_pairs,
                       mock_create_cluster_reference,
@@ -672,7 +672,6 @@ class LinstorBaseDriverTestCase(test.TestCase):
         m_rd_delete.assert_called_once()
 
     @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list')
-    @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_set_sp')
     @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend')
     @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create')
     @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_resource_restore')
@@ -688,7 +687,6 @@ class LinstorBaseDriverTestCase(test.TestCase):
                                           m_snap_rsc_restore,
                                           m_rsc_create,
                                           m_vol_extend,
-                                          m_vol_dfn,
                                           m_sp_list):
         m_rsc_dfn_create.return_value = True
         m_api_reply.return_value = True
@@ -698,7 +696,6 @@ class LinstorBaseDriverTestCase(test.TestCase):
         m_snap_rsc_restore.return_value = True
         m_rsc_create.return_value = True
         m_vol_extend.return_value = True
-        m_vol_dfn.return_value = True
         m_sp_list.return_value = (
             self._fake_driver.fake_api_storage_pool_list())
 
@@ -707,7 +704,6 @@ class LinstorBaseDriverTestCase(test.TestCase):
             CINDER_VOLUME, SNAPSHOT))
 
     @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list')
-    @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_set_sp')
     @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend')
     @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create')
     @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_resource_restore')
@@ -723,7 +719,6 @@ class LinstorBaseDriverTestCase(test.TestCase):
                                           m_snap_rsc_restore,
                                           m_rsc_create,
                                           m_vol_extend,
-                                          m_vol_dfn,
                                           m_sp_list):
         m_rsc_dfn_create.return_value = True
         m_api_reply.return_value = True
@@ -733,7 +728,6 @@ class LinstorBaseDriverTestCase(test.TestCase):
         m_snap_rsc_restore.return_value = False
         m_rsc_create.return_value = True
         m_vol_extend.return_value = True
-        m_vol_dfn.return_value = True
         m_sp_list.return_value = (
             self._fake_driver.fake_api_storage_pool_list())
 
@@ -744,7 +738,6 @@ class LinstorBaseDriverTestCase(test.TestCase):
 
     @mock.patch(DRIVER + 'LinstorBaseDriver.delete_volume')
     @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list')
-    @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_set_sp')
     @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend')
     @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create')
     @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_resource_restore')
@@ -760,7 +753,6 @@ class LinstorBaseDriverTestCase(test.TestCase):
                                           m_snap_rsc_restore,
                                           m_rsc_create,
                                           m_vol_extend,
-                                          m_vol_dfn,
                                           m_sp_list,
                                           m_delete_volume):
         m_rsc_dfn_create.return_value = True
@@ -771,7 +763,6 @@ class LinstorBaseDriverTestCase(test.TestCase):
         m_snap_rsc_restore.return_value = True
         m_rsc_create.return_value = True
         m_vol_extend.return_value = True
-        m_vol_dfn.return_value = True
         m_sp_list.return_value = (
             self._fake_driver.fake_api_storage_pool_list())
         m_delete_volume.return_value = True
@@ -33,7 +33,6 @@ from cinder.volume import driver
 from cinder.volume.drivers import qnap
 
 FAKE_LUNNAA = {'LUNNAA': 'fakeLunNaa'}
-FAKE_SNAPSHOT = {'snapshot_id': 'fakeSnapshotId'}
 
 FAKE_USER = 'admin'
 FAKE_PASSWORD = 'qnapadmin'
@@ -472,23 +471,6 @@ FAKE_RES_DETAIL_DATA_CREATE_TARGET_FAIL = """
     <result><![CDATA[-1]]></result>
     </QDocRoot>"""
 
-FAKE_RES_DETAIL_DATA_GETHOSTIDLISTBYINITIQN = """
-    <QDocRoot version="1.0">
-    <authPassed><![CDATA[1]]></authPassed>
-    <content>
-        <host_list total="4">
-            <host>
-                <index><![CDATA[fakeIndex]]></index>
-                <hostid><![CDATA[fakeHostId]]></hostid>
-                <name><![CDATA[fakeHostName]]></name>
-                <iqns>
-                    <iqn><![CDATA[fakeIqn]]></iqn>
-                </iqns>
-            </host>
-        </host_list>
-    </content>
-    </QDocRoot>"""
-
 FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING = """
     <QDocRoot version="1.0">
     <authPassed><![CDATA[1]]></authPassed>
@@ -737,20 +719,6 @@ class VolumeClass(object):
         self.display_name = value
 
 
-class HostClass(object):
-    """Host Class."""
-
-    def __init__(self, host):
-        """Init."""
-        self.host = host
-
-    def __getitem__(self, arg):
-        """Getitem."""
-        return {
-            'host': 'fakeHost',
-        }[arg]
-
-
 class FakeLoginResponse(object):
     """Fake login response."""
 
@@ -1004,21 +972,6 @@ class FakeCreateTargetFailResponse(object):
         return FAKE_RES_DETAIL_DATA_CREATE_TARGET_FAIL
 
 
-class FakeGetIscsiPortalInfoResponse(object):
-    """Fake get iscsi portal inforesponse."""
-
-    status_code = 'fackStatus'
-
-    @property
-    def text(self):
-        """Mock response.text."""
-        return FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO
-
-    def __repr__(self):
-        """Repr."""
-        return six.StringIO(FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO)
-
-
 class FakeCreateSnapshotResponse(object):
     """Fake Create snapshot inforesponse."""
 
@@ -237,11 +237,6 @@ class RBDISCSIDriver(rbd.RBDDriver):
                   {'pool': self.configuration.rbd_pool,
                    'volume_name': volume_name})
 
-    def get_existing_disks(self):
-        """Get the existing list of registered volumes on the gateway."""
-        resp, disks = self.client.get_disks()
-        return disks['disks']
-
     @volume_utils.trace
     def create_disk(self, volume_name):
         """Register the volume with the iscsi gateways.
@@ -108,10 +108,6 @@ class NimbleAPIException(exception.VolumeBackendAPIException):
     message = _("Unexpected response from Nimble API")
 
 
-class NimbleVolumeBusyException(exception.VolumeIsBusy):
-    message = _("Nimble Cinder Driver: Volume Busy")
-
-
 class NimbleBaseVolumeDriver(san.SanDriver):
     """OpenStack driver to enable Nimble Controller.
 
@@ -1269,9 +1265,6 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver):
 
         return target_wwpns
 
-    def _convert_string_to_colon_separated_wwnn(self, wwnn):
-        return ':'.join(a + b for a, b in zip(wwnn[::2], wwnn[1::2]))
-
 
 def _connection_checker(func):
     """Decorator to re-establish and re-run the api if session has expired."""
@@ -1349,15 +1342,6 @@ class NimbleRestAPIExecutor(object):
                                        "Folder: %s") % folder_name)
         return r.json()['data'][0]['id']
 
-    def get_folder_info(self, folder_name):
-        folder_id = self.get_folder_id(folder_name)
-        api = "folders/" + six.text_type(folder_id)
-        r = self.get(api)
-        if not r.json()['data']:
-            raise NimbleAPIException(_("Unable to retrieve Folder info for: "
-                                       "%s") % folder_id)
-        return r.json()['data']
-
     def get_performance_policy_id(self, perf_policy_name):
         api = 'performance_policies/'
         filter = {'name': perf_policy_name}
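For reference, the removed Nimble helper formats a WWNN by pairing adjacent characters via zip over the even and odd slices of the string. A standalone sketch of the same technique (hypothetical function name and sample value, not driver code):

    def to_colon_separated(wwnn):
        # Pair characters (0,1), (2,3), ... and join the pairs with ':'.
        return ':'.join(a + b for a, b in zip(wwnn[::2], wwnn[1::2]))


    print(to_colon_separated('20000025b5000001'))  # 20:00:00:25:b5:00:00:01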
@@ -2040,15 +2040,6 @@ class StorwizeHelpers(object):
         attrs = self.get_vdisk_attributes(vdisk_name)
         return attrs is not None
 
-    def find_vdisk_copy_id(self, vdisk, pool):
-        resp = self.ssh.lsvdiskcopy(vdisk)
-        for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
-            if mdisk_grp == pool:
-                return copy_id
-        msg = _('Failed to find a vdisk copy in the expected pool.')
-        LOG.error(msg)
-        raise exception.VolumeDriverException(message=msg)
-
     def get_vdisk_copy_attrs(self, vdisk, copy_id):
         return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]
 
@@ -2698,9 +2689,6 @@ class StorwizeHelpers(object):
     def stop_rccg(self, rccg, access=False):
         self.ssh.stoprcconsistgrp(rccg, access)
 
-    def switch_rccg(self, rccg, aux=True):
-        self.ssh.switchrcconsistgrp(rccg, aux)
-
     def get_rccg_info(self, volume_name):
         vol_attrs = self.get_vdisk_attributes(volume_name)
         if not vol_attrs or not vol_attrs['RC_name']:
@@ -3006,9 +2994,6 @@ class StorwizeHelpers(object):
     def rename_vdisk(self, vdisk, new_name):
         self.ssh.chvdisk(vdisk, ['-name', new_name])
 
-    def change_vdisk_primary_copy(self, vdisk, copy_id):
-        self.ssh.chvdisk(vdisk, ['-primary', copy_id])
-
     def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
         self.ssh.migratevdisk(vdisk, dest_pool, copy_id)
 
@@ -1921,7 +1921,6 @@ class InfortrendCommon(object):
                    'channel_id': channel_id, 'controller': slot_name}
             LOG.error(msg)
             raise exception.VolumeDriverException(message=msg)
-        return
 
     def _get_wwpn_list(self):
         rc, wwn_list = self._execute('ShowWWN')
@@ -2372,15 +2372,6 @@ class InStorageAssistant(object):
         attrs = self.ssh.lsvdisk(vdisk)
         return attrs
 
-    def find_vdisk_copy_id(self, vdisk, pool):
-        resp = self.ssh.lsvdiskcopy(vdisk)
-        for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
-            if mdisk_grp == pool:
-                return copy_id
-        msg = _('Failed to find a vdisk copy in the expected pool.')
-        LOG.error(msg)
-        raise exception.VolumeDriverException(message=msg)
-
     def get_vdisk_copy_attrs(self, vdisk, copy_id):
         return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]
 
@@ -2535,9 +2526,6 @@ class InStorageAssistant(object):
     def delete_lc_consistgrp(self, lc_consistgrp):
         self.ssh.rmlcconsistgrp(lc_consistgrp)
 
-    def stop_lc_consistgrp(self, lc_consistgrp):
-        self.ssh.stoplcconsistgrp(lc_consistgrp)
-
     def run_consistgrp_snapshots(self, lc_consistgrp, snapshots, state,
                                  config, timeout):
         model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
@@ -3057,9 +3045,6 @@ class InStorageAssistant(object):
     def rename_vdisk(self, vdisk, new_name):
         self.ssh.chvdisk(vdisk, ['-name', new_name])
 
-    def change_vdisk_primary_copy(self, vdisk, copy_id):
-        self.ssh.chvdisk(vdisk, ['-primary', copy_id])
-
 
 class InStorageSSH(object):
     """SSH interface to Inspur InStorage systems."""
@@ -3470,10 +3455,6 @@ class InStorageSSH(object):
         ssh_cmd = ['mcsop', 'startlcconsistgrp', lc_consist_group]
         self.run_ssh_assert_no_output(ssh_cmd)
 
-    def stoplcconsistgrp(self, lc_consist_group):
-        ssh_cmd = ['mcsop', 'stoplcconsistgrp', lc_consist_group]
-        self.run_ssh_assert_no_output(ssh_cmd)
-
     def chlcmap(self, lc_map_id, copyrate='50', autodel='on'):
         ssh_cmd = ['mcsop', 'chlcmap', '-copyrate', copyrate,
                    '-autodelete', autodel, lc_map_id]
@@ -287,19 +287,6 @@ class LinstorBaseDriver(driver.VolumeDriver):
                 size=size)
             return vol_dfn_reply
 
-    def _api_volume_dfn_set_sp(self, rsc_target_name):
-        with lin_drv(self.default_uri) as lin:
-            if not lin.connected:
-                lin.connect()
-
-            snap_reply = lin.volume_dfn_modify(
-                rsc_name=rsc_target_name,
-                volume_nr=0,
-                set_properties={
-                    'StorPoolName': self.default_pool
-                })
-            return snap_reply
-
     def _api_rsc_create(self, rsc_name, node_name, diskless=False):
         with lin_drv(self.default_uri) as lin:
             if not lin.connected:
@@ -456,7 +443,6 @@ class LinstorBaseDriver(driver.VolumeDriver):
         # Separate the diskless nodes
         sp_diskless_list = []
         sp_list = []
-        node_count = 0
 
         if sp_list_reply:
             for node in sp_list_reply:
@@ -491,7 +477,6 @@ class LinstorBaseDriver(driver.VolumeDriver):
                         sp_diskless_list.append(sp_node)
                     else:
                         sp_list.append(sp_node)
-                    node_count += 1
 
         # Add the diskless nodes to the end of the list
         if sp_diskless_list:
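The surviving context in the last two hunks shows the pattern the driver keeps: collect diskless storage-pool nodes into a separate list, then append them after the disk-backed ones so backed pools are considered first. A generic sketch of that partition-then-concatenate pattern (hypothetical data, not driver code):

    nodes = [
        {'name': 'node-a', 'driver': 'LVM'},
        {'name': 'node-b', 'driver': 'DISKLESS'},
        {'name': 'node-c', 'driver': 'LVM'},
    ]

    diskless = [n for n in nodes if n['driver'] == 'DISKLESS']
    backed = [n for n in nodes if n['driver'] != 'DISKLESS']

    # Diskless nodes go to the end so disk-backed pools are preferred first.
    ordered = backed + diskless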
@@ -229,11 +229,6 @@ class Client(object):
         }
         return self.send_request(method='put', url='/lun', data=data)
 
-    def localclone_exists(self, lun):
-        """Whether localclone lun exists."""
-        return self.send_request(method='get', url='/local_clone',
-                                 data={'attr': 'existence', 'lun': lun})
-
     def localclone_completed(self, lun):
         """Whether localclone lun completed."""
         return self.send_request(method='get', url='/local_clone',
@@ -522,23 +517,6 @@ class Client(object):
         }
         return self.send_request(method='get', url='/copy_volume', data=data)
 
-    def copy_volume_from_volume(self, lun_name, src_lun_name):
-        """Copy volume from volume."""
-        data = {
-            'attr': 'from_volume',
-            'lun_name': lun_name,
-            'src_lun_name': src_lun_name
-        }
-        return self.send_request(method='post', url='/copy_volume', data=data)
-
-    def query_bcopy_task(self, task_id):
-        """Query bcopy task."""
-        data = {
-            'attr': 'bcopy_task',
-            'task_id': task_id
-        }
-        return self.send_request(method='get', url='/copy_volume', data=data)
-
     def get_it_unused_id_list(self, it_type, initr_wwn, tgt_port_name):
         data = {
             'attr': 'it_unused_id_list',
@@ -473,58 +473,6 @@ class MStorageISMCLI(object):
         unpairProc = unpairWait(volume_properties, self)
         unpairProc.run()
 
-    def check_ld_existed_rplstatus(self, lds, ldname, snapshot, flag):
-
-        if ldname not in lds:
-            if flag == 'backup':
-                LOG.debug('Volume Id not found. '
-                          'LD name = %(name)s volume_id = %(id)s.',
-                          {'name': ldname, 'id': snapshot.volume_id})
-                raise exception.NotFound(_('Logical Disk does not exist.'))
-            elif flag == 'restore':
-                LOG.debug('Snapshot Id not found. '
-                          'LD name = %(name)s snapshot_id = %(id)s.',
-                          {'name': ldname, 'id': snapshot.id})
-                raise exception.NotFound(_('Logical Disk does not exist.'))
-            elif flag == 'delete':
-                LOG.debug('LD `%(name)s` already unbound? '
-                          'snapshot_id = %(id)s.',
-                          {'name': ldname, 'id': snapshot.id})
-                return None
-            else:
-                LOG.debug('check_ld_existed_rplstatus flag error flag = %s.',
-                          flag)
-                raise exception.NotFound(_('Logical Disk does not exist.'))
-
-        ld = lds[ldname]
-
-        if ld['RPL Attribute'] == 'IV':
-            pass
-        elif ld['RPL Attribute'] == 'MV':
-            query_status = self.query_MV_RV_status(ldname[3:], 'MV')
-            LOG.debug('query_status : %s.', query_status)
-            if(query_status == 'separated'):
-                # unpair.
-                rvname = self.query_MV_RV_name(ldname[3:], 'MV')
-                self.unpair(ldname[3:], rvname, 'force')
-            else:
-                msg = _('Specified Logical Disk %s has been copied.') % ldname
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-
-        elif ld['RPL Attribute'] == 'RV':
-            query_status = self.query_MV_RV_status(ldname[3:], 'RV')
-            if query_status == 'separated':
-                # unpair.
-                mvname = self.query_MV_RV_name(ldname[3:], 'RV')
-                self.unpair(mvname, ldname[3:], 'force')
-            else:
-                msg = _('Specified Logical Disk %s has been copied.') % ldname
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-
-        return ld
-
     def get_pair_lds(self, ldname, lds):
         query_status = self.query_MV_RV_name(ldname[3:], 'MV')
         query_status = query_status.split('\n')
@@ -111,18 +111,6 @@ mstorage_opts = [
 FLAGS.register_opts(mstorage_opts, group=configuration.SHARED_CONF_GROUP)
 
 
-def convert_to_name(uuid):
-    alnum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
-    num = int(uuid.replace(("-"), ""), 16)
-
-    convertname = ""
-    while num != 0:
-        convertname = alnum[num % len(alnum)] + convertname
-        num = num - num % len(alnum)
-        num = num // len(alnum)
-    return convertname
-
-
 def convert_to_id(value62):
     alnum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
     length = len(value62)
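The removed convert_to_name is a base-62 encoder whose inverse, convert_to_id, is kept. A sketch of the round trip under the same 62-character alphabet (an assumed equivalent simplification, not the driver's exact code):

    ALNUM = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"


    def encode62(num):
        # Repeatedly take the least-significant base-62 digit.
        out = ""
        while num:
            num, rem = divmod(num, len(ALNUM))
            out = ALNUM[rem] + out
        return out


    def decode62(text):
        # Horner's method: fold digits back into an integer.
        num = 0
        for ch in text:
            num = num * len(ALNUM) + ALNUM.index(ch)
        return num


    assert decode62(encode62(12345678901234567890)) == 12345678901234567890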
@@ -342,11 +342,6 @@ class NaElement(object):
         """Add the attribute to the element."""
         self._element.set(name, value)
 
-    def add_attrs(self, **attrs):
-        """Add multiple attributes to the element."""
-        for attr in attrs:
-            self._element.set(attr, attrs.get(attr))
-
     def add_child_elem(self, na_element):
         """Add the child element to the element."""
         if isinstance(na_element, NaElement):
@@ -125,8 +125,3 @@ def parse_nms_url(url):
 def get_migrate_snapshot_name(volume):
     """Return name for snapshot that will be used to migrate the volume."""
     return 'cinder-migrate-snapshot-%(id)s' % volume
-
-
-def ex2err(ex):
-    """Convert a Cinder Exception to a Nexenta Error."""
-    return ex.msg
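The kept get_migrate_snapshot_name relies on %-formatting with a mapping: '%(id)s' % volume looks up volume['id'] on any object that supports item access. A minimal sketch with a plain dict and a made-up id:

    volume = {'id': 'c4a8f7f0-0000-0000-0000-000000000000'}
    name = 'cinder-migrate-snapshot-%(id)s' % volume
    # -> 'cinder-migrate-snapshot-c4a8f7f0-0000-0000-0000-000000000000'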
@@ -106,22 +106,6 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
             lstargetWwpns = []
         return lstargetWwpns
 
-    def _is_initiator_wwpn_active(self, targetWwpn, initiatorWwpn):
-        fActive = False
-        output = None
-        try:
-            retCode, output = self.dpl.get_sns_table(targetWwpn)
-            if retCode == 0 and output:
-                for fdwwpn, fcport in output.get('metadata',
-                                                 {}).get('sns_table',
-                                                         []):
-                    if fdwwpn == initiatorWwpn:
-                        fActive = True
-                        break
-        except Exception:
-            LOG.error('Failed to get sns table')
-        return fActive
-
     def _convertHex2String(self, wwpns):
         szwwpns = ''
         if len(str(wwpns)) == 16:
@@ -612,14 +612,6 @@ class DPLVolume(object):
                              [http_client.OK, http_client.ACCEPTED,
                               http_client.CREATED])
 
-    def get_vg_list(self, vgtype=None):
-        method = 'GET'
-        if vgtype:
-            url = '/%s/?volume_group_type=%s' % (DPL_OBJ_VOLUMEGROUP, vgtype)
-        else:
-            url = '/%s/' % (DPL_OBJ_VOLUMEGROUP)
-        return self._execute(method, url, None, [http_client.OK])
-
     def get_vg(self, groupID):
         method = 'GET'
         url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
@@ -2063,7 +2063,8 @@ class RemoteFSManageableVolumesMixin(object):
         # be troublesome for some distributed shares, which may have
         # hostnames resolving to multiple addresses.
         norm_share = os.path.normcase(os.path.normpath(mounted_share))
-        head, match, share_rel_path = vol_remote_path.partition(norm_share)
+        _head, match, share_rel_path = vol_remote_path.partition(
+            norm_share)
         if not (match and share_rel_path.startswith(os.path.sep)):
             continue
 
@@ -2195,7 +2196,7 @@ class RemoteFSManageableVolumesMixin(object):
         manageable_volumes = []
         mount_path = self._get_mount_point_for_share(share)
 
-        for dir_path, dir_names, file_names in os.walk(mount_path):
+        for dir_path, _dir_names, file_names in os.walk(mount_path):
             for file_name in file_names:
                 file_name = os.path.normcase(file_name)
                 img_path = os.path.join(dir_path, file_name)
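Both RemoteFS changes above rename unpacked-but-unused variables with a leading underscore, which keeps the tuple unpacking intact while signalling that the name is deliberately ignored (and quieting linters). A small sketch of both idioms (hypothetical paths):

    import os

    # str.partition returns (before, separator, after); only the tail is used.
    _head, match, rel_path = '/mnt/share/vol1'.partition('/mnt/share')

    # os.walk yields (dir_path, dir_names, file_names); dir_names is unused.
    for dir_path, _dir_names, file_names in os.walk('/mnt/share'):
        for file_name in file_names:
            print(os.path.join(dir_path, file_name))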
@@ -180,7 +180,3 @@ class SanDriver(driver.BaseVD):
 class SanISCSIDriver(SanDriver, driver.ISCSIDriver):
     def __init__(self, *args, **kwargs):
         super(SanISCSIDriver, self).__init__(*args, **kwargs)
-
-    def _build_iscsi_target_name(self, volume):
-        return "%s%s" % (self.configuration.target_prefix,
-                         volume['name'])
@@ -407,18 +407,6 @@ class SolidFireDriver(san.SanISCSIDriver):
             msg = _('Attribute: %s not found.') % attr
             raise NotImplementedError(msg)
 
-    def _get_remote_info_by_id(self, backend_id):
-        remote_info = None
-        for rd in self.configuration.get('replication_device', []):
-            if rd.get('backend_id', None) == backend_id:
-                remote_endpoint = self._build_endpoint_info(**rd)
-                remote_info = self._get_cluster_info(remote_endpoint)
-                remote_info['endpoint'] = remote_endpoint
-                if not remote_info['endpoint']['svip']:
-                    remote_info['endpoint']['svip'] = (
-                        remote_info['svip'] + ':3260')
-        return remote_info
-
     def _create_remote_pairing(self, remote_device):
         try:
             pairing_info = self._issue_api_request('StartClusterPairing',
@@ -438,16 +426,6 @@ class SolidFireDriver(san.SanISCSIDriver):
 
         return pair_id
 
-    def _get_cluster_info(self, remote_endpoint):
-        try:
-            return self._issue_api_request(
-                'GetClusterInfo', {},
-                endpoint=remote_endpoint)['result']['clusterInfo']
-        except SolidFireAPIException:
-            msg = _("Replication device is unreachable!")
-            LOG.exception(msg)
-            raise
-
     def _check_replication_configs(self):
         repl_configs = self.configuration.replication_device
         if not repl_configs:
@@ -578,25 +556,6 @@ class SolidFireDriver(san.SanISCSIDriver):
 
         return cluster_ref
 
-    def _set_active_cluster(self, endpoint=None):
-        if not endpoint:
-            self.active_cluster['endpoint'] = self._build_endpoint_info()
-        else:
-            self.active_cluster['endpoint'] = endpoint
-
-        for k, v in self._issue_api_request(
-                'GetClusterInfo',
-                {})['result']['clusterInfo'].items():
-            self.active_cluster[k] = v
-
-        # Add a couple extra things that are handy for us
-        self.active_cluster['clusterAPIVersion'] = (
-            self._issue_api_request('GetClusterVersionInfo',
-                                    {})['result']['clusterAPIVersion'])
-        if self.configuration.get('sf_svip', None):
-            self.active_cluster['svip'] = (
-                self.configuration.get('sf_svip'))
-
     def _create_provider_id_string(self,
                                    resource_id,
                                    account_or_vol_id):
@@ -1261,17 +1220,6 @@ class SolidFireDriver(san.SanISCSIDriver):
 
         return vols
 
-    def _get_all_deleted_volumes(self, cinder_uuid=None):
-        params = {}
-        vols = self._issue_api_request('ListDeletedVolumes',
-                                       params)['result']['volumes']
-        if cinder_uuid:
-            deleted_vols = ([v for v in vols if
-                             cinder_uuid in v['name']])
-        else:
-            deleted_vols = [v for v in vols]
-        return deleted_vols
-
     def _get_account_create_availability(self, accounts, endpoint=None):
         # we'll check both the primary and the secondary
         # if it exists and return whichever one has count
@@ -242,14 +242,6 @@ class STXClient(object):
     def logout(self):
         pass
 
-    def session_logout(self):
-        url = self._base_url + '/exit'
-        try:
-            requests.get(url, verify=self.ssl_verify, timeout=30)
-            return True
-        except Exception:
-            return False
-
     def is_titanium(self):
         """True for older array firmware."""
         return self._fw_type == 'T'
@@ -69,14 +69,6 @@ class SmbfsException(exception.RemoteFSException):
     message = _("Unknown SMBFS exception.")
 
 
-class SmbfsNoSharesMounted(exception.RemoteFSNoSharesMounted):
-    message = _("No mounted SMBFS shares found.")
-
-
-class SmbfsNoSuitableShareFound(exception.RemoteFSNoSuitableShareFound):
-    message = _("There is no share which can host %(volume_size)sG.")
-
-
 @interface.volumedriver
 class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
                          remotefs_drv.RemoteFSPoolMixin,
@@ -406,18 +406,6 @@ class ZadaraVPSAConnection(object):
         data = self.send_cmd('get_volume', vpsa_vol=vpsa_vol)
         return data['volume']
 
-    def _get_volume_cg_name(self, name):
-        """Return name of the consistency group for the volume.
-
-        cg-name is a volume uniqe identifier (legacy attribute)
-        and not consistency group as it may imply.
-        """
-        volume = self._get_vpsa_volume(name)
-        if volume is not None:
-            return volume['cg_name']
-
-        return None
-
     def _get_all_vpsa_snapshots(self):
         """Returns snapshots from all vpsa volumes"""
         data = self.send_cmd('list_snapshots')
@@ -33,10 +33,6 @@ class ZadaraVPSANoActiveController(exception.VolumeDriverException):
     message = _("Unable to find any active VPSA controller")
 
 
-class ZadaraVolumeNotFound(exception.VolumeDriverException):
-    message = "%(reason)s"
-
-
 class ZadaraServerCreateFailure(exception.VolumeDriverException):
     message = _("Unable to create server object for initiator %(name)s")