HPE 3PAR: ISCSI/FC drivers – code refactoring

Issue:
=====
Redundant code was present in the iSCSI and FC drivers, with
the potential to cause maintenance issues.

Solution:
=========
Refactored code as below:
1. Created a common base class for the existing iSCSI and FC drivers.
2. Moved duplicate methods to the base class.
3. Incorporated the template method pattern wherever applicable.

Change-Id: I5024642f8e3e6cd7d221dc3af367bba55fe56cf5
This commit is contained in:
kushal
2017-03-16 04:16:58 -07:00
committed by Kushal Wathore
parent 6d8b648399
commit 8588aa5cf6
5 changed files with 535 additions and 734 deletions

View File

@@ -27,6 +27,7 @@ from cinder import test
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.hpe \
import fake_hpe_3par_client as hpe3parclient
from cinder.volume.drivers.hpe import hpe_3par_base as hpedriverbase
from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon
from cinder.volume.drivers.hpe import hpe_3par_fc as hpefcdriver
from cinder.volume.drivers.hpe import hpe_3par_iscsi as hpedriver
@@ -93,7 +94,7 @@ class Comment(object):
return not self.__eq__(other)
class HPE3PARBaseDriver(object):
class HPE3PARBaseDriver(test.TestCase):
VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'
SRC_CG_VOLUME_ID = 'bd21d11b-c765-4c68-896c-6b07f63cfcb6'
@@ -689,6 +690,91 @@ class HPE3PARBaseDriver(object):
self.driver.do_setup(None)
return _m_client
    @mock.patch.object(volume_types, 'get_volume_type')
    def migrate_volume_attached(self, _mock_volume_types):
        # Shared helper (called from the FC and iSCSI test classes'
        # test_migrate_volume_attached) verifying that migrating a volume
        # issues the expected tune/convert calls against the 3PAR client.
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                  'volume_type_id': None,
                  'id': HPE3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'size': 2,
                  'status': 'available',
                  'host': HPE3PARBaseDriver.FAKE_HOST,
                  'source_volid': HPE3PARBaseDriver.VOLUME_ID}

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()

            volume_name_3par = common._encode_name(volume['id'])
            osv_matcher = 'osv-' + volume_name_3par

            loc_info = 'HPE3PARDriver:1234567:CPG-FC1'

            # Match the destination protocol to the driver under test so the
            # migration is not declined for a protocol mismatch.
            protocol = "FC"
            if self.properties['driver_volume_type'] == "iscsi":
                protocol = "iSCSI"

            host = {'host': 'stack@3parfc1',
                    'capabilities': {'location_info': loc_info,
                                     'storage_protocol': protocol}}

            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)

            new_comment = Comment({
                "qos": {},
                "retype_test": "test comment",
            })
            # Expected client call sequence: comment/snapCPG update, then the
            # tune (action 6) that moves the volume to the destination CPG.
            expected = [
                mock.call.modifyVolume(osv_matcher,
                                       {'comment': new_comment,
                                        'snapCPG': 'OpenStackCPGSnap'}),
                mock.call.modifyVolume(osv_matcher,
                                       {'action': 6,
                                        'userCPG': 'OpenStackCPG',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1,
                                        'compression': False}),
                mock.call.getTask(1),
                mock.call.logout()
            ]
            mock_client.assert_has_calls(expected)

            self.assertIsNotNone(result)
            self.assertEqual((True, {'host': 'stack@3parfc1#OpenStackCPG'}),
                             result)
class TestHPE3PARDriverBase(HPE3PARBaseDriver):
    def setup_driver(self, config=None, mock_conf=None, wsapi_version=None):
        """Build a mocked base driver and return its mocked 3PAR client.

        Verifies the CPG lookups performed during driver setup, then resets
        the mock so individual tests only assert the calls they trigger
        themselves.
        """
        self.ctxt = context.get_admin_context()

        mock_client = self.setup_mock_client(
            conf=config,
            m_conf=mock_conf,
            driver=hpedriverbase.HPE3PARDriverBase)

        # Default to the newest WSAPI version unless a test pins one.
        if wsapi_version:
            mock_client.getWsApiVersion.return_value = (
                wsapi_version)
        else:
            mock_client.getWsApiVersion.return_value = (
                self.wsapi_version_latest)

        expected = [
            mock.call.getCPG(HPE3PAR_CPG),
            mock.call.getCPG(HPE3PAR_CPG2)]
        mock_client.assert_has_calls(
            self.standard_login +
            expected +
            self.standard_logout)
        mock_client.reset_mock()
        return mock_client
@mock.patch('hpe3parclient.version', "3.0.9")
def test_unsupported_client_version(self):
@@ -2862,64 +2948,6 @@ class HPE3PARBaseDriver(object):
mock_client.assert_has_calls(expected + self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_migrate_volume_attached(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
'volume_type_id': None,
'id': HPE3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'size': 2,
'status': 'in-use',
'host': HPE3PARBaseDriver.FAKE_HOST,
'source_volid': HPE3PARBaseDriver.VOLUME_ID}
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume_name_3par = common._encode_name(volume['id'])
osv_matcher = 'osv-' + volume_name_3par
loc_info = 'HPE3PARDriver:1234567:CPG-FC1'
protocol = "FC"
if self.properties['driver_volume_type'] == "iscsi":
protocol = "iSCSI"
host = {'host': 'stack@3parfc1',
'capabilities': {'location_info': loc_info,
'storage_protocol': protocol}}
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
new_comment = Comment({
"qos": {},
"retype_test": "test comment",
})
expected = [
mock.call.modifyVolume(osv_matcher,
{'comment': new_comment,
'snapCPG': 'OpenStackCPGSnap'}),
mock.call.modifyVolume(osv_matcher,
{'action': 6,
'userCPG': 'OpenStackCPG',
'conversionOperation': 1,
'tuneOperation': 1,
'compression': False}),
mock.call.getTask(1),
mock.call.logout()
]
mock_client.assert_has_calls(expected)
self.assertIsNotNone(result)
self.assertEqual((True, {'host': 'stack@3parfc1#OpenStackCPG'}),
result)
@mock.patch.object(volume_types, 'get_volume_type')
def test_migrate_volume_attached_diff_protocol(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
@@ -3438,48 +3466,6 @@ class HPE3PARBaseDriver(object):
expected +
self.standard_logout)
def test_terminate_connection(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getHostVLUNs.return_value = [
{'active': False,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0}]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.terminate_connection(
self.volume,
self.connector,
force=True)
expected = [
mock.call.queryHost(iqns=[self.connector['initiator']]),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.deleteVLUN(
self.VOLUME_3PAR_NAME,
None,
hostname=self.FAKE_HOST),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.deleteHost(self.FAKE_HOST),
mock.call.removeVolumeMetaData(
self.VOLUME_3PAR_NAME, CHAP_USER_KEY),
mock.call.removeVolumeMetaData(
self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_terminate_connection_from_primary_when_failed_over(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
@@ -5466,7 +5452,7 @@ class HPE3PARBaseDriver(object):
self.assertTrue(common._replication_enabled)
class TestHPE3PARFCDriver(HPE3PARBaseDriver, test.TestCase):
class TestHPE3PARFCDriver(HPE3PARBaseDriver):
properties = {
'driver_volume_type': 'fibre_channel',
@@ -6668,8 +6654,11 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver, test.TestCase):
self.assertEqual(self.FAKE_HOST, host['name'])
self.assertEqual(3, len(host['FCPaths']))
    def test_migrate_volume_attached(self):
        # Delegates to the shared helper in HPE3PARBaseDriver; the helper
        # picks the FC protocol based on self.properties.
        self.migrate_volume_attached()
class TestHPE3PARISCSIDriver(HPE3PARBaseDriver, test.TestCase):
class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
TARGET_IQN = 'iqn.2000-05.com.3pardata:21810002ac00383d'
TARGET_LUN = 186
@@ -8547,6 +8536,51 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver, test.TestCase):
model_update = common._get_model_update('xxx@yyy#zzz', 'CPG')
self.assertEqual({'host': 'xxx@yyy#CPG'}, model_update)
    def test_migrate_volume_attached(self):
        # Delegates to the shared helper in HPE3PARBaseDriver; the helper
        # picks the iSCSI protocol based on self.properties.
        self.migrate_volume_attached()
    def test_terminate_connection(self):
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        # One inactive VLUN left on the host: terminate should delete it,
        # then remove the (now empty) host and its CHAP metadata.
        mock_client.getHostVLUNs.return_value = [
            {'active': False,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.terminate_connection(
                self.volume,
                self.connector,
                force=True)

            expected = [
                mock.call.queryHost(iqns=[self.connector['initiator']]),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.deleteVLUN(
                    self.VOLUME_3PAR_NAME,
                    None,
                    hostname=self.FAKE_HOST),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.deleteHost(self.FAKE_HOST),
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME, CHAP_USER_KEY),
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)]

            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
VLUNS5_RET = ({'members':
[{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
'active': True},

View File

@@ -0,0 +1,383 @@
# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Base class for HPE Storage Drivers.
This driver requires 3.1.3 or later firmware on the 3PAR array, using
the 4.x version of the hpe3parclient.
You will need to install the python hpe3parclient.
sudo pip install --upgrade "hpe3parclient>=4.0"
"""
try:
from hpe3parclient import exceptions as hpeexceptions
except ImportError:
hpeexceptions = None
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
class HPE3PARDriverBase(driver.ManageableVD,
                        driver.ManageableSnapshotsVD,
                        driver.MigrateVD,
                        driver.BaseVD):
    """OpenStack base driver to enable 3PAR storage array.

    Holds all behavior common to the FC and iSCSI drivers. Protocol-specific
    subclasses set ``self.protocol``, implement ``initialize_connection`` /
    ``terminate_connection`` and may override the ``_do_setup`` template-method
    hook for extra setup work.

    Version history:

    .. code-block:: none

        1.0.0 - Initial base driver
        1.0.1 - Adds consistency group capability in generic volume groups.

    """

    VERSION = "1.0.1"

    def __init__(self, *args, **kwargs):
        super(HPE3PARDriverBase, self).__init__(*args, **kwargs)
        self._active_backend_id = kwargs.get('active_backend_id', None)
        self.configuration.append_config_values(hpecommon.hpe3par_opts)
        self.configuration.append_config_values(san.san_opts)
        # Set by each subclass ('FC' or 'iSCSI'); reported in stats and used
        # by the in-use migration protocol check below.
        self.protocol = None

    def _init_common(self):
        """Create an HPE3PARCommon bound to this backend's configuration."""
        return hpecommon.HPE3PARCommon(self.configuration,
                                       self._active_backend_id)

    def _login(self, timeout=None):
        """Set up and log into a 3PAR client session.

        :returns: a logged-in HPE3PARCommon instance (or an unauthenticated
                  one when the primary array is down but replication is on).
        """
        common = self._init_common()
        # If replication is enabled and we cannot login, we do not want to
        # raise an exception so a failover can still be executed.
        try:
            common.do_setup(None, timeout=timeout, stats=self._stats)
            common.client_login()
        except Exception:
            if common._replication_enabled:
                LOG.warning("The primary array is not reachable at this "
                            "time. Since replication is enabled, "
                            "listing replication targets and failing over "
                            "a volume can still be performed.")
            else:
                raise
        return common

    def _logout(self, common):
        # If replication is enabled and we do not have a client ID, we did not
        # login, but can still failover. There is no need to logout.
        if common.client is None and common._replication_enabled:
            return
        common.client_logout()

    def _check_flags(self, common):
        """Sanity check to ensure we have required options set."""
        required_flags = ['hpe3par_api_url', 'hpe3par_username',
                          'hpe3par_password', 'san_ip', 'san_login',
                          'san_password']
        common.check_flags(self.configuration, required_flags)

    @utils.trace
    def get_volume_stats(self, refresh=False):
        """Collect backend stats, tagged with this driver's protocol."""
        common = self._login()
        try:
            self._stats = common.get_volume_stats(
                refresh,
                self.get_filter_function(),
                self.get_goodness_function())
            self._stats['storage_protocol'] = self.protocol
            self._stats['driver_version'] = self.VERSION
            backend_name = self.configuration.safe_get('volume_backend_name')
            self._stats['volume_backend_name'] = (backend_name or
                                                  self.__class__.__name__)
            return self._stats
        finally:
            self._logout(common)

    def check_for_setup_error(self):
        """Setup errors are already checked for in do_setup so return pass."""
        pass

    @utils.trace
    def create_volume(self, volume):
        common = self._login()
        try:
            return common.create_volume(volume)
        finally:
            self._logout(common)

    @utils.trace
    def create_cloned_volume(self, volume, src_vref):
        """Clone an existing volume."""
        common = self._login()
        try:
            return common.create_cloned_volume(volume, src_vref)
        finally:
            self._logout(common)

    @utils.trace
    def delete_volume(self, volume):
        common = self._login()
        try:
            common.delete_volume(volume)
        finally:
            self._logout(common)

    @utils.trace
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        TODO: support using the size from the user.
        """
        common = self._login()
        try:
            return common.create_volume_from_snapshot(volume, snapshot)
        finally:
            self._logout(common)

    @utils.trace
    def create_snapshot(self, snapshot):
        common = self._login()
        try:
            common.create_snapshot(snapshot)
        finally:
            self._logout(common)

    @utils.trace
    def delete_snapshot(self, snapshot):
        common = self._login()
        try:
            common.delete_snapshot(snapshot)
        finally:
            self._logout(common)

    @utils.trace
    def extend_volume(self, volume, new_size):
        common = self._login()
        try:
            common.extend_volume(volume, new_size)
        finally:
            self._logout(common)

    @utils.trace
    def create_group(self, context, group):
        common = self._login()
        try:
            # BUGFIX: propagate the model update, as the pre-refactor drivers
            # did and as the other group methods here do.
            return common.create_group(context, group)
        finally:
            self._logout(common)

    @utils.trace
    def create_group_from_src(self, context, group, volumes,
                              group_snapshot=None, snapshots=None,
                              source_group=None, source_vols=None):
        common = self._login()
        try:
            return common.create_group_from_src(
                context, group, volumes, group_snapshot, snapshots,
                source_group, source_vols)
        finally:
            self._logout(common)

    @utils.trace
    def delete_group(self, context, group, volumes):
        common = self._login()
        try:
            return common.delete_group(context, group, volumes)
        finally:
            self._logout(common)

    @utils.trace
    def update_group(self, context, group, add_volumes=None,
                     remove_volumes=None):
        common = self._login()
        try:
            return common.update_group(context, group, add_volumes,
                                       remove_volumes)
        finally:
            self._logout(common)

    @utils.trace
    def create_group_snapshot(self, context, group_snapshot, snapshots):
        common = self._login()
        try:
            return common.create_group_snapshot(context, group_snapshot,
                                                snapshots)
        finally:
            self._logout(common)

    @utils.trace
    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        common = self._login()
        try:
            return common.delete_group_snapshot(context, group_snapshot,
                                                snapshots)
        finally:
            self._logout(common)

    @utils.trace
    def manage_existing(self, volume, existing_ref):
        common = self._login()
        try:
            return common.manage_existing(volume, existing_ref)
        finally:
            self._logout(common)

    @utils.trace
    def manage_existing_snapshot(self, snapshot, existing_ref):
        common = self._login()
        try:
            return common.manage_existing_snapshot(snapshot, existing_ref)
        finally:
            self._logout(common)

    @utils.trace
    def manage_existing_get_size(self, volume, existing_ref):
        common = self._login()
        try:
            return common.manage_existing_get_size(volume, existing_ref)
        finally:
            self._logout(common)

    @utils.trace
    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
        common = self._login()
        try:
            return common.manage_existing_snapshot_get_size(snapshot,
                                                            existing_ref)
        finally:
            self._logout(common)

    @utils.trace
    def unmanage(self, volume):
        common = self._login()
        try:
            common.unmanage(volume)
        finally:
            self._logout(common)

    @utils.trace
    def unmanage_snapshot(self, snapshot):
        common = self._login()
        try:
            common.unmanage_snapshot(snapshot)
        finally:
            self._logout(common)

    @utils.trace
    def retype(self, context, volume, new_type, diff, host):
        """Convert the volume to be of the new type."""
        common = self._login()
        try:
            return common.retype(volume, new_type, diff, host)
        finally:
            self._logout(common)

    @utils.trace
    def migrate_volume(self, context, volume, host):
        # BUGFIX: an in-use volume can still be migrated when the destination
        # speaks the same storage protocol (matching the pre-refactor FC and
        # iSCSI drivers). Only a cross-protocol, in-use migration is declined
        # so the generic host-assisted migration can take over.
        if volume['status'] == 'in-use':
            protocol = host['capabilities']['storage_protocol']
            if protocol != self.protocol:
                LOG.debug("3PAR %(protocol)s driver cannot migrate in-use "
                          "volume to a host with "
                          "storage_protocol=%(storage_protocol)s",
                          {'protocol': self.protocol,
                           'storage_protocol': protocol})
                return False, None

        common = self._login()
        try:
            return common.migrate_volume(volume, host)
        finally:
            self._logout(common)

    @utils.trace
    def update_migrated_volume(self, context, volume, new_volume,
                               original_volume_status):
        """Update the name of the migrated volume to it's new ID."""
        common = self._login()
        try:
            return common.update_migrated_volume(context, volume, new_volume,
                                                 original_volume_status)
        finally:
            self._logout(common)

    @utils.trace
    def get_pool(self, volume):
        common = self._login()
        try:
            return common.get_cpg(volume)
        except hpeexceptions.HTTPNotFound:
            reason = (_("Volume %s doesn't exist on array.") % volume)
            LOG.error(reason)
            raise exception.InvalidVolume(reason)
        finally:
            self._logout(common)

    @utils.trace
    def revert_to_snapshot(self, context, volume, snapshot):
        """Revert volume to snapshot."""
        common = self._login()
        try:
            common.revert_to_snapshot(volume, snapshot)
        finally:
            self._logout(common)

    @utils.trace
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Force failover to a secondary replication target.

        ``groups`` is accepted (and currently unhandled) to stay
        signature-compatible with the pre-refactor drivers and the manager's
        call convention.
        """
        common = self._login(timeout=30)
        try:
            # Update the active_backend_id in the driver and return it.
            active_backend_id, volume_updates = common.failover_host(
                context, volumes, secondary_id)
            self._active_backend_id = active_backend_id
            return active_backend_id, volume_updates, []
        finally:
            self._logout(common)

    def do_setup(self, context):
        common = self._init_common()
        common.do_setup(context)
        self._check_flags(common)
        common.check_for_setup_error()
        # Template-method hook for protocol-specific setup (e.g. iSCSI IP
        # discovery); the base implementation is a no-op.
        self._do_setup(common)

    def _do_setup(self, common):
        pass

    def create_export(self, context, volume, connector):
        pass

    def ensure_export(self, context, volume):
        pass

    def remove_export(self, context, volume):
        pass

    def terminate_connection(self, volume, connector, **kwargs):
        # Implemented by the protocol-specific subclasses.
        pass

    def initialize_connection(self, volume, connector):
        # Implemented by the protocol-specific subclasses.
        pass

View File

@@ -265,11 +265,12 @@ class HPE3PARCommon(object):
3.0.38 - Fixed delete operation of replicated volume which is part
of QOS. bug #1717875
3.0.39 - Add support for revert to snapshot.
4.0.0 - Code refactor.
"""
VERSION = "3.0.39"
VERSION = "4.0.0"
stats = {}

View File

@@ -17,7 +17,7 @@
#
"""
Volume driver for HPE 3PAR Storage array.
This driver requires 3.1.3 firmware on the 3PAR array, using
This driver requires 3.1.3 or later firmware on the 3PAR array, using
the 4.x version of the hpe3parclient.
You will need to install the python hpe3parclient.
@@ -41,9 +41,7 @@ from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon
from cinder.volume.drivers.san import san
from cinder.volume.drivers.hpe import hpe_3par_base as hpebasedriver
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
@@ -53,10 +51,7 @@ EXISTENT_PATH = 73
@interface.volumedriver
class HPE3PARFCDriver(driver.ManageableVD,
driver.ManageableSnapshotsVD,
driver.MigrateVD,
driver.BaseVD):
class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
"""OpenStack Fibre Channel driver to enable 3PAR storage array.
Version history:
@@ -111,134 +106,19 @@ class HPE3PARFCDriver(driver.ManageableVD,
3.0.10 - Added Entry point tracing
3.0.11 - Handle manage and unmanage hosts present. bug #1648067
3.0.12 - Adds consistency group capability in generic volume groups.
4.0.0 - Adds base class.
"""
VERSION = "3.0.12"
VERSION = "4.0.0"
# The name of the CI wiki page.
CI_WIKI_NAME = "HPE_Storage_CI"
def __init__(self, *args, **kwargs):
super(HPE3PARFCDriver, self).__init__(*args, **kwargs)
self._active_backend_id = kwargs.get('active_backend_id', None)
self.configuration.append_config_values(hpecommon.hpe3par_opts)
self.configuration.append_config_values(san.san_opts)
self.lookup_service = fczm_utils.create_lookup_service()
def _init_common(self):
return hpecommon.HPE3PARCommon(self.configuration,
self._active_backend_id)
def _login(self, timeout=None):
common = self._init_common()
# If replication is enabled and we cannot login, we do not want to
# raise an exception so a failover can still be executed.
try:
common.do_setup(None, timeout=timeout, stats=self._stats)
common.client_login()
except Exception:
if common._replication_enabled:
LOG.warning("The primary array is not reachable at this "
"time. Since replication is enabled, "
"listing replication targets and failing over "
"a volume can still be performed.")
pass
else:
raise
return common
def _logout(self, common):
# If replication is enabled and we do not have a client ID, we did not
# login, but can still failover. There is no need to logout.
if common.client is None and common._replication_enabled:
return
common.client_logout()
def _check_flags(self, common):
"""Sanity check to ensure we have required options set."""
required_flags = ['hpe3par_api_url', 'hpe3par_username',
'hpe3par_password',
'san_ip', 'san_login', 'san_password']
common.check_flags(self.configuration, required_flags)
def get_volume_stats(self, refresh=False):
common = self._login()
try:
self._stats = common.get_volume_stats(
refresh,
self.get_filter_function(),
self.get_goodness_function())
self._stats['storage_protocol'] = 'FC'
self._stats['driver_version'] = self.VERSION
backend_name = self.configuration.safe_get('volume_backend_name')
self._stats['volume_backend_name'] = (backend_name or
self.__class__.__name__)
return self._stats
finally:
self._logout(common)
def do_setup(self, context):
common = self._init_common()
common.do_setup(context)
self._check_flags(common)
common.check_for_setup_error()
def check_for_setup_error(self):
"""Setup errors are already checked for in do_setup so return pass."""
pass
@utils.trace
def create_volume(self, volume):
common = self._login()
try:
return common.create_volume(volume)
finally:
self._logout(common)
@utils.trace
def create_cloned_volume(self, volume, src_vref):
common = self._login()
try:
return common.create_cloned_volume(volume, src_vref)
finally:
self._logout(common)
@utils.trace
def delete_volume(self, volume):
common = self._login()
try:
common.delete_volume(volume)
finally:
self._logout(common)
@utils.trace
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.
TODO: support using the size from the user.
"""
common = self._login()
try:
return common.create_volume_from_snapshot(volume, snapshot)
finally:
self._logout(common)
@utils.trace
def create_snapshot(self, snapshot):
common = self._login()
try:
common.create_snapshot(snapshot)
finally:
self._logout(common)
@utils.trace
def delete_snapshot(self, snapshot):
common = self._login()
try:
common.delete_snapshot(snapshot)
finally:
self._logout(common)
self.protocol = 'FC'
@utils.trace
@fczm_utils.add_fc_zone
@@ -543,194 +423,3 @@ class HPE3PARFCDriver(driver.ManageableVD,
self._modify_3par_fibrechan_host(common, host['name'], new_wwns)
host = common._get_3par_host(host['name'])
return host
def create_export(self, context, volume, connector):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
@utils.trace
def extend_volume(self, volume, new_size):
common = self._login()
try:
common.extend_volume(volume, new_size)
finally:
self._logout(common)
@utils.trace
def create_group(self, context, group):
common = self._login()
try:
return common.create_group(context, group)
finally:
self._logout(common)
@utils.trace
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
common = self._login()
try:
return common.create_group_from_src(
context, group, volumes, group_snapshot, snapshots,
source_group, source_vols)
finally:
self._logout(common)
@utils.trace
def delete_group(self, context, group, volumes):
common = self._login()
try:
return common.delete_group(context, group, volumes)
finally:
self._logout(common)
@utils.trace
def update_group(self, context, group, add_volumes=None,
remove_volumes=None):
common = self._login()
try:
return common.update_group(context, group, add_volumes,
remove_volumes)
finally:
self._logout(common)
@utils.trace
def create_group_snapshot(self, context, group_snapshot, snapshots):
common = self._login()
try:
return common.create_group_snapshot(context, group_snapshot,
snapshots)
finally:
self._logout(common)
@utils.trace
def delete_group_snapshot(self, context, group_snapshot, snapshots):
common = self._login()
try:
return common.delete_group_snapshot(context, group_snapshot,
snapshots)
finally:
self._logout(common)
@utils.trace
def manage_existing(self, volume, existing_ref):
common = self._login()
try:
return common.manage_existing(volume, existing_ref)
finally:
self._logout(common)
@utils.trace
def manage_existing_snapshot(self, snapshot, existing_ref):
common = self._login()
try:
return common.manage_existing_snapshot(snapshot, existing_ref)
finally:
self._logout(common)
@utils.trace
def manage_existing_get_size(self, volume, existing_ref):
common = self._login()
try:
return common.manage_existing_get_size(volume, existing_ref)
finally:
self._logout(common)
@utils.trace
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
common = self._login()
try:
return common.manage_existing_snapshot_get_size(snapshot,
existing_ref)
finally:
self._logout(common)
@utils.trace
def unmanage(self, volume):
common = self._login()
try:
common.unmanage(volume)
finally:
self._logout(common)
@utils.trace
def unmanage_snapshot(self, snapshot):
common = self._login()
try:
common.unmanage_snapshot(snapshot)
finally:
self._logout(common)
@utils.trace
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
common = self._login()
try:
return common.retype(volume, new_type, diff, host)
finally:
self._logout(common)
@utils.trace
def revert_to_snapshot(self, context, volume, snapshot):
"""Revert volume to snapshot."""
common = self._login()
try:
common.revert_to_snapshot(volume, snapshot)
finally:
self._logout(common)
@utils.trace
def migrate_volume(self, context, volume, host):
if volume['status'] == 'in-use':
protocol = host['capabilities']['storage_protocol']
if protocol != 'FC':
LOG.debug("3PAR FC driver cannot migrate in-use volume "
"to a host with storage_protocol=%s.", protocol)
return False, None
common = self._login()
try:
return common.migrate_volume(volume, host)
finally:
self._logout(common)
@utils.trace
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status):
"""Update the name of the migrated volume to it's new ID."""
common = self._login()
try:
return common.update_migrated_volume(context, volume, new_volume,
original_volume_status)
finally:
self._logout(common)
@utils.trace
def get_pool(self, volume):
common = self._login()
try:
return common.get_cpg(volume)
except hpeexceptions.HTTPNotFound:
reason = (_("Volume %s doesn't exist on array.") % volume)
LOG.error(reason)
raise exception.InvalidVolume(reason)
finally:
self._logout(common)
@utils.trace
def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Force failover to a secondary replication target."""
common = self._login(timeout=30)
try:
# Update the active_backend_id in the driver and return it.
active_backend_id, volume_updates = common.failover_host(
context, volumes, secondary_id)
self._active_backend_id = active_backend_id
return active_backend_id, volume_updates, []
finally:
self._logout(common)

View File

@@ -15,7 +15,7 @@
#
"""
Volume driver for HPE 3PAR Storage array.
This driver requires 3.1.3 firmware on the 3PAR array, using
This driver requires 3.1.3 or later firmware on the 3PAR array, using
the 4.x version of the hpe3parclient.
You will need to install the python hpe3parclient.
@@ -42,9 +42,7 @@ from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon
from cinder.volume.drivers.san import san
from cinder.volume.drivers.hpe import hpe_3par_base as hpebasedriver
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
@@ -57,10 +55,7 @@ CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret"
@interface.volumedriver
class HPE3PARISCSIDriver(driver.ManageableVD,
driver.ManageableSnapshotsVD,
driver.MigrateVD,
driver.BaseVD):
class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
"""OpenStack iSCSI driver to enable 3PAR storage array.
Version history:
@@ -124,79 +119,20 @@ class HPE3PARISCSIDriver(driver.ManageableVD,
3.0.14 - Handle manage and unmanage hosts present. bug #1648067
3.0.15 - Adds consistency group capability in generic volume groups.
3.0.16 - Get host from os-brick connector. bug #1690244
4.0.0 - Adds base class.
"""
VERSION = "3.0.16"
VERSION = "4.0.0"
# The name of the CI wiki page.
CI_WIKI_NAME = "HPE_Storage_CI"
def __init__(self, *args, **kwargs):
super(HPE3PARISCSIDriver, self).__init__(*args, **kwargs)
self._active_backend_id = kwargs.get('active_backend_id', None)
self.configuration.append_config_values(hpecommon.hpe3par_opts)
self.configuration.append_config_values(san.san_opts)
def _init_common(self):
return hpecommon.HPE3PARCommon(self.configuration,
self._active_backend_id)
def _login(self, timeout=None):
common = self._init_common()
# If replication is enabled and we cannot login, we do not want to
# raise an exception so a failover can still be executed.
try:
common.do_setup(None, timeout=timeout, stats=self._stats)
common.client_login()
except Exception:
if common._replication_enabled:
LOG.warning("The primary array is not reachable at this "
"time. Since replication is enabled, "
"listing replication targets and failing over "
"a volume can still be performed.")
pass
else:
raise
return common
def _logout(self, common):
# If replication is enabled and we do not have a client ID, we did not
# login, but can still failover. There is no need to logout.
if common.client is None and common._replication_enabled:
return
common.client_logout()
def _check_flags(self, common):
"""Sanity check to ensure we have required options set."""
required_flags = ['hpe3par_api_url', 'hpe3par_username',
'hpe3par_password', 'san_ip', 'san_login',
'san_password']
common.check_flags(self.configuration, required_flags)
@utils.trace
def get_volume_stats(self, refresh=False):
common = self._login()
try:
self._stats = common.get_volume_stats(
refresh,
self.get_filter_function(),
self.get_goodness_function())
self._stats['storage_protocol'] = 'iSCSI'
self._stats['driver_version'] = self.VERSION
backend_name = self.configuration.safe_get('volume_backend_name')
self._stats['volume_backend_name'] = (backend_name or
self.__class__.__name__)
return self._stats
finally:
self._logout(common)
def do_setup(self, context):
common = self._init_common()
common.do_setup(context)
self._check_flags(common)
common.check_for_setup_error()
self.protocol = 'iSCSI'
def _do_setup(self, common):
self.iscsi_ips = {}
common.client_login()
try:
@@ -264,63 +200,6 @@ class HPE3PARISCSIDriver(driver.ManageableVD,
raise exception.InvalidInput(reason=msg)
self.iscsi_ips[common._client_conf['hpe3par_api_url']] = iscsi_ip_list
def check_for_setup_error(self):
"""Setup errors are already checked for in do_setup so return pass."""
pass
@utils.trace
def create_volume(self, volume):
common = self._login()
try:
return common.create_volume(volume)
finally:
self._logout(common)
@utils.trace
def create_cloned_volume(self, volume, src_vref):
    """Clone an existing volume.

    :param volume: the new volume object to create.
    :param src_vref: the source volume to clone from.
    :returns: model update from the common layer.
    """
    common = self._login()
    try:
        return common.create_cloned_volume(volume, src_vref)
    finally:
        self._logout(common)
@utils.trace
def delete_volume(self, volume):
    """Remove a volume from the 3PAR backend.

    :param volume: the Cinder volume to delete.
    """
    backend = self._login()
    try:
        backend.delete_volume(volume)
    finally:
        self._logout(backend)
@utils.trace
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot.

    TODO: support using the size from the user.
    """
    backend = self._login()
    try:
        return backend.create_volume_from_snapshot(volume, snapshot)
    finally:
        self._logout(backend)
@utils.trace
def create_snapshot(self, snapshot):
    """Create a snapshot of a volume on the backend.

    :param snapshot: the Cinder snapshot to create.
    """
    common = self._login()
    try:
        common.create_snapshot(snapshot)
    finally:
        self._logout(common)
@utils.trace
def delete_snapshot(self, snapshot):
    """Remove a snapshot from the 3PAR backend.

    :param snapshot: the Cinder snapshot to delete.
    """
    backend = self._login()
    try:
        backend.delete_snapshot(snapshot)
    finally:
        self._logout(backend)
@utils.trace
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server.
@@ -751,9 +630,6 @@ class HPE3PARISCSIDriver(driver.ManageableVD,
finally:
self._logout(common)
def remove_export(self, context, volume):
    """Intentional no-op: nothing to clean up for exports here."""
    return None
def _get_least_used_nsp_for_host(self, common, hostname):
"""Get the least used NSP for the current host.
@@ -823,185 +699,3 @@ class HPE3PARISCSIDriver(driver.ManageableVD,
current_least_used_nsp = nsp
current_smallest_count = count
return current_least_used_nsp
@utils.trace
def extend_volume(self, volume, new_size):
    """Grow a volume to the requested size.

    :param volume: the Cinder volume to extend.
    :param new_size: the new size in GiB.
    """
    backend = self._login()
    try:
        backend.extend_volume(volume, new_size)
    finally:
        self._logout(backend)
@utils.trace
def create_group(self, context, group):
    """Create a generic volume group on the backend.

    :param context: the request context.
    :param group: the group to create.
    :returns: the model update produced by the common layer, so the
              caller can persist backend-side state.
    """
    common = self._login()
    try:
        # Fix: the common layer's result was previously discarded here;
        # every other group wrapper (delete_group, update_group,
        # create_group_from_src) propagates the model update.
        return common.create_group(context, group)
    finally:
        self._logout(common)
@utils.trace
def create_group_from_src(self, context, group, volumes,
                          group_snapshot=None, snapshots=None,
                          source_group=None, source_vols=None):
    """Create a group from a group snapshot or another group.

    :param context: the request context.
    :param group: the group to create.
    :param volumes: the volumes that will belong to the new group.
    :param group_snapshot: optional source group snapshot.
    :param snapshots: snapshots belonging to the source group snapshot.
    :param source_group: optional source group to copy from.
    :param source_vols: volumes belonging to the source group.
    :returns: model update from the common layer.
    """
    common = self._login()
    try:
        return common.create_group_from_src(
            context, group, volumes, group_snapshot, snapshots,
            source_group, source_vols)
    finally:
        self._logout(common)
@utils.trace
def delete_group(self, context, group, volumes):
    """Delete a group and its volumes from the backend.

    :returns: model update from the common layer.
    """
    backend = self._login()
    try:
        return backend.delete_group(context, group, volumes)
    finally:
        self._logout(backend)
@utils.trace
def update_group(self, context, group, add_volumes=None,
                 remove_volumes=None):
    """Add and/or remove volumes from an existing group.

    :param context: the request context.
    :param group: the group being updated.
    :param add_volumes: volumes to add to the group.
    :param remove_volumes: volumes to remove from the group.
    :returns: model update from the common layer.
    """
    common = self._login()
    try:
        return common.update_group(context, group, add_volumes,
                                   remove_volumes)
    finally:
        self._logout(common)
@utils.trace
def create_group_snapshot(self, context, group_snapshot, snapshots):
    """Snapshot every member of a group.

    :returns: model update from the common layer.
    """
    backend = self._login()
    try:
        return backend.create_group_snapshot(context, group_snapshot,
                                             snapshots)
    finally:
        self._logout(backend)
@utils.trace
def delete_group_snapshot(self, context, group_snapshot, snapshots):
    """Delete a group snapshot and its member snapshots.

    :param context: the request context.
    :param group_snapshot: the group snapshot to delete.
    :param snapshots: snapshots belonging to the group snapshot.
    :returns: model update from the common layer.
    """
    common = self._login()
    try:
        return common.delete_group_snapshot(context, group_snapshot,
                                            snapshots)
    finally:
        self._logout(common)
@utils.trace
def manage_existing(self, volume, existing_ref):
    """Bring an existing backend volume under Cinder management.

    :param existing_ref: reference identifying the backend volume.
    :returns: model update from the common layer.
    """
    backend = self._login()
    try:
        return backend.manage_existing(volume, existing_ref)
    finally:
        self._logout(backend)
@utils.trace
def manage_existing_snapshot(self, snapshot, existing_ref):
    """Bring an existing backend snapshot under Cinder management.

    :param snapshot: the Cinder snapshot object to associate.
    :param existing_ref: reference identifying the backend snapshot.
    :returns: model update from the common layer.
    """
    common = self._login()
    try:
        return common.manage_existing_snapshot(snapshot, existing_ref)
    finally:
        self._logout(common)
@utils.trace
def manage_existing_get_size(self, volume, existing_ref):
    """Return the size of a backend volume being managed.

    :param existing_ref: reference identifying the backend volume.
    :returns: the size reported by the common layer.
    """
    backend = self._login()
    try:
        return backend.manage_existing_get_size(volume, existing_ref)
    finally:
        self._logout(backend)
@utils.trace
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
    """Return the size of a backend snapshot being managed.

    :param snapshot: the Cinder snapshot object.
    :param existing_ref: reference identifying the backend snapshot.
    :returns: the size reported by the common layer.
    """
    common = self._login()
    try:
        return common.manage_existing_snapshot_get_size(snapshot,
                                                        existing_ref)
    finally:
        self._logout(common)
@utils.trace
def unmanage(self, volume):
    """Release a volume from Cinder management (keep it on the array)."""
    backend = self._login()
    try:
        backend.unmanage(volume)
    finally:
        self._logout(backend)
@utils.trace
def unmanage_snapshot(self, snapshot):
    """Release a snapshot from Cinder management (keep it on the array).

    :param snapshot: the Cinder snapshot to unmanage.
    """
    common = self._login()
    try:
        common.unmanage_snapshot(snapshot)
    finally:
        self._logout(common)
@utils.trace
def retype(self, context, volume, new_type, diff, host):
    """Convert the volume to be of the new type."""
    backend = self._login()
    try:
        return backend.retype(volume, new_type, diff, host)
    finally:
        self._logout(backend)
@utils.trace
def revert_to_snapshot(self, context, volume, snapshot):
    """Revert volume to snapshot.

    :param context: the request context.
    :param volume: the volume to revert.
    :param snapshot: the snapshot to revert the volume to.
    """
    common = self._login()
    try:
        common.revert_to_snapshot(volume, snapshot)
    finally:
        self._logout(common)
@utils.trace
def migrate_volume(self, context, volume, host):
    """Migrate a volume to the given host.

    In-use volumes can only move to another iSCSI backend; anything
    else is refused up front without contacting the array.

    :returns: (migrated, model_update) tuple from the common layer, or
              (False, None) when the migration is refused.
    """
    if volume['status'] == 'in-use':
        protocol = host['capabilities']['storage_protocol']
        if protocol != 'iSCSI':
            LOG.debug("3PAR ISCSI driver cannot migrate in-use volume "
                      "to a host with storage_protocol=%s.", protocol)
            return False, None
    backend = self._login()
    try:
        return backend.migrate_volume(volume, host)
    finally:
        self._logout(backend)
@utils.trace
def update_migrated_volume(self, context, volume, new_volume,
                           original_volume_status):
    """Update the name of the migrated volume to it's new ID.

    :param context: the request context.
    :param volume: the original volume.
    :param new_volume: the destination volume created by migration.
    :param original_volume_status: status of the volume pre-migration.
    :returns: model update from the common layer.
    """
    common = self._login()
    try:
        return common.update_migrated_volume(context, volume, new_volume,
                                             original_volume_status)
    finally:
        self._logout(common)
@utils.trace
def get_pool(self, volume):
    """Return the CPG (pool) the volume resides in.

    :param volume: the Cinder volume to look up.
    :returns: the CPG name from the common layer.
    :raises exception.InvalidVolume: if the array reports the volume
        does not exist.
    """
    common = self._login()
    try:
        return common.get_cpg(volume)
    except hpeexceptions.HTTPNotFound:
        reason = (_("Volume %s doesn't exist on array.") % volume)
        LOG.error(reason)
        # NOTE(review): reason is passed positionally, so it becomes the
        # exception *message* rather than the 'reason' format kwarg —
        # confirm this is intended.
        raise exception.InvalidVolume(reason)
    finally:
        self._logout(common)
@utils.trace
def failover_host(self, context, volumes, secondary_id=None, groups=None):
    """Force failover to a secondary replication target."""
    # Use a short timeout: the primary may be unreachable during failover.
    backend = self._login(timeout=30)
    try:
        # Update the active_backend_id in the driver and return it.
        active_backend_id, volume_updates = backend.failover_host(
            context, volumes, secondary_id)
        self._active_backend_id = active_backend_id
        return active_backend_id, volume_updates, []
    finally:
        self._logout(backend)