From 409e9796f0cdb5b380df8833e57dd15c2b65fd42 Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 31 Aug 2018 11:27:44 -0500 Subject: [PATCH] Restructure Huawei driver As part of further refactoring work, this pulls out common driver code into its own file to better organize the code. Change-Id: Ib5b52b638e27743d86106cc9859f8adeab2aab08 Signed-off-by: Sean McGinnis --- cinder/opts.py | 6 +- .../drivers/huawei/test_huawei_drivers.py | 75 +- cinder/volume/drivers/huawei/common.py | 1939 +++++++++++++++++ cinder/volume/drivers/huawei/huawei_driver.py | 1925 +--------------- .../drivers/huawei-storage-driver.rst | 2 +- 5 files changed, 1986 insertions(+), 1961 deletions(-) create mode 100644 cinder/volume/drivers/huawei/common.py diff --git a/cinder/opts.py b/cinder/opts.py index 653fb40d6e6..cf25d618c84 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -101,8 +101,8 @@ from cinder.volume.drivers.hpe import hpe_3par_common as \ cinder_volume_drivers_hpe_hpe3parcommon from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \ cinder_volume_drivers_hpe_hpelefthandiscsi -from cinder.volume.drivers.huawei import huawei_driver as \ - cinder_volume_drivers_huawei_huaweidriver +from cinder.volume.drivers.huawei import common as \ + cinder_volume_drivers_huawei_common from cinder.volume.drivers.ibm import flashsystem_common as \ cinder_volume_drivers_ibm_flashsystemcommon from cinder.volume.drivers.ibm import flashsystem_iscsi as \ @@ -291,7 +291,7 @@ def list_opts(): cinder_volume_drivers_fusionstorage_dsware.volume_opts, cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts, cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts, - cinder_volume_drivers_huawei_huaweidriver.huawei_opts, + cinder_volume_drivers_huawei_common.huawei_opts, cinder_volume_drivers_ibm_flashsystemcommon.flashsystem_opts, cinder_volume_drivers_ibm_flashsystemiscsi. 
flashsystem_iscsi_opts, diff --git a/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py b/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py index 81e329e0c16..d5dd788cfc7 100644 --- a/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py +++ b/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py @@ -34,6 +34,7 @@ from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import utils from cinder.volume import configuration as conf +from cinder.volume.drivers.huawei import common from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import fc_zone_helper from cinder.volume.drivers.huawei import huawei_conf @@ -2617,7 +2618,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.driver.support_func = pool_data self.mock_object(replication.ReplicaCommonDriver, 'split') self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) self.mock_object(rest_client.RestClient, @@ -2779,7 +2780,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.mock_object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') self.mock_object(rest_client.RestClient, 'rename_lun') - self.mock_object(huawei_driver.HuaweiBaseDriver, + self.mock_object(common.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value={ 'PARENTNAME': 'OpenStack_Pool', @@ -2789,7 +2790,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): 'WWN': '6643e8c1004c5f6723e9f454003'}) self.mock_object(volume_types, 'get_volume_type', return_value={'extra_specs': test_new_type}) - self.mock_object(huawei_driver.HuaweiBaseDriver, + self.mock_object(common.HuaweiBaseDriver, '_check_needed_changes', return_value={}) external_ref = {'source-name': 'test1', @@ -2807,7 +2808,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): def test_create_volume_from_snapsuccess(self): self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) self.mock_object(replication.ReplicaCommonDriver, 'sync') @@ -3177,7 +3178,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): json.loads(lun_info['provider_location'])) @ddt.data('front-end', 'back-end') - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', @@ -3186,7 +3187,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test'}) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_type', + @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_type', return_value={'qos_specs_id': u'025ce295-15e9-41a7'}) def test_create_smartqos_success(self, mock_consumer, @@ -3215,7 +3216,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): def test_create_smartqos_failed(self, qos_specs_value, pool_data): self.driver.support_func = pool_data self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', @@ -3225,7 +3226,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test'}) - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_type', + self.mock_object(common.HuaweiBaseDriver, 
'_get_volume_type', return_value={'qos_specs_id': u'025ce295-15e9-41a7'}) self.mock_object(qos_specs, 'get_qos_specs', return_value=qos_specs_value) @@ -3236,7 +3237,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): def test_create_smartqos_without_huawei_type(self, pool_data): self.driver.support_func = pool_data self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', @@ -3246,7 +3247,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test'}) - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_type', + self.mock_object(common.HuaweiBaseDriver, '_get_volume_type', return_value={'qos_specs_id': u'025ce295-15e9-41a7'}) self.mock_object(qos_specs, 'get_qos_specs', return_value={'specs': {'fake_qos_type': '100', @@ -3365,7 +3366,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.driver.delete_volume(self.volume) @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', @@ -3413,14 +3414,14 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): def test_create_smartCache_failed(self, opts, pool_data): self.driver.support_func = pool_data self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_params', return_value=opts) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', @@ -3438,7 +3439,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.driver.create_volume, self.volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', @@ -3506,7 +3507,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): (qos_id, lun_list) = self.driver.client.find_available_qos(qos) self.assertEqual(("11", u'["0", "1", "2"]'), (qos_id, lun_list)) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @@ -3531,7 +3532,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): json.loads(lun_info['provider_location'])) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @@ -3692,7 +3693,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): def test_manage_existing_pool_mismatch(self, mock_get_by_name, mock_get_info): # LUN does not belong to the specified pool. 
- with mock.patch.object(huawei_driver.HuaweiBaseDriver, + with mock.patch.object(common.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value={'PARENTNAME': 'StoragePool'}): external_ref = {'source-name': 'LUN1'} @@ -3714,7 +3715,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): # Status is not normal. ret = {'PARENTNAME': "OpenStack_Pool", 'HEALTHSTATUS': '2'} - with mock.patch.object(huawei_driver.HuaweiBaseDriver, + with mock.patch.object(common.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value=ret): external_ref = {'source-name': 'LUN1'} @@ -3849,7 +3850,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): [{'source-id': 'ID1'}, FAKE_POOLS_UNSUPPORT_REPORT], [{'source-id': 'ID1'}, FAKE_POOLS_SUPPORT_REPORT]) @mock.patch.object(rest_client.RestClient, 'rename_lun') - @mock.patch.object(huawei_driver.HuaweiBaseDriver, + @mock.patch.object(common.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value={'PARENTNAME': 'OpenStack_Pool', 'SNAPSHOTIDS': [], @@ -3877,7 +3878,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.driver.unmanage(self.volume) def test_manage_existing_snapshot_abnormal(self): - with mock.patch.object(huawei_driver.HuaweiBaseDriver, + with mock.patch.object(common.HuaweiBaseDriver, '_get_snapshot_info_by_ref', return_value={'HEALTHSTATUS': '2', 'PARENTID': '11'}): @@ -3908,7 +3909,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): ex.msg)) @mock.patch.object(rest_client.RestClient, 'rename_snapshot') - @mock.patch.object(huawei_driver.HuaweiBaseDriver, + @mock.patch.object(common.HuaweiBaseDriver, '_get_snapshot_info_by_ref', return_value={'ID': 'ID1', 'EXPOSEDTOINITIATOR': 'false', @@ -3978,7 +3979,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): def test_create_replication_success(self, mock_type): self.mock_object(replication.ReplicaCommonDriver, 'sync') self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': mock_type}) @@ -4081,7 +4082,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): mock_value, pool_data): self.driver.support_func = pool_data self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) self.mock_object(replication.ReplicaPairManager, '_delete_pair') @@ -4095,7 +4096,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.driver.support_func = pool_data self.mock_object(replication.ReplicaCommonDriver, 'split') self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) self.driver.delete_volume(self.replica_volume) @@ -4254,7 +4255,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): old_replica_client = driver.replica_client old_replica = driver.replica self.mock_object(replication.ReplicaCommonDriver, 'failover') - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + self.mock_object(common.HuaweiBaseDriver, '_get_volume_params', return_value={'replication_enabled': 'true'}) secondary_id, volumes_update, __ = driver.failover_host( None, [self.replica_volume], REPLICA_BACKEND_ID, []) @@ -4291,7 +4292,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + self.mock_object(common.HuaweiBaseDriver, '_get_volume_params', return_value={'replication_enabled': 'true'}) secondary_id, 
volumes_update, __ = driver.failover_host( None, [volume], REPLICA_BACKEND_ID, []) @@ -4310,7 +4311,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): self.mock_object(replication.ReplicaCommonDriver, 'enable') self.mock_object(replication.ReplicaCommonDriver, 'wait_replica_ready') self.mock_object(replication.ReplicaCommonDriver, 'failover') - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + self.mock_object(common.HuaweiBaseDriver, '_get_volume_params', return_value={'replication_enabled': 'true'}) volume = self.replica_volume @@ -4348,7 +4349,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase): @ddt.data({}, {'pair_id': TEST_PAIR_ID}) def test_failback_replica_volumes_invalid_drv_data(self, mock_drv_data): - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + self.mock_object(common.HuaweiBaseDriver, '_get_volume_params', return_value={'replication_enabled': 'true'}) volume = self.replica_volume @@ -4982,7 +4983,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(rest_client, 'RestClient') @mock.patch.object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) def test_retype_replication_volume_success(self, mock_get_type, @@ -5033,7 +5034,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): self.mock_object(mock_module, mock_func, side_effect=side_effect) self.mock_object(rest_client.RestClient, 'add_lun_to_partition') self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) retype = self.driver.retype(None, self.volume, @@ -5212,7 +5213,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): None) self.assertEqual(expected_pool_capacity, pool_capacity) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @@ -5245,7 +5246,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): json.loads(lun_info['provider_location'])) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', + @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @@ -5323,7 +5324,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): def test_create_snapshot_fail_hypermetro(self): self.mock_object( - huawei_driver.HuaweiBaseDriver, + common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': replica_hypermetro_specs}) self.assertRaises(exception.VolumeBackendAPIException, @@ -5383,7 +5384,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): @ddt.data([{"hypermetro": "true"}], []) def test_create_group_success(self, cg_type): - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_group_type', + self.mock_object(common.HuaweiBaseDriver, '_get_group_type', return_value=cg_type) self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @@ -5399,7 +5400,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): ) @ddt.unpack def test_create_group_from_src(self, snapshots, source_vols, tmp_snap): - self.mock_object(huawei_driver.HuaweiBaseDriver, 
'_get_group_type', + self.mock_object(common.HuaweiBaseDriver, '_get_group_type', return_value=[]) self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @@ -5431,7 +5432,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): def test_delete_group_success(self, cg_type): test_volumes = [self.volume] ctxt = context.get_admin_context() - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_group_type', + self.mock_object(common.HuaweiBaseDriver, '_get_group_type', return_value=cg_type) self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @@ -5439,7 +5440,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase): ctxt, self.group, test_volumes) self.assertEqual(fields.GroupStatus.DELETED, model['status']) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_group_type', + @mock.patch.object(common.HuaweiBaseDriver, '_get_group_type', return_value=[{"hypermetro": "true"}]) @mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata', return_value={'hypermetro_id': '3400a30d844d0007', diff --git a/cinder/volume/drivers/huawei/common.py b/cinder/volume/drivers/huawei/common.py new file mode 100644 index 00000000000..df863f35562 --- /dev/null +++ b/cinder/volume/drivers/huawei/common.py @@ -0,0 +1,1939 @@ +# Copyright (c) 2018 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
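+
+# NOTE: This module now holds the protocol-independent HuaweiBaseDriver
+# that used to live in huawei_driver.py.  The protocol drivers are
+# expected to keep subclassing it from there, roughly (a sketch; the
+# exact wiring stays in huawei_driver.py):
+#
+#     from cinder.volume.drivers.huawei import common
+#
+#     class HuaweiISCSIDriver(common.HuaweiBaseDriver, driver.ISCSIDriver):
+#         ...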
+
+import collections
+import math
+import re
+import six
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import units
+
+from cinder import context
+from cinder import exception
+from cinder.i18n import _
+from cinder import objects
+from cinder.objects import fields
+from cinder.volume import configuration
+from cinder.volume import driver
+from cinder.volume.drivers.huawei import constants
+from cinder.volume.drivers.huawei import huawei_conf
+from cinder.volume.drivers.huawei import huawei_utils
+from cinder.volume.drivers.huawei import hypermetro
+from cinder.volume.drivers.huawei import replication
+from cinder.volume.drivers.huawei import rest_client
+from cinder.volume.drivers.huawei import smartx
+from cinder.volume import utils as volume_utils
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+huawei_opts = [
+    cfg.StrOpt('cinder_huawei_conf_file',
+               default='/etc/cinder/cinder_huawei_conf.xml',
+               help='The configuration file for the Cinder Huawei driver.'),
+    cfg.StrOpt('hypermetro_devices',
+               default=None,
+               help='The remote device hypermetro will use.'),
+    cfg.StrOpt('metro_san_user',
+               default=None,
+               help='The remote metro device san user.'),
+    cfg.StrOpt('metro_san_password',
+               default=None,
+               secret=True,
+               help='The remote metro device san password.'),
+    cfg.StrOpt('metro_domain_name',
+               default=None,
+               help='The remote metro device domain name.'),
+    cfg.StrOpt('metro_san_address',
+               default=None,
+               help='The remote metro device request url.'),
+    cfg.StrOpt('metro_storage_pools',
+               default=None,
+               help='The remote metro device pool names.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(huawei_opts, group=configuration.SHARED_CONF_GROUP)
+
+snap_attrs = ('id', 'volume_id', 'volume', 'provider_location')
+Snapshot = collections.namedtuple('Snapshot', snap_attrs)
+vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata')
+Volume = collections.namedtuple('Volume', vol_attrs)
+
+
+class HuaweiBaseDriver(driver.VolumeDriver):
+
+    # ThirdPartySystems wiki page
+    CI_WIKI_NAME = "Huawei_volume_CI"
+
+    def __init__(self, *args, **kwargs):
+        super(HuaweiBaseDriver, self).__init__(*args, **kwargs)
+
+        if not self.configuration:
+            msg = _('Configuration is not found.')
+            raise exception.InvalidInput(reason=msg)
+
+        self.active_backend_id = kwargs.get('active_backend_id')
+
+        self.configuration.append_config_values(huawei_opts)
+        self.huawei_conf = huawei_conf.HuaweiConf(self.configuration)
+        self.support_func = None
+        self.metro_flag = False
+        self.replica = None
+
+    def check_func_support(self, obj_name):
+        try:
+            self.client._get_object_count(obj_name)
+            return True
+        except Exception:
+            return False
+
+    def get_local_and_remote_dev_conf(self):
+        self.loc_dev_conf = self.huawei_conf.get_local_device()
+
+        # Only one replication device is supported for now.
+        replica_devs = self.huawei_conf.get_replication_devices()
+        self.replica_dev_conf = replica_devs[0] if replica_devs else {}
+
+    def get_local_and_remote_client_conf(self):
+        if self.active_backend_id:
+            return self.replica_dev_conf, self.loc_dev_conf
+        else:
+            return self.loc_dev_conf, self.replica_dev_conf
+
+    def do_setup(self, context):
+        """Instantiate the common class and log in to the storage system."""
+        # Set Huawei private configuration into the Configuration object.
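+        # do_setup wires up as many as three REST clients below: the
+        # local array (self.client), an optional HyperMetro remote
+        # (self.rmt_client) and an optional replication remote
+        # (self.replica_client).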
+ self.huawei_conf.update_config_value() + + self.get_local_and_remote_dev_conf() + client_conf, replica_client_conf = ( + self.get_local_and_remote_client_conf()) + + # init local client + if not client_conf: + msg = _('Get active client failed.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + self.client = rest_client.RestClient(self.configuration, + **client_conf) + self.client.login() + + # init remote client + metro_san_address = self.configuration.safe_get("metro_san_address") + metro_san_user = self.configuration.safe_get("metro_san_user") + metro_san_password = self.configuration.safe_get("metro_san_password") + if metro_san_address and metro_san_user and metro_san_password: + metro_san_address = metro_san_address.split(";") + self.rmt_client = rest_client.RestClient(self.configuration, + metro_san_address, + metro_san_user, + metro_san_password) + + self.rmt_client.login() + self.metro_flag = True + else: + self.metro_flag = False + LOG.warning("Remote device not configured in cinder.conf") + # init replication manager + if replica_client_conf: + self.replica_client = rest_client.RestClient(self.configuration, + **replica_client_conf) + self.replica_client.try_login() + self.replica = replication.ReplicaPairManager(self.client, + self.replica_client, + self.configuration) + + def check_for_setup_error(self): + pass + + def _get_volume_stats(self, refresh=False): + """Get volume status and reload huawei config file.""" + self.huawei_conf.update_config_value() + stats = self.client.update_volume_stats() + stats = self.update_support_capability(stats) + backend_name = self.configuration.safe_get('volume_backend_name') + stats['volume_backend_name'] = backend_name or self.__class__.__name__ + stats['vendor_name'] = 'Huawei' + + if self.replica: + stats = self.replica.update_replica_capability(stats) + targets = [self.replica_dev_conf['backend_id']] + stats['replication_targets'] = targets + stats['replication_enabled'] = True + + return stats + + def update_support_capability(self, stats): + for pool in stats['pools']: + pool['smartpartition'] = ( + self.check_func_support("SMARTCACHEPARTITION")) + pool['smartcache'] = self.check_func_support("smartcachepool") + pool['QoS_support'] = self.check_func_support("ioclass") + pool['splitmirror'] = self.check_func_support("splitmirror") + pool['luncopy'] = self.check_func_support("luncopy") + pool['thick_provisioning_support'] = True + pool['thin_provisioning_support'] = True + pool['smarttier'] = True + pool['consistencygroup_support'] = True + pool['consistent_group_snapshot_enabled'] = True + + if self.configuration.san_product == "Dorado": + pool['smarttier'] = False + pool['thick_provisioning_support'] = False + + if self.metro_flag: + pool['hypermetro'] = self.check_func_support("HyperMetroPair") + + # assign the support function to global parameter. 
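+            # NOTE: support_func keeps the capability flags of the last
+            # reported pool so the rest of the driver can gate optional
+            # features, e.g. delete_volume() does:
+            #
+            #     if self.support_func.get('QoS_support'):
+            #         smartx.SmartQos(self.client).remove(qos_id, lun_id)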
+            self.support_func = pool
+
+        return stats
+
+    def _get_volume_type(self, volume):
+        volume_type = None
+        type_id = volume.volume_type_id
+        if type_id:
+            ctxt = context.get_admin_context()
+            volume_type = volume_types.get_volume_type(ctxt, type_id)
+
+        return volume_type
+
+    def _get_volume_params(self, volume_type):
+        """Return the parameters for creating the volume."""
+        specs = {}
+        if volume_type:
+            specs = dict(volume_type).get('extra_specs')
+
+        opts = self._get_volume_params_from_specs(specs)
+        return opts
+
+    def _get_volume_params_from_specs(self, specs):
+        """Return the volume parameters from extra specs."""
+        opts_capability = {
+            'smarttier': False,
+            'smartcache': False,
+            'smartpartition': False,
+            'thin_provisioning_support': False,
+            'thick_provisioning_support': False,
+            'hypermetro': False,
+            'replication_enabled': False,
+            'replication_type': 'async',
+        }
+
+        opts_value = {
+            'policy': None,
+            'partitionname': None,
+            'cachename': None,
+        }
+
+        opts_associate = {
+            'smarttier': 'policy',
+            'smartcache': 'cachename',
+            'smartpartition': 'partitionname',
+        }
+
+        opts = self._get_opts_from_specs(opts_capability,
+                                         opts_value,
+                                         opts_associate,
+                                         specs)
+        opts = smartx.SmartX().get_smartx_specs_opts(opts)
+        opts = replication.get_replication_opts(opts)
+        LOG.debug('volume opts %(opts)s.', {'opts': opts})
+        return opts
+
+    def _get_opts_from_specs(self, opts_capability, opts_value,
+                             opts_associate, specs):
+        """Get the well-defined extra specs."""
+        opts = {}
+        opts.update(opts_capability)
+        opts.update(opts_value)
+
+        for key, value in specs.items():
+            # Get the scope, if it is using scope format.
+            scope = None
+            key_split = key.split(':')
+            if len(key_split) > 2 and key_split[0] != "capabilities":
+                continue
+
+            if len(key_split) == 1:
+                key = key_split[0].lower()
+            else:
+                scope = key_split[0].lower()
+                key = key_split[1].lower()
+
+            if ((not scope or scope == 'capabilities')
+                    and key in opts_capability):
+                words = value.split()
+                if words and len(words) == 2 and words[0] in ('<is>', '<in>'):
+                    opts[key] = words[1].lower()
+                elif key == 'replication_type':
+                    LOG.error("Extra specs must be specified as "
+                              "replication_type='<in> sync' or "
+                              "'<in> async'.")
+                else:
+                    LOG.error("Extra specs must be specified as "
+                              "capabilities:%s='<is> True'.", key)
+
+            if ((scope in opts_capability)
+                    and (key in opts_value)
+                    and (scope in opts_associate)
+                    and (opts_associate[scope] == key)):
+                opts[key] = value
+
+        return opts
+
+    def _get_lun_params(self, volume, opts):
+        pool_name = volume_utils.extract_host(volume.host, level='pool')
+        params = {
+            'TYPE': '11',
+            'NAME': huawei_utils.encode_name(volume.id),
+            'PARENTTYPE': '216',
+            'PARENTID': self.client.get_pool_id(pool_name),
+            'DESCRIPTION': volume.name,
+            'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type),
+            'CAPACITY': huawei_utils.get_volume_size(volume),
+            'WRITEPOLICY': self.configuration.lun_write_type,
+            'PREFETCHPOLICY': self.configuration.lun_prefetch_type,
+            'PREFETCHVALUE': self.configuration.lun_prefetch_value,
+            'DATATRANSFERPOLICY':
+                opts.get('policy', self.configuration.lun_policy),
+            'READCACHEPOLICY': self.configuration.lun_read_cache_policy,
+            'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy, }
+
+        LOG.info('volume: %(volume)s, lun params: %(params)s.',
+                 {'volume': volume.id, 'params': params})
+        return params
+
+    def _create_volume(self, lun_params):
+        # Create LUN on the array.
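+        # Only the ID and WWN of the new LUN are kept here; create_volume()
+        # later flattens this metadata dict into provider_location via
+        # huawei_utils.to_string(**metadata).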
+ lun_info = self.client.create_lun(lun_params) + metadata = {'huawei_lun_id': lun_info['ID'], + 'huawei_lun_wwn': lun_info['WWN']} + model_update = {'metadata': metadata} + + return lun_info, model_update + + def _create_base_type_volume(self, opts, volume, volume_type): + """Create volume and add some base type. + + Base type is the service type which doesn't conflict with the other. + """ + lun_params = self._get_lun_params(volume, opts) + lun_info, model_update = self._create_volume(lun_params) + lun_id = lun_info['ID'] + + try: + qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) + if qos: + if not self.support_func.get('QoS_support'): + msg = (_("Can't support qos on the array")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + smart_qos = smartx.SmartQos(self.client) + smart_qos.add(qos, lun_id) + + smartpartition = smartx.SmartPartition(self.client) + smartpartition.add(opts, lun_id) + + smartcache = smartx.SmartCache(self.client) + smartcache.add(opts, lun_id) + except Exception as err: + self._delete_lun_with_check(lun_id) + msg = _('Create volume error. Because %s.') % six.text_type(err) + raise exception.VolumeBackendAPIException(data=msg) + + return lun_params, lun_info, model_update + + def _add_extend_type_to_volume(self, opts, lun_params, lun_info, + model_update): + """Add the extend type. + + Extend type is the service type which may conflict with the other. + So add it after those services. + """ + lun_id = lun_info['ID'] + if opts.get('hypermetro') == 'true': + metro = hypermetro.HuaweiHyperMetro(self.client, + self.rmt_client, + self.configuration) + try: + metro_info = metro.create_hypermetro(lun_id, lun_params) + model_update['metadata'].update(metro_info) + except exception.VolumeBackendAPIException as err: + LOG.error('Create hypermetro error: %s.', err) + self._delete_lun_with_check(lun_id) + raise + + if opts.get('replication_enabled') == 'true': + replica_model = opts.get('replication_type') + try: + replica_info = self.replica.create_replica(lun_info, + replica_model) + model_update.update(replica_info) + except Exception as err: + LOG.exception('Create replication volume error.') + self._delete_lun_with_check(lun_id) + raise + + return model_update + + def create_volume(self, volume): + """Create a volume.""" + volume_type = self._get_volume_type(volume) + opts = self._get_volume_params(volume_type) + if (opts.get('hypermetro') == 'true' + and opts.get('replication_enabled') == 'true'): + err_msg = _("Hypermetro and Replication can not be " + "used in the same volume_type.") + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + lun_params, lun_info, model_update = ( + self._create_base_type_volume(opts, volume, volume_type)) + + model_update = self._add_extend_type_to_volume(opts, lun_params, + lun_info, model_update) + + model_update['provider_location'] = huawei_utils.to_string( + **model_update.pop('metadata')) + + return model_update + + def _delete_volume(self, volume): + lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) + if not lun_id: + return + + lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id) + if lun_group_ids and len(lun_group_ids) == 1: + self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id) + + self.client.delete_lun(lun_id) + + def delete_volume(self, volume): + """Delete a volume. + + Three steps: + Firstly, remove associate from lungroup. + Secondly, remove associate from QoS policy. + Thirdly, remove the lun. 
+ """ + lun_id = self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_WARN) + if not lun_id: + return + + if self.support_func.get('QoS_support'): + qos_id = self.client.get_qosid_by_lunid(lun_id) + if qos_id: + smart_qos = smartx.SmartQos(self.client) + smart_qos.remove(qos_id, lun_id) + + metadata = huawei_utils.get_lun_metadata(volume) + if metadata.get('hypermetro_id'): + metro = hypermetro.HuaweiHyperMetro(self.client, + self.rmt_client, + self.configuration) + try: + metro.delete_hypermetro(volume) + except exception.VolumeBackendAPIException as err: + LOG.error('Delete hypermetro error: %s.', err) + # We have checked the LUN WWN above, + # no need to check again here. + self._delete_volume(volume) + raise + + # Delete a replication volume + replica_data = volume.replication_driver_data + if replica_data: + try: + self.replica.delete_replica(volume) + except exception.VolumeBackendAPIException as err: + with excutils.save_and_reraise_exception(): + LOG.exception("Delete replication error.") + self._delete_volume(volume) + + self._delete_volume(volume) + + def _delete_lun_with_check(self, lun_id, lun_wwn=None): + if not lun_id: + return + + if self.client.check_lun_exist(lun_id, lun_wwn): + if self.support_func.get('QoS_support'): + qos_id = self.client.get_qosid_by_lunid(lun_id) + if qos_id: + smart_qos = smartx.SmartQos(self.client) + smart_qos.remove(qos_id, lun_id) + + self.client.delete_lun(lun_id) + + def _is_lun_migration_complete(self, src_id, dst_id): + result = self.client.get_lun_migration_task() + found_migration_task = False + if 'data' not in result: + return False + + for item in result['data']: + if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): + found_migration_task = True + if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']: + return True + if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']: + msg = _("Lun migration error.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not found_migration_task: + err_msg = _("Cannot find migration task.") + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + return False + + def _is_lun_migration_exist(self, src_id, dst_id): + try: + result = self.client.get_lun_migration_task() + except Exception: + LOG.error("Get LUN migration error.") + return False + + if 'data' in result: + for item in result['data']: + if (src_id == item['PARENTID'] + and dst_id == item['TARGETLUNID']): + return True + return False + + def _migrate_lun(self, src_id, dst_id): + try: + self.client.create_lun_migration(src_id, dst_id) + + def _is_lun_migration_complete(): + return self._is_lun_migration_complete(src_id, dst_id) + + wait_interval = constants.MIGRATION_WAIT_INTERVAL + huawei_utils.wait_for_condition(_is_lun_migration_complete, + wait_interval, + self.configuration.lun_timeout) + # Clean up if migration failed. 
+ except Exception as ex: + raise exception.VolumeBackendAPIException(data=ex) + finally: + if self._is_lun_migration_exist(src_id, dst_id): + self.client.delete_lun_migration(src_id, dst_id) + self._delete_lun_with_check(dst_id) + + LOG.debug("Migrate lun %s successfully.", src_id) + return True + + def _wait_volume_ready(self, lun_id): + wait_interval = self.configuration.lun_ready_wait_interval + + def _volume_ready(): + result = self.client.get_lun_info(lun_id) + if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH + and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): + return True + return False + + huawei_utils.wait_for_condition(_volume_ready, + wait_interval, + wait_interval * 10) + + def _get_original_status(self, volume): + return 'in-use' if volume.volume_attachment else 'available' + + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status=None): + orig_lun_name = huawei_utils.encode_name(volume.id) + new_lun_id, lun_wwn = huawei_utils.get_volume_lun_id( + self.client, new_volume) + new_metadata = huawei_utils.get_lun_metadata(new_volume) + model_update = { + 'provider_location': huawei_utils.to_string(**new_metadata), + } + + try: + self.client.rename_lun(new_lun_id, orig_lun_name) + except exception.VolumeBackendAPIException: + LOG.error('Unable to rename lun %s on array.', new_lun_id) + model_update['_name_id'] = new_volume.name_id + else: + LOG.debug("Renamed lun %(id)s to %(name)s successfully.", + {'id': new_lun_id, + 'name': orig_lun_name}) + model_update['_name_id'] = None + + return model_update + + def migrate_volume(self, ctxt, volume, host, new_type=None): + """Migrate a volume within the same array.""" + self._check_volume_exist_on_array(volume, + constants.VOLUME_NOT_EXISTS_RAISE) + + # NOTE(jlc): Replication volume can't migrate. But retype + # can remove replication relationship first then do migrate. + # So don't add this judgement into _check_migration_valid(). + volume_type = self._get_volume_type(volume) + opts = self._get_volume_params(volume_type) + if opts.get('replication_enabled') == 'true': + return (False, None) + + return self._migrate_volume(volume, host, new_type) + + def _check_migration_valid(self, host, volume): + if 'pool_name' not in host['capabilities']: + return False + + target_device = host['capabilities']['location_info'] + + # Source and destination should be on same array. + if target_device != self.client.device_id: + return False + + # Same protocol should be used if volume is in-use. + protocol = self.configuration.san_protocol + if (host['capabilities']['storage_protocol'] != protocol + and self._get_original_status(volume) == 'in-use'): + return False + + pool_name = host['capabilities']['pool_name'] + if len(pool_name) == 0: + return False + + return True + + def _migrate_volume(self, volume, host, new_type=None): + if not self._check_migration_valid(host, volume): + return (False, None) + + type_id = volume.volume_type_id + + volume_type = None + if type_id: + volume_type = volume_types.get_volume_type(None, type_id) + + pool_name = host['capabilities']['pool_name'] + pools = self.client.get_all_pools() + pool_info = self.client.get_pool_info(pool_name, pools) + dst_volume_name = six.text_type(uuid.uuid4()) + + src_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) + opts = None + qos = None + if new_type: + # If new type exists, use new type. 
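+            # The new type's extra specs and QoS take precedence; the
+            # fallbacks below only apply when no new type is given.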
+            new_specs = new_type['extra_specs']
+            opts = self._get_volume_params_from_specs(new_specs)
+            if 'LUNType' not in opts:
+                opts['LUNType'] = self.configuration.lun_type
+
+            qos = smartx.SmartQos.get_qos_by_volume_type(new_type)
+        elif volume_type:
+            qos = smartx.SmartQos.get_qos_by_volume_type(volume_type)
+
+        if not opts:
+            opts = self._get_volume_params(volume_type)
+
+        lun_info = self.client.get_lun_info(src_id)
+
+        if opts['policy']:
+            policy = opts['policy']
+        else:
+            policy = lun_info.get('DATATRANSFERPOLICY',
+                                  self.configuration.lun_policy)
+
+        lun_params = {
+            'NAME': huawei_utils.encode_name(dst_volume_name),
+            'PARENTID': pool_info['ID'],
+            'DESCRIPTION': lun_info['DESCRIPTION'],
+            'ALLOCTYPE': opts.get('LUNType', lun_info['ALLOCTYPE']),
+            'CAPACITY': lun_info['CAPACITY'],
+            'WRITEPOLICY': lun_info['WRITEPOLICY'],
+            'PREFETCHPOLICY': lun_info['PREFETCHPOLICY'],
+            'PREFETCHVALUE': lun_info['PREFETCHVALUE'],
+            'DATATRANSFERPOLICY': policy,
+            'READCACHEPOLICY': lun_info.get(
+                'READCACHEPOLICY',
+                self.configuration.lun_read_cache_policy),
+            'WRITECACHEPOLICY': lun_info.get(
+                'WRITECACHEPOLICY',
+                self.configuration.lun_write_cache_policy),
+            'OWNINGCONTROLLER': lun_info['OWNINGCONTROLLER'], }
+
+        # Iterate over a copy of the keys: entries are deleted during
+        # the loop.
+        for item in list(lun_params.keys()):
+            if lun_params.get(item) == '--':
+                del lun_params[item]
+
+        lun_info = self.client.create_lun(lun_params)
+        lun_id = lun_info['ID']
+
+        if qos:
+            LOG.info('QoS: %s.', qos)
+            smart_qos = smartx.SmartQos(self.client)
+            smart_qos.add(qos, lun_id)
+        if opts:
+            smartpartition = smartx.SmartPartition(self.client)
+            smartpartition.add(opts, lun_id)
+            smartcache = smartx.SmartCache(self.client)
+            smartcache.add(opts, lun_id)
+
+        dst_id = lun_info['ID']
+        self._wait_volume_ready(dst_id)
+        moved = self._migrate_lun(src_id, dst_id)
+
+        return moved, {}
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create a volume from a snapshot.
+
+        We use LUNcopy to copy a new volume from the snapshot.
+        The time needed increases as volume size does.
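+        (LUNcopy runs inside the array, so the copy does not pass data
+        through the host.)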
+ """ + volume_type = self._get_volume_type(volume) + opts = self._get_volume_params(volume_type) + if (opts.get('hypermetro') == 'true' + and opts.get('replication_enabled') == 'true'): + msg = _("Hypermetro and Replication can not be " + "used in the same volume_type.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + snapshot_id = huawei_utils.get_snapshot_id(self.client, snapshot) + if snapshot_id is None: + msg = _('create_volume_from_snapshot: Snapshot %(name)s ' + 'does not exist.') % {'name': snapshot.id} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + lun_params, lun_info, model_update = ( + self._create_base_type_volume(opts, volume, volume_type)) + + tgt_lun_id = lun_info['ID'] + luncopy_name = huawei_utils.encode_name(volume.id) + LOG.info( + 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, ' + 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.', + {'src_lun_id': snapshot_id, + 'tgt_lun_id': tgt_lun_id, + 'copy_name': luncopy_name}) + + wait_interval = self.configuration.lun_ready_wait_interval + + def _volume_ready(): + result = self.client.get_lun_info(tgt_lun_id) + + if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH + and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): + return True + return False + + huawei_utils.wait_for_condition(_volume_ready, + wait_interval, + wait_interval * 10) + + self._copy_volume(volume, luncopy_name, + snapshot_id, tgt_lun_id) + + # NOTE(jlc): Actually, we just only support replication here right + # now, not hypermetro. + model_update = self._add_extend_type_to_volume(opts, lun_params, + lun_info, model_update) + model_update['provider_location'] = huawei_utils.to_string( + **model_update.pop('metadata')) + + return model_update + + def create_cloned_volume(self, volume, src_vref): + """Clone a new volume from an existing volume.""" + self._check_volume_exist_on_array(src_vref, + constants.VOLUME_NOT_EXISTS_RAISE) + + # Form the snapshot structure. + snapshot = Snapshot(id=uuid.uuid4().__str__(), + volume_id=src_vref.id, + volume=src_vref, + provider_location=None) + + # Create snapshot. + self.create_snapshot(snapshot) + + try: + # Create volume from snapshot. + model_update = self.create_volume_from_snapshot(volume, snapshot) + finally: + try: + # Delete snapshot. + self.delete_snapshot(snapshot) + except exception.VolumeBackendAPIException: + LOG.warning( + 'Failure deleting the snapshot %(snapshot_id)s ' + 'of volume %(volume_id)s.', + {'snapshot_id': snapshot.id, + 'volume_id': src_vref.id},) + + return model_update + + def _check_volume_exist_on_array(self, volume, action): + """Check whether the volume exists on the array. + + If the volume exists on the array, return the LUN ID. + If not exists, raise or log warning. 
+ """ + lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) + if not lun_id: + msg = _("Volume %s does not exist on the array.") % volume.id + if action == constants.VOLUME_NOT_EXISTS_WARN: + LOG.warning(msg) + if action == constants.VOLUME_NOT_EXISTS_RAISE: + raise exception.VolumeBackendAPIException(data=msg) + return + + if not lun_wwn: + LOG.debug("No LUN WWN recorded for volume %s.", volume.id) + + if not self.client.check_lun_exist(lun_id, lun_wwn): + msg = (_("Volume %s does not exist on the array.") + % volume.id) + if action == constants.VOLUME_NOT_EXISTS_WARN: + LOG.warning(msg) + if action == constants.VOLUME_NOT_EXISTS_RAISE: + raise exception.VolumeBackendAPIException(data=msg) + return + return lun_id + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + lun_id = self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_RAISE) + + volume_type = self._get_volume_type(volume) + opts = self._get_volume_params(volume_type) + if opts.get('replication_enabled') == 'true': + msg = (_("Can't extend replication volume, volume: %(id)s") % + {"id": volume.id}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + lun_info = self.client.get_lun_info(lun_id) + old_size = int(lun_info.get('CAPACITY')) + + new_size = int(new_size) * units.Gi / 512 + + if new_size == old_size: + LOG.info("New size is equal to the real size from backend" + " storage, no need to extend." + " realsize: %(oldsize)s, newsize: %(newsize)s.", + {'oldsize': old_size, + 'newsize': new_size}) + return + if new_size < old_size: + msg = (_("New size should be bigger than the real size from " + "backend storage." + " realsize: %(oldsize)s, newsize: %(newsize)s."), + {'oldsize': old_size, + 'newsize': new_size}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.info('Extend volume: %(id)s, oldsize: %(oldsize)s, ' + 'newsize: %(newsize)s.', + {'id': volume.id, + 'oldsize': old_size, + 'newsize': new_size}) + + self.client.extend_lun(lun_id, new_size) + + def _create_snapshot_base(self, snapshot): + volume = snapshot.volume + if not volume: + msg = _("Can't get volume id from snapshot, snapshot: %(id)s" + ) % {'id': snapshot.id} + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) + snapshot_name = huawei_utils.encode_name(snapshot.id) + snapshot_description = snapshot.id + snapshot_info = self.client.create_snapshot(lun_id, + snapshot_name, + snapshot_description) + snapshot_id = snapshot_info['ID'] + return snapshot_id + + def create_snapshot(self, snapshot): + snapshot_id = self._create_snapshot_base(snapshot) + try: + self.client.activate_snapshot(snapshot_id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error("Active snapshot %s failed, now deleting it.", + snapshot_id) + self.client.delete_snapshot(snapshot_id) + + snapshot_info = self.client.get_snapshot_info(snapshot_id) + location = huawei_utils.to_string( + huawei_snapshot_id=snapshot_id, + huawei_snapshot_wwn=snapshot_info['WWN']) + return {'provider_location': location} + + def delete_snapshot(self, snapshot): + LOG.info('Delete snapshot %s.', snapshot.id) + + snapshot_id = huawei_utils.get_snapshot_id(self.client, snapshot) + if snapshot_id and self.client.check_snapshot_exist(snapshot_id): + self.client.stop_snapshot(snapshot_id) + self.client.delete_snapshot(snapshot_id) + else: + LOG.warning("Can't find snapshot on the array.") + + def 
retype(self, ctxt, volume, new_type, diff, host): + """Convert the volume to be of the new type.""" + LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, " + "diff=%(diff)s, host=%(host)s.", {'id': volume.id, + 'new_type': new_type, + 'diff': diff, + 'host': host}) + self._check_volume_exist_on_array( + volume, constants.VOLUME_NOT_EXISTS_RAISE) + + # Check what changes are needed + migration, change_opts, lun_id = self.determine_changes_when_retype( + volume, new_type, host) + + model_update = {} + replica_enabled_change = change_opts.get('replication_enabled') + replica_type_change = change_opts.get('replication_type') + if replica_enabled_change and replica_enabled_change[0] == 'true': + try: + self.replica.delete_replica(volume) + model_update.update({'replication_status': 'disabled', + 'replication_driver_data': None}) + except exception.VolumeBackendAPIException: + LOG.exception('Retype volume error. ' + 'Delete replication failed.') + return False + + try: + if migration: + LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with " + "change %(change_opts)s.", + {"lun_id": lun_id, "change_opts": change_opts}) + if not self._migrate_volume(volume, host, new_type): + LOG.warning("Storage-assisted migration failed during " + "retype.") + return False + else: + # Modify lun to change policy + self.modify_lun(lun_id, change_opts) + except exception.VolumeBackendAPIException: + LOG.exception('Retype volume error.') + return False + + if replica_enabled_change and replica_enabled_change[1] == 'true': + try: + # If replica_enabled_change is not None, the + # replica_type_change won't be None. See function + # determine_changes_when_retype. + lun_info = self.client.get_lun_info(lun_id) + replica_info = self.replica.create_replica( + lun_info, replica_type_change[1]) + model_update.update(replica_info) + except exception.VolumeBackendAPIException: + LOG.exception('Retype volume error. 
' + 'Create replication failed.') + return False + + return (True, model_update) + + def modify_lun(self, lun_id, change_opts): + if change_opts.get('partitionid'): + old, new = change_opts['partitionid'] + old_id = old[0] + old_name = old[1] + new_id = new[0] + new_name = new[1] + if old_id: + self.client.remove_lun_from_partition(lun_id, old_id) + if new_id: + self.client.add_lun_to_partition(lun_id, new_id) + LOG.info("Retype LUN(id: %(lun_id)s) smartpartition from " + "(name: %(old_name)s, id: %(old_id)s) to " + "(name: %(new_name)s, id: %(new_id)s) success.", + {"lun_id": lun_id, + "old_id": old_id, "old_name": old_name, + "new_id": new_id, "new_name": new_name}) + + if change_opts.get('cacheid'): + old, new = change_opts['cacheid'] + old_id = old[0] + old_name = old[1] + new_id = new[0] + new_name = new[1] + if old_id: + self.client.remove_lun_from_cache(lun_id, old_id) + if new_id: + self.client.add_lun_to_cache(lun_id, new_id) + LOG.info("Retype LUN(id: %(lun_id)s) smartcache from " + "(name: %(old_name)s, id: %(old_id)s) to " + "(name: %(new_name)s, id: %(new_id)s) successfully.", + {'lun_id': lun_id, + 'old_id': old_id, "old_name": old_name, + 'new_id': new_id, "new_name": new_name}) + + if change_opts.get('policy'): + old_policy, new_policy = change_opts['policy'] + self.client.change_lun_smarttier(lun_id, new_policy) + LOG.info("Retype LUN(id: %(lun_id)s) smarttier policy from " + "%(old_policy)s to %(new_policy)s success.", + {'lun_id': lun_id, + 'old_policy': old_policy, + 'new_policy': new_policy}) + + if change_opts.get('qos'): + old_qos, new_qos = change_opts['qos'] + old_qos_id = old_qos[0] + old_qos_value = old_qos[1] + if old_qos_id: + smart_qos = smartx.SmartQos(self.client) + smart_qos.remove(old_qos_id, lun_id) + if new_qos: + smart_qos = smartx.SmartQos(self.client) + smart_qos.add(new_qos, lun_id) + LOG.info("Retype LUN(id: %(lun_id)s) smartqos from " + "%(old_qos_value)s to %(new_qos)s success.", + {'lun_id': lun_id, + 'old_qos_value': old_qos_value, + 'new_qos': new_qos}) + + def get_lun_specs(self, lun_id): + lun_opts = { + 'policy': None, + 'partitionid': None, + 'cacheid': None, + 'LUNType': None, + } + + lun_info = self.client.get_lun_info(lun_id) + lun_opts['LUNType'] = int(lun_info['ALLOCTYPE']) + if lun_info.get('DATATRANSFERPOLICY'): + lun_opts['policy'] = lun_info['DATATRANSFERPOLICY'] + if lun_info.get('SMARTCACHEPARTITIONID'): + lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID'] + if lun_info.get('CACHEPARTITIONID'): + lun_opts['partitionid'] = lun_info['CACHEPARTITIONID'] + + return lun_opts + + def _check_capability_support(self, new_opts, new_type): + new_cache_name = new_opts['cachename'] + if new_cache_name: + if not self.support_func.get('smartcache'): + msg = (_( + "Can't support cache on the array, cache name is: " + "%(name)s.") % {'name': new_cache_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + new_partition_name = new_opts['partitionname'] + if new_partition_name: + if not self.support_func.get('smartpartition'): + msg = (_( + "Can't support partition on the array, partition name is: " + "%(name)s.") % {'name': new_partition_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if new_opts['policy']: + if (not self.support_func.get('smarttier') + and new_opts['policy'] != '0'): + msg = (_("Can't support tier on the array.")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type) + if not 
self.support_func.get('QoS_support'): + if new_qos: + msg = (_("Can't support qos on the array.")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _check_needed_changes(self, lun_id, old_opts, new_opts, + change_opts, new_type): + new_cache_id = None + new_cache_name = new_opts['cachename'] + if new_cache_name: + if self.support_func.get('smartcache'): + new_cache_id = self.client.get_cache_id_by_name( + new_cache_name) + if new_cache_id is None: + msg = (_( + "Can't find cache name on the array, cache name is: " + "%(name)s.") % {'name': new_cache_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + new_partition_id = None + new_partition_name = new_opts['partitionname'] + if new_partition_name: + if self.support_func.get('smartpartition'): + new_partition_id = self.client.get_partition_id_by_name( + new_partition_name) + if new_partition_id is None: + msg = (_( + "Can't find partition name on the array, partition name " + "is: %(name)s.") % {'name': new_partition_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # smarttier + if old_opts['policy'] != new_opts['policy']: + if not (old_opts['policy'] == '--' + and new_opts['policy'] is None): + change_opts['policy'] = (old_opts['policy'], + new_opts['policy']) + + # smartcache + old_cache_id = old_opts['cacheid'] + if old_cache_id == '--': + old_cache_id = None + if old_cache_id != new_cache_id: + old_cache_name = None + if self.support_func.get('smartcache'): + if old_cache_id: + cache_info = self.client.get_cache_info_by_id( + old_cache_id) + old_cache_name = cache_info['NAME'] + change_opts['cacheid'] = ([old_cache_id, old_cache_name], + [new_cache_id, new_cache_name]) + + # smartpartition + old_partition_id = old_opts['partitionid'] + if old_partition_id == '--': + old_partition_id = None + if old_partition_id != new_partition_id: + old_partition_name = None + if self.support_func.get('smartpartition'): + if old_partition_id: + partition_info = self.client.get_partition_info_by_id( + old_partition_id) + old_partition_name = partition_info['NAME'] + + change_opts['partitionid'] = ([old_partition_id, + old_partition_name], + [new_partition_id, + new_partition_name]) + + # smartqos + new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type) + if not self.support_func.get('QoS_support'): + if new_qos: + msg = (_("Can't support qos on the array.")) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + else: + old_qos_id = self.client.get_qosid_by_lunid(lun_id) + old_qos = self._get_qos_specs_from_array(old_qos_id) + if old_qos != new_qos: + change_opts['qos'] = ([old_qos_id, old_qos], new_qos) + + return change_opts + + def determine_changes_when_retype(self, volume, new_type, host): + migration = False + change_opts = { + 'policy': None, + 'partitionid': None, + 'cacheid': None, + 'qos': None, + 'host': None, + 'LUNType': None, + 'replication_enabled': None, + 'replication_type': None, + } + + lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) + old_opts = self.get_lun_specs(lun_id) + + new_specs = new_type['extra_specs'] + new_opts = self._get_volume_params_from_specs(new_specs) + + if 'LUNType' not in new_opts: + new_opts['LUNType'] = self.configuration.lun_type + + if volume.host != host['host']: + migration = True + change_opts['host'] = (volume.host, host['host']) + if old_opts['LUNType'] != new_opts['LUNType']: + migration = True + change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType']) + + 
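+        # Replication settings are tracked as (old, new) tuples so that
+        # retype() can tell whether to delete an existing pair first
+        # (old == 'true') or create a new one afterwards (new == 'true').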
volume_type = self._get_volume_type(volume) + volume_opts = self._get_volume_params(volume_type) + if (volume_opts['replication_enabled'] == 'true' + or new_opts['replication_enabled'] == 'true'): + # If replication_enabled changes, + # then replication_type in change_opts will be set. + change_opts['replication_enabled'] = ( + volume_opts['replication_enabled'], + new_opts['replication_enabled']) + + change_opts['replication_type'] = (volume_opts['replication_type'], + new_opts['replication_type']) + + change_opts = self._check_needed_changes(lun_id, old_opts, new_opts, + change_opts, new_type) + + LOG.debug("Determine changes when retype. Migration: " + "%(migration)s, change_opts: %(change_opts)s.", + {'migration': migration, 'change_opts': change_opts}) + return migration, change_opts, lun_id + + def _get_qos_specs_from_array(self, qos_id): + qos = {} + qos_info = {} + if qos_id: + qos_info = self.client.get_qos_info(qos_id) + + for key, value in qos_info.items(): + key = key.upper() + if key in constants.QOS_KEYS: + if key == 'LATENCY' and value == '0': + continue + else: + qos[key] = value + return qos + + def create_export(self, context, volume, connector): + """Export a volume.""" + pass + + def ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + def create_export_snapshot(self, context, snapshot, connector): + """Export a snapshot.""" + pass + + def remove_export_snapshot(self, context, snapshot): + """Remove an export for a snapshot.""" + pass + + def _copy_volume(self, volume, copy_name, src_lun, tgt_lun): + metadata = huawei_utils.get_volume_metadata(volume) + copyspeed = metadata.get('copyspeed') + luncopy_id = self.client.create_luncopy(copy_name, + src_lun, + tgt_lun, + copyspeed) + wait_interval = self.configuration.lun_copy_wait_interval + + try: + self.client.start_luncopy(luncopy_id) + + def _luncopy_complete(): + luncopy_info = self.client.get_luncopy_info(luncopy_id) + if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY: + # luncopy_info['status'] means for the running status of + # the luncopy. If luncopy_info['status'] is equal to '40', + # this luncopy is completely ready. + return True + elif luncopy_info['state'] != constants.STATUS_HEALTH: + # luncopy_info['state'] means for the healthy status of the + # luncopy. If luncopy_info['state'] is not equal to '1', + # this means that an error occurred during the LUNcopy + # operation and we should abort it. + err_msg = (_( + 'An error occurred during the LUNcopy operation. ' + 'LUNcopy name: %(luncopyname)s. ' + 'LUNcopy status: %(luncopystatus)s. ' + 'LUNcopy state: %(luncopystate)s.') + % {'luncopyname': luncopy_id, + 'luncopystatus': luncopy_info['status'], + 'luncopystate': luncopy_info['state']},) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + huawei_utils.wait_for_condition(_luncopy_complete, + wait_interval, + self.configuration.lun_timeout) + + except Exception: + with excutils.save_and_reraise_exception(): + self.client.delete_luncopy(luncopy_id) + self.delete_volume(volume) + + self.client.delete_luncopy(luncopy_id) + + def _check_lun_valid_for_manage(self, lun_info, external_ref): + lun_id = lun_info.get('ID') + lun_name = lun_info.get('NAME') + + # Check whether the LUN is already in LUN group. + if lun_info.get('ISADD2LUNGROUP') == 'true': + msg = (_("Can't import LUN %s to Cinder. 
+    def _check_lun_valid_for_manage(self, lun_info, external_ref):
+        lun_id = lun_info.get('ID')
+        lun_name = lun_info.get('NAME')
+
+        # Check whether the LUN is already in a LUN group.
+        if lun_info.get('ISADD2LUNGROUP') == 'true':
+            msg = (_("Can't import LUN %s to Cinder. Already exists in a LUN "
+                     "group.") % lun_id)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
+        # Check whether the LUN is Normal.
+        if lun_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH:
+            msg = _("Can't import LUN %s to Cinder. LUN status is not "
+                    "normal.") % lun_id
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
+        # Check whether the LUN exists in a HyperMetroPair.
+        if self.support_func.get('hypermetro'):
+            try:
+                hypermetro_pairs = self.client.get_hypermetro_pairs()
+            except exception.VolumeBackendAPIException:
+                hypermetro_pairs = []
+                LOG.debug("Can't get hypermetro info, pass the check.")
+
+            for pair in hypermetro_pairs:
+                if pair.get('LOCALOBJID') == lun_id:
+                    msg = (_("Can't import LUN %s to Cinder. Already exists "
+                             "in a HyperMetroPair.") % lun_id)
+                    raise exception.ManageExistingInvalidReference(
+                        existing_ref=external_ref, reason=msg)
+
+        # Check whether the LUN exists in a SplitMirror.
+        if self.support_func.get('splitmirror'):
+            try:
+                split_mirrors = self.client.get_split_mirrors()
+            except exception.VolumeBackendAPIException as ex:
+                if re.search('License is unavailable', ex.msg):
+                    # Can't check whether the LUN has SplitMirror with it,
+                    # just pass the check and log it.
+                    split_mirrors = []
+                    LOG.warning('No license for SplitMirror.')
+                else:
+                    msg = _("Failed to get SplitMirror.")
+                    raise exception.VolumeBackendAPIException(data=msg)
+
+            for mirror in split_mirrors:
+                try:
+                    target_luns = self.client.get_target_luns(mirror.get('ID'))
+                except exception.VolumeBackendAPIException:
+                    msg = _("Failed to get target LUN of SplitMirror.")
+                    raise exception.VolumeBackendAPIException(data=msg)
+
+                if ((mirror.get('PRILUNID') == lun_id)
+                        or (lun_id in target_luns)):
+                    msg = (_("Can't import LUN %s to Cinder. Already exists "
+                             "in a SplitMirror.") % lun_id)
+                    raise exception.ManageExistingInvalidReference(
+                        existing_ref=external_ref, reason=msg)
+
+        # Check whether the LUN exists in a migration task.
+        try:
+            migration_tasks = self.client.get_migration_task()
+        except exception.VolumeBackendAPIException as ex:
+            if re.search('License is unavailable', ex.msg):
+                # Can't check whether the LUN has migration task with it,
+                # just pass the check and log it.
+                migration_tasks = []
+                LOG.warning('No license for migration.')
+            else:
+                msg = _("Failed to get migration task.")
+                raise exception.VolumeBackendAPIException(data=msg)
+
+        for migration in migration_tasks:
+            if lun_id in (migration.get('PARENTID'),
+                          migration.get('TARGETLUNID')):
+                msg = (_("Can't import LUN %s to Cinder. Already exists in a "
+                         "migration task.") % lun_id)
+                raise exception.ManageExistingInvalidReference(
+                    existing_ref=external_ref, reason=msg)
+
+        # Check whether the LUN exists in a LUN copy task.
+        if self.support_func.get('luncopy'):
+            lun_copy = lun_info.get('LUNCOPYIDS')
+            if lun_copy and lun_copy[1:-1]:
+                msg = (_("Can't import LUN %s to Cinder. Already exists in "
+                         "a LUN copy task.") % lun_id)
+                raise exception.ManageExistingInvalidReference(
+                    existing_ref=external_ref, reason=msg)
+
+        # Check whether the LUN exists in a remote replication task.
+        rmt_replication = lun_info.get('REMOTEREPLICATIONIDS')
+        if rmt_replication and rmt_replication[1:-1]:
+            msg = (_("Can't import LUN %s to Cinder. Already exists in "
+                     "a remote replication task.") % lun_id)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
+        # Check whether the LUN exists in a LUN mirror.
+        if self.client.is_lun_in_mirror(lun_name):
+            msg = (_("Can't import LUN %s to Cinder. Already exists in "
+                     "a LUN mirror.") % lun_name)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
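+    # Note on the two "[1:-1]" checks above: LUNCOPYIDS and
+    # REMOTEREPLICATIONIDS appear to be returned by the REST API as bracketed
+    # list strings (e.g. "[1,2]", or "[]" when empty -- an assumption inferred
+    # from this slicing, not verified against array documentation), so
+    # stripping the first and last characters leaves a non-empty string only
+    # when at least one task references the LUN.
+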
+    def manage_existing(self, volume, external_ref):
+        """Manage an existing volume on the backend storage."""
+        # Check whether the LUN belongs to the specified pool.
+        pool = volume_utils.extract_host(volume.host, 'pool')
+        LOG.debug("Pool specified is: %s.", pool)
+        lun_info = self._get_lun_info_by_ref(external_ref)
+        lun_id = lun_info.get('ID')
+        description = lun_info.get('DESCRIPTION', '')
+        if len(description) <= (
+                constants.MAX_VOL_DESCRIPTION - len(volume.name) - 1):
+            description = volume.name + ' ' + description
+
+        lun_pool = lun_info.get('PARENTNAME')
+        LOG.debug("Storage pool of existing LUN %(lun)s is %(pool)s.",
+                  {"lun": lun_id, "pool": lun_pool})
+        if pool != lun_pool:
+            msg = (_("The specified LUN does not belong to the given "
+                     "pool: %s.") % pool)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
+        # Run the remaining checks to determine whether this LUN can be
+        # imported.
+        self._check_lun_valid_for_manage(lun_info, external_ref)
+        type_id = volume.volume_type_id
+        new_opts = None
+        if type_id:
+            # Handle volume type if specified.
+            old_opts = self.get_lun_specs(lun_id)
+            volume_type = volume_types.get_volume_type(None, type_id)
+            new_specs = volume_type.get('extra_specs')
+            new_opts = self._get_volume_params_from_specs(new_specs)
+            if ('LUNType' in new_opts and
+                    old_opts['LUNType'] != new_opts['LUNType']):
+                msg = (_("Can't import LUN %(lun_id)s to Cinder. "
+                         "LUN type mismatched.") % {'lun_id': lun_id})
+                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
+            if volume_type:
+                self._check_capability_support(new_opts, volume_type)
+
+                change_opts = {'policy': None, 'partitionid': None,
+                               'cacheid': None, 'qos': None}
+
+                change_opts = self._check_needed_changes(lun_id, old_opts,
+                                                         new_opts, change_opts,
+                                                         volume_type)
+                self.modify_lun(lun_id, change_opts)
+
+        # Rename the LUN to make it manageable for Cinder.
+        new_name = huawei_utils.encode_name(volume.id)
+        LOG.debug("Rename LUN %(old_name)s to %(new_name)s.",
+                  {'old_name': lun_info.get('NAME'),
+                   'new_name': new_name})
+        self.client.rename_lun(lun_id, new_name, description)
+
+        location = huawei_utils.to_string(huawei_lun_id=lun_id,
+                                          huawei_lun_wwn=lun_info['WWN'])
+        model_update = {'provider_location': location}
+
+        if new_opts and new_opts.get('replication_enabled'):
+            LOG.debug("Managed volume requires replication; creating replica.")
+            try:
+                lun_info = self.client.get_lun_info(lun_id)
+                replica_info = self.replica.create_replica(
+                    lun_info, new_opts.get('replication_type'))
+                model_update.update(replica_info)
+            except exception.VolumeBackendAPIException:
+                with excutils.save_and_reraise_exception():
+                    LOG.exception("Manage existing volume failed.")
+
+        return model_update
+
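+    # An external_ref passed to manage_existing() and _get_lun_info_by_ref()
+    # below is a plain dict carrying one of the two supported keys, e.g.
+    # (illustrative values):
+    #
+    #     {'source-name': 'lun-to-import'}
+    #     {'source-id': '22'}
+    #
+    # When both are present, source-id wins, because
+    # `id or self.client.get_lun_id_by_name(name)` short-circuits.
+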
+    def _get_lun_info_by_ref(self, external_ref):
+        LOG.debug("Get external_ref: %s", external_ref)
+        name = external_ref.get('source-name')
+        id = external_ref.get('source-id')
+        if not (name or id):
+            msg = _('Must specify source-name or source-id.')
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
+        lun_id = id or self.client.get_lun_id_by_name(name)
+        if not lun_id:
+            msg = _("Can't find LUN on the array, please check the "
+                    "source-name or source-id.")
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
+        lun_info = self.client.get_lun_info(lun_id)
+        return lun_info
+
+    def unmanage(self, volume):
+        """Remove the volume from Cinder management; keep the LUN."""
+        LOG.debug("Unmanage volume: %s.", volume.id)
+
+    def manage_existing_get_size(self, volume, external_ref):
+        """Get the size of the existing volume."""
+        lun_info = self._get_lun_info_by_ref(external_ref)
+        size = int(math.ceil(lun_info.get('CAPACITY') /
+                             constants.CAPACITY_UNIT))
+        return size
+
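+    # Size conversion note: the array reports CAPACITY/USERCAPACITY in
+    # 512-byte sectors (elsewhere in this driver, extend_volume converts GiB
+    # to sectors via int(new_size) * units.Gi / 512), so
+    # constants.CAPACITY_UNIT is presumably the sector count of one GiB.
+    # Illustratively:
+    #
+    #     # a 1 GiB LUN reports 2097152 sectors; math.ceil rounds
+    #     # partial GiBs up
+    #     int(math.ceil(2097152 / (units.Gi / 512)))  # == 1
+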
" + "Snapshot is exposed to initiator.") % snapshot_id + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + def _get_snapshot_info_by_ref(self, external_ref): + LOG.debug("Get snapshot external_ref: %s.", external_ref) + name = external_ref.get('source-name') + id = external_ref.get('source-id') + if not (name or id): + msg = _('Must specify snapshot source-name or source-id.') + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + snapshot_id = id or self.client.get_snapshot_id_by_name(name) + if not snapshot_id: + msg = _("Can't find snapshot on array, please check the " + "source-name or source-id.") + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + snapshot_info = self.client.get_snapshot_info(snapshot_id) + return snapshot_info + + def manage_existing_snapshot(self, snapshot, existing_ref): + snapshot_info = self._get_snapshot_info_by_ref(existing_ref) + snapshot_id = snapshot_info.get('ID') + + parent_lun_id, lun_wwn = huawei_utils.get_volume_lun_id( + self.client, snapshot.volume) + if parent_lun_id != snapshot_info.get('PARENTID'): + msg = (_("Can't import snapshot %s to Cinder. " + "Snapshot doesn't belong to volume."), snapshot_id) + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=msg) + + # Check whether this snapshot can be imported. + self._check_snapshot_valid_for_manage(snapshot_info, existing_ref) + + # Rename the snapshot to make it manageable for Cinder. + description = snapshot.id + snapshot_name = huawei_utils.encode_name(snapshot.id) + self.client.rename_snapshot(snapshot_id, snapshot_name, description) + if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE: + self.client.activate_snapshot(snapshot_id) + + LOG.debug("Rename snapshot %(old_name)s to %(new_name)s.", + {'old_name': snapshot_info.get('NAME'), + 'new_name': snapshot_name}) + + location = huawei_utils.to_string(huawei_snapshot_id=snapshot_id) + return {'provider_location': location} + + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + """Get the size of the existing snapshot.""" + snapshot_info = self._get_snapshot_info_by_ref(existing_ref) + size = int(math.ceil(snapshot_info.get('USERCAPACITY') / + constants.CAPACITY_UNIT)) + return size + + def unmanage_snapshot(self, snapshot): + """Unmanage the specified snapshot from Cinder management.""" + LOG.debug("Unmanage snapshot: %s.", snapshot.id) + + def remove_host_with_check(self, host_id): + wwns_in_host = ( + self.client.get_host_fc_initiators(host_id)) + iqns_in_host = ( + self.client.get_host_iscsi_initiators(host_id)) + if not (wwns_in_host or iqns_in_host or + self.client.is_host_associated_to_hostgroup(host_id)): + self.client.remove_host(host_id) + + def _get_group_type(self, group): + opts = [] + for vol_type in group.volume_types: + specs = vol_type.extra_specs + opts.append(self._get_volume_params_from_specs(specs)) + + return opts + + def _check_group_type_support(self, opts, vol_type): + if not opts: + return False + + for opt in opts: + if opt.get(vol_type) == 'true': + return True + + return False + + def _get_group_type_value(self, opts, vol_type): + if not opts: + return + + for opt in opts: + if vol_type in opt: + return opt[vol_type] + + def create_group(self, context, group): + """Creates a group.""" + if not volume_utils.is_group_a_cg_snapshot_type(group): + raise NotImplementedError() + + model_update = {'status': fields.GroupStatus.AVAILABLE} 
+    def create_group(self, context, group):
+        """Creates a group."""
+        if not volume_utils.is_group_a_cg_snapshot_type(group):
+            raise NotImplementedError()
+
+        model_update = {'status': fields.GroupStatus.AVAILABLE}
+        opts = self._get_group_type(group)
+
+        if self._check_group_type_support(opts, 'hypermetro'):
+            if not self.check_func_support("HyperMetro_ConsistentGroup"):
+                msg = _("Can't create consistency group, array does not "
+                        "support hypermetro consistentgroup, "
+                        "group id: %(group_id)s."
+                        ) % {"group_id": group.id}
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            metro = hypermetro.HuaweiHyperMetro(self.client,
+                                                self.rmt_client,
+                                                self.configuration)
+            metro.create_consistencygroup(group)
+            return model_update
+
+        return model_update
+
+    def create_group_from_src(self, context, group, volumes,
+                              group_snapshot=None, snapshots=None,
+                              source_group=None, source_vols=None):
+        if not volume_utils.is_group_a_cg_snapshot_type(group):
+            raise NotImplementedError()
+
+        model_update = self.create_group(context, group)
+        volumes_model_update = []
+        delete_snapshots = False
+
+        if not snapshots and source_vols:
+            snapshots = []
+            for src_vol in source_vols:
+                vol_kwargs = {
+                    'id': src_vol.id,
+                    'provider_location': src_vol.provider_location,
+                }
+                snapshot_kwargs = {'id': six.text_type(uuid.uuid4()),
+                                   'volume': objects.Volume(**vol_kwargs)}
+                snapshot = objects.Snapshot(**snapshot_kwargs)
+                snapshots.append(snapshot)
+
+            snapshots_model_update = self._create_group_snapshot(snapshots)
+            for i, model in enumerate(snapshots_model_update):
+                snapshot = snapshots[i]
+                snapshot.provider_location = model['provider_location']
+
+            delete_snapshots = True
+
+        if snapshots:
+            for i, vol in enumerate(volumes):
+                snapshot = snapshots[i]
+                vol_model_update = self.create_volume_from_snapshot(
+                    vol, snapshot)
+                vol_model_update.update({'id': vol.id})
+                volumes_model_update.append(vol_model_update)
+
+        if delete_snapshots:
+            self._delete_group_snapshot(snapshots)
+
+        return model_update, volumes_model_update
+
+    def delete_group(self, context, group, volumes):
+        if not volume_utils.is_group_a_cg_snapshot_type(group):
+            raise NotImplementedError()
+
+        opts = self._get_group_type(group)
+        model_update = {'status': fields.GroupStatus.DELETED}
+        volumes_model_update = []
+
+        if self._check_group_type_support(opts, 'hypermetro'):
+            metro = hypermetro.HuaweiHyperMetro(self.client,
+                                                self.rmt_client,
+                                                self.configuration)
+            metro.delete_consistencygroup(context, group, volumes)
+
+        for volume in volumes:
+            volume_model_update = {'id': volume.id}
+            try:
+                self.delete_volume(volume)
+            except Exception:
+                LOG.exception('Delete volume %s failed.', volume)
+                volume_model_update.update({'status': 'error_deleting'})
+            else:
+                volume_model_update.update({'status': 'deleted'})
+
+            volumes_model_update.append(volume_model_update)
+
+        return model_update, volumes_model_update
+
+    def update_group(self, context, group,
+                     add_volumes=None, remove_volumes=None):
+        if not volume_utils.is_group_a_cg_snapshot_type(group):
+            raise NotImplementedError()
+
+        model_update = {'status': fields.GroupStatus.AVAILABLE}
+        opts = self._get_group_type(group)
+        if self._check_group_type_support(opts, 'hypermetro'):
+            metro = hypermetro.HuaweiHyperMetro(self.client,
+                                                self.rmt_client,
+                                                self.configuration)
+            metro.update_consistencygroup(context, group,
+                                          add_volumes,
+                                          remove_volumes)
+            return model_update, None, None
+
+        for volume in add_volumes:
+            self._check_volume_exist_on_array(
+                volume, constants.VOLUME_NOT_EXISTS_RAISE)
+
+        return model_update, None, None
+
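+    # The group methods above return (model_update, volumes_model_update)
+    # pairs that the volume manager merges into the database. For example, a
+    # delete_group() over two volumes where the second fails produces
+    # something shaped like (illustrative IDs):
+    #
+    #     ({'status': 'deleted'},
+    #      [{'id': 'vol-1', 'status': 'deleted'},
+    #       {'id': 'vol-2', 'status': 'error_deleting'}])
+    #
+    # The fields.GroupStatus.* constants serialize to these lowercase strings.
+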
+    def create_group_snapshot(self, context, group_snapshot, snapshots):
+        """Create group snapshot."""
+        if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
+            raise NotImplementedError()
+
+        LOG.info('Create group snapshot for group: %(group_id)s',
+                 {'group_id': group_snapshot.group_id})
+
+        snapshots_model_update = self._create_group_snapshot(snapshots)
+        model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
+        return model_update, snapshots_model_update
+
+    def _create_group_snapshot(self, snapshots):
+        snapshots_model_update = []
+        added_snapshots_info = []
+
+        try:
+            for snapshot in snapshots:
+                snapshot_id = self._create_snapshot_base(snapshot)
+                info = self.client.get_snapshot_info(snapshot_id)
+                location = huawei_utils.to_string(
+                    huawei_snapshot_id=info['ID'],
+                    huawei_snapshot_wwn=info['WWN'])
+                snapshot_model_update = {
+                    'id': snapshot.id,
+                    'status': fields.SnapshotStatus.AVAILABLE,
+                    'provider_location': location,
+                }
+                snapshots_model_update.append(snapshot_model_update)
+                added_snapshots_info.append(info)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                for added_snapshot in added_snapshots_info:
+                    self.client.delete_snapshot(added_snapshot['ID'])
+
+        snapshot_ids = [added_snapshot['ID']
+                        for added_snapshot in added_snapshots_info]
+        try:
+            self.client.activate_snapshot(snapshot_ids)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error("Failed to activate group snapshots %s.",
+                          snapshot_ids)
+                for snapshot_id in snapshot_ids:
+                    self.client.delete_snapshot(snapshot_id)
+
+        return snapshots_model_update
+
+    def delete_group_snapshot(self, context, group_snapshot, snapshots):
+        """Delete group snapshot."""
+        if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
+            raise NotImplementedError()
+
+        LOG.info('Delete group snapshot %(snap_id)s for group: '
+                 '%(group_id)s',
+                 {'snap_id': group_snapshot.id,
+                  'group_id': group_snapshot.group_id})
+
+        try:
+            snapshots_model_update = self._delete_group_snapshot(snapshots)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error("Delete group snapshots failed. "
+                          "Group snapshot id: %s", group_snapshot.id)
+
+        model_update = {'status': fields.GroupSnapshotStatus.DELETED}
+        return model_update, snapshots_model_update
+
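+    # Rollback pattern used in _create_group_snapshot() above:
+    # excutils.save_and_reraise_exception() (from oslo.utils) re-raises the
+    # in-flight exception after the with-block body runs, so the array-side
+    # snapshots are cleaned up without masking the original error. The
+    # two-phase flow is:
+    #
+    #     create snapshot 1..N     -> on failure, delete those already made
+    #     activate snapshots 1..N  -> on failure, delete all N
+    #
+    # Creation and activation are split because activate_snapshot() accepts
+    # the whole ID list in a single call.
+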
" + "Group snapshot id: %s", group_snapshot.id) + + model_update = {'status': fields.GroupSnapshotStatus.DELETED} + return model_update, snapshots_model_update + + def _delete_group_snapshot(self, snapshots): + snapshots_model_update = [] + for snapshot in snapshots: + self.delete_snapshot(snapshot) + snapshot_model_update = { + 'id': snapshot.id, + 'status': fields.SnapshotStatus.DELETED + } + snapshots_model_update.append(snapshot_model_update) + + return snapshots_model_update + + def _classify_volume(self, volumes): + normal_volumes = [] + replica_volumes = [] + + for v in volumes: + volume_type = self._get_volume_type(v) + opts = self._get_volume_params(volume_type) + if opts.get('replication_enabled') == 'true': + replica_volumes.append(v) + else: + normal_volumes.append(v) + + return normal_volumes, replica_volumes + + def _failback_normal_volumes(self, volumes): + volumes_update = [] + for v in volumes: + v_update = {} + v_update['volume_id'] = v.id + metadata = huawei_utils.get_volume_metadata(v) + old_status = 'available' + if 'old_status' in metadata: + old_status = metadata.pop('old_status') + v_update['updates'] = {'status': old_status, + 'metadata': metadata} + volumes_update.append(v_update) + + return volumes_update + + def _failback(self, volumes): + if self.active_backend_id in ('', None): + return 'default', [] + + normal_volumes, replica_volumes = self._classify_volume(volumes) + volumes_update = [] + + replica_volumes_update = self.replica.failback(replica_volumes) + volumes_update.extend(replica_volumes_update) + + normal_volumes_update = self._failback_normal_volumes(normal_volumes) + volumes_update.extend(normal_volumes_update) + + self.active_backend_id = "" + secondary_id = 'default' + + # Switch array connection. + self.client, self.replica_client = self.replica_client, self.client + self.replica = replication.ReplicaPairManager(self.client, + self.replica_client, + self.configuration) + return secondary_id, volumes_update + + def _failover_normal_volumes(self, volumes): + volumes_update = [] + + for v in volumes: + v_update = {} + v_update['volume_id'] = v.id + metadata = huawei_utils.get_volume_metadata(v) + metadata.update({'old_status': v.status}) + v_update['updates'] = {'status': 'error', + 'metadata': metadata} + volumes_update.append(v_update) + + return volumes_update + + def _failover(self, volumes): + if self.active_backend_id not in ('', None): + return self.replica_dev_conf['backend_id'], [] + + normal_volumes, replica_volumes = self._classify_volume(volumes) + volumes_update = [] + + replica_volumes_update = self.replica.failover(replica_volumes) + volumes_update.extend(replica_volumes_update) + + normal_volumes_update = self._failover_normal_volumes(normal_volumes) + volumes_update.extend(normal_volumes_update) + + self.active_backend_id = self.replica_dev_conf['backend_id'] + secondary_id = self.active_backend_id + + # Switch array connection. 
+    def failover_host(self, context, volumes, secondary_id=None, groups=None):
+        """Failover all volumes to secondary."""
+        if secondary_id == 'default':
+            secondary_id, volumes_update = self._failback(volumes)
+        elif (secondary_id == self.replica_dev_conf['backend_id']
+                or secondary_id is None):
+            secondary_id, volumes_update = self._failover(volumes)
+        else:
+            msg = _("Invalid secondary id %s.") % secondary_id
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        return secondary_id, volumes_update, []
+
+    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
+        """Map a snapshot to a host and return target iSCSI information."""
+        # From the volume structure.
+        volume = Volume(id=snapshot.id,
+                        provider_location=snapshot.provider_location,
+                        lun_type=constants.SNAPSHOT_TYPE,
+                        metadata=None)
+
+        return self.initialize_connection(volume, connector)
+
+    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
+        """Delete map between a snapshot and a host."""
+        # From the volume structure.
+        volume = Volume(id=snapshot.id,
+                        provider_location=snapshot.provider_location,
+                        lun_type=constants.SNAPSHOT_TYPE,
+                        metadata=None)
+
+        return self.terminate_connection(volume, connector)
+
+    def get_lun_id_and_type(self, volume):
+        if hasattr(volume, 'lun_type'):
+            metadata = huawei_utils.get_snapshot_metadata(volume)
+            lun_id = metadata['huawei_snapshot_id']
+            lun_type = constants.SNAPSHOT_TYPE
+        else:
+            lun_id = self._check_volume_exist_on_array(
+                volume, constants.VOLUME_NOT_EXISTS_RAISE)
+            lun_type = constants.LUN_TYPE
+
+        return lun_id, lun_type
diff --git a/cinder/volume/drivers/huawei/huawei_driver.py b/cinder/volume/drivers/huawei/huawei_driver.py
index 7e880cb492f..1f7fe6f156c 100644
--- a/cinder/volume/drivers/huawei/huawei_driver.py
+++ b/cinder/volume/drivers/huawei/huawei_driver.py
@@ -13,1937 +13,28 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-import collections import json -import math -import re -import six -import uuid -from oslo_config import cfg from oslo_log import log as logging -from oslo_utils import excutils from oslo_utils import strutils -from oslo_utils import units -from cinder import context from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface -from cinder import objects -from cinder.objects import fields -from cinder.volume import configuration from cinder.volume import driver +from cinder.volume.drivers.huawei import common from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import fc_zone_helper -from cinder.volume.drivers.huawei import huawei_conf from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import hypermetro -from cinder.volume.drivers.huawei import replication -from cinder.volume.drivers.huawei import rest_client -from cinder.volume.drivers.huawei import smartx -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) -huawei_opts = [ - cfg.StrOpt('cinder_huawei_conf_file', - default='/etc/cinder/cinder_huawei_conf.xml', - help='The configuration file for the Cinder Huawei driver.'), - cfg.StrOpt('hypermetro_devices', - default=None, - help='The remote device hypermetro will use.'), - cfg.StrOpt('metro_san_user', - default=None, - help='The remote metro device san user.'), - cfg.StrOpt('metro_san_password', - default=None, - secret=True, - help='The remote metro device san password.'), - cfg.StrOpt('metro_domain_name', - default=None, - help='The remote metro device domain name.'), - cfg.StrOpt('metro_san_address', - default=None, - help='The remote metro device request url.'), - cfg.StrOpt('metro_storage_pools', - default=None, - help='The remote metro device pool names.'), -] - -CONF = cfg.CONF -CONF.register_opts(huawei_opts, group=configuration.SHARED_CONF_GROUP) - -snap_attrs = ('id', 'volume_id', 'volume', 'provider_location') -Snapshot = collections.namedtuple('Snapshot', snap_attrs) -vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata') -Volume = collections.namedtuple('Volume', vol_attrs) - - -class HuaweiBaseDriver(driver.VolumeDriver): - - # ThirdPartySytems wiki page - CI_WIKI_NAME = "Huawei_volume_CI" - - def __init__(self, *args, **kwargs): - super(HuaweiBaseDriver, self).__init__(*args, **kwargs) - - if not self.configuration: - msg = _('Configuration is not found.') - raise exception.InvalidInput(reason=msg) - - self.active_backend_id = kwargs.get('active_backend_id') - - self.configuration.append_config_values(huawei_opts) - self.huawei_conf = huawei_conf.HuaweiConf(self.configuration) - self.support_func = None - self.metro_flag = False - self.replica = None - - def check_func_support(self, obj_name): - try: - self.client._get_object_count(obj_name) - return True - except Exception: - return False - - def get_local_and_remote_dev_conf(self): - self.loc_dev_conf = self.huawei_conf.get_local_device() - - # Now just support one replication device. 
- replica_devs = self.huawei_conf.get_replication_devices() - self.replica_dev_conf = replica_devs[0] if replica_devs else {} - - def get_local_and_remote_client_conf(self): - if self.active_backend_id: - return self.replica_dev_conf, self.loc_dev_conf - else: - return self.loc_dev_conf, self.replica_dev_conf - - def do_setup(self, context): - """Instantiate common class and login storage system.""" - # Set huawei private configuration into Configuration object. - self.huawei_conf.update_config_value() - - self.get_local_and_remote_dev_conf() - client_conf, replica_client_conf = ( - self.get_local_and_remote_client_conf()) - - # init local client - if not client_conf: - msg = _('Get active client failed.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - self.client = rest_client.RestClient(self.configuration, - **client_conf) - self.client.login() - - # init remote client - metro_san_address = self.configuration.safe_get("metro_san_address") - metro_san_user = self.configuration.safe_get("metro_san_user") - metro_san_password = self.configuration.safe_get("metro_san_password") - if metro_san_address and metro_san_user and metro_san_password: - metro_san_address = metro_san_address.split(";") - self.rmt_client = rest_client.RestClient(self.configuration, - metro_san_address, - metro_san_user, - metro_san_password) - - self.rmt_client.login() - self.metro_flag = True - else: - self.metro_flag = False - LOG.warning("Remote device not configured in cinder.conf") - # init replication manager - if replica_client_conf: - self.replica_client = rest_client.RestClient(self.configuration, - **replica_client_conf) - self.replica_client.try_login() - self.replica = replication.ReplicaPairManager(self.client, - self.replica_client, - self.configuration) - - def check_for_setup_error(self): - pass - - def get_volume_stats(self, refresh=False): - """Get volume status and reload huawei config file.""" - self.huawei_conf.update_config_value() - stats = self.client.update_volume_stats() - stats = self.update_support_capability(stats) - - if self.replica: - stats = self.replica.update_replica_capability(stats) - targets = [self.replica_dev_conf['backend_id']] - stats['replication_targets'] = targets - stats['replication_enabled'] = True - - return stats - - def update_support_capability(self, stats): - for pool in stats['pools']: - pool['smartpartition'] = ( - self.check_func_support("SMARTCACHEPARTITION")) - pool['smartcache'] = self.check_func_support("smartcachepool") - pool['QoS_support'] = self.check_func_support("ioclass") - pool['splitmirror'] = self.check_func_support("splitmirror") - pool['luncopy'] = self.check_func_support("luncopy") - pool['thick_provisioning_support'] = True - pool['thin_provisioning_support'] = True - pool['smarttier'] = True - pool['consistencygroup_support'] = True - pool['consistent_group_snapshot_enabled'] = True - - if self.configuration.san_product == "Dorado": - pool['smarttier'] = False - pool['thick_provisioning_support'] = False - - if self.metro_flag: - pool['hypermetro'] = self.check_func_support("HyperMetroPair") - - # assign the support function to global parameter. 
-        self.support_func = pool
-
-        return stats
-
-    def _get_volume_type(self, volume):
-        volume_type = None
-        type_id = volume.volume_type_id
-        if type_id:
-            ctxt = context.get_admin_context()
-            volume_type = volume_types.get_volume_type(ctxt, type_id)
-
-        return volume_type
-
-    def _get_volume_params(self, volume_type):
-        """Return the parameters for creating the volume."""
-        specs = {}
-        if volume_type:
-            specs = dict(volume_type).get('extra_specs')
-
-        opts = self._get_volume_params_from_specs(specs)
-        return opts
-
-    def _get_volume_params_from_specs(self, specs):
-        """Return the volume parameters from extra specs."""
-        opts_capability = {
-            'smarttier': False,
-            'smartcache': False,
-            'smartpartition': False,
-            'thin_provisioning_support': False,
-            'thick_provisioning_support': False,
-            'hypermetro': False,
-            'replication_enabled': False,
-            'replication_type': 'async',
-        }
-
-        opts_value = {
-            'policy': None,
-            'partitionname': None,
-            'cachename': None,
-        }
-
-        opts_associate = {
-            'smarttier': 'policy',
-            'smartcache': 'cachename',
-            'smartpartition': 'partitionname',
-        }
-
-        opts = self._get_opts_from_specs(opts_capability,
-                                         opts_value,
-                                         opts_associate,
-                                         specs)
-        opts = smartx.SmartX().get_smartx_specs_opts(opts)
-        opts = replication.get_replication_opts(opts)
-        LOG.debug('volume opts %(opts)s.', {'opts': opts})
-        return opts
-
-    def _get_opts_from_specs(self, opts_capability, opts_value,
-                             opts_associate, specs):
-        """Get the well defined extra specs."""
-        opts = {}
-        opts.update(opts_capability)
-        opts.update(opts_value)
-
-        for key, value in specs.items():
-            # Get the scope, if it is using scope format.
-            scope = None
-            key_split = key.split(':')
-            if len(key_split) > 2 and key_split[0] != "capabilities":
-                continue
-
-            if len(key_split) == 1:
-                key = key_split[0].lower()
-            else:
-                scope = key_split[0].lower()
-                key = key_split[1].lower()
-
-            if ((not scope or scope == 'capabilities')
-                    and key in opts_capability):
-                words = value.split()
-                if words and len(words) == 2 and words[0] in ('<is>', '<in>'):
-                    opts[key] = words[1].lower()
-                elif key == 'replication_type':
-                    LOG.error("Extra specs must be specified as "
-                              "replication_type='<in> sync' or "
-                              "'<in> async'.")
-                else:
-                    LOG.error("Extra specs must be specified as "
-                              "capabilities:%s='<is> True'.", key)
-
-            if ((scope in opts_capability)
-                    and (key in opts_value)
-                    and (scope in opts_associate)
-                    and (opts_associate[scope] == key)):
-                opts[key] = value
-
-        return opts
-
-    def _get_lun_params(self, volume, opts):
-        pool_name = volume_utils.extract_host(volume.host, level='pool')
-        params = {
-            'TYPE': '11',
-            'NAME': huawei_utils.encode_name(volume.id),
-            'PARENTTYPE': '216',
-            'PARENTID': self.client.get_pool_id(pool_name),
-            'DESCRIPTION': volume.name,
-            'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type),
-            'CAPACITY': huawei_utils.get_volume_size(volume),
-            'WRITEPOLICY': self.configuration.lun_write_type,
-            'PREFETCHPOLICY': self.configuration.lun_prefetch_type,
-            'PREFETCHVALUE': self.configuration.lun_prefetch_value,
-            'DATATRANSFERPOLICY':
-                opts.get('policy', self.configuration.lun_policy),
-            'READCACHEPOLICY': self.configuration.lun_read_cache_policy,
-            'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy, }
-
-        LOG.info('volume: %(volume)s, lun params: %(params)s.',
-                 {'volume': volume.id, 'params': params})
-        return params
-
-    def _create_volume(self, lun_params):
-        # Create LUN on the array.
- lun_info = self.client.create_lun(lun_params) - metadata = {'huawei_lun_id': lun_info['ID'], - 'huawei_lun_wwn': lun_info['WWN']} - model_update = {'metadata': metadata} - - return lun_info, model_update - - def _create_base_type_volume(self, opts, volume, volume_type): - """Create volume and add some base type. - - Base type is the service type which doesn't conflict with the other. - """ - lun_params = self._get_lun_params(volume, opts) - lun_info, model_update = self._create_volume(lun_params) - lun_id = lun_info['ID'] - - try: - qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) - if qos: - if not self.support_func.get('QoS_support'): - msg = (_("Can't support qos on the array")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - smart_qos = smartx.SmartQos(self.client) - smart_qos.add(qos, lun_id) - - smartpartition = smartx.SmartPartition(self.client) - smartpartition.add(opts, lun_id) - - smartcache = smartx.SmartCache(self.client) - smartcache.add(opts, lun_id) - except Exception as err: - self._delete_lun_with_check(lun_id) - msg = _('Create volume error. Because %s.') % six.text_type(err) - raise exception.VolumeBackendAPIException(data=msg) - - return lun_params, lun_info, model_update - - def _add_extend_type_to_volume(self, opts, lun_params, lun_info, - model_update): - """Add the extend type. - - Extend type is the service type which may conflict with the other. - So add it after those services. - """ - lun_id = lun_info['ID'] - if opts.get('hypermetro') == 'true': - metro = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - try: - metro_info = metro.create_hypermetro(lun_id, lun_params) - model_update['metadata'].update(metro_info) - except exception.VolumeBackendAPIException as err: - LOG.error('Create hypermetro error: %s.', err) - self._delete_lun_with_check(lun_id) - raise - - if opts.get('replication_enabled') == 'true': - replica_model = opts.get('replication_type') - try: - replica_info = self.replica.create_replica(lun_info, - replica_model) - model_update.update(replica_info) - except Exception as err: - LOG.exception('Create replication volume error.') - self._delete_lun_with_check(lun_id) - raise - - return model_update - - def create_volume(self, volume): - """Create a volume.""" - volume_type = self._get_volume_type(volume) - opts = self._get_volume_params(volume_type) - if (opts.get('hypermetro') == 'true' - and opts.get('replication_enabled') == 'true'): - err_msg = _("Hypermetro and Replication can not be " - "used in the same volume_type.") - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - lun_params, lun_info, model_update = ( - self._create_base_type_volume(opts, volume, volume_type)) - - model_update = self._add_extend_type_to_volume(opts, lun_params, - lun_info, model_update) - - model_update['provider_location'] = huawei_utils.to_string( - **model_update.pop('metadata')) - - return model_update - - def _delete_volume(self, volume): - lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) - if not lun_id: - return - - lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id) - if lun_group_ids and len(lun_group_ids) == 1: - self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id) - - self.client.delete_lun(lun_id) - - def delete_volume(self, volume): - """Delete a volume. - - Three steps: - Firstly, remove associate from lungroup. - Secondly, remove associate from QoS policy. - Thirdly, remove the lun. 
- """ - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_WARN) - if not lun_id: - return - - if self.support_func.get('QoS_support'): - qos_id = self.client.get_qosid_by_lunid(lun_id) - if qos_id: - smart_qos = smartx.SmartQos(self.client) - smart_qos.remove(qos_id, lun_id) - - metadata = huawei_utils.get_lun_metadata(volume) - if metadata.get('hypermetro_id'): - metro = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - try: - metro.delete_hypermetro(volume) - except exception.VolumeBackendAPIException as err: - LOG.error('Delete hypermetro error: %s.', err) - # We have checked the LUN WWN above, - # no need to check again here. - self._delete_volume(volume) - raise - - # Delete a replication volume - replica_data = volume.replication_driver_data - if replica_data: - try: - self.replica.delete_replica(volume) - except exception.VolumeBackendAPIException as err: - with excutils.save_and_reraise_exception(): - LOG.exception("Delete replication error.") - self._delete_volume(volume) - - self._delete_volume(volume) - - def _delete_lun_with_check(self, lun_id, lun_wwn=None): - if not lun_id: - return - - if self.client.check_lun_exist(lun_id, lun_wwn): - if self.support_func.get('QoS_support'): - qos_id = self.client.get_qosid_by_lunid(lun_id) - if qos_id: - smart_qos = smartx.SmartQos(self.client) - smart_qos.remove(qos_id, lun_id) - - self.client.delete_lun(lun_id) - - def _is_lun_migration_complete(self, src_id, dst_id): - result = self.client.get_lun_migration_task() - found_migration_task = False - if 'data' not in result: - return False - - for item in result['data']: - if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): - found_migration_task = True - if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']: - return True - if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']: - msg = _("Lun migration error.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if not found_migration_task: - err_msg = _("Cannot find migration task.") - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - return False - - def _is_lun_migration_exist(self, src_id, dst_id): - try: - result = self.client.get_lun_migration_task() - except Exception: - LOG.error("Get LUN migration error.") - return False - - if 'data' in result: - for item in result['data']: - if (src_id == item['PARENTID'] - and dst_id == item['TARGETLUNID']): - return True - return False - - def _migrate_lun(self, src_id, dst_id): - try: - self.client.create_lun_migration(src_id, dst_id) - - def _is_lun_migration_complete(): - return self._is_lun_migration_complete(src_id, dst_id) - - wait_interval = constants.MIGRATION_WAIT_INTERVAL - huawei_utils.wait_for_condition(_is_lun_migration_complete, - wait_interval, - self.configuration.lun_timeout) - # Clean up if migration failed. 
- except Exception as ex: - raise exception.VolumeBackendAPIException(data=ex) - finally: - if self._is_lun_migration_exist(src_id, dst_id): - self.client.delete_lun_migration(src_id, dst_id) - self._delete_lun_with_check(dst_id) - - LOG.debug("Migrate lun %s successfully.", src_id) - return True - - def _wait_volume_ready(self, lun_id): - wait_interval = self.configuration.lun_ready_wait_interval - - def _volume_ready(): - result = self.client.get_lun_info(lun_id) - if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH - and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): - return True - return False - - huawei_utils.wait_for_condition(_volume_ready, - wait_interval, - wait_interval * 10) - - def _get_original_status(self, volume): - return 'in-use' if volume.volume_attachment else 'available' - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status=None): - orig_lun_name = huawei_utils.encode_name(volume.id) - new_lun_id, lun_wwn = huawei_utils.get_volume_lun_id( - self.client, new_volume) - new_metadata = huawei_utils.get_lun_metadata(new_volume) - model_update = { - 'provider_location': huawei_utils.to_string(**new_metadata), - } - - try: - self.client.rename_lun(new_lun_id, orig_lun_name) - except exception.VolumeBackendAPIException: - LOG.error('Unable to rename lun %s on array.', new_lun_id) - model_update['_name_id'] = new_volume.name_id - else: - LOG.debug("Renamed lun %(id)s to %(name)s successfully.", - {'id': new_lun_id, - 'name': orig_lun_name}) - model_update['_name_id'] = None - - return model_update - - def migrate_volume(self, ctxt, volume, host, new_type=None): - """Migrate a volume within the same array.""" - self._check_volume_exist_on_array(volume, - constants.VOLUME_NOT_EXISTS_RAISE) - - # NOTE(jlc): Replication volume can't migrate. But retype - # can remove replication relationship first then do migrate. - # So don't add this judgement into _check_migration_valid(). - volume_type = self._get_volume_type(volume) - opts = self._get_volume_params(volume_type) - if opts.get('replication_enabled') == 'true': - return (False, None) - - return self._migrate_volume(volume, host, new_type) - - def _check_migration_valid(self, host, volume): - if 'pool_name' not in host['capabilities']: - return False - - target_device = host['capabilities']['location_info'] - - # Source and destination should be on same array. - if target_device != self.client.device_id: - return False - - # Same protocol should be used if volume is in-use. - protocol = self.configuration.san_protocol - if (host['capabilities']['storage_protocol'] != protocol - and self._get_original_status(volume) == 'in-use'): - return False - - pool_name = host['capabilities']['pool_name'] - if len(pool_name) == 0: - return False - - return True - - def _migrate_volume(self, volume, host, new_type=None): - if not self._check_migration_valid(host, volume): - return (False, None) - - type_id = volume.volume_type_id - - volume_type = None - if type_id: - volume_type = volume_types.get_volume_type(None, type_id) - - pool_name = host['capabilities']['pool_name'] - pools = self.client.get_all_pools() - pool_info = self.client.get_pool_info(pool_name, pools) - dst_volume_name = six.text_type(uuid.uuid4()) - - src_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) - opts = None - qos = None - if new_type: - # If new type exists, use new type. 
- new_specs = new_type['extra_specs'] - opts = self._get_volume_params_from_specs(new_specs) - if 'LUNType' not in opts: - opts['LUNType'] = self.configuration.lun_type - - qos = smartx.SmartQos.get_qos_by_volume_type(new_type) - elif volume_type: - qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) - - if not opts: - opts = self._get_volume_params(volume_type) - - lun_info = self.client.get_lun_info(src_id) - - if opts['policy']: - policy = opts['policy'] - else: - policy = lun_info.get('DATATRANSFERPOLICY', - self.configuration.lun_policy) - - lun_params = { - 'NAME': huawei_utils.encode_name(dst_volume_name), - 'PARENTID': pool_info['ID'], - 'DESCRIPTION': lun_info['DESCRIPTION'], - 'ALLOCTYPE': opts.get('LUNType', lun_info['ALLOCTYPE']), - 'CAPACITY': lun_info['CAPACITY'], - 'WRITEPOLICY': lun_info['WRITEPOLICY'], - 'PREFETCHPOLICY': lun_info['PREFETCHPOLICY'], - 'PREFETCHVALUE': lun_info['PREFETCHVALUE'], - 'DATATRANSFERPOLICY': policy, - 'READCACHEPOLICY': lun_info.get( - 'READCACHEPOLICY', - self.configuration.lun_read_cache_policy), - 'WRITECACHEPOLICY': lun_info.get( - 'WRITECACHEPOLICY', - self.configuration.lun_write_cache_policy), - 'OWNINGCONTROLLER': lun_info['OWNINGCONTROLLER'], } - - for item in lun_params: - if lun_params.get(item) == '--': - del lun_params[item] - - lun_info = self.client.create_lun(lun_params) - lun_id = lun_info['ID'] - - if qos: - LOG.info('QoS: %s.', qos) - SmartQos = smartx.SmartQos(self.client) - SmartQos.add(qos, lun_id) - if opts: - smartpartition = smartx.SmartPartition(self.client) - smartpartition.add(opts, lun_id) - smartcache = smartx.SmartCache(self.client) - smartcache.add(opts, lun_id) - - dst_id = lun_info['ID'] - self._wait_volume_ready(dst_id) - moved = self._migrate_lun(src_id, dst_id) - - return moved, {} - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot. - - We use LUNcopy to copy a new volume from snapshot. - The time needed increases as volume size does. 
- """ - volume_type = self._get_volume_type(volume) - opts = self._get_volume_params(volume_type) - if (opts.get('hypermetro') == 'true' - and opts.get('replication_enabled') == 'true'): - msg = _("Hypermetro and Replication can not be " - "used in the same volume_type.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - snapshot_id = huawei_utils.get_snapshot_id(self.client, snapshot) - if snapshot_id is None: - msg = _('create_volume_from_snapshot: Snapshot %(name)s ' - 'does not exist.') % {'name': snapshot.id} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - lun_params, lun_info, model_update = ( - self._create_base_type_volume(opts, volume, volume_type)) - - tgt_lun_id = lun_info['ID'] - luncopy_name = huawei_utils.encode_name(volume.id) - LOG.info( - 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, ' - 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.', - {'src_lun_id': snapshot_id, - 'tgt_lun_id': tgt_lun_id, - 'copy_name': luncopy_name}) - - wait_interval = self.configuration.lun_ready_wait_interval - - def _volume_ready(): - result = self.client.get_lun_info(tgt_lun_id) - - if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH - and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): - return True - return False - - huawei_utils.wait_for_condition(_volume_ready, - wait_interval, - wait_interval * 10) - - self._copy_volume(volume, luncopy_name, - snapshot_id, tgt_lun_id) - - # NOTE(jlc): Actually, we just only support replication here right - # now, not hypermetro. - model_update = self._add_extend_type_to_volume(opts, lun_params, - lun_info, model_update) - model_update['provider_location'] = huawei_utils.to_string( - **model_update.pop('metadata')) - - return model_update - - def create_cloned_volume(self, volume, src_vref): - """Clone a new volume from an existing volume.""" - self._check_volume_exist_on_array(src_vref, - constants.VOLUME_NOT_EXISTS_RAISE) - - # Form the snapshot structure. - snapshot = Snapshot(id=uuid.uuid4().__str__(), - volume_id=src_vref.id, - volume=src_vref, - provider_location=None) - - # Create snapshot. - self.create_snapshot(snapshot) - - try: - # Create volume from snapshot. - model_update = self.create_volume_from_snapshot(volume, snapshot) - finally: - try: - # Delete snapshot. - self.delete_snapshot(snapshot) - except exception.VolumeBackendAPIException: - LOG.warning( - 'Failure deleting the snapshot %(snapshot_id)s ' - 'of volume %(volume_id)s.', - {'snapshot_id': snapshot.id, - 'volume_id': src_vref.id},) - - return model_update - - def _check_volume_exist_on_array(self, volume, action): - """Check whether the volume exists on the array. - - If the volume exists on the array, return the LUN ID. - If not exists, raise or log warning. 
- """ - lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) - if not lun_id: - msg = _("Volume %s does not exist on the array.") % volume.id - if action == constants.VOLUME_NOT_EXISTS_WARN: - LOG.warning(msg) - if action == constants.VOLUME_NOT_EXISTS_RAISE: - raise exception.VolumeBackendAPIException(data=msg) - return - - if not lun_wwn: - LOG.debug("No LUN WWN recorded for volume %s.", volume.id) - - if not self.client.check_lun_exist(lun_id, lun_wwn): - msg = (_("Volume %s does not exist on the array.") - % volume.id) - if action == constants.VOLUME_NOT_EXISTS_WARN: - LOG.warning(msg) - if action == constants.VOLUME_NOT_EXISTS_RAISE: - raise exception.VolumeBackendAPIException(data=msg) - return - return lun_id - - def extend_volume(self, volume, new_size): - """Extend a volume.""" - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_RAISE) - - volume_type = self._get_volume_type(volume) - opts = self._get_volume_params(volume_type) - if opts.get('replication_enabled') == 'true': - msg = (_("Can't extend replication volume, volume: %(id)s") % - {"id": volume.id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - lun_info = self.client.get_lun_info(lun_id) - old_size = int(lun_info.get('CAPACITY')) - - new_size = int(new_size) * units.Gi / 512 - - if new_size == old_size: - LOG.info("New size is equal to the real size from backend" - " storage, no need to extend." - " realsize: %(oldsize)s, newsize: %(newsize)s.", - {'oldsize': old_size, - 'newsize': new_size}) - return - if new_size < old_size: - msg = (_("New size should be bigger than the real size from " - "backend storage." - " realsize: %(oldsize)s, newsize: %(newsize)s."), - {'oldsize': old_size, - 'newsize': new_size}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.info('Extend volume: %(id)s, oldsize: %(oldsize)s, ' - 'newsize: %(newsize)s.', - {'id': volume.id, - 'oldsize': old_size, - 'newsize': new_size}) - - self.client.extend_lun(lun_id, new_size) - - def _create_snapshot_base(self, snapshot): - volume = snapshot.volume - if not volume: - msg = _("Can't get volume id from snapshot, snapshot: %(id)s" - ) % {'id': snapshot.id} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) - snapshot_name = huawei_utils.encode_name(snapshot.id) - snapshot_description = snapshot.id - snapshot_info = self.client.create_snapshot(lun_id, - snapshot_name, - snapshot_description) - snapshot_id = snapshot_info['ID'] - return snapshot_id - - def create_snapshot(self, snapshot): - snapshot_id = self._create_snapshot_base(snapshot) - try: - self.client.activate_snapshot(snapshot_id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Active snapshot %s failed, now deleting it.", - snapshot_id) - self.client.delete_snapshot(snapshot_id) - - snapshot_info = self.client.get_snapshot_info(snapshot_id) - location = huawei_utils.to_string( - huawei_snapshot_id=snapshot_id, - huawei_snapshot_wwn=snapshot_info['WWN']) - return {'provider_location': location} - - def delete_snapshot(self, snapshot): - LOG.info('Delete snapshot %s.', snapshot.id) - - snapshot_id = huawei_utils.get_snapshot_id(self.client, snapshot) - if snapshot_id and self.client.check_snapshot_exist(snapshot_id): - self.client.stop_snapshot(snapshot_id) - self.client.delete_snapshot(snapshot_id) - else: - LOG.warning("Can't find snapshot on the array.") - - def 
retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type.""" - LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, " - "diff=%(diff)s, host=%(host)s.", {'id': volume.id, - 'new_type': new_type, - 'diff': diff, - 'host': host}) - self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_RAISE) - - # Check what changes are needed - migration, change_opts, lun_id = self.determine_changes_when_retype( - volume, new_type, host) - - model_update = {} - replica_enabled_change = change_opts.get('replication_enabled') - replica_type_change = change_opts.get('replication_type') - if replica_enabled_change and replica_enabled_change[0] == 'true': - try: - self.replica.delete_replica(volume) - model_update.update({'replication_status': 'disabled', - 'replication_driver_data': None}) - except exception.VolumeBackendAPIException: - LOG.exception('Retype volume error. ' - 'Delete replication failed.') - return False - - try: - if migration: - LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with " - "change %(change_opts)s.", - {"lun_id": lun_id, "change_opts": change_opts}) - if not self._migrate_volume(volume, host, new_type): - LOG.warning("Storage-assisted migration failed during " - "retype.") - return False - else: - # Modify lun to change policy - self.modify_lun(lun_id, change_opts) - except exception.VolumeBackendAPIException: - LOG.exception('Retype volume error.') - return False - - if replica_enabled_change and replica_enabled_change[1] == 'true': - try: - # If replica_enabled_change is not None, the - # replica_type_change won't be None. See function - # determine_changes_when_retype. - lun_info = self.client.get_lun_info(lun_id) - replica_info = self.replica.create_replica( - lun_info, replica_type_change[1]) - model_update.update(replica_info) - except exception.VolumeBackendAPIException: - LOG.exception('Retype volume error. 
' - 'Create replication failed.') - return False - - return (True, model_update) - - def modify_lun(self, lun_id, change_opts): - if change_opts.get('partitionid'): - old, new = change_opts['partitionid'] - old_id = old[0] - old_name = old[1] - new_id = new[0] - new_name = new[1] - if old_id: - self.client.remove_lun_from_partition(lun_id, old_id) - if new_id: - self.client.add_lun_to_partition(lun_id, new_id) - LOG.info("Retype LUN(id: %(lun_id)s) smartpartition from " - "(name: %(old_name)s, id: %(old_id)s) to " - "(name: %(new_name)s, id: %(new_id)s) success.", - {"lun_id": lun_id, - "old_id": old_id, "old_name": old_name, - "new_id": new_id, "new_name": new_name}) - - if change_opts.get('cacheid'): - old, new = change_opts['cacheid'] - old_id = old[0] - old_name = old[1] - new_id = new[0] - new_name = new[1] - if old_id: - self.client.remove_lun_from_cache(lun_id, old_id) - if new_id: - self.client.add_lun_to_cache(lun_id, new_id) - LOG.info("Retype LUN(id: %(lun_id)s) smartcache from " - "(name: %(old_name)s, id: %(old_id)s) to " - "(name: %(new_name)s, id: %(new_id)s) successfully.", - {'lun_id': lun_id, - 'old_id': old_id, "old_name": old_name, - 'new_id': new_id, "new_name": new_name}) - - if change_opts.get('policy'): - old_policy, new_policy = change_opts['policy'] - self.client.change_lun_smarttier(lun_id, new_policy) - LOG.info("Retype LUN(id: %(lun_id)s) smarttier policy from " - "%(old_policy)s to %(new_policy)s success.", - {'lun_id': lun_id, - 'old_policy': old_policy, - 'new_policy': new_policy}) - - if change_opts.get('qos'): - old_qos, new_qos = change_opts['qos'] - old_qos_id = old_qos[0] - old_qos_value = old_qos[1] - if old_qos_id: - smart_qos = smartx.SmartQos(self.client) - smart_qos.remove(old_qos_id, lun_id) - if new_qos: - smart_qos = smartx.SmartQos(self.client) - smart_qos.add(new_qos, lun_id) - LOG.info("Retype LUN(id: %(lun_id)s) smartqos from " - "%(old_qos_value)s to %(new_qos)s success.", - {'lun_id': lun_id, - 'old_qos_value': old_qos_value, - 'new_qos': new_qos}) - - def get_lun_specs(self, lun_id): - lun_opts = { - 'policy': None, - 'partitionid': None, - 'cacheid': None, - 'LUNType': None, - } - - lun_info = self.client.get_lun_info(lun_id) - lun_opts['LUNType'] = int(lun_info['ALLOCTYPE']) - if lun_info.get('DATATRANSFERPOLICY'): - lun_opts['policy'] = lun_info['DATATRANSFERPOLICY'] - if lun_info.get('SMARTCACHEPARTITIONID'): - lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID'] - if lun_info.get('CACHEPARTITIONID'): - lun_opts['partitionid'] = lun_info['CACHEPARTITIONID'] - - return lun_opts - - def _check_capability_support(self, new_opts, new_type): - new_cache_name = new_opts['cachename'] - if new_cache_name: - if not self.support_func.get('smartcache'): - msg = (_( - "Can't support cache on the array, cache name is: " - "%(name)s.") % {'name': new_cache_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - new_partition_name = new_opts['partitionname'] - if new_partition_name: - if not self.support_func.get('smartpartition'): - msg = (_( - "Can't support partition on the array, partition name is: " - "%(name)s.") % {'name': new_partition_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if new_opts['policy']: - if (not self.support_func.get('smarttier') - and new_opts['policy'] != '0'): - msg = (_("Can't support tier on the array.")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type) - if not 
self.support_func.get('QoS_support'): - if new_qos: - msg = (_("Can't support qos on the array.")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _check_needed_changes(self, lun_id, old_opts, new_opts, - change_opts, new_type): - new_cache_id = None - new_cache_name = new_opts['cachename'] - if new_cache_name: - if self.support_func.get('smartcache'): - new_cache_id = self.client.get_cache_id_by_name( - new_cache_name) - if new_cache_id is None: - msg = (_( - "Can't find cache name on the array, cache name is: " - "%(name)s.") % {'name': new_cache_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - new_partition_id = None - new_partition_name = new_opts['partitionname'] - if new_partition_name: - if self.support_func.get('smartpartition'): - new_partition_id = self.client.get_partition_id_by_name( - new_partition_name) - if new_partition_id is None: - msg = (_( - "Can't find partition name on the array, partition name " - "is: %(name)s.") % {'name': new_partition_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # smarttier - if old_opts['policy'] != new_opts['policy']: - if not (old_opts['policy'] == '--' - and new_opts['policy'] is None): - change_opts['policy'] = (old_opts['policy'], - new_opts['policy']) - - # smartcache - old_cache_id = old_opts['cacheid'] - if old_cache_id == '--': - old_cache_id = None - if old_cache_id != new_cache_id: - old_cache_name = None - if self.support_func.get('smartcache'): - if old_cache_id: - cache_info = self.client.get_cache_info_by_id( - old_cache_id) - old_cache_name = cache_info['NAME'] - change_opts['cacheid'] = ([old_cache_id, old_cache_name], - [new_cache_id, new_cache_name]) - - # smartpartition - old_partition_id = old_opts['partitionid'] - if old_partition_id == '--': - old_partition_id = None - if old_partition_id != new_partition_id: - old_partition_name = None - if self.support_func.get('smartpartition'): - if old_partition_id: - partition_info = self.client.get_partition_info_by_id( - old_partition_id) - old_partition_name = partition_info['NAME'] - - change_opts['partitionid'] = ([old_partition_id, - old_partition_name], - [new_partition_id, - new_partition_name]) - - # smartqos - new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type) - if not self.support_func.get('QoS_support'): - if new_qos: - msg = (_("Can't support qos on the array.")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - old_qos_id = self.client.get_qosid_by_lunid(lun_id) - old_qos = self._get_qos_specs_from_array(old_qos_id) - if old_qos != new_qos: - change_opts['qos'] = ([old_qos_id, old_qos], new_qos) - - return change_opts - - def determine_changes_when_retype(self, volume, new_type, host): - migration = False - change_opts = { - 'policy': None, - 'partitionid': None, - 'cacheid': None, - 'qos': None, - 'host': None, - 'LUNType': None, - 'replication_enabled': None, - 'replication_type': None, - } - - lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume) - old_opts = self.get_lun_specs(lun_id) - - new_specs = new_type['extra_specs'] - new_opts = self._get_volume_params_from_specs(new_specs) - - if 'LUNType' not in new_opts: - new_opts['LUNType'] = self.configuration.lun_type - - if volume.host != host['host']: - migration = True - change_opts['host'] = (volume.host, host['host']) - if old_opts['LUNType'] != new_opts['LUNType']: - migration = True - change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType']) - - 
volume_type = self._get_volume_type(volume) - volume_opts = self._get_volume_params(volume_type) - if (volume_opts['replication_enabled'] == 'true' - or new_opts['replication_enabled'] == 'true'): - # If replication_enabled changes, - # then replication_type in change_opts will be set. - change_opts['replication_enabled'] = ( - volume_opts['replication_enabled'], - new_opts['replication_enabled']) - - change_opts['replication_type'] = (volume_opts['replication_type'], - new_opts['replication_type']) - - change_opts = self._check_needed_changes(lun_id, old_opts, new_opts, - change_opts, new_type) - - LOG.debug("Determine changes when retype. Migration: " - "%(migration)s, change_opts: %(change_opts)s.", - {'migration': migration, 'change_opts': change_opts}) - return migration, change_opts, lun_id - - def _get_qos_specs_from_array(self, qos_id): - qos = {} - qos_info = {} - if qos_id: - qos_info = self.client.get_qos_info(qos_id) - - for key, value in qos_info.items(): - key = key.upper() - if key in constants.QOS_KEYS: - if key == 'LATENCY' and value == '0': - continue - else: - qos[key] = value - return qos - - def create_export(self, context, volume, connector): - """Export a volume.""" - pass - - def ensure_export(self, context, volume): - """Synchronously recreate an export for a volume.""" - pass - - def remove_export(self, context, volume): - """Remove an export for a volume.""" - pass - - def create_export_snapshot(self, context, snapshot, connector): - """Export a snapshot.""" - pass - - def remove_export_snapshot(self, context, snapshot): - """Remove an export for a snapshot.""" - pass - - def _copy_volume(self, volume, copy_name, src_lun, tgt_lun): - metadata = huawei_utils.get_volume_metadata(volume) - copyspeed = metadata.get('copyspeed') - luncopy_id = self.client.create_luncopy(copy_name, - src_lun, - tgt_lun, - copyspeed) - wait_interval = self.configuration.lun_copy_wait_interval - - try: - self.client.start_luncopy(luncopy_id) - - def _luncopy_complete(): - luncopy_info = self.client.get_luncopy_info(luncopy_id) - if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY: - # luncopy_info['status'] indicates the running status of - # the luncopy. If luncopy_info['status'] is equal to '40', - # this luncopy is completely ready. - return True - elif luncopy_info['state'] != constants.STATUS_HEALTH: - # luncopy_info['state'] indicates the health status of the - # luncopy. If luncopy_info['state'] is not equal to '1', - # this means that an error occurred during the LUNcopy - # operation and we should abort it. - err_msg = (_( - 'An error occurred during the LUNcopy operation. ' - 'LUNcopy name: %(luncopyname)s. ' - 'LUNcopy status: %(luncopystatus)s. ' - 'LUNcopy state: %(luncopystate)s.') - % {'luncopyname': luncopy_id, - 'luncopystatus': luncopy_info['status'], - 'luncopystate': luncopy_info['state']}) - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - huawei_utils.wait_for_condition(_luncopy_complete, - wait_interval, - self.configuration.lun_timeout) - - except Exception: - with excutils.save_and_reraise_exception(): - self.client.delete_luncopy(luncopy_id) - self.delete_volume(volume) - - self.client.delete_luncopy(luncopy_id) - - def _check_lun_valid_for_manage(self, lun_info, external_ref): - lun_id = lun_info.get('ID') - lun_name = lun_info.get('NAME') - - # Check whether the LUN is already in a LUN group. - if lun_info.get('ISADD2LUNGROUP') == 'true': - msg = (_("Can't import LUN %s to Cinder. 
Already exists in a LUN " - "group.") % lun_id) - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - # Check whether the LUN is Normal. - if lun_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: - msg = _("Can't import LUN %s to Cinder. LUN status is not " - "normal.") % lun_id - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - # Check whether the LUN exists in a HyperMetroPair. - if self.support_func.get('hypermetro'): - try: - hypermetro_pairs = self.client.get_hypermetro_pairs() - except exception.VolumeBackendAPIException: - hypermetro_pairs = [] - LOG.debug("Can't get hypermetro info, pass the check.") - - for pair in hypermetro_pairs: - if pair.get('LOCALOBJID') == lun_id: - msg = (_("Can't import LUN %s to Cinder. Already exists " - "in a HyperMetroPair.") % lun_id) - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - # Check whether the LUN exists in a SplitMirror. - if self.support_func.get('splitmirror'): - try: - split_mirrors = self.client.get_split_mirrors() - except exception.VolumeBackendAPIException as ex: - if re.search('License is unavailable', ex.msg): - # Can't check whether the LUN has SplitMirror with it, - # just pass the check and log it. - split_mirrors = [] - LOG.warning('No license for SplitMirror.') - else: - msg = _("Failed to get SplitMirror.") - raise exception.VolumeBackendAPIException(data=msg) - - for mirror in split_mirrors: - try: - target_luns = self.client.get_target_luns(mirror.get('ID')) - except exception.VolumeBackendAPIException: - msg = _("Failed to get target LUN of SplitMirror.") - raise exception.VolumeBackendAPIException(data=msg) - - if ((mirror.get('PRILUNID') == lun_id) - or (lun_id in target_luns)): - msg = (_("Can't import LUN %s to Cinder. Already exists " - "in a SplitMirror.") % lun_id) - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - # Check whether the LUN exists in a migration task. - try: - migration_tasks = self.client.get_migration_task() - except exception.VolumeBackendAPIException as ex: - if re.search('License is unavailable', ex.msg): - # Can't check whether the LUN has migration task with it, - # just pass the check and log it. - migration_tasks = [] - LOG.warning('No license for migration.') - else: - msg = _("Failed to get migration task.") - raise exception.VolumeBackendAPIException(data=msg) - - for migration in migration_tasks: - if lun_id in (migration.get('PARENTID'), - migration.get('TARGETLUNID')): - msg = (_("Can't import LUN %s to Cinder. Already exists in a " - "migration task.") % lun_id) - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - # Check whether the LUN exists in a LUN copy task. - if self.support_func.get('luncopy'): - lun_copy = lun_info.get('LUNCOPYIDS') - if lun_copy and lun_copy[1:-1]: - msg = (_("Can't import LUN %s to Cinder. Already exists in " - "a LUN copy task.") % lun_id) - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - # Check whether the LUN exists in a remote replication task. - rmt_replication = lun_info.get('REMOTEREPLICATIONIDS') - if rmt_replication and rmt_replication[1:-1]: - msg = (_("Can't import LUN %s to Cinder. Already exists in " - "a remote replication task.") % lun_id) - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - # Check whether the LUN exists in a LUN mirror. 
- if self.client.is_lun_in_mirror(lun_name): - msg = (_("Can't import LUN %s to Cinder. Already exists in " - "a LUN mirror.") % lun_name) - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - def manage_existing(self, volume, external_ref): - """Manage an existing volume on the backend storage.""" - # Check whether the LUN belongs to the specified pool. - pool = volume_utils.extract_host(volume.host, 'pool') - LOG.debug("Pool specified is: %s.", pool) - lun_info = self._get_lun_info_by_ref(external_ref) - lun_id = lun_info.get('ID') - description = lun_info.get('DESCRIPTION', '') - if len(description) <= ( - constants.MAX_VOL_DESCRIPTION - len(volume.name) - 1): - description = volume.name + ' ' + description - - lun_pool = lun_info.get('PARENTNAME') - LOG.debug("Storage pool of existing LUN %(lun)s is %(pool)s.", - {"lun": lun_id, "pool": lun_pool}) - if pool != lun_pool: - msg = (_("The specified LUN does not belong to the given " - "pool: %s.") % pool) - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - # Run the other checks to determine whether this LUN can be imported. - self._check_lun_valid_for_manage(lun_info, external_ref) - type_id = volume.volume_type_id - new_opts = None - if type_id: - # Handle volume type if specified. - old_opts = self.get_lun_specs(lun_id) - volume_type = volume_types.get_volume_type(None, type_id) - new_specs = volume_type.get('extra_specs') - new_opts = self._get_volume_params_from_specs(new_specs) - if ('LUNType' in new_opts and - old_opts['LUNType'] != new_opts['LUNType']): - msg = (_("Can't import LUN %(lun_id)s to Cinder. " - "LUN type mismatched.") % {'lun_id': lun_id}) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - if volume_type: - self._check_capability_support(new_opts, volume_type) - - change_opts = {'policy': None, 'partitionid': None, - 'cacheid': None, 'qos': None} - - change_opts = self._check_needed_changes(lun_id, old_opts, - new_opts, change_opts, - volume_type) - self.modify_lun(lun_id, change_opts) - - # Rename the LUN to make it manageable for Cinder. 
- new_name = huawei_utils.encode_name(volume.id) - LOG.debug("Rename LUN %(old_name)s to %(new_name)s.", - {'old_name': lun_info.get('NAME'), - 'new_name': new_name}) - self.client.rename_lun(lun_id, new_name, description) - - location = huawei_utils.to_string(huawei_lun_id=lun_id, - huawei_lun_wwn=lun_info['WWN']) - model_update = {'provider_location': location} - - if new_opts and new_opts.get('replication_enabled'): - LOG.debug("Manage volume needs to create replication.") - try: - lun_info = self.client.get_lun_info(lun_id) - replica_info = self.replica.create_replica( - lun_info, new_opts.get('replication_type')) - model_update.update(replica_info) - except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - LOG.exception("Manage existing volume failed.") - - return model_update - - def _get_lun_info_by_ref(self, external_ref): - LOG.debug("Get external_ref: %s", external_ref) - name = external_ref.get('source-name') - id = external_ref.get('source-id') - if not (name or id): - msg = _('Must specify source-name or source-id.') - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - lun_id = id or self.client.get_lun_id_by_name(name) - if not lun_id: - msg = _("Can't find LUN on the array, please check the " - "source-name or source-id.") - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - lun_info = self.client.get_lun_info(lun_id) - return lun_info - - def unmanage(self, volume): - """Remove a Huawei volume from Cinder management.""" - LOG.debug("Unmanage volume: %s.", volume.id) - - def manage_existing_get_size(self, volume, external_ref): - """Get the size of the existing volume.""" - lun_info = self._get_lun_info_by_ref(external_ref) - size = int(math.ceil(lun_info.get('CAPACITY') / - constants.CAPACITY_UNIT)) - return size - - def _check_snapshot_valid_for_manage(self, snapshot_info, external_ref): - snapshot_id = snapshot_info.get('ID') - - # Check whether the snapshot is normal. - if snapshot_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: - msg = _("Can't import snapshot %s to Cinder. " - "Snapshot status is not normal" - " or running status is not online.") % snapshot_id - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - if snapshot_info.get('EXPOSEDTOINITIATOR') != 'false': - msg = _("Can't import snapshot %s to Cinder. 
" - "Snapshot is exposed to initiator.") % snapshot_id - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - def _get_snapshot_info_by_ref(self, external_ref): - LOG.debug("Get snapshot external_ref: %s.", external_ref) - name = external_ref.get('source-name') - id = external_ref.get('source-id') - if not (name or id): - msg = _('Must specify snapshot source-name or source-id.') - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - snapshot_id = id or self.client.get_snapshot_id_by_name(name) - if not snapshot_id: - msg = _("Can't find snapshot on array, please check the " - "source-name or source-id.") - raise exception.ManageExistingInvalidReference( - existing_ref=external_ref, reason=msg) - - snapshot_info = self.client.get_snapshot_info(snapshot_id) - return snapshot_info - - def manage_existing_snapshot(self, snapshot, existing_ref): - snapshot_info = self._get_snapshot_info_by_ref(existing_ref) - snapshot_id = snapshot_info.get('ID') - - parent_lun_id, lun_wwn = huawei_utils.get_volume_lun_id( - self.client, snapshot.volume) - if parent_lun_id != snapshot_info.get('PARENTID'): - msg = (_("Can't import snapshot %s to Cinder. " - "Snapshot doesn't belong to volume."), snapshot_id) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - - # Check whether this snapshot can be imported. - self._check_snapshot_valid_for_manage(snapshot_info, existing_ref) - - # Rename the snapshot to make it manageable for Cinder. - description = snapshot.id - snapshot_name = huawei_utils.encode_name(snapshot.id) - self.client.rename_snapshot(snapshot_id, snapshot_name, description) - if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE: - self.client.activate_snapshot(snapshot_id) - - LOG.debug("Rename snapshot %(old_name)s to %(new_name)s.", - {'old_name': snapshot_info.get('NAME'), - 'new_name': snapshot_name}) - - location = huawei_utils.to_string(huawei_snapshot_id=snapshot_id) - return {'provider_location': location} - - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - """Get the size of the existing snapshot.""" - snapshot_info = self._get_snapshot_info_by_ref(existing_ref) - size = int(math.ceil(snapshot_info.get('USERCAPACITY') / - constants.CAPACITY_UNIT)) - return size - - def unmanage_snapshot(self, snapshot): - """Unmanage the specified snapshot from Cinder management.""" - LOG.debug("Unmanage snapshot: %s.", snapshot.id) - - def remove_host_with_check(self, host_id): - wwns_in_host = ( - self.client.get_host_fc_initiators(host_id)) - iqns_in_host = ( - self.client.get_host_iscsi_initiators(host_id)) - if not (wwns_in_host or iqns_in_host or - self.client.is_host_associated_to_hostgroup(host_id)): - self.client.remove_host(host_id) - - def _get_group_type(self, group): - opts = [] - for vol_type in group.volume_types: - specs = vol_type.extra_specs - opts.append(self._get_volume_params_from_specs(specs)) - - return opts - - def _check_group_type_support(self, opts, vol_type): - if not opts: - return False - - for opt in opts: - if opt.get(vol_type) == 'true': - return True - - return False - - def _get_group_type_value(self, opts, vol_type): - if not opts: - return - - for opt in opts: - if vol_type in opt: - return opt[vol_type] - - def create_group(self, context, group): - """Creates a group.""" - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - model_update = {'status': fields.GroupStatus.AVAILABLE} 
- opts = self._get_group_type(group) - - if self._check_group_type_support(opts, 'hypermetro'): - if not self.check_func_support("HyperMetro_ConsistentGroup"): - msg = _("Can't create consistency group, array does not " - "support hypermetro consistentgroup, " - "group id: %(group_id)s." - ) % {"group_id": group.id} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - metro = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - metro.create_consistencygroup(group) - return model_update - - return model_update - - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - model_update = self.create_group(context, group) - volumes_model_update = [] - delete_snapshots = False - - if not snapshots and source_vols: - snapshots = [] - for src_vol in source_vols: - vol_kwargs = { - 'id': src_vol.id, - 'provider_location': src_vol.provider_location, - } - snapshot_kwargs = {'id': six.text_type(uuid.uuid4()), - 'volume': objects.Volume(**vol_kwargs)} - snapshot = objects.Snapshot(**snapshot_kwargs) - snapshots.append(snapshot) - - snapshots_model_update = self._create_group_snapshot(snapshots) - for i, model in enumerate(snapshots_model_update): - snapshot = snapshots[i] - snapshot.provider_location = model['provider_location'] - - delete_snapshots = True - - if snapshots: - for i, vol in enumerate(volumes): - snapshot = snapshots[i] - vol_model_update = self.create_volume_from_snapshot( - vol, snapshot) - vol_model_update.update({'id': vol.id}) - volumes_model_update.append(vol_model_update) - - if delete_snapshots: - self._delete_group_snapshot(snapshots) - - return model_update, volumes_model_update - - def delete_group(self, context, group, volumes): - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - opts = self._get_group_type(group) - model_update = {'status': fields.GroupStatus.DELETED} - volumes_model_update = [] - - if self._check_group_type_support(opts, 'hypermetro'): - metro = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - metro.delete_consistencygroup(context, group, volumes) - - for volume in volumes: - volume_model_update = {'id': volume.id} - try: - self.delete_volume(volume) - except Exception: - LOG.exception('Delete volume %s failed.', volume) - volume_model_update.update({'status': 'error_deleting'}) - else: - volume_model_update.update({'status': 'deleted'}) - - volumes_model_update.append(volume_model_update) - - return model_update, volumes_model_update - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - model_update = {'status': fields.GroupStatus.AVAILABLE} - opts = self._get_group_type(group) - if self._check_group_type_support(opts, 'hypermetro'): - metro = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - metro.update_consistencygroup(context, group, - add_volumes, - remove_volumes) - return model_update, None, None - - for volume in add_volumes: - self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_RAISE) - - return model_update, None, None - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Create group snapshot.""" - if not 
volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - - LOG.info('Create group snapshot for group: %(group_id)s', - {'group_id': group_snapshot.group_id}) - - snapshots_model_update = self._create_group_snapshot(snapshots) - model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} - return model_update, snapshots_model_update - - def _create_group_snapshot(self, snapshots): - snapshots_model_update = [] - added_snapshots_info = [] - - try: - for snapshot in snapshots: - snapshot_id = self._create_snapshot_base(snapshot) - info = self.client.get_snapshot_info(snapshot_id) - location = huawei_utils.to_string( - huawei_snapshot_id=info['ID'], - huawei_snapshot_wwn=info['WWN']) - snapshot_model_update = { - 'id': snapshot.id, - 'status': fields.SnapshotStatus.AVAILABLE, - 'provider_location': location, - } - snapshots_model_update.append(snapshot_model_update) - added_snapshots_info.append(info) - except Exception: - with excutils.save_and_reraise_exception(): - for added_snapshot in added_snapshots_info: - self.client.delete_snapshot(added_snapshot['ID']) - - snapshot_ids = [added_snapshot['ID'] - for added_snapshot in added_snapshots_info] - try: - self.client.activate_snapshot(snapshot_ids) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Active group snapshots %s failed.", snapshot_ids) - for snapshot_id in snapshot_ids: - self.client.delete_snapshot(snapshot_id) - - return snapshots_model_update - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Delete group snapshot.""" - if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - - LOG.info('Delete group snapshot %(snap_id)s for group: ' - '%(group_id)s', - {'snap_id': group_snapshot.id, - 'group_id': group_snapshot.group_id}) - - try: - snapshots_model_update = self._delete_group_snapshot(snapshots) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Delete group snapshots failed. 
" - "Group snapshot id: %s", group_snapshot.id) - - model_update = {'status': fields.GroupSnapshotStatus.DELETED} - return model_update, snapshots_model_update - - def _delete_group_snapshot(self, snapshots): - snapshots_model_update = [] - for snapshot in snapshots: - self.delete_snapshot(snapshot) - snapshot_model_update = { - 'id': snapshot.id, - 'status': fields.SnapshotStatus.DELETED - } - snapshots_model_update.append(snapshot_model_update) - - return snapshots_model_update - - def _classify_volume(self, volumes): - normal_volumes = [] - replica_volumes = [] - - for v in volumes: - volume_type = self._get_volume_type(v) - opts = self._get_volume_params(volume_type) - if opts.get('replication_enabled') == 'true': - replica_volumes.append(v) - else: - normal_volumes.append(v) - - return normal_volumes, replica_volumes - - def _failback_normal_volumes(self, volumes): - volumes_update = [] - for v in volumes: - v_update = {} - v_update['volume_id'] = v.id - metadata = huawei_utils.get_volume_metadata(v) - old_status = 'available' - if 'old_status' in metadata: - old_status = metadata.pop('old_status') - v_update['updates'] = {'status': old_status, - 'metadata': metadata} - volumes_update.append(v_update) - - return volumes_update - - def _failback(self, volumes): - if self.active_backend_id in ('', None): - return 'default', [] - - normal_volumes, replica_volumes = self._classify_volume(volumes) - volumes_update = [] - - replica_volumes_update = self.replica.failback(replica_volumes) - volumes_update.extend(replica_volumes_update) - - normal_volumes_update = self._failback_normal_volumes(normal_volumes) - volumes_update.extend(normal_volumes_update) - - self.active_backend_id = "" - secondary_id = 'default' - - # Switch array connection. - self.client, self.replica_client = self.replica_client, self.client - self.replica = replication.ReplicaPairManager(self.client, - self.replica_client, - self.configuration) - return secondary_id, volumes_update - - def _failover_normal_volumes(self, volumes): - volumes_update = [] - - for v in volumes: - v_update = {} - v_update['volume_id'] = v.id - metadata = huawei_utils.get_volume_metadata(v) - metadata.update({'old_status': v.status}) - v_update['updates'] = {'status': 'error', - 'metadata': metadata} - volumes_update.append(v_update) - - return volumes_update - - def _failover(self, volumes): - if self.active_backend_id not in ('', None): - return self.replica_dev_conf['backend_id'], [] - - normal_volumes, replica_volumes = self._classify_volume(volumes) - volumes_update = [] - - replica_volumes_update = self.replica.failover(replica_volumes) - volumes_update.extend(replica_volumes_update) - - normal_volumes_update = self._failover_normal_volumes(normal_volumes) - volumes_update.extend(normal_volumes_update) - - self.active_backend_id = self.replica_dev_conf['backend_id'] - secondary_id = self.active_backend_id - - # Switch array connection. 
- self.client, self.replica_client = self.replica_client, self.client - self.replica = replication.ReplicaPairManager(self.client, - self.replica_client, - self.configuration) - return secondary_id, volumes_update - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover all volumes to secondary.""" - if secondary_id == 'default': - secondary_id, volumes_update = self._failback(volumes) - elif (secondary_id == self.replica_dev_conf['backend_id'] - or secondary_id is None): - secondary_id, volumes_update = self._failover(volumes) - else: - msg = _("Invalid secondary id %s.") % secondary_id - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return secondary_id, volumes_update, [] - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - """Map a snapshot to a host and return target iSCSI information.""" - # From the volume structure. - volume = Volume(id=snapshot.id, - provider_location=snapshot.provider_location, - lun_type=constants.SNAPSHOT_TYPE, - metadata=None) - - return self.initialize_connection(volume, connector) - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - """Delete map between a snapshot and a host.""" - # From the volume structure. - volume = Volume(id=snapshot.id, - provider_location=snapshot.provider_location, - lun_type=constants.SNAPSHOT_TYPE, - metadata=None) - - return self.terminate_connection(volume, connector) - - def get_lun_id_and_type(self, volume): - if hasattr(volume, 'lun_type'): - metadata = huawei_utils.get_snapshot_metadata(volume) - lun_id = metadata['huawei_snapshot_id'] - lun_type = constants.SNAPSHOT_TYPE - else: - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_RAISE) - lun_type = constants.LUN_TYPE - - return lun_id, lun_type - @interface.volumedriver -class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): +class HuaweiISCSIDriver(common.HuaweiBaseDriver, driver.ISCSIDriver): """ISCSI driver for Huawei storage arrays. Version history: @@ -1980,12 +71,9 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): def get_volume_stats(self, refresh=False): """Get volume status.""" - data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.__class__.__name__ + data = self._get_volume_stats(refresh=False) data['storage_protocol'] = 'iSCSI' data['driver_version'] = self.VERSION - data['vendor_name'] = 'Huawei' return data @coordination.synchronized('huawei-mapping-{connector[host]}') @@ -2140,7 +228,7 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): @interface.volumedriver -class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): +class HuaweiFCDriver(common.HuaweiBaseDriver, driver.FibreChannelDriver): """FC driver for Huawei OceanStor storage arrays. 
Version history: @@ -2178,12 +266,9 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): def get_volume_stats(self, refresh=False): """Get volume status.""" - data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.__class__.__name__ + data = self._get_volume_stats(refresh=False) data['storage_protocol'] = 'FC' data['driver_version'] = self.VERSION - data['vendor_name'] = 'Huawei' return data @coordination.synchronized('huawei-mapping-{connector[host]}') diff --git a/doc/source/configuration/block-storage/drivers/huawei-storage-driver.rst b/doc/source/configuration/block-storage/drivers/huawei-storage-driver.rst index 333fa577d10..8b19ab94851 100644 --- a/doc/source/configuration/block-storage/drivers/huawei-storage-driver.rst +++ b/doc/source/configuration/block-storage/drivers/huawei-storage-driver.rst @@ -521,4 +521,4 @@ The following are the Huawei driver specific options that may be set in .. config-table:: :config-target: Huawei - cinder.volume.drivers.huawei.huawei_driver + cinder.volume.drivers.huawei.common
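The GiB rounding used by manage_existing_get_size() and manage_existing_snapshot_get_size() can be checked in isolation. A minimal sketch, assuming CAPACITY_UNIT is the driver's sectors-per-GiB constant (2 * 1024 * 1024 sectors of 512 bytes):

    import math

    CAPACITY_UNIT = 2 * 1024 * 1024  # 512-byte sectors per GiB (assumed)

    def reported_size_gb(capacity_sectors):
        # The array reports capacity in sectors; Cinder wants whole GiB,
        # rounded up so the managed volume is never undersized.
        return int(math.ceil(capacity_sectors / float(CAPACITY_UNIT)))

    assert reported_size_gb(2097152) == 1   # exactly 1 GiB
    assert reported_size_gb(2097153) == 2   # any remainder rounds up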