diff --git a/cinder/opts.py b/cinder/opts.py index 4a2ff85bdfd..cc59d64ac15 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -126,6 +126,8 @@ from cinder.volume.drivers.lenovo import lenovo_common as \ from cinder.volume.drivers import linstordrv as \ cinder_volume_drivers_linstordrv from cinder.volume.drivers import lvm as cinder_volume_drivers_lvm +from cinder.volume.drivers.macrosan import driver as \ + cinder_volume_drivers_macrosan_driver from cinder.volume.drivers.netapp import options as \ cinder_volume_drivers_netapp_options from cinder.volume.drivers.nexenta import options as \ @@ -319,6 +321,7 @@ def list_opts(): cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts, cinder_volume_drivers_linstordrv.linstor_opts, cinder_volume_drivers_lvm.volume_opts, + cinder_volume_drivers_macrosan_driver.config.macrosan_opts, cinder_volume_drivers_netapp_options.netapp_proxy_opts, cinder_volume_drivers_netapp_options.netapp_connection_opts, cinder_volume_drivers_netapp_options.netapp_transport_opts, diff --git a/cinder/tests/unit/test_macrosan_drivers.py b/cinder/tests/unit/test_macrosan_drivers.py new file mode 100644 index 00000000000..ea82986edf8 --- /dev/null +++ b/cinder/tests/unit/test_macrosan_drivers.py @@ -0,0 +1,722 @@ +# Copyright (c) 2019 MacroSAN Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Tests for macrosan drivers.""" +import mock +import os +import socket + +from six.moves import UserDict + +from cinder import exception +from cinder import test +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume.drivers.macrosan import devop_client +from cinder.volume.drivers.macrosan import driver +from cinder.volume import qos_specs +from cinder.volume import utils as volutils +from cinder.volume import volume_types + + +test_volume = ( + UserDict({'name': 'volume-728ec287-bf30-4d2d-98a8-7f1bed3f59ce', + 'volume_name': 'test', + 'id': '728ec287-bf30-4d2d-98a8-7f1bed3f59ce', + 'volume_id': '728ec287-bf30-4d2d-98a8-7f1bed3f59ce', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'test', + 'display_description': 'test', + 'host': 'controller@macrosan#MacroSAN', + 'size': 10, + 'provider_location': + 'macrosan uuid:0x00b34201-025b0000-46b35ae7-b7deec47'})) + +test_volume.size = 10 +test_volume.volume_type_id = None +test_volume.volume_attachment = [] + +test_migrate_volume = { + 'name': 'volume-d42b436a-54cc-480a-916c-275b0258ef59', + 'size': 10, + 'volume_name': 'test', + 'id': 'd42b436a-54cc-480a-916c-275b0258ef59', + 'volume_id': 'd42b436a-54cc-480a-916c-275b0258ef59', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'test', + 'display_description': 'test', + 'volume_type_id': None, + '_name_id': None, + 'host': 'controller@macrosan#MacroSAN', + 'provider_location': + 'macrosan uuid:0x00b34201-00180000-9ac35425-9e288d9a'} + +test_snap = {'name': 'volume-728ec287-bf30-4d2d-98a8-7f1bed3f59ce', + 'size': 10, + 'volume_name': 'test', + 'id': 'aa2419a3-c144-46af-831b-e0d914d3957b', + 'volume_id': '728ec287-bf30-4d2d-98a8-7f1bed3f59ce', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'test', + 'display_description': 'test volume', + 'volume_type_id': None, + 'provider_location': 'pointid: 1', + 'volume_size': 10, + 'volume': test_volume} + +test_connector = 
{'initiator': 'iqn.1993-08.org.debian:01:62027e12fbc', + 'wwpns': ['500b342001001805', '500b342001004605'], + 'wwnns': ['21000024ff2003ec', '21000024ff2003ed'], + 'host': 'controller' + } + +fake_fabric_mapping = { + 'switch1': { + 'target_port_wwn_list': ['500b342001001805', '500b342001004605'], + 'initiator_port_wwn_list': ['21000024ff2003ec', '21000024ff2003ed'] + } +} + +expected_iscsi_properties = {'target_discovered': False, + 'target_portal': '192.168.251.1:3260', + 'target_iqn': + 'iqn.2010-05.com.macrosan.target:controller', + 'target_lun': 0, + 'target_iqns': + ['iqn.2010-05.com.macrosan.target:controller', + 'iqn.2010-05.com.macrosan.target:controller'], + 'target_portals': + ['192.168.251.1:3260', '192.168.251.2:3260'], + 'target_luns': [0, 0], + 'volume_id': + '728ec287-bf30-4d2d-98a8-7f1bed3f59ce' + } + +expected_initr_port_map_tgtexist = { + '21:00:00:24:ff:20:03:ec': [{'port_name': 'FC-Target-1:1:1', + 'wwn': '50:0b:34:20:01:00:18:05'}, + {'port_name': 'FC-Target-2:1:1', + 'wwn': '50:0b:34:20:01:00:46:05'}], + '21:00:00:24:ff:20:03:ed': [{'port_name': 'FC-Target-1:1:1', + 'wwn': '50:0b:34:20:01:00:18:05'}, + {'port_name': 'FC-Target-2:1:1', + 'wwn': '50:0b:34:20:01:00:46:05'}]} + +expected_initr_port_map_tgtnotexist = {'21:00:00:24:ff:20:03:ec': [], + '21:00:00:24:ff:20:03:ed': []} + +expected_fctgtexist_properties = {'target_lun': 0, + 'target_discovered': True, + 'target_wwn': + ['500b342001001805', '500b342001004605'], + 'volume_id': + '728ec287-bf30-4d2d-98a8-7f1bed3f59ce' + } + + +class FakeMacroSANFCDriver(driver.MacroSANFCDriver): + """Fake MacroSAN Storage, Rewrite some methods of MacroSANFCDriver.""" + def do_setup(self): + self.client = FakeClient(self.sp1_ipaddr, self.sp2_ipaddr, + self.username + self.passwd) + self.fcsan_lookup_service = FCSanLookupService() + + @property + def _self_node_wwns(self): + return ['21000024ff2003ec', '21000024ff2003ed'] + + def _snapshot_name(self, snapshotid): + return "aa2419a3c14446af831be0d914d3957" + + 
def _get_client_name(self, host): + return 'devstack' + + +class FCSanLookupService(object): + def get_device_mapping_from_network(self, initiator_list, + target_list): + return fake_fabric_mapping + + +class DummyBrickGetConnector(object): + def connect_volume(self, fake_con_data): + return {'path': '/dev/mapper/3600b3429d72e349d93bad6597d0000df'} + + def disconnect_volume(self, fake_con_data, fake_device): + return None + + +class FakeMacroSANISCSIDriver(driver.MacroSANISCSIDriver): + """Fake MacroSAN Storage, Rewrite some methods of MacroSANISCSIDriver.""" + def do_setup(self): + self.client = FakeClient(self.sp1_ipaddr, self.sp2_ipaddr, + self.username + self.passwd) + self.device_uuid = '0x00b34201-028100eb-4922a092-1d54b755' + + @property + def _self_node_wwns(self): + return ["iqn.1993-08.org.debian:01:62027e12fbc"] + + def _snapshot_name(self, snapshotid): + return "aa2419a3c14446af831be0d914d3957" + + def _get_iscsi_ports(self, dev_client, host): + if self.client.cmd_fail: + raise exception.VolumeBackendAPIException(data='Command failed.') + else: + return [{'ip': '192.168.251.1', 'port_name': 'iSCSI-Target-1:0:0', + 'port': 'eth-1:0:0', + 'target': 'iqn.2010-05.com.macrosan.target:controller'}, + {'ip': '192.168.251.2', 'port_name': 'iSCSI-Target-2:0:0', + 'port': 'eth-2:0:0', + 'target': 'iqn.2010-05.com.macrosan.target:controller'}] + + def _get_client_name(self, host): + return 'devstack' + + @utils.synchronized('MacroSAN-Attach', external=True) + def _attach_volume(self, context, volume, properties, remote=False): + return super(FakeMacroSANISCSIDriver, self)._attach_volume( + context, volume, properties, remote) + + @utils.synchronized('MacroSAN-Attach', external=True) + def _detach_volume(self, context, attach_info, volume, + properties, force=False, remote=False, + ignore_errors=True): + return super(FakeMacroSANISCSIDriver, self)._detach_volume( + context, attach_info, volume, properties, force, remote, + ignore_errors) + + +class 
FakeClient(devop_client.Client): + def __init__(self, sp1_ip, sp2_ip, secret_key): + self.cmd_fail = False + self.tgt_notexist = False + + def get_raid_list(self, pool): + return [{'name': 'RAID-1', 'free_cap': 1749}] + + def get_client(self, name): + return True + + def create_lun(self, name, owner, pool, raids, lun_mode, size, lun_params): + return True + + def get_pool_cap(self, pool): + return 1862, 1749, 0 + + def delete_lun(self, name): + return True + + def setup_snapshot_resource(self, name, res_size, raids): + pass + + def snapshot_resource_exists(self, name): + return True + + def create_snapshot_point(self, lun_name, snapshot_name): + if self.cmd_fail: + raise exception.VolumeBackendAPIException(data='Command failed') + else: + return True + + def disable_snapshot(self, volume_name): + if self.cmd_fail: + raise exception.VolumeBackendAPIException(data='Command failed') + else: + return True + + def delete_snapshot_resource(self, volume_name): + if self.cmd_fail: + raise exception.VolumeBackendAPIException(data='Command failed') + else: + return True + + def snapshot_point_exists(self, lun_name, pointid): + return True + + def lun_exists(self, name): + return True + + def snapshot_enabled(self, lun_name): + return True + + def create_snapshot_view(self, view_name, lun_name, pointid): + if self.cmd_fail: + raise exception.VolumeBackendAPIException(data='Command failed') + else: + return True + + def get_snapshot_pointid(self, lun_name, snapshot_name): + if self.cmd_fail: + raise exception.VolumeBackendAPIException(data='Command failed') + else: + return 1 + + def delete_snapshot_view(self, view_name): + return True + + def delete_snapshot_point(self, lun_name, pointid): + return True + + def copy_volume_from_view(self, lun_name, view_name): + return True + + def snapshot_copy_task_completed(self, lun_name): + return True + + def extend_lun(self, name, raids, size): + return True + + def initiator_exists(self, initr_wwn): + return True + + def 
get_device_uuid(self): + return '0x00b34201-025b0000-46b35ae7-b7deec47' + + def is_initiator_mapped_to_client(self, initr_wwn, client_name): + return True + + def unmap_lun_to_it(self, lun_name, initr_wwn, tgt_port_name): + if self.cmd_fail: + raise exception.VolumeBackendAPIException('Command failed.') + else: + return None + + def map_lun_to_it(self, lun_name, initr_wwn, tgt_port_name, lun_id=-1): + if self.cmd_fail: + raise exception.VolumeBackendAPIException('Command failed.') + else: + return None + + def map_target_to_initiator(self, tgt_port_name, initr_wwn): + return True + + def get_it_unused_id_list(self, it_type, initr_wwn, tgt_port_name): + if self.cmd_fail: + raise exception.VolumeBackendAPIException('Command failed.') + else: + return [i for i in range(511)] + + def enable_lun_qos(self, name, strategy): + if self.cmd_fail: + raise Exception() + else: + return None + + def get_fc_initr_mapped_ports(self, initr_wwns): + return {'21:00:00:24:ff:20:03:ec': + [{'wwn': '50:0b:34:20:01:00:18:05', + 'port_name': 'FC-Target-1:1:1'}, + {'wwn': '50:0b:34:20:01:00:46:05', + 'port_name': 'FC-Target-2:1:1'}], + '21:00:00:24:ff:20:03:ed': + [{'wwn': '50:0b:34:20:01:00:18:05', + 'port_name': 'FC-Target-1:1:1'}, + {'wwn': '50:0b:34:20:01:00:46:05', + 'port_name': 'FC-Target-2:1:1'}] + } + + def get_fc_ports(self): + if self.tgt_notexist: + return [{'sp': 1, 'refcnt': 0, + 'port_name': 'FC-Target-1:1:1', + 'initr': '', 'online': 0, + 'wwn': '50:0b:34:20:01:00:18:05', + 'port': 'FC-1:1:1'}, + {'sp': 2, 'refcnt': 0, + 'port_name': 'FC-Target-2:1:1', + 'initr': '', 'online': 0, + 'wwn': '50:0b:34:20:01:00:46:05', + 'port': 'FC-2:1:1'}, + ] + else: + return [{'sp': 1, 'refcnt': 0, + 'port_name': 'FC-Target-1:1:1', + 'initr': '', 'online': 1, + 'wwn': '50:0b:34:20:01:00:18:05', + 'port': 'FC-1:1:1'}, + {'sp': 2, 'refcnt': 0, + 'port_name': 'FC-Target-2:1:1', + 'initr': '', 'online': 1, + 'wwn': '50:0b:34:20:01:00:46:05', + 'port': 'FC-2:1:1'}, + ] + + def get_lun_uuid(self, 
lun_name): + return '0x00b34201-025b0000-46b35ae7-b7deec47' + + def get_lun_name(self, lun_uuid): + if lun_uuid == "0x00b34201-025b0000-46b35ae7-b7deec47": + return '728ec287-bf30-4d2d-98a8-7f1bed3f59ce' + if lun_uuid == "0x00b34201-00180000-9ac35425-9e288d9a": + return 'd42b436a-54cc-480a-916c-275b0258ef59' + + def get_lun_name_from_rename_file(self, name): + return None + + def backup_lun_name_to_rename_file(self, cur_name, original_name): + return None + + def get_lun_id(self, tgt_name, lun_name, type='FC'): + return 0 + + def get_view_lun_id(self, tgt_name, view_name, type='FC'): + return 0 + + +class MacroSANISCSIDriverTestCase(test.TestCase): + def setUp(self): + super(MacroSANISCSIDriverTestCase, self).setUp() + self.configuration = mock.Mock(spec=conf.Configuration) + self.configuration.san_ip = \ + "172.192.251.1, 172.192.251.2" + self.configuration.san_login = "openstack" + self.configuration.san_password = "passwd" + self.configuration.macrosan_sdas_ipaddrs = None + self.configuration.macrosan_replication_ipaddrs = None + self.configuration.san_thin_provision = False + self.configuration.macrosan_pool = 'Pool-1' + self.configuration.macrosan_thin_lun_extent_size = 8 + self.configuration.macrosan_thin_lun_low_watermark = 8 + self.configuration.macrosan_thin_lun_high_watermark = 40 + self.configuration.macrosan_force_unmap_itl = False + self.configuration.macrosan_snapshot_resource_ratio = 0.3 + self.configuration.macrosan_log_timing = True + self.configuration.macrosan_client = \ + ['devstack; decive1; "eth-1:0:0"; "eth-2:0:0"'] + self.configuration.macrosan_client_default = \ + "eth-1:0:0;eth-2:0:0" + self.driver = FakeMacroSANISCSIDriver(configuration=self.configuration) + self.driver.do_setup() + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + def 
test_create_volume(self, mock_volume_type, mock_qos): + ret = self.driver.create_volume(test_volume) + actual = ret['provider_location'] + self.assertEqual(test_volume['provider_location'], actual) + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + def test_create_qos_volume(self, mock_volume_type, mock_qos): + test_volume.volume_type_id = 'a2ed23e0-76c4-426f-a574-a1327275e725' + ret = self.driver.create_volume(test_volume) + actual = ret['provider_location'] + self.assertEqual(test_volume['provider_location'], actual) + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + def test_delete_volume(self, mock_volume_type, mock_qos): + self.driver.delete_volume(test_volume) + + def test_create_snapshot(self): + self.driver.client.snappoid = True + ret = self.driver.create_snapshot(test_snap) + actual = ret['provider_location'] + self.assertEqual(test_snap['provider_location'], actual) + + def test_delete_snapshot(self): + self.driver.delete_snapshot(test_snap) + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + @mock.patch.object(socket, 'gethostname', return_value='controller') + @mock.patch.object(utils, 'brick_get_connector', + return_value=DummyBrickGetConnector()) + @mock.patch.object(volutils, 'copy_volume', return_value=None) + @mock.patch.object(os.path, 'realpath', return_value=None) + def test_create_volume_from_snapshot(self, mock_volume_type, mock_qos, + 
mock_hostname, + mock_brick_get_connector, + mock_copy_volume, + mock_os_path): + ret = self.driver.create_volume_from_snapshot(test_volume, test_snap) + actual = ret['provider_location'] + self.assertEqual(test_volume['provider_location'], actual) + + @mock.patch.object(socket, 'gethostname', return_value='controller') + @mock.patch.object(utils, 'brick_get_connector', + return_value=DummyBrickGetConnector()) + @mock.patch.object(volutils, 'copy_volume', return_value=None) + @mock.patch.object(os.path, 'realpath', return_value=None) + def test_create_cloned_volume(self, mock_hostname, + mock_brick_get_connector, + mock_copy_volume, + mock_os_path): + self.driver.client.snappoid = True + ret = self.driver.create_cloned_volume(test_volume, test_volume) + actual = ret['provider_location'] + self.assertEqual(test_volume['provider_location'], actual) + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + def test_extend_volume(self, mock_volume_type, mock_qos): + self.driver.extend_volume(test_volume, 15) + + def test_update_migrated_volume(self): + expected = {'_name_id': + test_migrate_volume['id'], + 'provider_location': + test_migrate_volume['provider_location']} + ret = self.driver.update_migrated_volume("", test_volume, + test_migrate_volume) + self.assertEqual(expected, ret) + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + def test_initialize_connection(self, mock_volume_type, mock_qos): + ret = self.driver.initialize_connection(test_volume, test_connector) + self.assertEqual(expected_iscsi_properties, ret['data']) + + @mock.patch.object(volume_types, 
'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + def test_terminate_connection(self, mock_volume_type, mock_qos): + self.driver.terminate_connection(test_volume, test_connector) + + def test_get_raid_list(self): + expected = ["RAID-1"] + ret = self.driver.get_raid_list(20) + self.assertEqual(expected, ret) + + def test_get_volume_stats(self): + ret = self.driver.get_volume_stats(True) + expected = "iSCSI" + self.assertEqual(expected, ret['storage_protocol']) + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + def test_create_qos_volume_fail(self, mock_volume_type, mock_qos): + test_volume.volume_type_id = 'a2ed23e0-76c4-426f-a574-a1327275e725' + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, test_volume) + + def test_create_snapshot_fail(self): + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, test_snap) + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + @mock.patch.object(socket, 'gethostname', return_value='controller') + @mock.patch.object(utils, 'brick_get_connector', + return_value=DummyBrickGetConnector()) + @mock.patch.object(volutils, 'copy_volume', return_value=None) + @mock.patch.object(os.path, 'realpath', return_value=None) + def test_create_volume_from_snapshot_fail(self, mock_volume_type, + mock_qos, mock_hostname, + 
mock_brick_get_connector, + mock_copy_volume, + mock_os_path): + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + test_volume, test_snap) + + @mock.patch.object(socket, 'gethostname', return_value='controller') + @mock.patch.object(utils, 'brick_get_connector', + return_value=DummyBrickGetConnector()) + @mock.patch.object(volutils, 'copy_volume', return_value=None) + @mock.patch.object(os.path, 'realpath', return_value=None) + def test_create_cloned_volume_fail(self, mock_hostname, + mock_brick_get_connector, + mock_copy_volume, + mock_os_path): + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + test_volume, test_volume) + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + def test_initialize_connection_fail(self, mock_volume_type, mock_qos): + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + test_volume, test_connector) + + @mock.patch.object(volume_types, 'get_volume_type', + return_value={'qos_specs_id': + '99f3d240-1b20-4b7b-9321-c6b8b86243ff', + 'extra_specs': {}}) + @mock.patch.object(qos_specs, 'get_qos_specs', + return_value={'specs': {'qos-strategy': 'QoS-1'}}) + def test_terminate_connection_fail(self, mock_volume_type, mock_qos): + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.terminate_connection, + test_volume, test_connector) + + def test_get_raid_list_fail(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.get_raid_list, 2000) + + +class MacroSANFCDriverTestCase(test.TestCase): + def setUp(self): + 
super(MacroSANFCDriverTestCase, self).setUp() + self.configuration = mock.Mock(spec=conf.Configuration) + self.configuration.san_ip = \ + "172.192.251.1, 172.192.251.2" + self.configuration.san_login = "openstack" + self.configuration.san_password = "passwd" + self.configuration.macrosan_sdas_ipaddrs = None + self.configuration.macrosan_replication_ipaddrs = None + self.configuration.san_thin_provision = False + self.configuration.macrosan_pool = 'Pool-1' + self.configuration.macrosan_thin_lun_extent_size = 8 + self.configuration.macrosan_thin_lun_low_watermark = 8 + self.configuration.macrosan_thin_lun_high_watermark = 40 + self.configuration.macrosan_force_unmap_itl = False + self.configuration.macrosan_snapshot_resource_ratio = 0.3 + self.configuration.macrosan_log_timing = True + self.configuration.macrosan_host_name = 'devstack' + self.configuration.macrosan_fc_use_sp_port_nr = 1 + self.configuration.macrosan_fc_keep_mapped_ports = True + self.configuration.macrosan_host_name = 'devstack' + self.configuration.macrosan_client = \ + ['devstack; decive1; "eth-1:0:0"; "eth-2:0:0"'] + self.configuration.macrosan_client_default = \ + "eth-1:0:0;eth-2:0:0" + self.driver = FakeMacroSANFCDriver(configuration=self.configuration) + self.driver.do_setup() + + def test_get_initr_port_map_tgtnotexist(self): + self.driver.client.tgt_notexist = True + ret = self.driver._get_initr_port_map(self.driver.client, + test_connector['wwpns']) + self.assertEqual(expected_initr_port_map_tgtnotexist, ret) + + def test_get_initr_port_map_tgtexist(self): + ret = self.driver._get_initr_port_map(self.driver.client, + test_connector['wwpns']) + self.assertEqual(expected_initr_port_map_tgtexist, ret) + + def test_initialize_connection(self): + ret = self.driver.initialize_connection(test_volume, test_connector) + self.assertEqual(expected_fctgtexist_properties, ret['data']) + + def test_terminate_connection(self): + self.driver.terminate_connection(test_volume, test_connector) + + 
@mock.patch.object(socket, 'gethostname', return_value='controller') + @mock.patch.object(utils, 'brick_get_connector', + return_value=DummyBrickGetConnector()) + @mock.patch.object(volutils, 'copy_volume', return_value=None) + @mock.patch.object(os.path, 'realpath', return_value=None) + def test_create_volume_from_snapshot(self, mock_hostname, + mock_brick_get_connector, + mock_copy_volume, + mock_os_path): + ret = self.driver.create_volume_from_snapshot(test_volume, test_snap) + actual = ret['provider_location'] + self.assertEqual(test_volume['provider_location'], actual) + + @mock.patch.object(socket, 'gethostname', return_value='controller') + @mock.patch.object(utils, 'brick_get_connector', + return_value=DummyBrickGetConnector()) + @mock.patch.object(volutils, 'copy_volume', return_value=None) + @mock.patch.object(os.path, 'realpath', return_value=None) + def test_create_cloned_volume(self, mock_hostname, + mock_brick_get_connector, + mock_copy_volume, + mock_os_path): + self.driver.client.snappoid = True + ret = self.driver.create_cloned_volume(test_volume, test_volume) + actual = ret['provider_location'] + self.assertEqual(test_volume['provider_location'], actual) + + @mock.patch.object(socket, 'gethostname', return_value='controller') + @mock.patch.object(utils, 'brick_get_connector', + return_value=DummyBrickGetConnector()) + @mock.patch.object(volutils, 'copy_volume', return_value=None) + @mock.patch.object(os.path, 'realpath', return_value=None) + def test_create_volume_from_snapshot_fail(self, mock_hostname, + mock_brick_get_connector, + mock_copy_volume, + mock_os_path): + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + test_volume, test_snap) + + @mock.patch.object(socket, 'gethostname', return_value='controller') + @mock.patch.object(utils, 'brick_get_connector', + return_value=DummyBrickGetConnector()) + @mock.patch.object(volutils, 'copy_volume', 
return_value=None) + @mock.patch.object(os.path, 'realpath', return_value=None) + def test_create_cloned_volume_fail(self, mock_hostname, + mock_brick_get_connector, + mock_copy_volume, + mock_os_path): + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + test_volume, test_volume) + + def test_initialize_connection_fail(self): + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + test_volume, test_connector) + + def test_terminate_connection_fail(self): + self.driver.client.cmd_fail = True + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.terminate_connection, + test_volume, test_connector) diff --git a/cinder/volume/drivers/macrosan/__init__.py b/cinder/volume/drivers/macrosan/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/volume/drivers/macrosan/config.py b/cinder/volume/drivers/macrosan/config.py new file mode 100644 index 00000000000..69ca770866e --- /dev/null +++ b/cinder/volume/drivers/macrosan/config.py @@ -0,0 +1,100 @@ +# Copyright (c) 2019 MacroSAN Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Volume Drivers Config Registration documents for MacroSAN SAN.""" + +from oslo_config import cfg + + +macrosan_opts = [ + # sdas login_info + cfg.ListOpt('macrosan_sdas_ipaddrs', + default=None, + help="MacroSAN sdas devices' ip addresses"), + cfg.StrOpt('macrosan_sdas_username', + default=None, + help=""), + cfg.StrOpt('macrosan_sdas_password', + default=None, + help="", + secret=True), + # replication login_info + cfg.ListOpt('macrosan_replication_ipaddrs', + default=None, + help="MacroSAN replication devices' ip addresses"), + cfg.StrOpt('macrosan_replication_username', + default=None, + help=""), + cfg.StrOpt('macrosan_replication_password', + default=None, + help="", + secret=True), + cfg.ListOpt('macrosan_replication_destination_ports', + default=None, + sample_default="eth-1:0/eth-1:1, eth-2:0/eth-2:1", + help="Slave device"), + # device_features + cfg.StrOpt('macrosan_pool', quotes=True, + default=None, + help='Pool to use for volume creation'), + cfg.IntOpt('macrosan_thin_lun_extent_size', + default=8, + help="Set the thin lun's extent size"), + cfg.IntOpt('macrosan_thin_lun_low_watermark', + default=5, + help="Set the thin lun's low watermark"), + cfg.IntOpt('macrosan_thin_lun_high_watermark', + default=20, + help="Set the thin lun's high watermark"), + cfg.BoolOpt('macrosan_force_unmap_itl', + default=True, + help="Force disconnect while deleting volume"), + cfg.FloatOpt('macrosan_snapshot_resource_ratio', + default=1.0, + help="Set snapshot's resource ratio"), + cfg.BoolOpt('macrosan_log_timing', + default=True, + help="Whether enable log timing"), + # fc connection + cfg.IntOpt('macrosan_fc_use_sp_port_nr', + default=1, + max=4, + help="The use_sp_port_nr parameter is the number of " + "online FC ports used by the single-ended memory " + "when the FC connection is established in the switch " + "non-all-pass mode. 
The maximum is 4"), + cfg.BoolOpt('macrosan_fc_keep_mapped_ports', + default=True, + help="In the case of an FC connection, the configuration " + "item associated with the port is maintained."), + # iscsi connection + cfg.ListOpt('macrosan_client', + default=None, + help="""Macrosan iscsi_clients list. + You can configure multiple clients. + You can configure it in this format: + (host; client_name; sp1_iscsi_port; sp2_iscsi_port), + (host; client_name; sp1_iscsi_port; sp2_iscsi_port) + Important warning, Client_name has the following requirements: + [a-zA-Z0-9.-_:], the maximum number of characters is 31 + E.g: + (controller1; decive1; eth-1:0; eth-2:0), + (controller2; decive2; eth-1:0/eth-1:1; eth-2:0/eth-2:1), + """), + cfg.StrOpt('macrosan_client_default', + default=None, + help="This is the default connection information for iscsi. " + "This default configuration is used " + "when no host related information is obtained.") +] diff --git a/cinder/volume/drivers/macrosan/devop_client.py b/cinder/volume/drivers/macrosan/devop_client.py new file mode 100644 index 00000000000..985bd3ecc13 --- /dev/null +++ b/cinder/volume/drivers/macrosan/devop_client.py @@ -0,0 +1,679 @@ +# Copyright (c) 2019 MacroSAN Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Base device operation on MacroSAN SAN.""" + + +import logging +from random import shuffle + +import requests + +from cinder import exception +from cinder.i18n import _ + + +LOG = logging.getLogger(__name__) + +context_request_id = None + + +class Client(object): + """Device Client to do operation.""" + + def __init__(self, sp1_ip, sp2_ip, secret_key): + """Initialize the client.""" + self.sp1_ip = sp1_ip + self.sp2_ip = sp2_ip + self.port = 12138 + self.choosed_ip = None + self.last_request_id = None + self.last_ip = None + self.timeout = 30 + self.SECRET_KEY = secret_key + self.url_prefix = '/api/v1' + + def conn_test(self): + iplist = [('sp1', self.sp1_ip), ('sp2', self.sp2_ip)] + shuffle(iplist) + ha = {} + for sp, ip in iplist: + try: + url = ('http://%s:%s%s/ha_status' % + (ip, str(self.port), self.url_prefix)) + header = {'Authorization': 'Bearer %s' % self.SECRET_KEY} + response = requests.get(url=url, + timeout=self.timeout, headers=header) + ha = self.response_processing(response) + if ha[sp] in ['single', 'double']: + LOG.debug('Heart Beating......%(ha)s ', {'ha': ha}) + return ip + except Exception: + pass + raise exception.VolumeBackendAPIException( + data=_('Connect to MacroSAN IPSAN Error, HA Status:%s') % str(ha)) + + def send_request(self, method='get', url='/', data=None): + header = {'Authorization': 'Bearer %s' % self.SECRET_KEY} + try: + ip = self.conn_test() + url = ('http://%s:%s%s%s' % + (ip, str(self.port), self.url_prefix, url)) + response = None + if method == 'get': + response = requests.get(url=url, params=data, + timeout=self.timeout, headers=header) + elif method == 'post': + response = requests.post(url=url, json=data, + timeout=self.timeout, headers=header) + elif method == 'put': + response = requests.put(url=url, json=data, + timeout=self.timeout, headers=header) + elif method == 'delete': + response = requests.delete(url=url, json=data, + timeout=self.timeout, + headers=header) + return self.response_processing(response) + 
except requests.exceptions.ConnectionError: + LOG.error('========== Unable to establish connection ' + 'with VolumeBackend %(url)s', {'url': url}) + + def response_processing(self, response): + if response.status_code != 200: + LOG.error('========== Command %(url)s execution error,' + 'response_conde: %(status)s', + {'url': response.url, 'status': response.status_code}) + raise exception.VolumeBackendAPIException(data=response.json()) + LOG.debug('The response is: %(response)s, %(text)s', + {'response': response, 'text': response.json()}) + return response.json() + + def get_ha_state(self): + """Get HA state.""" + return self.send_request(method='get', url='/ha_status') + + def lun_exists(self, name): + """Whether the lun exists.""" + data = { + 'attr': 'existence', + 'name': name + } + return self.send_request('get', '/lun', data=data) + + def snapshot_point_exists(self, lun_name, pointid): + """Whether the snapshot point exists.""" + data = { + 'attr': 'existence', + 'lun_name': lun_name, + 'pointid': pointid + } + return self.send_request(method='get', + url='/snapshot_point', data=data) + + def it_exists(self, initr_wwn, tgt_port_name): + """Whether the it exists.""" + data = { + 'attr': 'it', + 'initr_wwn': initr_wwn, + 'tgt_port_name': tgt_port_name + } + return self.send_request(method='get', url='/itl', data=data) + + def is_initiator_mapped_to_client(self, initr_wwn, client_name): + """Whether initiator is mapped to client.""" + data = { + 'initr_wwn': initr_wwn, + 'client_name': client_name, + 'attr': 'list' + } + return self.send_request(method='get', url='/initiator', data=data) + + def snapshot_resource_exists(self, lun_name): + """Whether the snapshot resource exists.""" + data = { + 'lun_name': lun_name + } + return self.send_request(method='get', + url='/snapshot_resource', data=data) + + def initiator_exists(self, initr_wwn): + """Whether the initiator exists.""" + data = { + 'attr': 'existence', + 'initr_wwn': initr_wwn, + } + return 
self.send_request(method='get', url='/initiator', data=data) + + def get_client(self, name): + """Get client info.""" + return self.send_request(method='get', + url='/client', data={'name': name}) + + def delete_lun(self, name): + """Delete a lun.""" + return self.send_request(method='delete', + url='/lun', data={'name': name}) + + def get_lun_sp(self, name): + """Get lun sp.""" + data = { + 'attr': 'lun_sp', + 'name': name + } + return self.send_request(method='get', url='/lun', data=data) + + def get_snapshot_resource_name(self, lun_name): + """Whether the snapshot resource exists.""" + return self.send_request(method='get', url='/snapshot_resource', + data={'lun_name': lun_name}) + + def rename_lun(self, old_name, new_name): + """Rename a lun.""" + return self.send_request(method='put', url='/lun', + data={'attr': 'name', 'old_name': old_name, + 'new_name': new_name}) + + def create_lun(self, name, owner, pool, raids, lun_mode, size, lun_params): + """Create a lun.""" + data = {'name': name, + 'owner': owner, + 'pool': pool, + 'raids': raids, + 'lun_mode': lun_mode, + 'size': size, + 'lun_params': lun_params} + return self.send_request(method='post', url='/lun', data=data) + + def get_raid_list(self, pool): + """Get a raid list.""" + return self.send_request(method='get', + url='/raid_list', data={'pool': pool}) + + def get_pool_cap(self, pool): + """Get pool capacity.""" + return self.send_request(method='get', + url='/pool', data={'pool': pool}) + + def get_lun_base_info(self, name): + data = {'attr': 'base_info', + 'name': name} + return self.send_request(method='get', url='/lun', data=data) + + def extend_lun(self, name, raids, size): + """Extend a lun.""" + data = { + 'attr': 'capicity', + 'name': name, + 'raids': raids, + 'size': size + } + return self.send_request(method='put', url='/lun', data=data) + + def enable_lun_qos(self, name, strategy): + """Enable lun qos.""" + data = { + 'attr': 'qos', + 'name': name, + 'strategy': strategy + } + return 
self.send_request(method='put', url='/lun', data=data) + + def localclone_exists(self, lun): + """Whether localclone lun exists.""" + return self.send_request(method='get', url='/local_clone', + data={'attr': 'existence', 'lun': lun}) + + def localclone_completed(self, lun): + """Whether localclone lun completed.""" + return self.send_request(method='get', url='/local_clone', + data={'attr': 'completed', 'lun': lun}) + + def start_localclone_lun(self, master, slave): + """start localclone lun.""" + return self.send_request(method='post', url='/local_clone', + data={'master': master, 'slave': slave}) + + def stop_localclone_lun(self, lun): + """stop localclone lun.""" + return self.send_request(method='delete', url='/local_clone', + data={'lun': lun}) + + def create_snapshot_resource(self, lun_name, raids, size): + """Create a snapshot resource.""" + data = { + 'lun_name': lun_name, + 'raids': raids, + 'size': size + } + return self.send_request(method='post', url='/snapshot_resource', + data=data) + + def enable_snapshot_resource_autoexpand(self, lun_name): + """Enable snapshot resource autoexpand.""" + data = { + 'attr': 'autoexpand', + 'lun_name': lun_name + } + return self.send_request(method='put', url='/snapshot_resource', + data=data) + + def enable_snapshot(self, lun_name): + """Enable snapshot.""" + data = { + 'attr': 'enable', + 'lun_name': lun_name + } + return self.send_request(method='put', url='/snapshot', data=data) + + def snapshot_enabled(self, lun_name): + """Weather enable snapshot""" + params = { + 'attr': 'enable', + 'lun_name': lun_name + } + return self.send_request(method='get', url='/snapshot', data=params) + + def delete_snapshot_resource(self, lun_name): + """Delete a snapshot resource.""" + data = {'lun_name': lun_name} + return self.send_request(method='delete', url='/snapshot_resource', + data=data) + + def create_snapshot_point(self, lun_name, snapshot_name): + """Create a snapshot point.""" + data = { + 'lun_name': lun_name, + 
'snapshot_name': snapshot_name + } + return self.send_request(method='post', url='/snapshot_point', + data=data) + + def get_snapshot_pointid(self, lun_name, snapshot_name): + """Get a snapshot pointid.""" + params = { + 'attr': 'point_id', + 'lun_name': lun_name, + 'snapshot_name': snapshot_name + } + return self.send_request(method='get', url='/snapshot_point', + data=params) + + def rename_snapshot_point(self, lun_name, pointid, name): + data = { + 'attr': 'name', + 'lun_name': lun_name, + 'pointid': pointid, + 'name': name + } + return self.send_request(method='put', url='/snapshot_point', + data=data) + + def disable_snapshot(self, lun_name): + """Disable snapshot.""" + data = { + 'attr': 'disable', + 'lun_name': lun_name + } + return self.send_request(method='put', url='/snapshot', data=data) + + def delete_snapshot_point(self, lun_name, pointid): + """Delete a snapshot point.""" + data = { + 'lun_name': lun_name, + 'pointid': pointid + } + return self.send_request(method='delete', url='/snapshot_point', + data=data) + + def get_snapshot_point_num(self, lun_name): + """Get snapshot point number.""" + data = { + 'attr': 'number', + 'lun_name': lun_name + } + return self.send_request(method='get', url='/snapshot_point', + data=data) + + def create_client(self, name): + """Create a client.""" + return self.send_request(method='post', url='/client', + data={'name': name}) + + def create_target(self, port_name, type='fc'): + """Create a target.""" + data = { + 'port_name': port_name, + 'type': type + } + return self.send_request(method='post', url='/target', data=data) + + def delete_target(self, tgt_name): + """Delete a target.""" + return self.send_request(method='delete', url='/target', + data={'tgt_name': tgt_name}) + + def create_initiator(self, initr_wwn, alias, type='fc'): + """Create an initiator.""" + data = { + 'initr_wwn': initr_wwn, + 'alias': alias, + 'type': type + } + return self.send_request(method='post', url='/initiator', data=data) + + def 
delete_initiator(self, initr_wwn): + """Delete an initiator.""" + return self.send_request(method='delete', url='/initiator', + data={'initr_wwn': initr_wwn}) + + def map_initiator_to_client(self, initr_wwn, client_name): + """Map initiator to client.""" + data = { + 'attr': 'mapinitiator', + 'initr_wwn': initr_wwn, + 'client_name': client_name + } + return self.send_request(method='put', url='/client', data=data) + + def unmap_initiator_from_client(self, initr_wwn, client_name): + """Unmap target from initiator.""" + data = { + 'attr': 'unmapinitiator', + 'initr_wwn': initr_wwn, + 'client_name': client_name + } + return self.send_request(method='put', url='/client', data=data) + + def map_target_to_initiator(self, tgt_port_name, initr_wwn): + """Map target to initiator.""" + data = { + 'attr': 'maptarget', + 'initr_wwn': initr_wwn, + 'tgt_port_name': tgt_port_name + } + return self.send_request(method='post', url='/itl', data=data) + + def unmap_target_from_initiator(self, tgt_port_name, initr_wwn): + """Unmap target from initiator.""" + data = { + 'attr': 'unmaptarget', + 'initr_wwn': initr_wwn, + 'tgt_port_name': tgt_port_name + } + return self.send_request(method='delete', url='/itl', data=data) + + def map_lun_to_it(self, lun_name, initr_wwn, tgt_port_name, lun_id=-1): + """Map lun to it.""" + data = { + 'attr': 'maplun', + 'lun_name': lun_name, + 'initr_wwn': initr_wwn, + 'tgt_port_name': tgt_port_name, + 'lun_id': lun_id + } + return self.send_request(method='post', url='/itl', data=data) + + def unmap_lun_to_it(self, lun_name, initr_wwn, tgt_port_name): + """Unmap lun to it.""" + data = { + 'attr': 'unmaplun', + 'lun_name': lun_name, + 'initr_wwn': initr_wwn, + 'tgt_port_name': tgt_port_name, + } + return self.send_request(method='delete', url='/itl', data=data) + + def has_initiators_mapped_any_lun(self, initr_wwns, type='fc'): + """Whether has initiators mapped any lun.""" + data = { + 'attr': 'itl', + 'initr_wwns': initr_wwns, + 'type': type + } + return 
self.send_request(method='get', url='/itl', data=data) + + def create_snapshot_view(self, view_name, lun_name, pointid): + """Create a snapshot view.""" + data = { + 'view_name': view_name, + 'lun_name': lun_name, + 'pointid': pointid + } + return self.send_request(method='post', url='/snapshot_view', + data=data) + + def delete_snapshot_view(self, view_name): + """Delete a snapshot view.""" + return self.send_request(method='delete', url='/snapshot_view', + data={'view_name': view_name}) + + def get_fc_initr_mapped_ports(self, initr_wwns): + """Get initiator mapped port.""" + data = { + 'attr': 'fc_initr_mapped_ports', + 'initr_wwns': initr_wwns + } + return self.send_request(method='get', url='/initiator', data=data) + + def get_fc_ports(self): + """Get FC ports.""" + data = { + 'attr': 'fc_ports', + } + return self.send_request(method='get', url='/initiator', data=data) + + def get_iscsi_ports(self): + """Get iSCSI ports.""" + data = { + 'attr': 'iscsi_ports', + } + return self.send_request(method='get', url='/initiator', data=data) + + def get_lun_id(self, initr_wwn, tgt_port_name, lun_name): + """Get lun id.""" + data = { + 'attr': 'lun_id', + 'initr_wwn': initr_wwn, + 'tgt_port_name': tgt_port_name, + 'lun_name': lun_name + } + return self.send_request(method='get', url='/lun', data=data) + + def get_lun_uuid(self, lun_name): + """Get lun uuid.""" + data = { + 'attr': 'lun_uuid', + 'lun_name': lun_name + } + return self.send_request(method='get', url='/lun', data=data) + + def get_lun_name(self, lun_uuid): + """Get lun name.""" + data = { + 'attr': 'lun_name', + 'lun_uuid': lun_uuid + } + return self.send_request(method='get', url='/lun', data=data) + + def copy_volume_from_view(self, lun_name, view_name): + """Copy volume from view.""" + data = { + 'attr': 'from_view', + 'lun_name': lun_name, + 'view_name': view_name + } + return self.send_request(method='post', url='/copy_volume', data=data) + + def snapshot_copy_task_completed(self, lun_name): + data = { + 
'attr': 'snapshot_copy_task_completed', + 'lun_name': lun_name + } + return self.send_request(method='get', url='/copy_volume', data=data) + + def copy_volume_from_volume(self, lun_name, src_lun_name): + """Copy volume from volume.""" + data = { + 'attr': 'from_volume', + 'lun_name': lun_name, + 'src_lun_name': src_lun_name + } + return self.send_request(method='post', url='/copy_volume', data=data) + + def query_bcopy_task(self, task_id): + """Query bcopy task.""" + data = { + 'attr': 'bcopy_task', + 'task_id': task_id + } + return self.send_request(method='get', url='/copy_volume', data=data) + + def get_it_unused_id_list(self, it_type, initr_wwn, tgt_port_name): + data = { + 'attr': 'it_unused_id_list', + 'it_type': it_type, + 'initr_wwn': initr_wwn, + 'tgt_port_name': tgt_port_name + } + return self.send_request(method='get', url='/initiator', data=data) + + def backup_lun_name_to_rename_file(self, cur_name, original_name): + """Backup lun name to rename file.""" + data = { + 'cur_name': cur_name, + 'original_name': original_name + } + return self.send_request(method='post', url='/rename_file', data=data) + + def get_lun_name_from_rename_file(self, name): + """Get lun name from rename file.""" + data = {'name': name} + return self.send_request(method='get', url='/rename_file', data=data) + + def create_dalun(self, lun_name): + data = {'lun_name': lun_name} + return self.send_request(method='post', url='/dalun', data=data) + + def delete_dalun(self, lun_name): + data = {'lun_name': lun_name} + return self.send_request(method='delete', url='/dalun', data=data) + + def dalun_exists(self, lun_name): + data = { + 'attr': 'existence', + 'lun_name': lun_name + } + return self.send_request(method='get', url='/dalun', data=data) + + def suspend_dalun(self, lun_name): + data = { + 'attr': 'suspend', + 'lun_name': lun_name + } + return self.send_request(method='put', url='/dalun', data=data) + + def resume_dalun(self, lun_name): + data = { + 'attr': 'resume', + 
'lun_name': lun_name + } + return self.send_request(method='put', url='/dalun', data=data) + + def setup_snapshot_resource(self, volume_name, size, raids): + if not self.snapshot_resource_exists(volume_name): + self.create_snapshot_resource(volume_name, raids, size) + if self.enable_snapshot_resource_autoexpand( + volume_name).status_code != 200: + LOG.warning('========== Enable snapshot resource auto ' + 'expand for volume: %s error', volume_name) + + def get_raid_list_to_create_lun(self, pool, size): + raids = self.get_raid_list(pool) + free = sum(raid['free_cap'] for raid in raids) + if size > free: + raise exception.VolumeBackendAPIException( + data=_('Pool has not enough free capacity')) + + raids = sorted(raids, key=lambda x: x['free_cap'], reverse=True) + + selected = [] + cap = 0 + for raid in raids: + if raid['free_cap']: + cap += raid['free_cap'] + selected.append(raid['name']) + if cap >= size: + break + return selected + + def get_port_ipaddr(self, port): + data = { + 'attr': 'port_ipaddr', + 'port': port, + } + return self.send_request(method='get', url='/itl', data=data) + + def enable_replication(self, lun_name, sp1, sp2): + data = { + 'attr': 'enable', + 'lun_name': lun_name, + 'sp1': sp1, + 'sp2': sp2, + } + return self.send_request(method='put', url='/replication', data=data) + + def disable_replication(self, lun_name): + data = { + 'attr': 'disable', + 'lun_name': lun_name, + } + return self.send_request(method='put', url='/replication', data=data) + + def replication_enabled(self, lun_name): + data = { + 'attr': 'enabled', + 'lun_name': lun_name + } + return self.send_request(method='get', url='/replication', data=data) + + def startscan_replication(self, lun_name): + data = { + 'attr': 'startscan', + 'lun_name': lun_name + } + return self.send_request(method='put', url='/replication', data=data) + + def stopscan_replication(self, lun_name): + data = { + 'attr': 'stopscan', + 'lun_name': lun_name + } + return self.send_request(method='put', 
url='/replication', data=data) + + def pausereplicate(self, lun_name): + data = { + 'attr': 'pause', + 'lun_name': lun_name + } + return self.send_request(method='put', url='/replication', data=data) + + def get_device_uuid(self): + return self.send_request(method='get', url='/device') + + def get_lun_it(self, name): + data = { + 'attr': 'getitl', + 'name': name + } + return self.send_request(method='get', url='/itl', data=data) diff --git a/cinder/volume/drivers/macrosan/driver.py b/cinder/volume/drivers/macrosan/driver.py new file mode 100644 index 00000000000..675919f40a1 --- /dev/null +++ b/cinder/volume/drivers/macrosan/driver.py @@ -0,0 +1,1629 @@ +# Copyright (c) 2019 MacroSAN Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Volume Drivers for MacroSAN SAN.""" + +import base64 +from contextlib import contextmanager +import math +import re +import six +import socket +import time +import uuid + +from os_brick.initiator import connector as cn +from os_brick.initiator import linuxfc +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import strutils +from oslo_utils import timeutils + +from cinder import context +from cinder.coordination import synchronized +from cinder import exception +from cinder.i18n import _ +from cinder import interface +from cinder import utils +from cinder.volume import configuration +from cinder.volume import driver +from cinder.volume.drivers.macrosan import config +from cinder.volume.drivers.macrosan import devop_client +from cinder.volume.drivers.san import san +from cinder.volume import qos_specs +from cinder.volume import utils as volume_utils +from cinder.volume import volume_types +from cinder.zonemanager import utils as fczm_utils + +version = '1.0.0' +lock_name = 'MacroSAN' + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF +CONF.register_opts(config.macrosan_opts, group=configuration.SHARED_CONF_GROUP) + + +@contextmanager +def ignored(*exceptions): + try: + yield + except exceptions: + pass + + +def _timing(fn): + def __timing(*vargs, **kv): + start = time.time() + if timing_on: + LOG.info('========== start %s', fn.__name__) + + result = fn(*vargs, **kv) + + if timing_on: + end = time.time() + LOG.info('========== end %(fname)s, cost: %(cost).2f secs', + {'fname': fn.__name__, 'cost': end - start}) + return result + return __timing + + +def record_request_id(fn): + def _record_request_id(*vargs, **kv): + ctx = context.context.get_current() + devop_client.context_request_id = ctx.request_id + + return fn(*vargs, **kv) + return _record_request_id + + +def replication_synced(params): + return (params['replication_enabled'] and + params['replication_mode'] == 'sync') + + +def 
b64encode(s): + return base64.b64encode(six.b(s)).decode() + + +def b64decode(s): + return base64.b64decode(six.b(s)).decode() + + +class MacroSANBaseDriver(driver.VolumeDriver): + """Base driver for MacroSAN SAN.""" + + CI_WIKI_NAME = 'MacroSAN Volume CI' + + def __init__(self, *args, **kwargs): + """Initialize the driver.""" + super(MacroSANBaseDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(config.macrosan_opts) + self.configuration.append_config_values(san.san_opts) + self._stats = {} + self.use_multipath = True + self.owners = ['SP1', 'SP2'] + self.owner_idx = 0 + self.volume_backend_name = ( + self.configuration.safe_get('volume_backend_name') or 'MacroSAN') + + self.username = self.configuration.san_login + self.passwd = self.configuration.san_password + self.sp1_ipaddr, self.sp2_ipaddr = ( + self.configuration.san_ip.replace(' ', '').split(",")) + self.login_info = self.username + self.passwd + + if self.configuration.macrosan_sdas_ipaddrs: + self.sdas_username = self.configuration.macrosan_sdas_username + self.sdas_passwd = self.configuration.macrosan_sdas_password + self.sdas_sp1_ipaddr, self.sdas_sp2_ipaddr = ( + self.configuration.macrosan_sdas_ipaddrs) + self.sdas_sp1_ipaddr = ( + self.sdas_sp1_ipaddr.replace('/', ',').replace(' ', '')) + self.sdas_sp2_ipaddr = ( + self.sdas_sp2_ipaddr.replace('/', ',').replace(' ', '')) + self.sdas_login_info = self.sdas_username + self.sdas_passwd + + if self.configuration.macrosan_replication_ipaddrs: + self.rep_username = ( + self.configuration.macrosan_replication_username) + + self.rep_passwd = self.configuration.macrosan_replication_password + self.rep_sp1_ipaddr, self.rep_sp2_ipaddr = ( + self.configuration.macrosan_replication_ipaddrs) + self.rep_sp1_ipaddr = ( + self.rep_sp1_ipaddr.replace('/', ',').replace(' ', '')) + self.rep_sp2_ipaddr = ( + self.rep_sp2_ipaddr.replace('/', ',').replace(' ', '')) + self.replica_login_info = self.rep_username + self.rep_passwd + 
self.replication_params = { + 'destination': + {'sp1': self.configuration. + macrosan_replication_destination_ports[0], + 'sp2': self.configuration. + macrosan_replication_destination_ports[1]}} + + self.client = None + self.replica_client = None + self.sdas_client = None + self.storage_protocol = None + self.device_uuid = None + self.client_info = dict() + + self.lun_params = {} + self.lun_mode = self.configuration.san_thin_provision + if self.lun_mode: + self.lun_params = ( + {'extent-size': + self.configuration.macrosan_thin_lun_extent_size, + 'low-watermark': + self.configuration.macrosan_thin_lun_low_watermark, + 'high-watermark': + self.configuration.macrosan_thin_lun_high_watermark}) + self.pool = self.configuration.macrosan_pool + + self.force_unmap_itl_when_deleting = ( + self.configuration.macrosan_force_unmap_itl) + self.snapshot_resource_ratio = ( + self.configuration.macrosan_snapshot_resource_ratio) + global timing_on + timing_on = self.configuration.macrosan_log_timing + + self.initialize_iscsi_info() + + @property + def _self_node_wwns(self): + return [] + + def _size_str_to_int(self, size_in_g): + if int(size_in_g) == 0: + return 1 + return int(size_in_g) + + def _volume_name(self, volume): + try: + lun_uuid = re.search(r'macrosan uuid:(.+)', + volume['provider_location']).group(1) + return self.client.get_lun_name(lun_uuid) + except Exception: + return volume['id'] + + def _snapshot_name(self, snapshotid): + return snapshotid.replace('-', '')[:31] + + def initialize_iscsi_info(self): + sp1_port, sp2_port = \ + self.configuration.macrosan_client_default.split(';') + host = socket.gethostname() + self.client_info['default'] = {'client_name': host, + 'sp1_port': + sp1_port.replace(' ', ''), + 'sp2_port': + sp2_port.replace(' ', '')} + client_list = self.configuration.macrosan_client + if client_list: + for i in client_list: + client = i.strip('(').strip(')').split(";") + host, client_name, sp1_port, sp2_port = [j.strip() for j + in client] + 
self.client_info[host] = ( + {'client_name': client_name, + 'sp1_port': sp1_port.replace(' ', '').replace('/', ','), + 'sp2_port': sp2_port.replace(' ', '').replace('/', ',')}) + + def _get_client_name(self, host): + if host in self.client_info: + return self.client_info[host]['client_name'] + return self.client_info['default']['client_name'] + + @utils.synchronized('MacroSAN-Setup', external=True) + @record_request_id + def do_setup(self, context): + """Any initialization the volume driver does while starting.""" + LOG.debug('Enter in Macrosan do_setup.') + self.client = devop_client.Client(self.sp1_ipaddr, + self.sp2_ipaddr, + self.login_info) + + if self.configuration.macrosan_sdas_ipaddrs: + self.sdas_client = ( + devop_client.Client(self.sdas_sp1_ipaddr, + self.sdas_sp2_ipaddr, + self.sdas_login_info)) + + if self.configuration.macrosan_replication_ipaddrs: + self.replica_client = ( + devop_client.Client(self.rep_sp1_ipaddr, + self.rep_sp2_ipaddr, + self.replica_login_info)) + self.device_uuid = self.client.get_device_uuid() + self._do_setup() + LOG.info('MacroSAN Cinder Driver setup complete.') + + def _do_setup(self): + pass + + def _get_owner(self): + owner = self.owners[self.owner_idx % 2] + self.owner_idx += 1 + return owner + + def check_for_setup_error(self): + """Check any setup error.""" + pass + + def _check_volume_params(self, params): + if params['sdas'] and params['replication_enabled']: + raise exception.VolumeBackendAPIException( + data=_('sdas and replication can not be enabled at same time')) + + if params['sdas'] and self.sdas_client is None: + raise exception.VolumeBackendAPIException( + data=_('sdas is not configured, cannot use sdas')) + + if params['replication_enabled'] and self.replica_client is None: + raise exception.VolumeBackendAPIException( + data=_('replica is not configured, cannot use replication')) + + def get_raid_list(self, size): + raids = self.client.get_raid_list(self.pool) + free = sum(raid['free_cap'] for raid in raids) 
+ if size > free: + raise exception.VolumeBackendAPIException(_('Pool has not enough' + 'free capacity')) + + raids = sorted(raids, key=lambda x: x['free_cap'], reverse=True) + + selected = [] + cap = 0 + for raid in raids: + if raid['free_cap']: + cap += raid['free_cap'] + selected.append(raid['name']) + if cap >= size: + break + return selected + + def _create_volume(self, name, size, params, owner=None, pool=None): + rmt_client = None + if params['sdas']: + rmt_client = self.sdas_client + elif params['replication_enabled']: + rmt_client = self.replica_client + + owner = self._get_owner() if owner is None else owner + raids = [] + pool = self.pool if pool is None else pool + + if not params['lun_mode']: + raids = self.client.get_raid_list_to_create_lun(pool, size) + + self.client.create_lun(name, owner, pool, raids, + params['lun_mode'], size, self.lun_params) + + if params['qos-strategy']: + try: + self.client.enable_lun_qos(name, params['qos-strategy']) + except Exception: + self.client.delete_lun(name) + raise exception.VolumeBackendAPIException( + _('Enable lun qos failed.')) + + if params['sdas'] or params['replication_enabled']: + res_size = int(max(int(size) * self.snapshot_resource_ratio, 1)) + try: + raids = self.client.get_raid_list_to_create_lun(pool, + res_size) + self.client.setup_snapshot_resource(name, res_size, raids) + except Exception: + with excutils.save_and_reraise_exception(): + self.client.delete_lun(name) + + try: + raids = [] + if not params['lun_mode']: + raids = rmt_client.get_raid_list_to_create_lun( + pool, size) + + rmt_client.create_lun(name, owner, pool, raids, + params['lun_mode'], size, + self.lun_params) + except Exception: + with excutils.save_and_reraise_exception(): + self.client.delete_snapshot_resource(name) + self.client.delete_lun(name) + + try: + raids = rmt_client.get_raid_list_to_create_lun(pool, + res_size) + rmt_client.setup_snapshot_resource(name, res_size, raids) + except Exception: + with ignored(Exception): + 
rmt_client.delete_lun(name) + with excutils.save_and_reraise_exception(): + self.client.delete_snapshot_resource(name) + self.client.delete_lun(name) + + if params['sdas'] or replication_synced(params): + try: + self.client.create_dalun(name) + except Exception: + with ignored(Exception): + rmt_client.delete_snapshot_resource(name) + rmt_client.delete_lun(name) + with excutils.save_and_reraise_exception(): + self.client.delete_snapshot_resource(name) + self.client.delete_lun(name) + elif params['replication_mode'] == 'async': + destination = self.replication_params['destination'] + sp1_ipaddr = rmt_client.get_port_ipaddr(destination['sp1']) + sp2_ipaddr = rmt_client.get_port_ipaddr(destination['sp2']) + + try: + self.client.enable_replication(name, sp1_ipaddr, + sp2_ipaddr) + self.client.startscan_replication(name) + except Exception: + with ignored(Exception): + rmt_client.delete_snapshot_resource(name) + rmt_client.delete_lun(name) + with excutils.save_and_reraise_exception(): + self.client.delete_snapshot_resource(name) + self.client.delete_lun(name) + + lun_uuid = self.client.get_lun_uuid(name) + return {'provider_location': 'macrosan uuid:%s' % lun_uuid} + + def _parse_qos_strategy(self, volume_type): + qos_specs_id = volume_type.get('qos_specs_id') + + if qos_specs_id is None: + return '' + + ctx = context.get_admin_context() + specs = qos_specs.get_qos_specs(ctx, qos_specs_id)['specs'] + + return specs.pop('qos-strategy', '').strip() if specs else '' + + def _default_volume_params(self): + params = { + 'qos-strategy': '', + 'replication_enabled': False, + 'replication_mode': 'async', + 'sdas': False, + 'lun_mode': self.lun_mode + } + return params + + def _parse_volume_params(self, volume): + params = self._default_volume_params() + + if volume.volume_type_id is None: + return params + + ctx = context.get_admin_context() + volume_type = volume_types.get_volume_type(ctx, volume.volume_type_id) + + params['qos-strategy'] = self._parse_qos_strategy(volume_type) 
+ + specs = dict(volume_type).get('extra_specs') + for k, val in specs.items(): + ks = k.lower().split(':') + if len(ks) == 2 and ks[0] != "capabilities": + continue + + k = ks[-1] + if k not in params: + continue + + else: + v = val.split()[-1] + val_type = type(params[k]).__name__ + if val_type == 'int': + v = int(v) + elif val_type == 'bool': + v = strutils.bool_from_string(v) + params[k] = v + + if params['sdas']: + params['lun_mode'] = False + + return params + + @synchronized(lock_name) + @record_request_id + @_timing + def create_volume(self, volume): + """Create a volume.""" + LOG.debug(('========== create volume, name: %(name)s,' + 'id: %(volume_id)s, size: %(size)s.'), + {'name': volume['name'], 'volume_id': volume['id'], + 'size': volume['size']}) + + name = volume['name'] + size = self._size_str_to_int(volume['size']) + params = self._parse_volume_params(volume) + self._check_volume_params(params) + + return self._create_volume(name, size, params) + + def _delete_volume(self, name, params=None): + if not self.client.lun_exists(name): + return + + if params is None: + params = self._default_volume_params() + + if self.force_unmap_itl_when_deleting: + self.force_terminate_connection(name, False) + + if params['sdas'] or replication_synced(params): + if self.client.dalun_exists(name): + self.client.suspend_dalun(name) + self.client.delete_dalun(name) + with ignored(Exception): + self.sdas_client.delete_snapshot_resource(name) + self.sdas_client.delete_lun(name) + self.client.delete_snapshot_resource(name) + + if (params['replication_enabled'] and + params['replication_mode'] == 'async'): + if self.client.replication_enabled(name): + with ignored(Exception): + self.client.stopscan_replication(name) + self.client.pausereplicate(name) + self.client.disable_replication(name) + self.client.delete_snapshot_resource(name) + + self.client.delete_lun(name) + + try: + migrated_name = self.client.get_lun_name_from_rename_file(name) + if not migrated_name: + return + 
try: + self.client.rename_lun(migrated_name, name) + except Exception: + LOG.warning('========== failed to rename %(migrated_name)s' + ' to %(name)s', + {'migrated_name': migrated_name, 'name': name}) + except Exception: + return + + @synchronized(lock_name) + @record_request_id + @_timing + def delete_volume(self, volume): + """Delete a volume.""" + LOG.debug('========== delete volume, id: %s.', volume['id']) + name = self._volume_name(volume) + params = self._parse_volume_params(volume) + self._delete_volume(name, params) + + @utils.synchronized('MacroSAN-Attach', external=True) + def _attach_volume(self, context, volume, properties, remote=False): + return super(MacroSANBaseDriver, self)._attach_volume(context, + volume, + properties, + remote) + + @utils.synchronized('MacroSAN-Attach', external=True) + def _detach_volume(self, context, attach_info, volume, + properties, force=False, remote=False, + ignore_errors=True): + return super(MacroSANBaseDriver, self)._detach_volume(context, + attach_info, + volume, + properties, + force, + remote, + ignore_errors) + + def _create_snapshot(self, snapshot_name, volume_name, volume_size): + size = int(max(int(volume_size) * self.snapshot_resource_ratio, 1)) + raids = self.client.get_raid_list_to_create_lun(self.pool, size) + if not self.client.snapshot_resource_exists(volume_name): + self.client.create_snapshot_resource(volume_name, raids, size) + try: + self.client.enable_snapshot_resource_autoexpand(volume_name) + except exception.VolumeBackendAPIException: + LOG.warning('========== Enable snapshot resource auto ' + 'expand for volume: %(volume_name)s error', + {'volume_name': volume_name}) + + if not self.client.snapshot_enabled(volume_name): + try: + self.client.enable_snapshot(volume_name) + except exception.VolumeBackendAPIException: + with excutils.save_and_reraise_exception(): + self.client.delete_snapshot_resource(volume_name) + + try: + self.client.create_snapshot_point(volume_name, snapshot_name) + pointid = 
self.client.get_snapshot_pointid(volume_name, + snapshot_name) + except exception.VolumeBackendAPIException: + with ignored(Exception): + self.client.disable_snapshot(volume_name) + self.client.delete_snapshot_resource(volume_name) + raise + + return int(pointid) + + @synchronized(lock_name) + @record_request_id + @_timing + def create_snapshot(self, snapshot): + """Create a snapshot.""" + volume = snapshot['volume'] + LOG.debug(('========== create snapshot, snapshot id: %(snapshot_id)s,' + ' volume id: %(volume_id)s, size: %(size)s.'), + {'snapshot_id': snapshot['id'], 'volume_id': volume['id'], + 'size': volume['size']}) + + snapshot_name = self._snapshot_name(snapshot['name']) + volume_name = self._volume_name(volume) + + pointid = self._create_snapshot(snapshot_name, + volume_name, + volume['size']) + return {'provider_location': 'pointid: %s' % pointid} + + def _delete_snapshot(self, snapshot_name, volume_name, pointid): + if self.client.snapshot_point_exists(volume_name, pointid): + self.client.delete_snapshot_point(volume_name, pointid) + + with ignored(Exception): + n = self.client.get_snapshot_point_num(volume_name) + if n != 0: + return + with ignored(Exception): + self.client.disable_snapshot(volume_name) + if not (self.client.dalun_exists(volume_name) or + self.client.replication_enabled(volume_name)): + self.client.delete_snapshot_resource(volume_name) + + @synchronized(lock_name) + @record_request_id + @_timing + def delete_snapshot(self, snapshot): + """Delete a snapshot.""" + volume = snapshot['volume'] + provider = snapshot['provider_location'] + if not provider: + return + + m = re.findall(r'pointid: (\d+)', provider) + if m is None: + return + LOG.debug(('========== delete snapshot, snapshot id: %(snapshot_id)s,' + ' pointid: %(point_id)s, volume id: %(volume_id)s.'), + {'snapshot_id': snapshot['id'], 'point_id': m[0], + 'volume_id': volume['id']}) + snapshot_name = self._snapshot_name(snapshot['id']) + volume_name = self._volume_name(volume) + 
self._delete_snapshot(snapshot_name, volume_name, m[0]) + + def _initialize_connection(self, name, host, wwns): + raise NotImplementedError + + def _terminate_connection(self, name, host, wwns): + raise NotImplementedError + + def _connect(self, name): + host = socket.gethostname() + conn = self._initialize_connection(name, host, + self._self_node_wwns) + + device_scan_attempts = self.configuration.num_volume_device_scan_tries + protocol = conn['driver_volume_type'] + connector = utils.brick_get_connector( + protocol, + use_multipath=self.use_multipath, + device_scan_attempts=device_scan_attempts, + conn=conn) + device = None + try: + device = connector.connect_volume(conn['data']) + except Exception: + with excutils.save_and_reraise_exception(): + self._terminate_connection(name, host, self._self_node_wwns) + + return {'conn': conn, 'device': device, 'connector': connector} + + def _disconnect(self, conn, name): + connector = conn['connector'] + connector.disconnect_volume(conn['conn']['data'], + conn['device']) + + self._terminate_connection(name, socket.gethostname(), + self._self_node_wwns) + + def _create_volume_from_snapshot(self, vol_name, vol_size, + vol_params, snp_name, pointid, + snp_vol_name, snp_vol_size): + self._create_volume(vol_name, vol_size, vol_params) + + try: + self.client.create_snapshot_view(snp_name, + snp_vol_name, + pointid) + except Exception: + self._delete_volume(vol_name) + raise exception.VolumeBackendAPIException( + _('Create snapshot view failed.')) + try: + self.client.copy_volume_from_view(vol_name, snp_name) + + while not self.client.snapshot_copy_task_completed(vol_name): + time.sleep(2) + except Exception: + with excutils.save_and_reraise_exception(): + self.client.delete_snapshot_view(snp_name) + self._delete_volume(vol_name) + else: + self.client.delete_snapshot_view(snp_name) + + lun_uuid = self.client.get_lun_uuid(vol_name) + return {'provider_location': 'macrosan uuid:%s' % lun_uuid} + + @synchronized(lock_name) + 
@record_request_id + @_timing + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + LOG.debug('========== create volume from snapshot.') + snapshot_volume = snapshot['volume'] + provider = snapshot['provider_location'] + m = re.findall(r'pointid: (\d+)', provider) + pointid = int(m[0]) + + vol_name = self._volume_name(volume) + snp_name = self._snapshot_name(snapshot['id']) + snp_vol_name = self._volume_name(snapshot_volume) + + params = self._parse_volume_params(volume) + self._check_volume_params(params) + + return self._create_volume_from_snapshot(vol_name, + volume['size'], + params, + snp_name, + pointid, + snp_vol_name, + snapshot['volume_size']) + + def _create_cloned_volume(self, vol_name, vol_size, vol_params, + src_vol_name, src_vol_size, snp_name): + pointid = self._create_snapshot(snp_name, + src_vol_name, + src_vol_size) + + try: + return self._create_volume_from_snapshot(vol_name, + vol_size, + vol_params, + snp_name, + pointid, + src_vol_name, + src_vol_size) + finally: + self._delete_snapshot(snp_name, src_vol_name, pointid) + + @record_request_id + @_timing + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume.""" + LOG.debug('========== create cloned volume.') + vol_name = volume['id'] + src_vol_name = self._volume_name(src_vref) + snapshotid =\ + src_vref['id'][:12] + timeutils.utcnow().strftime('%Y%m%d%H%M%S%f') + snp_name = self._snapshot_name(snapshotid) + + params = self._parse_volume_params(volume) + self._check_volume_params(params) + + return self._create_cloned_volume(vol_name, volume['size'], params, + src_vol_name, src_vref['size'], + snp_name) + + def _extend_volume(self, name, moresize, params): + if params['replication_enabled']: + raise Exception( + 'Volume %s has replication enabled, cannot extend' % name) + + if params['sdas']: + self.client.suspend_dalun(name) + + raids = self.client.get_raid_list_to_create_lun(self.pool, + moresize) + 
self.client.extend_lun(name, raids, moresize) + + raids = self.sdas_client.get_raid_list_to_create_lun(self.pool, + moresize) + self.sdas_client.extend_lun(name, raids, moresize) + + self.client.resume_dalun(name) + else: + raids = self.client.get_raid_list_to_create_lun(self.pool, + moresize) + self.client.extend_lun(name, raids, moresize) + + @synchronized(lock_name) + @record_request_id + @_timing + def extend_volume(self, volume, new_size): + """Extend a volume.""" + LOG.debug(('========== extend volume, id: %(volume_id)s,' + 'size: %(size)s.'), + {'volume_id': volume['id'], 'size': new_size}) + + name = self._volume_name(volume) + moresize = self._size_str_to_int(new_size - int(volume['size'])) + params = self._parse_volume_params(volume) + self._extend_volume(name, moresize, params) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a volume.""" + pass + + def create_export(self, context, volume, connector): + """Export the volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + @record_request_id + def get_volume_stats(self, refresh=False): + """Get volume stats.""" + if refresh: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self): + data = {} + pool = {} + + total, free, thin_unalloced = self.client.get_pool_cap(self.pool) + pool['location_info'] = self.device_uuid + pool['pool_name'] = self.pool + pool['total_capacity_gb'] = total + pool['free_capacity_gb'] = free + thin_unalloced + pool['reserved_percentage'] = self.configuration.safe_get( + 'reserved_percentage') + pool['max_over_subscription_ratio'] = self.configuration.safe_get( + 'max_over_subscription_ratio') + pool['QoS_support'] = True + pool['multiattach'] = True + pool['lun_mode'] = True + pool['replication_mode'] = [] + + if self.replica_client: + pool['replication_enabled'] = 'True' + pool['replication_mode'].append('async') + + if self.sdas_client: + 
pool['replication_enabled'] = 'True' + pool['sdas'] = 'True' + pool['replication_mode'].append('sync') + + if len(pool['replication_mode']) == 0: + del pool['replication_mode'] + + data['pools'] = [pool] + data["volume_backend_name"] = self.volume_backend_name + data["vendor_name"] = 'MacroSAN' + data["driver_version"] = version + data["storage_protocol"] = self.storage_protocol + + self._stats = data + + @record_request_id + def update_migrated_volume(self, ctxt, volume, new_volume, + original_volume_status=None): + """Return model update for migrated volume.""" + original_name = self._volume_name(volume) + cur_name = self._volume_name(new_volume) + LOG.debug(('========== update migrated volume,' + 'volume: %(original_name)s, new_volume: %(cur_name)s'), + {'original_name': original_name, 'cur_name': cur_name}) + + if self.client.lun_exists(original_name): + self.client.backup_lun_name_to_rename_file(cur_name, original_name) + else: + if original_volume_status == 'available': + try: + self.client.rename_lun(cur_name, original_name) + except Exception: + LOG.warning('========== failed to rename ' + '%(cur_name)s to %(original_name)s', + {'cur_name': cur_name, + 'original_name': original_name}) + + name_id = new_volume['_name_id'] or new_volume['id'] + return {'_name_id': name_id, + 'provider_location': new_volume['provider_location']} + + @synchronized(lock_name) + @record_request_id + @_timing + def initialize_connection_snapshot(self, snapshot, connector, **kwargs): + volume = snapshot['volume'] + provider = snapshot['provider_location'] + m = re.findall(r'pointid: (\d+)', provider) + + pointid = m[0] + snp_name = self._snapshot_name(snapshot['id']) + snp_vol_name = self._volume_name(volume) + + self.client.create_snapshot_view(snp_name, snp_vol_name, pointid) + + try: + conn = self._initialize_connection_snapshot(snp_name, connector) + conn['data']['volume_id'] = snapshot['id'] + return conn + except Exception: + with excutils.save_and_reraise_exception(): + 
self.client.delete_snapshot_view(snp_name)
+
+    def _initialize_connection_snapshot(self, snp_name, connector):
+        raise NotImplementedError
+
+    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
+        snp_name = self._snapshot_name(snapshot['id'])
+        self._terminate_connection_snapshot(snp_name, connector)
+        self.client.delete_snapshot_view(snp_name)
+
+    def _terminate_connection_snapshot(self, snp_name, connector):
+        raise NotImplementedError
+
+    @record_request_id
+    def manage_existing_get_size(self, volume, external_ref):
+        __, info, __ = self._get_existing_lun_info(external_ref)
+        size = int(math.ceil(info['size']))
+        return size
+
+    @synchronized(lock_name)
+    @record_request_id
+    @_timing
+    def manage_existing(self, volume, external_ref):
+        vol_params = self._parse_volume_params(volume)
+        self._check_volume_params(vol_params)
+        if vol_params['qos-strategy']:
+            raise exception.VolumeBackendAPIException(
+                data=_('Not support to import qos-strategy'))
+
+        pool = volume_utils.extract_host(volume.host, 'pool')
+        name, info, params = self._get_existing_lun_info(external_ref)
+        if pool != info['pool']:
+            msg = _("LUN %(name)s does not belong to the pool: "
+                    "%(pool)s.") % {'name': name, 'pool': pool}
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=external_ref, reason=msg)
+
+        if params['sdas'] and params['replication_enabled']:
+            msg = _('LUN %(name)s sdas and replication '
+                    'enabled at same time') % {'name': name}
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        if replication_synced(vol_params) and params['sdas']:
+            params.update({'sdas': False,
+                           'replication_mode': 'sync',
+                           'replication_enabled': True})
+
+        def notequal(attr):
+            return vol_params[attr] != params[attr]
+
+        if (notequal('replication_enabled') or notequal('replication_mode') or
+                notequal('sdas') or notequal('lun_mode')):
+            msg = _("Volume type: %(vol_params)s doesn't equal "
+                    "to existing lun: "
+                    "%(params)s") % {'vol_params': vol_params, 'params': 
params} + raise exception.VolumeBackendAPIException(data=msg) + + rmt_client = None + if params['sdas']: + rmt_client = self.sdas_client + elif params['replication_enabled']: + rmt_client = self.replica_client + + snp_res_name = self.client.get_snapshot_resource_name(name) + self.client.rename_lun(name, volume['name']) + if snp_res_name: + self.client.rename_lun(snp_res_name, 'SR-%s' % volume['id']) + + if params['sdas'] or params['replication_enabled']: + snp_res_name = rmt_client.get_snapshot_resource_name(name) + rmt_client.rename_lun(name, volume['name']) + if snp_res_name: + rmt_client.rename_lun(snp_res_name, 'SR-%s' % volume['id']) + + lun_uuid = self.client.get_lun_uuid(volume['name']) + return {'provider_location': 'macrosan uuid:%s' % lun_uuid} + + def _get_existing_lun_info(self, external_ref): + name = external_ref.get('source-name') + if not name: + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, + reason=_('No source-name to get existing lun')) + + info = self.client.get_lun_base_info(name) + + params = { + 'qos-strategy': '', + 'replication_enabled': False, + 'replication_mode': 'async', + 'sdas': False, + 'lun_mode': False + } + + sdas = self.client.dalun_exists(name) + rep = self.client.replication_enabled(name) + params['replication_enabled'] = rep + params['sdas'] = sdas + if info['lun_mode'] == 'thin': + info['lun_mode'] = True + else: + info['lun_mode'] = False + params['lun_mode'] = info['lun_mode'] + + return name, info, params + + def unmanage(self, volume): + pass + + @synchronized(lock_name) + @record_request_id + @_timing + def manage_existing_snapshot(self, snapshot, existing_ref): + volume = snapshot['volume'] + src_name = self._get_existing_snapname(existing_ref).lstrip('_') + src_name = self._snapshot_name(src_name) + pointid = self.client.get_snapshot_pointid(volume['name'], src_name) + snap_name = self._snapshot_name(snapshot['id']) + + self.client.rename_snapshot_point(volume['name'], pointid, 
snap_name) + return {'provider_location': 'pointid: %s' % pointid} + + @record_request_id + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + volume = snapshot['volume'] + return volume['size'] + + def _get_existing_snapname(self, external_ref): + name = external_ref.get('source-name') + if not name: + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, + reason=_('No source-name to get existing snap')) + return name + + def unmanage_snapshot(self, snapshot): + pass + + def migration_valid(self, volume, host): + if volume.volume_attachment: + return False + + pool_name = host['capabilities'].get('pool_name', '') + if pool_name == '': + return False + + device_uuid = host['capabilities']['location_info'] + if device_uuid != self.device_uuid: + return False + + params = self._parse_volume_params(volume) + if params['sdas'] or params['replication_enabled']: + return False + + return True + + @synchronized(lock_name) + @record_request_id + @_timing + def migrate_volume(self, ctxt, volume, host): + if not self.migration_valid(volume, host): + return False, None + + size = self._size_str_to_int(volume['size']) + params = self._parse_volume_params(volume) + name = str(uuid.uuid4()) + src_name = self._volume_name(volume) + owner = self.client.get_lun_sp(src_name) + pool = host['capabilities'].get('pool_name', self.pool) + + LOG.info('host: %(host)s, backend: %(volume_backend_name)s', + {'host': host, + 'volume_backend_name': self.volume_backend_name}) + self._create_volume(name, size, params, owner, pool) + + res_sz = int(max(int(size) * self.snapshot_resource_ratio, 1)) + src_snp_res_exists = self.client.snapshot_resource_exists(src_name) + if not src_snp_res_exists: + raids = self.client.get_raid_list_to_create_lun(self.pool, res_sz) + self.client.create_snapshot_resource(src_name, raids, res_sz) + + snp_res_exists = self.client.snapshot_resource_exists(name) + if not snp_res_exists: + raids = 
self.client.get_raid_list_to_create_lun(pool, res_sz) + self.client.create_snapshot_resource(name, raids, res_sz) + + self.client.start_localclone_lun(src_name, name) + while not self.client.localclone_completed(name): + time.sleep(2) + self.client.stop_localclone_lun(name) + + if not snp_res_exists: + self.client.delete_snapshot_resource(name) + if not src_snp_res_exists: + self.client.delete_snapshot_resource(src_name) + + self._delete_volume(src_name, params) + self.client.rename_lun(name, src_name) + + lun_uuid = self.client.get_lun_uuid(src_name) + return True, {'provider_location': 'macrosan uuid:%s' % lun_uuid} + + def force_terminate_connection(self, name, force_connected=False): + it_list = self.client.get_lun_it(name) + it_list = [it for it in it_list + if (force_connected or not it['connected'])] + if len(it_list) > 0: + for it in it_list: + self.client.unmap_lun_to_it(name, + it['initiator'], + it['port']) + + +@interface.volumedriver +class MacroSANISCSIDriver(MacroSANBaseDriver, driver.ISCSIDriver): + """ISCSI driver for MacroSan storage arrays. + + Version history: + + .. 
code-block:: none + + 1.0.0 - Initial driver + """ + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + """Initialize the driver.""" + super(MacroSANISCSIDriver, self).__init__(*args, **kwargs) + self.storage_protocol = 'iSCSI' + + def _do_setup(self): + ports = self.client.get_iscsi_ports() + for port in ports: + if port['port_name'] == '' and port['ip'] != '0': + self.client.create_target(port['port'], type='iscsi') + + if self.sdas_client: + ports = self.sdas_client.get_iscsi_ports() + for port in ports: + if port['port_name'] == '' and port['ip'] != '0': + self.sdas_client.create_target(port['port'], type='iscsi') + + def _get_iscsi_ports(self, dev_client, host): + ha_state = dev_client.get_ha_state() + + if host in self.client_info: + iscsi_sp1 = self.client_info[host]['sp1_port'] + iscsi_sp2 = self.client_info[host]['sp2_port'] + else: + iscsi_sp1 = self.client_info['default']['sp1_port'] + iscsi_sp2 = self.client_info['default']['sp2_port'] + ports = [] + if ha_state['sp1'] in ['single', 'double', 'idle']: + ports.extend(iscsi_sp1.split(',')) + + if ha_state['sp2'] in ['single', 'double', 'idle']: + ports.extend(iscsi_sp2.split(',')) + + all_ports = {p['port']: p for p in dev_client.get_iscsi_ports()} + + return [all_ports[p] for p in ports] + + def _map_initr_tgt(self, dev_client, itl_client_name, initr, ports): + if not dev_client.get_client(itl_client_name): + dev_client.create_client(itl_client_name) + + if not dev_client.initiator_exists(initr): + dev_client.create_initiator(initr, itl_client_name, type='iscsi') + + if not dev_client.is_initiator_mapped_to_client(initr, + itl_client_name): + dev_client.map_initiator_to_client(initr, itl_client_name) + + for p in ports: + port_name = p['port_name'] + dev_client.map_target_to_initiator(port_name, initr) + + def _unmap_itl(self, dev_client, itl_client_name, + wwns, ports, volume_name): + wwn = wwns[0] + for p in ports: + port_name = p['port_name'] + dev_client.unmap_lun_to_it(volume_name, wwn, 
port_name) + + def _map_itl(self, dev_client, wwn, ports, volume_name, hint_lun_id): + lun_id = hint_lun_id + exists = False + for p in ports: + port_name = p['port_name'] + exists = dev_client.map_lun_to_it(volume_name, wwn, port_name, + hint_lun_id) + + if exists and lun_id == hint_lun_id: + lun_id = self.client.get_lun_id(wwn, port_name, volume_name) + + return lun_id + + def _get_unused_lun_id(self, wwn, dev_client, ports, + sdas_client, sdas_ports): + id_list = set(range(0, 511)) + for p in ports: + port_name = p['port_name'] + tmp_list = dev_client.get_it_unused_id_list('iscsi', wwn, + port_name) + id_list = id_list.intersection(tmp_list) + + for p in sdas_ports: + port_name = p['port_name'] + tmp_list = sdas_client.get_it_unused_id_list('iscsi', wwn, + port_name) + id_list = id_list.intersection(tmp_list) + + return id_list.pop() + + @property + def _self_node_wwns(self): + connector = cn.ISCSIConnector(utils.get_root_helper()) + return [connector.get_initiator()] + + def _initialize_connection(self, name, vol_params, host, wwns): + client_name = self._get_client_name(host) + wwn = wwns[0] + LOG.debug('initialize_connection, initiator: %(wwpns)s,' + 'volume name: %(volume)s.', + {'wwpns': wwns, 'volume': name}) + + ports = self._get_iscsi_ports(self.client, host) + self._map_initr_tgt(self.client, client_name, wwn, ports) + + if vol_params['sdas']: + sdas_ports = self._get_iscsi_ports(self.sdas_client, host) + self._map_initr_tgt(self.sdas_client, client_name, wwn, sdas_ports) + lun_id = self._get_unused_lun_id(wwn, self.client, ports, + self.sdas_client, sdas_ports) + + self._map_itl(self.sdas_client, wwn, sdas_ports, name, lun_id) + + lun_id = self._map_itl(self.client, wwn, ports, name, lun_id) + + ports = ports + sdas_ports + else: + lun_id = self._get_unused_lun_id(wwn, self.client, ports, None, {}) + lun_id = self._map_itl(self.client, wwn, ports, name, lun_id) + + properties = {'target_discovered': False, + 'target_portal': '%s:3260' % ports[0]['ip'], 
+ 'target_iqn': ports[0]['target'], + 'target_lun': lun_id, + 'target_iqns': [p['target'] for p in ports], + 'target_portals': ['%s:3260' % p['ip'] for p in ports], + 'target_luns': [lun_id] * len(ports)} + + LOG.info('initialize_connection, iSCSI properties: %(properties)s', + {'properties': properties}) + return {'driver_volume_type': 'iscsi', 'data': properties} + + @synchronized(lock_name) + @record_request_id + @_timing + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + LOG.debug('========== initialize_connection connector: %(connector)s', + {'connector': connector}) + + name = self._volume_name(volume) + params = self._parse_volume_params(volume) + conn = self._initialize_connection(name, params, connector['host'], + [connector['initiator']]) + conn['data']['volume_id'] = volume['id'] + return conn + + def _unmap_initr_tgt(self, dev_client, itl_client_name, wwn): + for p in dev_client.get_iscsi_ports(): + port_name = p['port_name'] + if dev_client.it_exists(wwn, port_name): + dev_client.unmap_target_from_initiator(port_name, wwn) + + if dev_client.initiator_exists(wwn): + dev_client.unmap_initiator_from_client(wwn, itl_client_name) + dev_client.delete_initiator(wwn) + + def _terminate_connection(self, name, volume_params, host, wwns): + client_name = self._get_client_name(host) + ports = self._get_iscsi_ports(self.client, host) + + self._unmap_itl(self.client, client_name, wwns, ports, name) + if volume_params['sdas']: + self._unmap_itl(self.sdas_client, client_name, wwns, ports, name) + + @synchronized(lock_name) + @record_request_id + @_timing + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + LOG.debug('========== terminate_connection %(connector)s', + {'connector': connector}) + + name = self._volume_name(volume) + if not connector: + self.force_terminate_connection(name, True) + else: + params = 
self._parse_volume_params(volume) + self._terminate_connection(name, params, connector['host'], + [connector['initiator']]) + + def _initialize_connection_snapshot(self, snp_name, connector): + return self._initialize_connection(snp_name, None, connector['host'], + [connector['initiator']]) + + def _terminate_connection_snapshot(self, snp_name, connector): + return self._terminate_connection(snp_name, None, connector['host'], + [connector['initiator']]) + + +@interface.volumedriver +class MacroSANFCDriver(MacroSANBaseDriver, driver.FibreChannelDriver): + """FC driver for MacroSan storage arrays. + + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver + """ + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + """Initialize the driver.""" + super(MacroSANFCDriver, self).__init__(*args, **kwargs) + self.storage_protocol = 'FC' + self.fcsan_lookup_service = None + self.use_sp_port_nr = self.configuration.macrosan_fc_use_sp_port_nr + self.keep_mapped_ports = \ + self.configuration.macrosan_fc_keep_mapped_ports + + def _do_setup(self): + self.fcsan_lookup_service = fczm_utils.create_lookup_service() + + ports = self.client.get_fc_ports() + for port in ports: + if port['port_name'] == '': + self.client.create_target(port['port']) + + if self.sdas_client: + ports = self.sdas_client.get_fc_ports() + for port in ports: + if port['port_name'] == '': + self.sdas_client.create_target(port['port']) + + @property + def _self_node_wwns(self): + fc = linuxfc.LinuxFibreChannel(utils.get_root_helper()) + return [self._format_wwn_with_colon(wwn) for wwn in fc.get_fc_wwpns()] + + def _strip_wwn_colon(self, wwn_str): + return wwn_str.replace(':', '') + + def _format_wwn_with_colon(self, wwn_str): + wwn_str = wwn_str.replace(":", "") + return (':'.join([wwn_str[i:i + 2] + for i in range(0, len(wwn_str), 2)])).lower() + + def _select_fc_ports(self, ports_in_storage, ports_in_fabric): + selected = [] + for sp in [1, 2]: + n = 0 + for p in ports_in_storage: + if 
(p['sp'] == sp + and p['online'] == 1 and p['wwn'] in ports_in_fabric): + selected.append({'port_name': p['port_name'], + 'wwn': p['wwn']}) + n += 1 + if n >= self.use_sp_port_nr: + break + return selected + + def _get_initr_port_map(self, dev_client, wwns): + initr_port_map = {} + ports_in_storage = dev_client.get_fc_ports() + if self.fcsan_lookup_service is not None: + mapping = (self.fcsan_lookup_service + .get_device_mapping_from_network( + wwns, [p['wwn'] for p in ports_in_storage])) + + for fabric in mapping: + wwns = mapping[fabric]['target_port_wwn_list'] + mapping[fabric]['target_port_wwn_list'] = ( + [self._format_wwn_with_colon(wwn) for wwn in wwns]) + wwns = mapping[fabric]['initiator_port_wwn_list'] + mapping[fabric]['initiator_port_wwn_list'] = ( + [self._format_wwn_with_colon(wwn) for wwn in wwns]) + + for fabric in mapping: + ports_in_fabric = mapping[fabric]['target_port_wwn_list'] + selected_ports = self._select_fc_ports(ports_in_storage, + ports_in_fabric) + + for initr in mapping[fabric]['initiator_port_wwn_list']: + initr_port_map[initr] = selected_ports + else: + initr_port_map = {} + for wwn in wwns: + for port in ports_in_storage: + if port['initr'] == wwn: + initr_port_map[wwn] = [port] + break + + return initr_port_map + + def _map_initr_tgt_do(self, dev_client, itl_client_name, + initr_port_map, mapped_ports): + for wwn in initr_port_map: + if wwn in mapped_ports: + continue + + if not dev_client.initiator_exists(wwn): + dev_client.create_initiator(wwn, wwn) + + if not dev_client.is_initiator_mapped_to_client(wwn, + itl_client_name): + dev_client.map_initiator_to_client(wwn, itl_client_name) + + for p in initr_port_map[wwn]: + port_name = p['port_name'] + dev_client.map_target_to_initiator(port_name, wwn) + + def _unmap_initr_tgt(self, dev_client, client_name, mapped_ports): + for wwn in mapped_ports: + for p in mapped_ports[wwn]: + port_name = p['port_name'] + if dev_client.it_exists(wwn, port_name): + 
dev_client.unmap_target_from_initiator(port_name, wwn) + + if dev_client.initiator_exists(wwn): + dev_client.unmap_initiator_from_client(wwn, client_name) + dev_client.delete_initiator(wwn) + + def _map_initr_tgt(self, dev_client, itl_client_name, wwns): + if not dev_client.get_client(itl_client_name): + dev_client.create_client(itl_client_name) + + initr_port_map = {} + mapped_ports = dev_client.get_fc_initr_mapped_ports(wwns) + has_port_not_mapped = not all(wwn in mapped_ports for wwn in wwns) + if has_port_not_mapped: + initr_port_map = self._get_initr_port_map(dev_client, wwns) + + initr_port_map.update(mapped_ports) + + if has_port_not_mapped: + self._map_initr_tgt_do(dev_client, itl_client_name, + initr_port_map, mapped_ports) + return has_port_not_mapped, initr_port_map + + def _map_itl(self, dev_client, initr_port_map, volume_name, hint_lun_id): + lun_id = hint_lun_id + exists = False + for wwn in initr_port_map: + for p in initr_port_map[wwn]: + port_name = p['port_name'] + exists = dev_client.map_lun_to_it(volume_name, wwn, + port_name, lun_id) + + if exists and lun_id == hint_lun_id: + lun_id = dev_client.get_lun_id(wwn, port_name, volume_name) + return lun_id + + def _get_unused_lun_id(self, dev_client, initr_port_map, + sdas_client, sdas_initr_port_map): + id_list = set(range(0, 511)) + for wwn in initr_port_map: + for p in initr_port_map[wwn]: + port_name = p['port_name'] + tmp_list = dev_client.get_it_unused_id_list('fc', wwn, + port_name) + id_list = id_list.intersection(tmp_list) + + for wwn in sdas_initr_port_map: + for p in sdas_initr_port_map[wwn]: + port_name = p['port_name'] + tmp_list = sdas_client.get_it_unused_id_list('fc', wwn, + port_name) + id_list = id_list.intersection(tmp_list) + + return id_list.pop() + + def _initialize_connection(self, name, vol_params, host, wwns): + client_name = self._get_client_name(host) + + LOG.info('initialize_connection, initiator: %(wwpns)s, ' + 'volume name: %(volume)s.', + {'wwpns': wwns, 'volume': 
name}) + + has_port_not_mapped, initr_port_map = ( + self._map_initr_tgt(self.client, client_name, wwns)) + LOG.info('====================initr_port_map %(initr_port_map)s', + {'initr_port_map': initr_port_map}) + + if vol_params and vol_params['sdas']: + sdas_has_port_not_mapped, sdas_initr_port_map = ( + self._map_initr_tgt(self.sdas_client, client_name, wwns)) + lun_id = self._get_unused_lun_id(self.client, initr_port_map, + self.sdas_client, + sdas_initr_port_map) + LOG.info('%(fr)sdas_initr_port_map %(sdas_initr_port_map)s', + {'fr': '=' * 10, + 'sdas_initr_port_map': sdas_initr_port_map}) + self._map_itl(self.sdas_client, sdas_initr_port_map, name, lun_id) + + lun_id = self._map_itl(self.client, initr_port_map, name, lun_id) + for initr, ports in sdas_initr_port_map.items(): + if len(ports): + initr_port_map[initr].extend(ports) + + has_port_not_mapped = (has_port_not_mapped or + sdas_has_port_not_mapped) + else: + lun_id = self._get_unused_lun_id(self.client, initr_port_map, + None, {}) + lun_id = self._map_itl(self.client, initr_port_map, name, lun_id) + + tgt_wwns = list(set(self._strip_wwn_colon(p['wwn']) + for wwn in initr_port_map + for p in initr_port_map[wwn])) + tgt_wwns.sort() + + properties = {'target_lun': lun_id, + 'target_discovered': True, + 'target_wwn': tgt_wwns} + if has_port_not_mapped and self.fcsan_lookup_service is not None: + initr_tgt_map = {} + for initr, ports in initr_port_map.items(): + initr = self._strip_wwn_colon(initr) + initr_tgt_map[initr] = ( + [self._strip_wwn_colon(p['wwn']) for p in ports]) + + properties['initiator_target_map'] = initr_tgt_map + + LOG.info('initialize_connection, FC properties: %(properties)s', + {'properties': properties}) + return {'driver_volume_type': 'fibre_channel', 'data': properties} + + @synchronized(lock_name) + @record_request_id + @_timing + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + LOG.debug('========== 
initialize_connection connector: %(connector)s',
+                  {'connector': connector})
+
+        name = self._volume_name(volume)
+        params = self._parse_volume_params(volume)
+        wwns = [self._format_wwn_with_colon(wwns)
+                for wwns in connector['wwpns']]
+        conn = self._initialize_connection(name, params,
+                                           connector['host'], wwns)
+        conn['data']['volume_id'] = volume['id']
+        fczm_utils.add_fc_zone(conn)
+        return conn
+
+    def _unmap_itl(self, dev_client, name, itl_client_name, wwns):
+        mapped_ports = dev_client.get_fc_initr_mapped_ports(wwns)
+        if len(mapped_ports) == 0:
+            return [], {}
+
+        for wwn, ports in mapped_ports.items():
+            for p in ports:
+                port_name = p['port_name']
+                dev_client.unmap_lun_to_it(name, wwn, port_name)
+
+        ports, initr_tgt_map = [], {}
+        if (not self.keep_mapped_ports and
+                not dev_client.has_initiators_mapped_any_lun(wwns)):
+            mapped_ports = dev_client.get_fc_initr_mapped_ports(wwns)
+            initr_tgt_map = {self._strip_wwn_colon(wwn):
+                             [self._strip_wwn_colon(p['wwn'])
+                              for p in mapped_ports[wwn]] for wwn in wwns}
+            ports = list(set(self._strip_wwn_colon(p['wwn'])
+                             for ports in mapped_ports.values()
+                             for p in ports))
+            self._unmap_initr_tgt(dev_client, itl_client_name, mapped_ports)
+        if self.fcsan_lookup_service is None:
+            initr_tgt_map = {}
+
+        return ports, initr_tgt_map
+
+    def _terminate_connection(self, name, vol_params, host, wwns):
+        client_name = self._get_client_name(host)
+        ports, initr_tgt_map = self._unmap_itl(self.client, name,
+                                               client_name, wwns)
+        if vol_params and vol_params['sdas']:
+            sdas_ports, sdas_initr_tgt_map = (
+                self._unmap_itl(self.sdas_client, name,
+                                client_name, wwns))
+            ports.extend(sdas_ports)
+            for initr, tgt_wwns in sdas_initr_tgt_map.items():
+                if len(tgt_wwns):
+                    initr_tgt_map[initr].extend(tgt_wwns)
+
+        data = {}
+        if ports:
+            data['target_wwn'] = ports
+        if initr_tgt_map:
+            data['initiator_target_map'] = initr_tgt_map
+        LOG.info('terminate_connection, data: %(data)s', {'data': data})
+        return {'driver_volume_type': 
'fibre_channel', 'data': data} + + @synchronized(lock_name) + @record_request_id + @_timing + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + LOG.debug('========== terminate_connection %(connector)s', + {'connector': connector}) + + name = self._volume_name(volume) + conn = None + if not connector: + self.force_terminate_connection(name, True) + conn = {'driver_volume_type': 'fibre_channel', 'data': {}} + else: + params = self._parse_volume_params(volume) + wwns = [self._format_wwn_with_colon(wwns) + for wwns in connector['wwpns']] + attachments = volume.volume_attachment + hostnum = 0 + for i in attachments: + if connector['host'] == i['attached_host']: + hostnum += 1 + if hostnum > 1: + pass + else: + conn = self._terminate_connection(name, params, + connector['host'], wwns) + fczm_utils.remove_fc_zone(conn) + return conn + + def _initialize_connection_snapshot(self, snp_name, connector): + wwns = [self._format_wwn_with_colon(wwns) + for wwns in connector['wwpns']] + return self._initialize_connection(snp_name, None, connector['host'], + wwns) + + def _terminate_connection_snapshot(self, snp_name, connector): + wwns = [self._format_wwn_with_colon(wwns) + for wwns in connector['wwpns']] + return self._terminate_connection(snp_name, None, connector['host'], + wwns) diff --git a/doc/source/configuration/block-storage/drivers/MacroSAN-storage-driver.rst b/doc/source/configuration/block-storage/drivers/MacroSAN-storage-driver.rst new file mode 100644 index 00000000000..68bd80b4bbf --- /dev/null +++ b/doc/source/configuration/block-storage/drivers/MacroSAN-storage-driver.rst @@ -0,0 +1,430 @@ +========================================== +MacroSAN Fibre Channel and iSCSI drivers +========================================== + +The ``MacroSANFCDriver`` and ``MacroSANISCSIDriver`` Cinder drivers allow the +MacroSAN Storage arrays to be used for Block Storage in +OpenStack deployments. 
+ +System requirements +~~~~~~~~~~~~~~~~~~~ + +To use the MacroSAN drivers, the following are required: + +- MacroSAN Storage arrays with: + - iSCSI or FC host interfaces + - Enable RESTful service on the MacroSAN Storage Appliance. + +- Network connectivity between the OpenStack host and the array management + interfaces + +- HTTPS or HTTP must be enabled on the array + +When creating a volume from image, install the ``multipath`` tool and add the +following configuration keys in the ``[DEFAULT]`` configuration group of +the ``/etc/cinder/cinder.conf`` file: + +.. code-block:: ini + + use_multipath_for_image_xfer = True + +Add and change the following configuration keys of +the ``/etc/multipath.conf`` file: + +.. code-block:: ini + + blacklist { + devnode "^sda$" + devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" + devnode "^hd[a-z]" + devnode "^nbd*" + } + +Need to set user_friendly_names to no in the multipath.conf file. + +In addition, you need to delete the getuid_callout parameter in +the centos7 system. + +Supported operations +~~~~~~~~~~~~~~~~~~~~ + +- Create, delete, attach, and detach volumes. +- Create, list, and delete volume snapshots. +- Create a volume from a snapshot. +- Copy an image to a volume. +- Copy a volume to an image. +- Clone a volume. +- Extend a volume. +- Volume Migration (Host assisted). +- Volume Migration (Storage Assisted). +- Retype a volume. +- Manage and unmanage a volume. +- Manage and unmanage a snapshot. +- Volume Replication +- Thin Provisioning + +Configuring the array +~~~~~~~~~~~~~~~~~~~~~ + +#. Verify that the array can be managed via an HTTPS connection. + + Confirm that virtual pools A and B are present if you plan to use virtual + pools for OpenStack storage. + +#. Edit the ``cinder.conf`` file to define a storage backend entry for each + storage pool on the array that will be managed by OpenStack. 
Each entry + consists of a unique section name, surrounded by square brackets, followed + by options specified in a ``key=value`` format. + + + * The ``volume_backend_name`` option value can be a unique value, if you + wish to be able to assign volumes to a specific storage pool on the + array, or a name that is shared among multiple storage pools to let the + volume scheduler choose where new volumes are allocated. + + In the examples below, two back ends are defined, one for pool A and one + for pool B. + + * Add the following configuration keys in the configuration group of + enabled_backends of the ``/etc/cinder/cinder.conf`` file: + + **iSCSI example back-end entries** + + .. code-block:: ini + + [DEFAULT] + enabled_backends = cinder-iscsi-a, cinder-iscsi-b + rpc_response_timeout = 300 + + [cinder-iscsi-a] + # Storage protocol. + iscsi_protocol = iscsi + + #iSCSI target user-land tool. + iscsi_helper = tgtadm + + # The iSCSI driver to load + volume_driver = cinder.volume.drivers.macrosan.driver.MacroSANISCSIDriver. + + # Name to give this storage back-end. + volume_backend_name = macrosan + + #Chose attach/detach volumes in cinder using multipath for volume to image and image to volume transfers. + use_multipath_for_image_xfer = True + + # IP address of the Storage if attaching directly. + san_ip = 172.17.251.142, 172.17.251.143 + + # Storage user name. + san_login = openstack + + # Storage user password. + san_password = openstack + + #Chose using thin-lun or thick lun.When set san_thin_provision to True,you must set + #macrosan_thin_lun_extent_size, macrosan_thin_lun_low_watermark, macrosan_thin_lun_high_watermark. + san_thin_provision = False + + #The name of Pool in the Storage. + macrosan_pool = Pool-a + + #The default ports used for initializing connection. 
+ #Separate the controller by semicolons (``;``) + #Separate the ports by semicolons (``,``) + macrosan_client_default = eth-1:0:0, eth-1:0:1; eth-2:0:0, eth-2:0:1 + + #The switch to force detach volume when deleting + macrosan_force_unmap_itl = True + + #Set snapshot's resource ratio + macrosan_snapshot_resource_ratio = 1 + + #Calculate the time spent on the operation in the log file. + macrosan_log_timing = True + + # =============Optional settings============= + + #Set the thin lun's extent size when the san_thin_provision is True. + macrosan_thin_lun_extent_size = 8 + + #Set the thin lun's low watermark when the san_thin_provision is True. + #macrosan_thin_lun_low_watermark = 8 + + #Set the thin lun's high watermark when the san_thin_provision is True. + macrosan_thin_lun_high_watermark = 40 + + #The setting of Symmetrical Dual Active Storage + macrosan_sdas_ipaddrs = 172.17.251.142, 172.17.251.143 + macrosan_sdas_username = openstack + macrosan_sdas_password = openstack + + #The setting of Replication Storage.When you set ip, you must set + #the macrosan_replication_destination_ports parameter. + macrosan_replication_ipaddrs = 172.17.251.142, 172.17.251.143 + macrosan_replication_username = openstack + macrosan_replication_password = openstack + + ##The ports used for the Replication Storage. 
#Separate the controllers by commas (``,``)
    #Separate the ports by slashes (``/``)
code-block:: ini + + [DEFAULT] + enabled_backends = cinder-fc-a, cinder-fc-b + rpc_response_timeout = 300 + + [cinder-fc-a] + volume_driver = cinder.volume.drivers.macrosan.driver.MacroSANFCDriver + volume_backend_name = macrosan + use_multipath_for_image_xfer = True + san_ip = 172.17.251.142, 172.17.251.143 + san_login = openstack + san_password = openstack + macrosan_pool = Pool-a + san_thin_provision = False + macrosan_force_unmap_itl = True + macrosan_snapshot_resource_ratio = 1 + macrosan_log_timing = True + + #FC Zoning mode configured. + zoning_mode = fabric + + #The number of ports used for initializing connection. + macrosan_fc_use_sp_port_nr = 1 + + #In the case of an FC connection, the configuration item associated with the port is maintained. + macrosan_fc_keep_mapped_ports = True + + # =============Optional settings============= + + macrosan_thin_lun_extent_size = 8 + macrosan_thin_lun_low_watermark = 8 + macrosan_thin_lun_high_watermark = 40 + macrosan_sdas_ipaddrs = 172.17.251.142, 172.17.251.143 + macrosan_sdas_username = openstack + macrosan_sdas_password = openstack + macrosan_replication_ipaddrs = 172.17.251.142, 172.17.251.143 + macrosan_replication_username = openstack + macrosan_replication_password = openstack + macrosan_replication_destination_ports = eth-1:0:0, eth-2:0:0 + + + [cinder-fc-b] + volume_driver = cinder.volume.drivers.macrosan.driver.MacroSANFCDriver + volume_backend_name = macrosan + use_multipath_for_image_xfer = True + san_ip = 172.17.251.142, 172.17.251.143 + san_login = openstack + san_password = openstack + macrosan_pool = Pool-b + san_thin_provision = False + macrosan_force_unmap_itl = True + macrosan_snapshot_resource_ratio = 1 + macrosan_log_timing = True + zoning_mode = fabric + macrosan_fc_use_sp_port_nr = 1 + macrosan_fc_keep_mapped_ports = True + + macrosan_thin_lun_extent_size = 8 + macrosan_thin_lun_low_watermark = 8 + macrosan_thin_lun_high_watermark = 40 + macrosan_sdas_ipaddrs = 172.17.251.142, 172.17.251.143 + 
macrosan_sdas_username = openstack + macrosan_sdas_password = openstack + macrosan_replication_ipaddrs = 172.17.251.142, 172.17.251.143 + macrosan_replication_username = openstack + macrosan_replication_password = openstack + macrosan_replication_destination_ports = eth-1:0:0, eth-2:0:0 + +#. After modifying the ``cinder.conf`` file, restart the ``cinder-volume`` + service. + +#. Create and use volume types. + + **Create and use sdas volume types** + + .. code-block:: console + + $ openstack volume type create sdas + $ openstack volume type set --property sdas=True sdas + + **Create and use replication volume types** + + .. code-block:: console + + $ openstack volume type create replication + $ openstack volume type set --property replication_enabled=True replication + +Configuration file parameters +----------------------------- + +This section describes mandatory and optional configuration file parameters +of the MacroSAN volume driver. + +.. list-table:: **Mandatory parameters** + :widths: 10 10 50 10 + :header-rows: 1 + + * - Parameter + - Default value + - Description + - Applicable to + * - volume_backend_name + - ``-`` + - indicates the name of the backend + - All + * - volume_driver + - ``cinder.volume.drivers.lvm.LVMVolumeDriver`` + - indicates the loaded driver + - All + * - use_multipath_for_image_xfer + - ``False`` + - Chose attach/detach volumes in cinder using multipath for volume to image and image to volume transfers. + - All + * - san_thin_provision + - ``True`` + - Default volume type setting, True is thin lun, and False is thick lun. + - All + * - macrosan_force_unmap_itl + - ``True`` + - Force detach volume when deleting + - All + * - macrosan_log_timing + - ``True`` + - Calculate the time spent on the operation in the log file. + - All + * - macrosan_snapshot_resource_ratio + - ``1`` + - Set snapshot's resource ratio". + - All + * - iscsi_helper + - ``tgtadm`` + - iSCSI target user-land tool to use. 
+ - iSCSI + * - iscsi_protocol + - ``iscsi`` + - Determines the iSCSI protocol for new iSCSI volumes, created with tgtadm. + - iSCSI + * - macrosan_client_default + - ``None`` + - This is the default connection information for iscsi.This default configuration is used when no host related information is obtained. + - iSCSI + * - zoning_mode + - ``True`` + - FC Zoning mode configured. + - Fibre channel + * - macrosan_fc_use_sp_port_nr + - ``1`` + - The use_sp_port_nr parameter is the number of online FC ports used by the single-ended memory when the FC connection is established in the switch non-all-pass mode. The maximum is 4. + - Fibre channel + * - macrosan_fc_keep_mapped_ports + - ``True`` + - In the case of an FC connection, the configuration item associated with the port is maintained. + - Fibre channel + +.. list-table:: **Optional parameters** + :widths: 20 10 50 15 + :header-rows: 1 + + * - Parameter + - Default value + - Description + - Applicable to + * - macrosan_sdas_ipaddrs + - ``-`` + - The ip of Symmetrical Dual Active Storage + - All + * - macrosan_sdas_username + - ``-`` + - The username of Symmetrical Dual Active Storage + - All + * - macrosan_sdas_password + - ``-`` + - The password of Symmetrical Dual Active Storage + - All + * - macrosan_replication_ipaddrs + - ``-`` + - The ip of replication Storage.When you set ip, you must set + the macrosan_replication_destination_ports parameter. + - All + * - macrosan_replication_username + - ``-`` + - The username of replication Storage + - All + * - macrosan_replication_password + - ``-`` + - The password of replication Storage + - All + * - macrosan_replication_destination_ports + - ``-`` + - The ports of replication storage when using replication storage. + - All + * - macrosan_thin_lun_extent_size + - ``8`` + - Set the thin lun's extent size when the san_thin_provision is True. 
+ - All + * - macrosan_thin_lun_low_watermark + - ``5`` + - Set the thin lun's low watermark when the san_thin_provision is True. + - All + * - macrosan_thin_lun_high_watermark + - ``20`` + - Set the thin lun's high watermark when the san_thin_provision is True. + - All + * - macrosan_client + - ``True`` + - Macrosan iscsi_clients list.You can configure multiple clients. + You can configure it in this format: + (hostname; client_name; sp1_iscsi_port; sp2_iscsi_port), + E.g: + (controller1; decive1; eth-1:0:0; eth-2:0:0),(controller2; decive2; eth-1:0:0/ eth-1:0:1; eth-2:0:0/ eth-2:0:1) + - All + +.. important:: + + Client_name has the following requirements: + [a-zA-Z0-9.-_:], the maximum number of characters is 31 + +The following are the MacroSAN driver specific options that may be set in +`cinder.conf`: + +.. config-table:: + :config-target: MacroSAN + + cinder.volume.drivers.macrosan.config + diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini index efedbfc8c71..3553c4de184 100644 --- a/doc/source/reference/support-matrix.ini +++ b/doc/source/reference/support-matrix.ini @@ -120,6 +120,9 @@ title=LINBIT DRBD/LINSTOR Driver (DRBD) [driver.lvm] title=Logical Volume Manager (LVM) Reference Driver (iSCSI) +[driver.macrosan] +title=MacroSAN Storage Driver (iSCSI, FC) + [driver.nec] title=NEC Storage M Series Driver (iSCSI, FC) @@ -231,6 +234,7 @@ driver.kaminario=complete driver.lenovo=complete driver.linbit_linstor=complete driver.lvm=complete +driver.macrosan=complete driver.nec=complete driver.netapp_ontap=complete driver.netapp_solidfire=complete @@ -294,6 +298,7 @@ driver.kaminario=complete driver.lenovo=complete driver.linbit_linstor=complete driver.lvm=complete +driver.macrosan=complete driver.nec=complete driver.netapp_ontap=missing driver.netapp_solidfire=complete @@ -357,6 +362,7 @@ driver.kaminario=missing driver.lenovo=missing driver.linbit_linstor=missing driver.lvm=missing +driver.macrosan=missing 
driver.nec=missing driver.netapp_ontap=missing driver.netapp_solidfire=missing @@ -423,6 +429,7 @@ driver.kaminario=missing driver.lenovo=missing driver.linbit_linstor=missing driver.lvm=missing +driver.macrosan=complete driver.nec=complete driver.netapp_ontap=complete driver.netapp_solidfire=complete @@ -488,6 +495,7 @@ driver.kaminario=complete driver.lenovo=missing driver.linbit_linstor=missing driver.lvm=missing +driver.macrosan=complete driver.nec=missing driver.netapp_ontap=complete driver.netapp_solidfire=complete @@ -554,6 +562,7 @@ driver.kaminario=missing driver.lenovo=missing driver.linbit_linstor=missing driver.lvm=missing +driver.macrosan=missing driver.nec=missing driver.netapp_ontap=complete driver.netapp_solidfire=complete @@ -619,6 +628,7 @@ driver.kaminario=complete driver.lenovo=missing driver.linbit_linstor=missing driver.lvm=complete +driver.macrosan=complete driver.nec=complete driver.netapp_ontap=complete driver.netapp_solidfire=complete @@ -685,6 +695,7 @@ driver.kaminario=missing driver.lenovo=missing driver.linbit_linstor=missing driver.lvm=missing +driver.macrosan=complete driver.nec=complete driver.netapp_ontap=missing driver.netapp_solidfire=missing @@ -751,6 +762,7 @@ driver.kaminario=missing driver.lenovo=complete driver.linbit_linstor=missing driver.lvm=complete +driver.macrosan=missing driver.nec=missing driver.netapp_ontap=complete driver.netapp_solidfire=complete @@ -814,6 +826,7 @@ driver.kaminario=missing driver.lenovo=missing driver.linbit_linstor=missing driver.lvm=complete +driver.macrosan=missing driver.nec=missing driver.netapp_ontap=missing driver.netapp_solidfire=complete @@ -881,6 +894,7 @@ driver.kaminario=missing driver.lenovo=missing driver.linbit_linstor=missing driver.lvm=missing +driver.macrosan=complete driver.nec=missing driver.netapp_ontap=missing driver.netapp_solidfire=missing diff --git a/releasenotes/notes/MacroSAN-volume-driver-6477e4ec7c38f49d.yaml 
b/releasenotes/notes/MacroSAN-volume-driver-6477e4ec7c38f49d.yaml new file mode 100644 index 00000000000..718e47de7c3 --- /dev/null +++ b/releasenotes/notes/MacroSAN-volume-driver-6477e4ec7c38f49d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added MacroSAN drivers that allows cinder to manage volumes in ISCSI and FC environment \ No newline at end of file