diff --git a/cinder/opts.py b/cinder/opts.py
index cf3b79057c3..a3475a47282 100644
--- a/cinder/opts.py
+++ b/cinder/opts.py
@@ -72,6 +72,12 @@ from cinder.volume import api as cinder_volume_api
from cinder.volume import driver as cinder_volume_driver
from cinder.volume.drivers.ceph import rbd_iscsi as \
cinder_volume_drivers_ceph_rbdiscsi
+from cinder.volume.drivers.datacore import driver as \
+ cinder_volume_drivers_datacore_driver
+from cinder.volume.drivers.datacore import fc as \
+ cinder_volume_drivers_datacore_fc
+from cinder.volume.drivers.datacore import iscsi as \
+ cinder_volume_drivers_datacore_iscsi
from cinder.volume.drivers.datera import datera_iscsi as \
cinder_volume_drivers_datera_dateraiscsi
from cinder.volume.drivers.dell_emc.powerflex import driver as \
@@ -319,6 +325,9 @@ def list_opts():
cinder_volume_driver.image_opts,
cinder_volume_driver.fqdn_opts,
cinder_volume_drivers_ceph_rbdiscsi.RBD_ISCSI_OPTS,
+ cinder_volume_drivers_datacore_driver.datacore_opts,
+ cinder_volume_drivers_datacore_fc.datacore_fc_opts,
+ cinder_volume_drivers_datacore_iscsi.datacore_iscsi_opts,
cinder_volume_drivers_dell_emc_powerflex_driver.
powerflex_opts,
cinder_volume_drivers_dell_emc_powermax_common.powermax_opts,
diff --git a/cinder/tests/unit/volume/drivers/datacore/__init__.py b/cinder/tests/unit/volume/drivers/datacore/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_api.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_api.py
new file mode 100644
index 00000000000..36be7403498
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_api.py
@@ -0,0 +1,732 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for classes that are used to invoke DataCore SANsymphony API."""
+
+from unittest import mock
+
+from oslo_utils import units
+import suds
+from suds.sax import parser
+from suds import wsdl
+
+from cinder.tests.unit import test
+from cinder.volume.drivers.datacore import api
+from cinder.volume.drivers.datacore import exception
+
+
+class FakeWebSocketException(Exception):
+ pass
+
+
+class DataCoreClientTestCase(test.TestCase):
+ """Tests for the DataCore SANsymphony client."""
+
+ def setUp(self):
+ super(DataCoreClientTestCase, self).setUp()
+ self.mock_storage_services = mock.MagicMock()
+ self.mock_executive_service = mock.MagicMock()
+
+ self.mock_suds_client = mock.MagicMock()
+ self.mock_object(
+ api.suds_client, 'Client', return_value=self.mock_suds_client)
+
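+        # The client talks to the server over a websocket; stub the module
+        # out so no real connection is ever opened during the tests.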
+ self.mock_channel = mock.MagicMock()
+ mock_websocket = self.mock_object(api, 'websocket')
+ mock_websocket.WebSocketException = FakeWebSocketException
+ mock_websocket.create_connection.return_value = self.mock_channel
+
+        (self.mock_suds_client.service.__getitem__
+         .side_effect) = self._get_service_side_effect
+
+ self.client = api.DataCoreClient('hostname', 'username', 'password', 1)
+ self.client.API_RETRY_INTERVAL = 0
+
+ # Make sure failure logging does not get emitted during testing
+ self.mock_object(api, 'LOG')
+
+ def _get_service_side_effect(self, service_name):
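+        # The suds client exposes two SOAP bindings; return the matching
+        # service mock for whichever binding is looked up.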
+ self.assertIn(service_name,
+ [
+ api.DataCoreClient.STORAGE_SERVICES_BINDING,
+ api.DataCoreClient.EXECUTIVE_SERVICE_BINDING
+ ])
+
+        if service_name == api.DataCoreClient.STORAGE_SERVICES_BINDING:
+ return self.mock_storage_services
+ else:
+ return self.mock_executive_service
+
+ def _assert_storage_services_method_called(self, method_name):
+ return self.mock_storage_services.__getitem__.assert_called_with(
+ method_name)
+
+ @property
+ def mock_storage_service_context(self):
+ return self.mock_storage_services.__getitem__()()
+
+ @property
+ def mock_executive_service_context(self):
+ return self.mock_executive_service.__getitem__()()
+
+ def test_process_request_failed(self):
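+        # A websocket failure must surface as DataCoreConnectionException,
+        # while a SOAP fault in the reply must surface as
+        # DataCoreFaultException.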
+ def fail_with_socket_error():
+ raise FakeWebSocketException()
+
+ def fail_with_web_fault(message):
+ fault = mock.Mock()
+ fault.faultstring = "General error."
+ document = mock.Mock()
+ raise suds.WebFault(fault, document)
+
+ self.mock_channel.recv.side_effect = fail_with_socket_error
+ self.assertRaises(exception.DataCoreConnectionException,
+ self.client.get_server_groups)
+ self.mock_channel.recv.side_effect = None
+
+ (self.mock_storage_service_context.process_reply
+ .side_effect) = fail_with_web_fault
+ self.assertRaises(exception.DataCoreFaultException,
+ self.client.get_server_groups)
+
+ def test_channel_closing_failed(self):
+ def fail_with_socket_error():
+ raise FakeWebSocketException()
+
+ def fail_with_web_fault(message):
+ fault = mock.Mock()
+ fault.faultstring = "General error."
+ document = mock.Mock()
+ raise suds.WebFault(fault, document)
+
+ self.mock_channel.close.side_effect = fail_with_socket_error
+ (self.mock_storage_service_context.process_reply
+ .side_effect) = fail_with_web_fault
+ self.assertRaises(exception.DataCoreFaultException,
+ self.client.get_server_groups)
+
+ def test_update_api_endpoints(self):
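+        # The first receive fails once, forcing the client to refresh its
+        # API endpoints from the server group data and retry the call.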
+ def fail_with_socket_error():
+ try:
+ raise FakeWebSocketException()
+ finally:
+ self.mock_channel.recv.side_effect = None
+
+ self.mock_channel.recv.side_effect = fail_with_socket_error
+
+ mock_executive_endpoints = [{
+ 'network_address': '127.0.0.1:3794',
+ 'http_endpoint': 'http://127.0.0.1:3794/',
+ 'ws_endpoint': 'ws://127.0.0.1:3794/',
+ }]
+ self.mock_object(self.client,
+ '_executive_service_endpoints',
+ mock_executive_endpoints)
+
+ mock_storage_endpoint = {
+ 'network_address': '127.0.0.1:3794',
+ 'http_endpoint': 'http://127.0.0.1:3794/',
+ 'ws_endpoint': 'ws://127.0.0.1:3794/',
+ }
+ self.mock_object(self.client,
+ '_storage_services_endpoint',
+ mock_storage_endpoint)
+
+ node = mock.Mock()
+ node.HostAddress = '127.0.0.1:3794'
+ reply = mock.MagicMock()
+ reply.RegionNodeData = [node]
+ self.mock_storage_service_context.process_reply.return_value = reply
+
+ result = self.client.get_server_groups()
+ self.assertIsNotNone(result)
+
+ def test_update_api_endpoints_failed(self):
+ def fail_with_socket_error():
+ try:
+ raise FakeWebSocketException()
+ finally:
+ self.mock_channel.recv.side_effect = None
+
+ self.mock_channel.recv.side_effect = fail_with_socket_error
+
+ mock_executive_endpoints = [{
+ 'network_address': '127.0.0.1:3794',
+ 'http_endpoint': 'http://127.0.0.1:3794/',
+ 'ws_endpoint': 'ws://127.0.0.1:3794/',
+ }]
+ self.mock_object(self.client,
+ '_executive_service_endpoints',
+ mock_executive_endpoints)
+
+ reply = mock.MagicMock()
+ reply.RegionNodeData = []
+ self.mock_storage_service_context.process_reply.return_value = reply
+
+ self.mock_executive_service_context.process_reply.return_value = None
+
+ result = self.client.get_server_groups()
+ self.assertIsNotNone(result)
+
+ def test_get_server_groups(self):
+ self.client.get_server_groups()
+ self._assert_storage_services_method_called('GetServerGroups')
+
+ def test_get_servers(self):
+ self.client.get_servers()
+ self._assert_storage_services_method_called('GetServers')
+
+ def test_get_disk_pools(self):
+ self.client.get_disk_pools()
+ self._assert_storage_services_method_called('GetDiskPools')
+
+ def test_get_logical_disks(self):
+ self.client.get_logical_disks()
+ self._assert_storage_services_method_called('GetLogicalDisks')
+
+ def test_create_pool_logical_disk(self):
+ pool_id = 'pool_id'
+ pool_volume_type = 'Striped'
+ size = 1 * units.Gi
+ min_quota = 1
+ max_quota = 1 * units.Gi
+ self.client.create_pool_logical_disk(
+ pool_id, pool_volume_type, size, min_quota, max_quota)
+ self._assert_storage_services_method_called('CreatePoolLogicalDisk')
+
+ def test_delete_logical_disk(self):
+ logical_disk_id = 'disk_id'
+ self.client.delete_logical_disk(logical_disk_id)
+ self._assert_storage_services_method_called('DeleteLogicalDisk')
+
+ def test_get_logical_disk_chunk_allocation_map(self):
+ logical_disk_id = 'disk_id'
+ self.client.get_logical_disk_chunk_allocation_map(logical_disk_id)
+ self._assert_storage_services_method_called(
+ 'GetLogicalDiskChunkAllocationMap')
+
+ def test_get_next_virtual_disk_alias(self):
+ base_alias = 'volume'
+ self.client.get_next_virtual_disk_alias(base_alias)
+ self._assert_storage_services_method_called('GetNextVirtualDiskAlias')
+
+ def test_get_virtual_disks(self):
+ self.client.get_virtual_disks()
+ self._assert_storage_services_method_called('GetVirtualDisks')
+
+ def test_build_virtual_disk_data(self):
+ disk_alias = 'alias'
+ disk_type = 'Mirrored'
+ size = 1 * units.Gi
+ description = 'description'
+ storage_profile_id = 'storage_profile_id'
+
+ vd_data = self.client.build_virtual_disk_data(
+ disk_alias, disk_type, size, description, storage_profile_id)
+
+ self.assertEqual(disk_alias, vd_data.Alias)
+ self.assertEqual(size, vd_data.Size.Value)
+ self.assertEqual(description, vd_data.Description)
+ self.assertEqual(storage_profile_id, vd_data.StorageProfileId)
+ self.assertTrue(hasattr(vd_data, 'Type'))
+ self.assertTrue(hasattr(vd_data, 'SubType'))
+ self.assertTrue(hasattr(vd_data, 'DiskStatus'))
+ self.assertTrue(hasattr(vd_data, 'RecoveryPriority'))
+
+ def test_create_virtual_disk_ex2(self):
+ disk_alias = 'alias'
+ disk_type = 'Mirrored'
+ size = 1 * units.Gi
+ description = 'description'
+ storage_profile_id = 'storage_profile_id'
+ first_disk_id = 'disk_id'
+ second_disk_id = 'disk_id'
+ add_redundancy = True
+ vd_data = self.client.build_virtual_disk_data(
+ disk_alias, disk_type, size, description, storage_profile_id)
+ self.client.create_virtual_disk_ex2(
+ vd_data, first_disk_id, second_disk_id, add_redundancy)
+ self._assert_storage_services_method_called('CreateVirtualDiskEx2')
+
+ def test_set_virtual_disk_size(self):
+ disk_id = 'disk_id'
+ size = 1 * units.Gi
+ self.client.set_virtual_disk_size(disk_id, size)
+ self._assert_storage_services_method_called('SetVirtualDiskSize')
+
+ def test_delete_virtual_disk(self):
+ virtual_disk_id = 'disk_id'
+ delete_logical_disks = True
+ self.client.delete_virtual_disk(virtual_disk_id, delete_logical_disks)
+ self._assert_storage_services_method_called('DeleteVirtualDisk')
+
+ def test_serve_virtual_disks_to_host(self):
+ host_id = 'host_id'
+ disks = ['disk_id']
+ self.client.serve_virtual_disks_to_host(host_id, disks)
+ self._assert_storage_services_method_called('ServeVirtualDisksToHost')
+
+ def test_unserve_virtual_disks_from_host(self):
+ host_id = 'host_id'
+ disks = ['disk_id']
+ self.client.unserve_virtual_disks_from_host(host_id, disks)
+ self._assert_storage_services_method_called(
+ 'UnserveVirtualDisksFromHost')
+
+ def test_unserve_virtual_disks_from_port(self):
+ port_id = 'port_id'
+ disks = ['disk_id']
+ self.client.unserve_virtual_disks_from_port(port_id, disks)
+ self._assert_storage_services_method_called(
+ 'UnserveVirtualDisksFromPort')
+
+ def test_bind_logical_disk(self):
+ disk_id = 'disk_id'
+ logical_disk_id = 'disk_id'
+ role = 'Second'
+ create_mirror_mappings = True
+ create_client_mappings = False
+ add_redundancy = True
+ self.client.bind_logical_disk(
+ disk_id, logical_disk_id, role, create_mirror_mappings,
+ create_client_mappings, add_redundancy)
+ self._assert_storage_services_method_called(
+ 'BindLogicalDisk')
+
+ def test_get_snapshots(self):
+ self.client.get_snapshots()
+ self._assert_storage_services_method_called('GetSnapshots')
+
+ def test_create_snapshot(self):
+ disk_id = 'disk_id'
+ name = 'name'
+ description = 'description'
+ pool_id = 'pool_id'
+ snapshot_type = 'Full'
+ duplicate_disk_id = False
+ storage_profile_id = 'profile_id'
+ self.client.create_snapshot(
+ disk_id, name, description, pool_id, snapshot_type,
+ duplicate_disk_id, storage_profile_id)
+ self._assert_storage_services_method_called('CreateSnapshot')
+
+ def test_delete_snapshot(self):
+ snapshot_id = "snapshot_id"
+ self.client.delete_snapshot(snapshot_id)
+ self._assert_storage_services_method_called('DeleteSnapshot')
+
+ def test_get_storage_profiles(self):
+ self.client.get_storage_profiles()
+ self._assert_storage_services_method_called('GetStorageProfiles')
+
+ def test_designate_map_store(self):
+ pool_id = 'pool_id'
+ self.client.designate_map_store(pool_id)
+ self._assert_storage_services_method_called('DesignateMapStore')
+
+ def test_get_performance_by_type(self):
+ types = ['DiskPoolPerformance']
+ self.client.get_performance_by_type(types)
+ self._assert_storage_services_method_called('GetPerformanceByType')
+
+ def test_get_ports(self):
+ self.client.get_ports()
+ self._assert_storage_services_method_called('GetPorts')
+
+ def test_build_scsi_port_data(self):
+ host_id = 'host_id'
+ port_name = 'port_name'
+ port_mode = 'Initiator'
+ port_type = 'iSCSI'
+
+ port_data = self.client.build_scsi_port_data(
+ host_id, port_name, port_mode, port_type)
+
+ self.assertEqual(host_id, port_data.HostId)
+ self.assertEqual(port_name, port_data.PortName)
+ self.assertTrue(hasattr(port_data, 'PortMode'))
+ self.assertTrue(hasattr(port_data, 'PortType'))
+
+ def test_register_port(self):
+ port_data = self.client.build_scsi_port_data(
+ 'host_id', 'port_name', 'initiator', 'iSCSI')
+ self.client.register_port(port_data)
+ self._assert_storage_services_method_called('RegisterPort')
+
+ def test_assign_port(self):
+ client_id = 'client_id'
+ port_id = 'port_id'
+ self.client.assign_port(client_id, port_id)
+ self._assert_storage_services_method_called('AssignPort')
+
+ def test_set_server_port_properties(self):
+ port_id = 'port_id'
+ port_properties = mock.MagicMock()
+ self.client.set_server_port_properties(port_id, port_properties)
+ self._assert_storage_services_method_called('SetServerPortProperties')
+
+ def test_build_access_token(self):
+ initiator_node_name = 'initiator'
+ initiator_username = 'initiator_username'
+ initiator_password = 'initiator_password'
+ mutual_authentication = True
+ target_username = 'target_username'
+ target_password = 'target_password'
+
+ access_token = self.client.build_access_token(
+ initiator_node_name, initiator_username, initiator_password,
+ mutual_authentication, target_username, target_password)
+
+ self.assertEqual(initiator_node_name, access_token.InitiatorNodeName)
+ self.assertEqual(initiator_username, access_token.InitiatorUsername)
+ self.assertEqual(initiator_password, access_token.InitiatorPassword)
+ self.assertEqual(mutual_authentication,
+ access_token.MutualAuthentication)
+ self.assertEqual(target_username, access_token.TargetUsername)
+ self.assertEqual(target_password, access_token.TargetPassword)
+
+ def test_set_access_token(self):
+ port_id = 'port_id'
+ access_token = self.client.build_access_token(
+ 'initiator_name', None, None, False, 'initiator_name', 'password')
+ self.client.set_access_token(port_id, access_token)
+ self._assert_storage_services_method_called('SetAccessToken')
+
+ def test_get_clients(self):
+ self.client.get_clients()
+ self._assert_storage_services_method_called('GetClients')
+
+ def test_register_client(self):
+ host_name = 'name'
+ description = 'description'
+ machine_type = 'Other'
+ mode = 'PreferredServer'
+ preferred_server_ids = None
+ self.client.register_client(
+ host_name, description, machine_type, mode, preferred_server_ids)
+ self._assert_storage_services_method_called('RegisterClient')
+
+ def test_set_client_capabilities(self):
+ client_id = 'client_id'
+ mpio = True
+ alua = True
+ self.client.set_client_capabilities(client_id, mpio, alua)
+ self._assert_storage_services_method_called('SetClientCapabilities')
+
+ def test_get_target_domains(self):
+ self.client.get_target_domains()
+ self._assert_storage_services_method_called('GetTargetDomains')
+
+ def test_create_target_domain(self):
+ initiator_host_id = 'host_id'
+ target_host_id = 'host_id'
+ self.client.create_target_domain(initiator_host_id, target_host_id)
+ self._assert_storage_services_method_called('CreateTargetDomain')
+
+ def test_delete_target_domain(self):
+ domain_id = 'domain_id'
+ self.client.delete_target_domain(domain_id)
+ self._assert_storage_services_method_called('DeleteTargetDomain')
+
+ def test_get_target_devices(self):
+ self.client.get_target_devices()
+ self._assert_storage_services_method_called('GetTargetDevices')
+
+ def test_build_scsi_port_nexus_data(self):
+ initiator_id = 'initiator_id'
+ target_id = 'target_id'
+
+        nexus = self.client.build_scsi_port_nexus_data(initiator_id,
+                                                       target_id)
+
+ self.assertEqual(initiator_id, nexus.InitiatorPortId)
+ self.assertEqual(target_id, nexus.TargetPortId)
+
+ def test_create_target_device(self):
+ domain_id = 'domain_id'
+ nexus = self.client.build_scsi_port_nexus_data('initiator_id',
+ 'target_id')
+ self.client.create_target_device(domain_id, nexus)
+ self._assert_storage_services_method_called('CreateTargetDevice')
+
+ def test_delete_target_device(self):
+ device_id = 'device_id'
+ self.client.delete_target_device(device_id)
+ self._assert_storage_services_method_called('DeleteTargetDevice')
+
+ def test_get_next_free_lun(self):
+ device_id = 'device_id'
+ self.client.get_next_free_lun(device_id)
+ self._assert_storage_services_method_called('GetNextFreeLun')
+
+ def test_get_logical_units(self):
+ self.client.get_logical_units()
+ self._assert_storage_services_method_called('GetLogicalUnits')
+
+ def test_map_logical_disk(self):
+ disk_id = 'disk_id'
+ lun = 0
+ host_id = 'host_id'
+ mapping_type = 'Client'
+ initiator_id = 'initiator_id'
+ target_id = 'target_id'
+        nexus = self.client.build_scsi_port_nexus_data(initiator_id,
+                                                       target_id)
+ self.client.map_logical_disk(
+ disk_id, nexus, lun, host_id, mapping_type)
+ self._assert_storage_services_method_called('MapLogicalDisk')
+
+ def test_unmap_logical_disk(self):
+ logical_disk_id = 'disk_id'
+ nexus = self.client.build_scsi_port_nexus_data('initiator_id',
+ 'target_id')
+ self.client.unmap_logical_disk(logical_disk_id, nexus)
+ self._assert_storage_services_method_called('UnmapLogicalDisk')
+
+
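+# A minimal WSDL document for the DataCore executive service, covering only
+# what FaultDefinitionsFilterTestCase exercises: two request-response
+# operations and two one-way notifications, each declaring a fault.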
+FAKE_WSDL_DOCUMENT = """
+<?xml version="1.0" encoding="utf-8"?>
+<wsdl:definitions name="ExecutiveServices"
+                  targetNamespace="http://tempuri.org/"
+                  xmlns:tns="http://tempuri.org/"
+                  xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
+                  xmlns:wsa10="http://www.w3.org/2005/08/addressing">
+    <wsdl:message name="StartExecutiveRequest"/>
+    <wsdl:message name="StartExecutiveResponse"/>
+    <wsdl:message name="StopExecutiveRequest"/>
+    <wsdl:message name="StopExecutiveResponse"/>
+    <wsdl:message name="ExecutiveStartedNotification"/>
+    <wsdl:message name="ExecutiveStoppedNotification"/>
+    <wsdl:message name="ExecutiveServiceFault"/>
+    <wsdl:portType name="IExecutiveServiceEx">
+        <wsdl:operation name="StartExecutive">
+            <wsdl:input message="tns:StartExecutiveRequest"/>
+            <wsdl:output message="tns:StartExecutiveResponse"/>
+            <wsdl:fault name="Fault" message="tns:ExecutiveServiceFault"/>
+        </wsdl:operation>
+        <wsdl:operation name="StopExecutive">
+            <wsdl:input message="tns:StopExecutiveRequest"/>
+            <wsdl:output message="tns:StopExecutiveResponse"/>
+            <wsdl:fault name="Fault" message="tns:ExecutiveServiceFault"/>
+        </wsdl:operation>
+        <wsdl:operation name="ExecutiveStarted">
+            <wsdl:input message="tns:ExecutiveStartedNotification"/>
+            <wsdl:fault name="Fault" message="tns:ExecutiveServiceFault"/>
+        </wsdl:operation>
+        <wsdl:operation name="ExecutiveStopped">
+            <wsdl:input message="tns:ExecutiveStoppedNotification"/>
+            <wsdl:fault name="Fault" message="tns:ExecutiveServiceFault"/>
+        </wsdl:operation>
+    </wsdl:portType>
+    <wsdl:binding name="CustomBinding_IExecutiveServiceEx"
+                  type="tns:IExecutiveServiceEx">
+        <wsdl:operation name="StartExecutive">
+            <wsdl:input/>
+            <wsdl:output/>
+            <wsdl:fault name="Fault"/>
+        </wsdl:operation>
+        <wsdl:operation name="StopExecutive">
+            <wsdl:input/>
+            <wsdl:output/>
+            <wsdl:fault name="Fault"/>
+        </wsdl:operation>
+        <wsdl:operation name="ExecutiveStarted">
+            <wsdl:input/>
+            <wsdl:fault name="Fault"/>
+        </wsdl:operation>
+        <wsdl:operation name="ExecutiveStopped">
+            <wsdl:input/>
+            <wsdl:fault name="Fault"/>
+        </wsdl:operation>
+    </wsdl:binding>
+    <wsdl:service name="ExecutiveServices">
+        <wsdl:port name="CustomBinding_IExecutiveServiceEx"
+                   binding="tns:CustomBinding_IExecutiveServiceEx">
+            <wsa10:EndpointReference>
+                <wsa10:Address>
+                    ws://mns-vsp-001:3794/IExecutiveServiceEx
+                </wsa10:Address>
+            </wsa10:EndpointReference>
+        </wsdl:port>
+    </wsdl:service>
+</wsdl:definitions>
+"""
+
+
+class FaultDefinitionsFilterTestCase(test.TestCase):
+ """Tests for the plugin to process the DataCore API WSDL document."""
+
+ @staticmethod
+ def _binding_operation_has_fault(document, operation_name):
+ for binding in document.getChildren('binding', wsdl.wsdlns):
+ for operation in binding.getChildren('operation', wsdl.wsdlns):
+ if operation.get('name') == operation_name:
+ fault = operation.getChildren('fault', wsdl.wsdlns)
+ if fault:
+ return True
+ return False
+
+ @staticmethod
+ def _port_type_operation_has_fault(document, operation_name):
+ for port_type in document.getChildren('portType', wsdl.wsdlns):
+ for operation in port_type.getChildren('operation', wsdl.wsdlns):
+ if operation.get('name') == operation_name:
+ fault = operation.getChildren('fault', wsdl.wsdlns)
+ if fault:
+ return True
+ return False
+
+ def _operation_has_fault(self, document, operation_name):
+ _binding_has_fault = self._binding_operation_has_fault(
+ document, operation_name)
+ _port_type_has_fault = self._port_type_operation_has_fault(
+ document, operation_name)
+ self.assertEqual(_binding_has_fault, _port_type_has_fault)
+ return _binding_has_fault
+
+ def test_parsed(self):
+ context = mock.Mock()
+ sax = parser.Parser()
+ wsdl_document = FAKE_WSDL_DOCUMENT
+ if isinstance(wsdl_document, str):
+ wsdl_document = wsdl_document.encode('utf-8')
+ context.document = sax.parse(string=wsdl_document).root()
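+        # Before filtering, every operation carries a fault definition.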
+ self.assertTrue(self._operation_has_fault(context.document,
+ 'StartExecutive'))
+ self.assertTrue(self._operation_has_fault(context.document,
+ 'StopExecutive'))
+ self.assertTrue(self._operation_has_fault(context.document,
+ 'ExecutiveStarted'))
+ self.assertTrue(self._operation_has_fault(context.document,
+ 'ExecutiveStopped'))
+ plugin = api.FaultDefinitionsFilter()
+ plugin.parsed(context)
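+        # After filtering, only the request-response operations keep their
+        # fault definitions; the one-way notifications lose them.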
+ self.assertTrue(self._operation_has_fault(context.document,
+ 'StartExecutive'))
+ self.assertTrue(self._operation_has_fault(context.document,
+ 'StopExecutive'))
+ self.assertFalse(self._operation_has_fault(context.document,
+ 'ExecutiveStarted'))
+ self.assertFalse(self._operation_has_fault(context.document,
+ 'ExecutiveStopped'))
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_driver.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_driver.py
new file mode 100644
index 00000000000..095f5a512f7
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_driver.py
@@ -0,0 +1,773 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the base Driver for DataCore SANsymphony storage array."""
+
+from __future__ import division
+
+import abc
+import math
+from unittest import mock
+
+from oslo_utils import units
+
+from cinder import context
+from cinder import exception as cinder_exception
+from cinder.tests.unit import fake_constants
+from cinder.tests.unit import fake_snapshot
+from cinder.tests.unit import fake_volume
+from cinder.tests.unit import utils as testutils
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.datacore import driver as datacore_driver
+from cinder.volume.drivers.datacore import exception as datacore_exception
+from cinder.volume.drivers.san import san
+
+
+SERVER_GROUPS = [
+ mock.Mock(Id='server_group_id1',
+ OurGroup=True),
+ mock.Mock(Id='server_group_id2',
+ OurGroup=False),
+]
+
+SERVERS = [
+ mock.Mock(Id='server_id1',
+ State='Online'),
+ mock.Mock(Id='server_id2',
+ State='Online'),
+]
+
+DISK_POOLS = [
+ mock.Mock(Id='disk_pool_id1',
+ Caption='disk_pool1',
+ ServerId='server_id1',
+ PoolStatus='Running'),
+ mock.Mock(Id='disk_pool_id2',
+ Caption='disk_pool2',
+ ServerId='server_id2',
+ PoolStatus='Running'),
+ mock.Mock(Id='disk_pool_id3',
+ Caption='disk_pool3',
+ ServerId='server_id1',
+ PoolStatus='Offline'),
+ mock.Mock(Id='disk_pool_id4',
+ Caption='disk_pool4',
+ ServerId='server_id2',
+ PoolStatus='Unknown'),
+]
+
+DISK_POOL_PERFORMANCE = [
+ mock.Mock(ObjectId='disk_pool_id1',
+ PerformanceData=mock.Mock(BytesTotal=5 * units.Gi,
+ BytesAllocated=2 * units.Gi,
+ BytesAvailable=3 * units.Gi,
+ BytesReserved=0)),
+ mock.Mock(ObjectId='disk_pool_id2',
+ PerformanceData=mock.Mock(BytesTotal=5 * units.Gi,
+ BytesAllocated=3 * units.Gi,
+ BytesAvailable=1 * units.Gi,
+ BytesReserved=1 * units.Gi)),
+ mock.Mock(ObjectId='disk_pool_id3',
+ PerformanceData=None),
+ mock.Mock(ObjectId='disk_pool_id4',
+ PerformanceData=None),
+]
+
+STORAGE_PROFILES = [
+ mock.Mock(Id='storage_profile_id1',
+ Caption='storage_profile1'),
+ mock.Mock(Id='storage_profile_id2',
+ Caption='storage_profile2'),
+ mock.Mock(Id='storage_profile_id3',
+ Caption='storage_profile3'),
+]
+
+VIRTUAL_DISKS = [
+ mock.Mock(Id='virtual_disk_id1',
+ DiskStatus='Online',
+ IsServed=False,
+ Alias='virtual_disk_id1',
+ Size=mock.Mock(Value=1 * units.Gi),
+ FirstHostId='server_id1'),
+ mock.Mock(Id='virtual_disk_id2',
+ DiskStatus='Failed',
+ IsServed=False,
+ Alias='virtual_disk_id2',
+ Size=mock.Mock(Value=1 * units.Gi),
+ FirstHostId='server_id2'),
+ mock.Mock(Id='virtual_disk_id3',
+ DiskStatus='Online',
+ IsServed=True,
+ Alias='virtual_disk_id3',
+ Size=mock.Mock(Value=1 * units.Gi),
+ FirstHostId='server_id1',
+ SecondHostId='server_id2'),
+ mock.Mock(Id='virtual_disk_id4',
+ DiskStatus='Failed',
+ IsServed=False,
+ Alias='virtual_disk_id4',
+ Size=mock.Mock(Value=1 * units.Gi),
+ FirstHostId='server_id1',
+ SecondHostId='server_id2'),
+]
+
+
+EXT_VIRTUAL_DISKS = [
+ mock.Mock(Id='virtual_disk_id1',
+ DiskStatus='Online',
+ IsServed=False,
+ Alias='virtual_disk_id1',
+ Size=mock.Mock(Value=2 * units.Gi),
+ FirstHostId='server_id1'),
+]
+
+
+VIRTUAL_DISK_SNAPSHOTS = [
+ mock.Mock(Id='snapshot_id1',
+ State='Migrated',
+ Failure='NoFailure',
+ DestinationLogicalDiskId='logical_disk_id1'),
+ mock.Mock(Id='snapshot_id2',
+ State='Failed',
+ Failure='NotAccessible',
+ DestinationLogicalDiskId='logical_disk_id2'),
+ mock.Mock(Id='snapshot_id3',
+ State='Migrated',
+ Failure='NoFailure',
+ DestinationLogicalDiskId='logical_disk_id2'),
+]
+
+LOGICAL_DISKS = [
+ mock.Mock(Id='logical_disk_id1',
+ VirtualDiskId='virtual_disk_id1',
+ ServerHostId='server_id1',
+ PoolId='disk_pool_id1',
+ Size=mock.Mock(Value=1 * units.Gi)),
+ mock.Mock(Id='logical_disk_id2',
+ VirtualDiskId='virtual_disk_id2',
+ ServerHostId='server_id1',
+ PoolId='disk_pool_id3',
+ Size=mock.Mock(Value=1 * units.Gi)),
+ mock.Mock(Id='logical_disk_id3',
+ VirtualDiskId='virtual_disk_id3',
+ ServerHostId='server_id1',
+ PoolId='disk_pool_id1',
+ Size=mock.Mock(Value=1 * units.Gi)),
+ mock.Mock(Id='logical_disk_id4',
+ VirtualDiskId='virtual_disk_id3',
+ ServerHostId='server_id2',
+ PoolId='disk_pool_id2',
+ Size=mock.Mock(Value=1 * units.Gi)),
+ mock.Mock(Id='logical_disk_id5',
+ VirtualDiskId='virtual_disk_id4',
+ ServerHostId='server_id1',
+ PoolId='disk_pool_id3',
+ Size=mock.Mock(Value=1 * units.Gi)),
+ mock.Mock(Id='logical_disk_id6',
+ VirtualDiskId='virtual_disk_id4',
+ ServerHostId='server_id2',
+ PoolId='disk_pool_id4',
+ Size=mock.Mock(Value=1 * units.Gi)),
+]
+
+LOGICAL_UNITS = [
+ mock.Mock(VirtualTargetDeviceId='target_device_id1',
+ LogicalDiskId='logical_disk_id3'),
+ mock.Mock(VirtualTargetDeviceId='target_device_id2',
+ LogicalDiskId='logical_disk_id4'),
+]
+
+TARGET_DEVICES = [
+ mock.Mock(Id='target_device_id1',
+ InitiatorPortId='initiator_port_id1'),
+ mock.Mock(Id='target_device_id2',
+ InitiatorPortId='initiator_port_id1'),
+]
+
+CLIENTS = [
+ mock.Mock(Id='client_id1',
+ HostName='client_host_name1'),
+ mock.Mock(Id='client_id2',
+ HostName='client_host_name2'),
+]
+
+
+class DataCoreVolumeDriverTestCase(object):
+ """Tests for the base Driver for DataCore SANsymphony storage array."""
+
+ def setUp(self):
+ super(DataCoreVolumeDriverTestCase, self).setUp()
+ self.mock_client = mock.Mock()
+ self.mock_client.get_servers.return_value = SERVERS
+ self.mock_client.get_disk_pools.return_value = DISK_POOLS
+ (self.mock_client.get_performance_by_type
+ .return_value) = DISK_POOL_PERFORMANCE
+ self.mock_client.get_virtual_disks.return_value = VIRTUAL_DISKS
+ self.mock_client.get_storage_profiles.return_value = STORAGE_PROFILES
+ self.mock_client.get_snapshots.return_value = VIRTUAL_DISK_SNAPSHOTS
+ self.mock_client.get_logical_disks.return_value = LOGICAL_DISKS
+ self.mock_client.get_clients.return_value = CLIENTS
+ self.mock_client.get_server_groups.return_value = SERVER_GROUPS
+ self.mock_object(datacore_driver.api,
+ 'DataCoreClient',
+ return_value=self.mock_client)
+ self.context = context.get_admin_context()
+
+ self.volume_a = fake_volume.fake_volume_obj(
+ self.context,
+ **{'name': u'volume_1',
+ 'volume_type_id': None,
+ 'id': fake_constants.VOLUME_ID,
+ 'size': 1})
+ self.volume_ext = fake_volume.fake_volume_obj(
+ self.context,
+ **{'name': u'volume_1',
+ 'volume_type_id': None,
+ 'id': fake_constants.VOLUME2_ID,
+ 'size': 2})
+ self.snapshot_a = fake_snapshot.fake_snapshot_obj(
+ self.context,
+ **{'name': u'snapshot_1',
+ 'id': fake_constants.SNAPSHOT_ID,
+ 'size': 1})
+
+ @staticmethod
+ @abc.abstractmethod
+ def init_driver(config):
+ raise NotImplementedError()
+
+ @staticmethod
+ def create_configuration():
+ config = conf.Configuration(None)
+ config.append_config_values(san.san_opts)
+ config.append_config_values(datacore_driver.datacore_opts)
+ return config
+
+ def setup_default_configuration(self):
+ config = self.create_configuration()
+ config.volume_backend_name = 'DataCore'
+ config.san_ip = '127.0.0.1'
+ config.san_login = 'dcsadmin'
+ config.san_password = 'password'
+ config.datacore_api_timeout = 0
+ config.datacore_disk_failed_delay = 0
+ return config
+
+ def test_do_setup(self):
+ config = self.setup_default_configuration()
+ self.init_driver(config)
+
+ def test_do_setup_failed(self):
+ config = self.setup_default_configuration()
+ config.san_ip = None
+ self.assertRaises(cinder_exception.InvalidInput,
+ self.init_driver,
+ config)
+
+ config = self.setup_default_configuration()
+ config.san_login = None
+ self.assertRaises(cinder_exception.InvalidInput,
+ self.init_driver,
+ config)
+
+ config = self.setup_default_configuration()
+ config.san_password = None
+ self.assertRaises(cinder_exception.InvalidInput,
+ self.init_driver,
+ config)
+
+ def test_get_volume_stats(self):
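+        # Recompute the expected stats the way the driver aggregates them:
+        # sum the per-pool performance counters, then convert to GiB.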
+ aggregation = [(getattr(perf.PerformanceData, 'BytesTotal', 0),
+ getattr(perf.PerformanceData, 'BytesAvailable', 0),
+ getattr(perf.PerformanceData, 'BytesReserved', 0),)
+ for perf in DISK_POOL_PERFORMANCE]
+
+ total, available, reserved = map(sum, zip(*aggregation))
+ free = (available + reserved) / units.Gi
+ reserved = 100.0 * reserved / total
+ reserved = math.ceil(reserved)
+ total /= units.Gi
+ provisioned = sum(disk.Size.Value for disk in LOGICAL_DISKS)
+ provisioned /= units.Gi
+ ratio = 2.0
+
+ config = self.setup_default_configuration()
+ config.max_over_subscription_ratio = ratio
+ driver = self.init_driver(config)
+ expected_volume_stats = {
+ 'vendor_name': 'DataCore',
+ 'QoS_support': False,
+ 'total_capacity_gb': total,
+ 'free_capacity_gb': free,
+ 'provisioned_capacity_gb': provisioned,
+ 'reserved_percentage': reserved,
+ 'max_over_subscription_ratio': ratio,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': False,
+ 'online_extend_support': False,
+ 'volume_backend_name': driver.get_volume_backend_name(),
+ 'driver_version': driver.get_version(),
+ 'storage_protocol': driver.STORAGE_PROTOCOL,
+ }
+ volume_stats = driver.get_volume_stats(refresh=True)
+ self.assertDictEqual(expected_volume_stats, volume_stats)
+ volume_stats_cached = driver.get_volume_stats(refresh=False)
+ self.assertEqual(volume_stats, volume_stats_cached)
+
+ def test_create_volume(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ result = driver.create_volume(volume)
+ self.assertIn('provider_location', result)
+ self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+ def test_create_volume_mirrored_disk_type_specified(self):
+ virtual_disk = VIRTUAL_DISKS[2]
+ self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+ config = self.setup_default_configuration()
+ config.datacore_disk_type = 'mirrored'
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ result = driver.create_volume(volume)
+ self.assertIn('provider_location', result)
+ self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume_type = {
+ 'extra_specs': {driver.DATACORE_DISK_TYPE_KEY: 'mirrored'}
+ }
+ get_volume_type = self.mock_object(datacore_driver.volume_types,
+ 'get_volume_type')
+ get_volume_type.return_value = volume_type
+ volume = self.volume_a
+ volume['volume_type_id'] = 'volume_type_id'
+ result = driver.create_volume(volume)
+ self.assertIn('provider_location', result)
+ self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+ def test_create_volume_profile_specified(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+ config = self.setup_default_configuration()
+ config.datacore_storage_profile = 'storage_profile1'
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ result = driver.create_volume(volume)
+ self.assertIn('provider_location', result)
+ self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+ volume_type = {
+ 'extra_specs': {
+ driver.DATACORE_STORAGE_PROFILE_KEY: 'storage_profile2'
+ }
+ }
+ get_volume_type = self.mock_object(datacore_driver.volume_types,
+ 'get_volume_type')
+ get_volume_type.return_value = volume_type
+ volume = self.volume_a
+ volume['volume_type_id'] = 'volume_type_id'
+ result = driver.create_volume(volume)
+ self.assertIn('provider_location', result)
+ self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+ def test_create_volume_pool_specified(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+ config = self.setup_default_configuration()
+ config.datacore_disk_pools = ['disk_pool1']
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ result = driver.create_volume(volume)
+ self.assertIn('provider_location', result)
+ self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+ volume_type = {
+ 'extra_specs': {driver.DATACORE_DISK_POOLS_KEY: 'disk_pool2'}
+ }
+ get_volume_type = self.mock_object(datacore_driver.volume_types,
+ 'get_volume_type')
+ get_volume_type.return_value = volume_type
+ volume = self.volume_a
+ volume['volume_type_id'] = 'volume_type_id'
+ result = driver.create_volume(volume)
+ self.assertIn('provider_location', result)
+ self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+ def test_create_volume_failed(self):
+ def fail_with_datacore_fault(*args):
+ raise datacore_exception.DataCoreFaultException(
+ reason="General error.")
+
+ (self.mock_client.create_virtual_disk_ex2
+ .side_effect) = fail_with_datacore_fault
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ self.assertRaises(datacore_exception.DataCoreFaultException,
+ driver.create_volume,
+ volume)
+
+ def test_create_volume_unknown_disk_type_specified(self):
+ config = self.setup_default_configuration()
+ config.datacore_disk_type = 'unknown'
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_volume,
+ volume)
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume_type = {
+ 'extra_specs': {driver.DATACORE_DISK_TYPE_KEY: 'unknown'}
+ }
+ get_volume_type = self.mock_object(datacore_driver.volume_types,
+ 'get_volume_type')
+ get_volume_type.return_value = volume_type
+ volume = self.volume_a
+ volume['volume_type_id'] = 'volume_type_id'
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_volume,
+ volume)
+
+ def test_create_volume_unknown_profile_specified(self):
+ config = self.setup_default_configuration()
+ config.datacore_storage_profile = 'unknown'
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_volume,
+ volume)
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume_type = {
+ 'extra_specs': {driver.DATACORE_STORAGE_PROFILE_KEY: 'unknown'}
+ }
+ get_volume_type = self.mock_object(datacore_driver.volume_types,
+ 'get_volume_type')
+ get_volume_type.return_value = volume_type
+ volume = self.volume_a
+ volume['volume_type_id'] = 'volume_type_id'
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_volume,
+ volume)
+
+ def test_create_volume_on_failed_pool(self):
+ config = self.setup_default_configuration()
+ config.datacore_disk_pools = ['disk_pool3', 'disk_pool4']
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_volume,
+ volume)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_volume_await_online_timed_out(self):
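+        # VIRTUAL_DISKS[1] never leaves the 'Failed' state, so the
+        # wait-for-online loop (patched to run with zero interval) times out.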
+ virtual_disk = VIRTUAL_DISKS[1]
+ self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+ config = self.setup_default_configuration()
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_volume,
+ volume)
+
+ def test_extend_volume(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ self.assertIsNone(driver.extend_volume(volume, 2147483648))
+
+ def test_extend_volume_failed_not_found(self):
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = 'wrong_virtual_disk_id'
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.extend_volume,
+ volume,
+ 2147483648)
+
+ def test_delete_volume(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ driver.delete_volume(volume)
+
+ def test_delete_volume_assigned(self):
+ self.mock_client.get_logical_disks.return_value = LOGICAL_DISKS
+ self.mock_client.get_logical_units.return_value = LOGICAL_UNITS
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ virtual_disk = VIRTUAL_DISKS[2]
+ volume.provider_location = virtual_disk.Id
+ driver.delete_volume(volume)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_snapshot(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ snapshot = self.snapshot_a
+ snapshot.volume = volume
+ result = driver.create_snapshot(snapshot)
+ self.assertIn('provider_location', result)
+
+ def test_create_snapshot_on_failed_pool(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ config = self.setup_default_configuration()
+ config.datacore_disk_pools = ['disk_pool3', 'disk_pool4']
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ snapshot = self.snapshot_a
+ snapshot.volume = volume
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_snapshot,
+ snapshot)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_snapshot_await_migrated_timed_out(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[1]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ snapshot = self.snapshot_a
+ snapshot.volume = volume
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_snapshot,
+ snapshot)
+
+ def test_delete_snapshot(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ snapshot = self.snapshot_a
+ snapshot.provider_location = virtual_disk.Id
+ driver.delete_snapshot(snapshot)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_volume_from_snapshot(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ snapshot = self.snapshot_a
+ snapshot.provider_location = virtual_disk.Id
+ result = driver.create_volume_from_snapshot(volume, snapshot)
+ self.assertIn('provider_location', result)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_volume_from_snapshot_mirrored_disk_type_specified(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ config = self.setup_default_configuration()
+ config.datacore_disk_type = 'mirrored'
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ snapshot = self.snapshot_a
+ snapshot.provider_location = virtual_disk.Id
+ result = driver.create_volume_from_snapshot(volume, snapshot)
+ self.assertIn('provider_location', result)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_volume_from_snapshot_on_failed_pool(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ config = self.setup_default_configuration()
+ config.datacore_disk_type = 'mirrored'
+ config.datacore_disk_pools = ['disk_pool1', 'disk_pool4']
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ snapshot = self.snapshot_a
+ snapshot.provider_location = virtual_disk.Id
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_volume_from_snapshot,
+ volume,
+ snapshot)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_volume_from_snapshot_await_online_timed_out(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ snapshot_virtual_disk = VIRTUAL_DISKS[1]
+ (self.mock_client.set_virtual_disk_size
+ .return_value) = snapshot_virtual_disk
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[2]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ snapshot = self.snapshot_a
+ snapshot.provider_location = virtual_disk.Id
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_volume_from_snapshot,
+ volume,
+ snapshot)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_cloned_volume(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ src_vref = self.volume_a
+ src_vref.provider_location = virtual_disk.Id
+ result = driver.create_cloned_volume(volume, src_vref)
+ self.assertIn('provider_location', result)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_cloned_volume_mirrored_disk_type_specified(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ config = self.setup_default_configuration()
+ config.datacore_disk_type = 'mirrored'
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ src_vref = self.volume_a
+ src_vref.provider_location = virtual_disk.Id
+ result = driver.create_cloned_volume(volume, src_vref)
+ self.assertIn('provider_location', result)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_cloned_volume_on_failed_pool(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ config = self.setup_default_configuration()
+ config.datacore_disk_type = 'mirrored'
+ config.datacore_disk_pools = ['disk_pool1', 'disk_pool4']
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ src_vref = self.volume_a
+ src_vref.provider_location = virtual_disk.Id
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_cloned_volume,
+ volume,
+ src_vref)
+
+ @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
+ new=testutils.ZeroIntervalLoopingCall)
+ def test_create_cloned_volume_await_online_timed_out(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ snapshot_virtual_disk = VIRTUAL_DISKS[1]
+ (self.mock_client.set_virtual_disk_size
+ .return_value) = snapshot_virtual_disk
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[2]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ src_vref = self.volume_a
+ src_vref.provider_location = virtual_disk.Id
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.create_cloned_volume,
+ volume,
+ src_vref)
+
+ def test_terminate_connection(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ client = CLIENTS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ connector = {'host': client.HostName, 'wwpns': ['100000109bddf539']}
+ driver.terminate_connection(volume, connector)
+
+ def test_terminate_connection_connector_is_none(self):
+ virtual_disk = VIRTUAL_DISKS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ driver.terminate_connection(volume, None)
+
+ def test_manage_existing(self):
+ volume = self.volume_a
+ driver = self.init_driver(self.setup_default_configuration())
+ ret = driver.manage_existing(
+ volume, self.existing_ref)
+ self.assertEqual("virtual_disk_id1", ret['provider_location'])
+
+ def test_manage_existing_get_size(self):
+ volume = self.volume_a
+ driver = self.init_driver(self.setup_default_configuration())
+ driver.manage_existing_get_size(
+ volume, self.existing_ref)
+
+ def test_manage_existing_snapshot(self):
+ snapshot = self.snapshot_a
+ driver = self.init_driver(self.setup_default_configuration())
+ ret = driver.manage_existing_snapshot(
+ snapshot, self.existing_ref)
+ self.assertEqual("virtual_disk_id1", ret['provider_location'])
+
+ def test_manage_existing_snapshot_get_size(self):
+ snapshot = self.snapshot_a
+ driver = self.init_driver(self.setup_default_configuration())
+ driver.manage_existing_snapshot_get_size(
+ snapshot, self.existing_ref)
+
+ def test_create_extended_cloned_volume(self):
+ virtual_disk = EXT_VIRTUAL_DISKS[0]
+ self.mock_client.get_virtual_disks.return_value = EXT_VIRTUAL_DISKS
+ virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+ self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_ext
+ src_vref = self.volume_a
+ src_vref.provider_location = virtual_disk.Id
+ result = driver.create_cloned_volume(volume, src_vref)
+ self.assertIn('provider_location', result)
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_fc.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_fc.py
new file mode 100644
index 00000000000..9bcdb08c051
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_fc.py
@@ -0,0 +1,291 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the Fibre Channel Driver for DataCore SANsymphony
+storage array.
+"""
+
+from unittest import mock
+
+from cinder import exception as cinder_exception
+from cinder.tests.unit import test
+from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver
+from cinder.volume.drivers.datacore import fc
+
+
+PORTS = [
+ mock.Mock(Id='initiator_port_id1',
+ PortType='FibreChannel',
+ PortMode='Initiator',
+ PortName='AA-AA-AA-AA-AA-AA-AA-AA',
+ HostId='client_id1'),
+ mock.Mock(Id='initiator_port_id2',
+ PortType='FibreChannel',
+ PortMode='Initiator',
+ PortName='BB-BB-BB-BB-BB-BB-BB-BB'),
+ mock.Mock(__class__=mock.Mock(__name__='ServerFcPortData'),
+ Id='target_port_id1',
+ PortType='FibreChannel',
+ PortMode='Target',
+ PortName='CC-CC-CC-CC-CC-CC-CC-CC',
+ HostId='server_id1',
+ PresenceStatus='Present',
+ ServerPortProperties=mock.Mock(Role="Frontend"),
+ StateInfo=mock.Mock(State="LoopLinkUp")
+ ),
+ mock.Mock(Id='target_port_id2',
+ PortType='FibreChannel',
+ PortMode='Target',
+ PortName='DD-DD-DD-DD-DD-DD-DD-DD',
+ HostId='server_id1',
+ PresenceStatus='Present',
+ ServerPortProperties=mock.Mock(Role="Frontend"),
+ StateInfo=mock.Mock(State="LoopLinkUp")),
+]
+
+LOGICAL_UNITS = [
+ mock.Mock(VirtualTargetDeviceId='target_device_id1',
+ Lun=mock.Mock(Quad=4)),
+ mock.Mock(VirtualTargetDeviceId='target_device_id2',
+ Lun=mock.Mock(Quad=3)),
+ mock.Mock(VirtualTargetDeviceId='target_device_id3',
+ Lun=mock.Mock(Quad=2)),
+ mock.Mock(VirtualTargetDeviceId='target_device_id4',
+ Lun=mock.Mock(Quad=1)),
+]
+
+TARGET_DEVICES = [
+ mock.Mock(Id='target_device_id1',
+ TargetPortId='target_port_id1',
+ InitiatorPortId='initiator_port_id1'),
+ mock.Mock(Id='target_device_id2',
+ TargetPortId='target_port_id2',
+ InitiatorPortId='initiator_port_id1'),
+ mock.Mock(Id='target_device_id3',
+ TargetPortId='target_port_id2',
+ InitiatorPortId='initiator_port_id1'),
+ mock.Mock(Id='target_device_id4',
+ TargetPortId='target_port_id2',
+ InitiatorPortId='initiator_port_id2'),
+]
+
+
+class FibreChannelVolumeDriverTestCase(
+ test_datacore_driver.DataCoreVolumeDriverTestCase, test.TestCase):
+ """Tests for the FC Driver for DataCore SANsymphony storage array."""
+
+ existing_ref = {
+ 'source-name': 'virtual_disk_id1'}
+
+ def setUp(self):
+ super(FibreChannelVolumeDriverTestCase, self).setUp()
+ self.mock_client.get_ports.return_value = PORTS
+ (self.mock_client.build_scsi_port_nexus_data
+ .side_effect) = self._build_nexus_data
+ self.mock_client.map_logical_disk.side_effect = self._map_logical_disk
+
+ @staticmethod
+ def _build_nexus_data(initiator_port_id, target_port_id):
+ return mock.Mock(InitiatorPortId=initiator_port_id,
+ TargetPortId=target_port_id)
+
+ @staticmethod
+ def _map_logical_disk(logical_disk_id, nexus, *args):
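+        # Find the target device matching the nexus, then return the logical
+        # unit exported through that device.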
+ target_device_id = next((
+ device.Id for device in TARGET_DEVICES
+ if device.TargetPortId == nexus.TargetPortId
+ and device.InitiatorPortId == nexus.InitiatorPortId), None)
+ return next(unit for unit in LOGICAL_UNITS
+ if unit.VirtualTargetDeviceId == target_device_id)
+
+ @staticmethod
+ def init_driver(config):
+ driver = fc.FibreChannelVolumeDriver(configuration=config)
+ driver.do_setup(None)
+ return driver
+
+ def test_validate_connector(self):
+ driver = self.init_driver(self.setup_default_configuration())
+ connector = {
+ 'host': 'host_name',
+ 'wwpns': ['AA-AA-AA-AA-AA-AA-AA-AA'],
+ }
+ driver.validate_connector(connector)
+
+ def test_validate_connector_failed(self):
+ driver = self.init_driver(self.setup_default_configuration())
+ connector = {}
+ self.assertRaises(cinder_exception.InvalidConnectorException,
+ driver.validate_connector,
+ connector)
+
+ connector = {'host': 'host_name'}
+ self.assertRaises(cinder_exception.InvalidConnectorException,
+ driver.validate_connector,
+ connector)
+
+ connector = {'wwpns': ['AA-AA-AA-AA-AA-AA-AA-AA']}
+ self.assertRaises(cinder_exception.InvalidConnectorException,
+ driver.validate_connector,
+ connector)
+
+ def test_initialize_connection(self):
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_wwpns = [port.PortName.replace('-', '').lower() for port
+ in PORTS
+ if port.PortMode == 'Initiator']
+ connector = {
+ 'host': client.HostName,
+ 'wwpns': initiator_wwpns,
+ }
+ result = driver.initialize_connection(volume, connector)
+ self.assertEqual('fibre_channel', result['driver_volume_type'])
+
+ target_wwns = [port.PortName.replace('-', '').lower() for port
+ in PORTS
+ if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_wwn'][0], target_wwns)
+
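+        # Walk back from the returned WWN to the expected LUN:
+        # WWN -> target port -> target device -> logical unit.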
+ target_wwn = result['data']['target_wwn'][0]
+ target_port_id = next((
+ port.Id for port
+ in PORTS
+ if port.PortName.replace('-', '').lower() == target_wwn), None)
+ target_device_id = next((
+ device.Id for device
+ in TARGET_DEVICES
+ if device.TargetPortId == target_port_id), None)
+ target_lun = next((
+ unit.Lun.Quad for unit
+ in LOGICAL_UNITS
+ if unit.VirtualTargetDeviceId == target_device_id), None)
+ self.assertEqual(target_lun, result['data']['target_lun'])
+
+ self.assertFalse(result['data']['target_discovered'])
+ self.assertEqual(volume.id, result['data']['volume_id'])
+ self.assertEqual('rw', result['data']['access_mode'])
+
+ def test_initialize_connection_unknown_client(self):
+ client = test_datacore_driver.CLIENTS[0]
+ self.mock_client.register_client.return_value = client
+ (self.mock_client.get_clients
+ .return_value) = test_datacore_driver.CLIENTS[1:]
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_wwpns = [port.PortName.replace('-', '').lower() for port
+ in PORTS
+ if port.PortMode == 'Initiator']
+ connector = {
+ 'host': client.HostName,
+ 'wwpns': initiator_wwpns,
+ }
+
+ result = driver.initialize_connection(volume, connector)
+ self.assertEqual('fibre_channel', result['driver_volume_type'])
+
+ target_wwns = [port.PortName.replace('-', '').lower() for port
+ in PORTS
+ if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_wwn'][0], target_wwns)
+
+ target_wwn = result['data']['target_wwn'][0]
+ target_port_id = next((
+ port.Id for port
+ in PORTS
+ if port.PortName.replace('-', '').lower() == target_wwn), None)
+ target_device_id = next((
+ device.Id for device
+ in TARGET_DEVICES
+ if device.TargetPortId == target_port_id), None)
+ target_lun = next((
+ unit.Lun.Quad for unit
+ in LOGICAL_UNITS
+ if unit.VirtualTargetDeviceId == target_device_id), None)
+ self.assertEqual(target_lun, result['data']['target_lun'])
+
+ self.assertFalse(result['data']['target_discovered'])
+ self.assertEqual(volume.id, result['data']['volume_id'])
+ self.assertEqual('rw', result['data']['access_mode'])
+
+ def test_initialize_connection_failed_not_found(self):
+ client = test_datacore_driver.CLIENTS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = 'wrong_virtual_disk_id'
+ initiator_wwpns = [port.PortName.replace('-', '').lower() for port
+ in PORTS
+ if port.PortMode == 'Initiator']
+ connector = {
+ 'host': client.HostName,
+ 'wwpns': initiator_wwpns,
+ }
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.initialize_connection,
+ volume,
+ connector)
+
+ def test_initialize_connection_failed_initiator_not_found(self):
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ connector = {
+ 'host': client.HostName,
+ 'wwpns': ['0000000000000000'],
+ }
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.initialize_connection,
+ volume,
+ connector)
+
+ def test_initialize_connection_failed_on_serve(self):
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ config = self.setup_default_configuration()
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ config.datacore_fc_unallowed_targets = [
+ port.PortName for port in PORTS if port.PortMode == 'Target'
+ ]
+ initiator_wwpns = [port.PortName.replace('-', '').lower() for port
+ in PORTS
+ if port.PortMode == 'Initiator']
+ connector = {
+ 'host': client.HostName,
+ 'wwpns': initiator_wwpns,
+ }
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.initialize_connection,
+ volume,
+ connector)
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_iscsi.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_iscsi.py
new file mode 100644
index 00000000000..53534244ec1
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_iscsi.py
@@ -0,0 +1,587 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the iSCSI Driver for DataCore SANsymphony storage array."""
+
+from unittest import mock
+
+from cinder import exception as cinder_exception
+from cinder.tests.unit import test
+from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver
+from cinder.tests.unit.volume.drivers.datacore import test_datacore_passwd
+from cinder.volume.drivers.datacore import exception as datacore_exception
+from cinder.volume.drivers.datacore import iscsi
+
+
+ISCSI_PORT_STATE_INFO_READY = mock.Mock(
+ PortalsState=mock.Mock(
+ PortalStateInfo=[mock.Mock(State='Ready')]
+ )
+)
+
+ISCSI_PORT_CONFIG_INFO = mock.Mock(
+ PortalsConfig=mock.Mock(
+ iScsiPortalConfigInfo=[mock.Mock(
+ Address=mock.Mock(Address='127.0.0.1'), TcpPort='3260')]
+ )
+)
+
+PORTS = [
+ mock.Mock(Id='initiator_port_id1',
+ PortType='iSCSI',
+ PortMode='Initiator',
+ PortName='iqn.1993-08.org.debian:1:1',
+ HostId='client_id1'),
+ mock.Mock(Id='initiator_port_id2',
+ PortType='iSCSI',
+ PortMode='Initiator',
+ PortName='iqn.1993-08.org.debian:1:2'),
+ mock.Mock(__class__=mock.Mock(__name__='ServeriScsiPortData'),
+ Id='target_port_id1',
+ PortType='iSCSI',
+ PortMode='Target',
+ PortName='iqn.2000-08.com.datacore:server-1-1',
+ HostId='server_id1',
+ PresenceStatus='Present',
+ ServerPortProperties=mock.Mock(Role="Frontend",
+ Authentication='None'),
+ IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+ PortConfigInfo=ISCSI_PORT_CONFIG_INFO),
+ mock.Mock(Id='target_port_id2',
+ PortType='iSCSI',
+ PortMode='Target',
+ PortName='iqn.2000-08.com.datacore:server-1-2',
+ HostId='server_id1',
+ PresenceStatus='Present',
+ ServerPortProperties=mock.Mock(Role="Frontend",
+ Authentication='None'),
+ IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+ PortConfigInfo=ISCSI_PORT_CONFIG_INFO),
+]
+
+LOGICAL_UNITS = [
+ mock.Mock(VirtualTargetDeviceId='target_device_id1',
+ Lun=mock.Mock(Quad=4)),
+ mock.Mock(VirtualTargetDeviceId='target_device_id2',
+ Lun=mock.Mock(Quad=3)),
+ mock.Mock(VirtualTargetDeviceId='target_device_id3',
+ Lun=mock.Mock(Quad=2)),
+ mock.Mock(VirtualTargetDeviceId='target_device_id4',
+ Lun=mock.Mock(Quad=1)),
+]
+
+TARGET_DEVICES = [
+ mock.Mock(Id='target_device_id1',
+ TargetPortId='target_port_id1',
+ InitiatorPortId='initiator_port_id1'),
+ mock.Mock(Id='target_device_id2',
+ TargetPortId='target_port_id2',
+ InitiatorPortId='initiator_port_id1'),
+ mock.Mock(Id='target_device_id3',
+ TargetPortId='target_port_id2',
+ InitiatorPortId='initiator_port_id1'),
+ mock.Mock(Id='target_device_id4',
+ TargetPortId='target_port_id2',
+ InitiatorPortId='initiator_port_id2'),
+]
+
+
+class ISCSIVolumeDriverTestCase(
+ test_datacore_driver.DataCoreVolumeDriverTestCase, test.TestCase):
+ """Tests for the iSCSI Driver for DataCore SANsymphony storage array."""
+
+ existing_ref = {
+ 'source-name': 'virtual_disk_id1'}
+
+ def setUp(self):
+ super(ISCSIVolumeDriverTestCase, self).setUp()
+ self.mock_client.get_ports.return_value = PORTS
+ (self.mock_client.build_scsi_port_nexus_data
+ .side_effect) = self._build_nexus_data
+ self.mock_client.map_logical_disk.side_effect = self._map_logical_disk
+
+ @staticmethod
+ def _build_nexus_data(initiator_port_id, target_port_id):
+ return mock.Mock(InitiatorPortId=initiator_port_id,
+ TargetPortId=target_port_id)
+
+ @staticmethod
+ def _map_logical_disk(logical_disk_id, nexus, *args):
+ target_device_id = next((
+ device.Id for device in TARGET_DEVICES
+ if device.TargetPortId == nexus.TargetPortId
+ and device.InitiatorPortId == nexus.InitiatorPortId), None)
+ return next(unit for unit in LOGICAL_UNITS
+ if unit.VirtualTargetDeviceId == target_device_id)
+
+ @staticmethod
+ def init_driver(config):
+ driver = iscsi.ISCSIVolumeDriver(configuration=config)
+ driver.do_setup(None)
+ return driver
+
+ @staticmethod
+ def create_configuration():
+ config = super(ISCSIVolumeDriverTestCase,
+ ISCSIVolumeDriverTestCase).create_configuration()
+ config.append_config_values(iscsi.datacore_iscsi_opts)
+ return config
+
+ def test_do_setup_failed(self):
+ super(ISCSIVolumeDriverTestCase, self).test_do_setup_failed()
+
+ config = self.setup_default_configuration()
+ config.use_chap_auth = True
+ config.san_ip = ''
+ config.datacore_iscsi_chap_storage = '/var/lib/cinder/.datacore_chap'
+ self.assertRaises(cinder_exception.InvalidInput,
+ self.init_driver,
+ config)
+
+ def test_validate_connector(self):
+ driver = self.init_driver(self.setup_default_configuration())
+ connector = {
+ 'host': 'host_name',
+ 'initiator': 'iqn.1993-08.org.debian:1:1',
+ }
+ driver.validate_connector(connector)
+
+ def test_validate_connector_failed(self):
+ driver = self.init_driver(self.setup_default_configuration())
+ connector = {}
+ self.assertRaises(cinder_exception.InvalidConnectorException,
+ driver.validate_connector,
+ connector)
+
+ connector = {'host': 'host_name'}
+ self.assertRaises(cinder_exception.InvalidConnectorException,
+ driver.validate_connector,
+ connector)
+
+ connector = {'initiator': 'iqn.1993-08.org.debian:1:1'}
+ self.assertRaises(cinder_exception.InvalidConnectorException,
+ driver.validate_connector,
+ connector)
+
+ def test_initialize_connection(self):
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_iqn = PORTS[0].PortName
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ result = driver.initialize_connection(volume, connector)
+ self.assertEqual('iscsi', result['driver_volume_type'])
+
+        target_iqns = [port.PortName for port
+                       in PORTS
+                       if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_iqn'], target_iqns)
+
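+        # Derive the expected LUN the same way the driver maps a
+        # volume: iqn -> target port -> virtual target device ->
+        # logical unit.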
+ target_iqn = result['data']['target_iqn']
+ target_port = next((
+ port for port
+ in PORTS
+ if port.PortName == target_iqn), None)
+ target_device_id = next((
+ device.Id for device
+ in TARGET_DEVICES
+ if device.TargetPortId == target_port.Id), None)
+ target_lun = next((
+ unit.Lun.Quad for unit
+ in LOGICAL_UNITS
+ if unit.VirtualTargetDeviceId == target_device_id), None)
+ self.assertEqual(target_lun, result['data']['target_lun'])
+
+ self.assertEqual('127.0.0.1:3260', result['data']['target_portal'])
+ self.assertFalse(result['data']['target_discovered'])
+ self.assertEqual(volume.id, result['data']['volume_id'])
+ self.assertEqual('rw', result['data']['access_mode'])
+
+ def test_initialize_connection_unknown_client(self):
+ client = test_datacore_driver.CLIENTS[0]
+ self.mock_client.register_client.return_value = client
+ (self.mock_client.get_clients
+ .return_value) = test_datacore_driver.CLIENTS[1:]
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_iqn = PORTS[0].PortName
+
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ result = driver.initialize_connection(volume, connector)
+ self.assertEqual('iscsi', result['driver_volume_type'])
+
+        target_iqns = [port.PortName for port
+                       in PORTS
+                       if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_iqn'], target_iqns)
+
+ target_iqn = result['data']['target_iqn']
+ target_port = next((
+ port for port
+ in PORTS
+ if port.PortName == target_iqn), None)
+ target_device_id = next((
+ device.Id for device
+ in TARGET_DEVICES
+ if device.TargetPortId == target_port.Id), None)
+ target_lun = next((
+ unit.Lun.Quad for unit
+ in LOGICAL_UNITS
+ if unit.VirtualTargetDeviceId == target_device_id), None)
+ self.assertEqual(target_lun, result['data']['target_lun'])
+
+ self.assertEqual('127.0.0.1:3260', result['data']['target_portal'])
+ self.assertFalse(result['data']['target_discovered'])
+ self.assertEqual(volume.id, result['data']['volume_id'])
+ self.assertEqual('rw', result['data']['access_mode'])
+
+ def test_initialize_connection_unknown_initiator(self):
+ self.mock_client.register_port.return_value = PORTS[0]
+ self.mock_client.get_ports.return_value = PORTS[1:]
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_iqn = PORTS[0].PortName
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ result = driver.initialize_connection(volume, connector)
+ self.assertEqual('iscsi', result['driver_volume_type'])
+
+        target_iqns = [port.PortName for port
+                       in PORTS
+                       if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_iqn'], target_iqns)
+
+ target_iqn = result['data']['target_iqn']
+ target_port = next((
+ port for port
+ in PORTS
+ if port.PortName == target_iqn), None)
+ target_device_id = next((
+ device.Id for device
+ in TARGET_DEVICES
+ if device.TargetPortId == target_port.Id), None)
+ target_lun = next((
+ unit.Lun.Quad for unit
+ in LOGICAL_UNITS
+ if unit.VirtualTargetDeviceId == target_device_id), None)
+ self.assertEqual(target_lun, result['data']['target_lun'])
+
+ self.assertEqual('127.0.0.1:3260', result['data']['target_portal'])
+ self.assertFalse(result['data']['target_discovered'])
+ self.assertEqual(volume.id, result['data']['volume_id'])
+ self.assertEqual('rw', result['data']['access_mode'])
+
+ def test_initialize_connection_failed_not_found(self):
+ client = test_datacore_driver.CLIENTS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = 'wrong_virtual_disk_id'
+ initiator_iqn = PORTS[0].PortName
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.initialize_connection,
+ volume,
+ connector)
+
+ def test_initialize_connection_failed_target_not_found(self):
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ config = self.setup_default_configuration()
+ config.datacore_iscsi_unallowed_targets = [
+ port.PortName for port in PORTS if port.PortMode == 'Target'
+ ]
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_iqn = PORTS[0].PortName
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.initialize_connection,
+ volume,
+ connector)
+
+ def test_initialize_connection_failed_on_map(self):
+ def fail_with_datacore_fault(*args):
+ raise datacore_exception.DataCoreFaultException(
+ reason="General error.")
+
+ (self.mock_client.map_logical_disk
+ .side_effect) = fail_with_datacore_fault
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_iqn = PORTS[0].PortName
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ self.assertRaises(datacore_exception.DataCoreFaultException,
+ driver.initialize_connection,
+ volume,
+ connector)
+
+ def test_initialize_connection_chap(self):
+ mock_file_storage = self.mock_object(iscsi.passwd, 'FileStorage')
+ mock_file_storage.return_value = test_datacore_passwd.FakeFileStorage()
+ target_port = mock.Mock(
+ Id='target_port_id1',
+ PortType='iSCSI',
+ PortMode='Target',
+ PortName='iqn.2000-08.com.datacore:server-1-1',
+ HostId='server_id1',
+ PresenceStatus='Present',
+ ServerPortProperties=mock.Mock(Role="Frontend",
+ Authentication='None'),
+ IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+ PortConfigInfo=ISCSI_PORT_CONFIG_INFO,
+ iSCSINodes=mock.Mock(Node=[]))
+ ports = PORTS[:2]
+ ports.append(target_port)
+ self.mock_client.get_ports.return_value = ports
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ config = self.setup_default_configuration()
+ config.use_chap_auth = True
+ config.datacore_iscsi_chap_storage = 'fake_file_path'
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_iqn = PORTS[0].PortName
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ result = driver.initialize_connection(volume, connector)
+ self.assertEqual('iscsi', result['driver_volume_type'])
+
+        target_iqns = [port.PortName for port
+                       in PORTS
+                       if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_iqn'], target_iqns)
+
+ target_iqn = result['data']['target_iqn']
+ target_port = next((
+ port for port
+ in PORTS
+ if port.PortName == target_iqn), None)
+ target_device_id = next((
+ device.Id for device
+ in TARGET_DEVICES
+ if device.TargetPortId == target_port.Id), None)
+ target_lun = next((
+ unit.Lun.Quad for unit
+ in LOGICAL_UNITS
+ if unit.VirtualTargetDeviceId == target_device_id), None)
+ self.assertEqual(target_lun, result['data']['target_lun'])
+
+ self.assertEqual('127.0.0.1:3260', result['data']['target_portal'])
+ self.assertFalse(result['data']['target_discovered'])
+ self.assertEqual(volume.id, result['data']['volume_id'])
+ self.assertEqual('rw', result['data']['access_mode'])
+ self.assertEqual('CHAP', result['data']['auth_method'])
+ self.assertEqual(initiator_iqn, result['data']['auth_username'])
+ self.assertIsNotNone(result['data']['auth_password'])
+
+ def test_initialize_connection_chap_failed_check(self):
+ target_port = mock.Mock(
+ __class__=mock.Mock(__name__='ServeriScsiPortData'),
+ Id='target_port_id2',
+ PortType='iSCSI',
+ PortMode='Target',
+ PortName='iqn.2000-08.com.datacore:server-1-2',
+ HostId='server_id1',
+ PresenceStatus='Present',
+ ServerPortProperties=mock.Mock(Role="Frontend",
+ Authentication='CHAP'),
+ IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+ PortConfigInfo=ISCSI_PORT_CONFIG_INFO)
+ ports = PORTS[:2]
+ ports.append(target_port)
+ self.mock_client.get_ports.return_value = ports
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+ self.mock_client.get_logical_units.return_value = LOGICAL_UNITS
+ self.mock_client.get_target_domains.return_value = []
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ driver = self.init_driver(self.setup_default_configuration())
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_iqn = PORTS[0].PortName
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ self.assertRaises(cinder_exception.VolumeDriverException,
+ driver.initialize_connection,
+ volume,
+ connector)
+
+ def test_initialize_connection_chap_failed_on_set_port_properties(self):
+ def fail_with_datacore_fault(*args):
+ raise datacore_exception.DataCoreFaultException(
+ reason="General error.")
+
+ mock_file_storage = self.mock_object(iscsi.passwd, 'FileStorage')
+ mock_file_storage.return_value = test_datacore_passwd.FakeFileStorage()
+ target_port = mock.Mock(
+ __class__=mock.Mock(__name__='ServeriScsiPortData'),
+ Id='target_port_id1',
+ PortType='iSCSI',
+ PortMode='Target',
+ PortName='iqn.2000-08.com.datacore:server-1-1',
+ HostId='server_id1',
+ PresenceStatus='Present',
+ ServerPortProperties=mock.Mock(Role="Frontend",
+ Authentication='None'),
+ IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+ PortConfigInfo=ISCSI_PORT_CONFIG_INFO,
+ iSCSINodes=mock.Mock(Node=[]))
+ ports = PORTS[:2]
+ ports.append(target_port)
+ self.mock_client.get_ports.return_value = ports
+ (self.mock_client.set_server_port_properties
+ .side_effect) = fail_with_datacore_fault
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ config = self.setup_default_configuration()
+ config.use_chap_auth = True
+ config.datacore_iscsi_chap_storage = 'fake_file_path'
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_iqn = PORTS[0].PortName
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ self.assertRaises(datacore_exception.DataCoreFaultException,
+ driver.initialize_connection,
+ volume,
+ connector)
+
+ def test_initialize_connection_chap_username_password(self):
+ mock_file_storage = self.mock_object(iscsi.passwd, 'FileStorage')
+ mock_file_storage.return_value = test_datacore_passwd.FakeFileStorage()
+ target_port = mock.Mock(
+ Id='target_port_id1',
+ PortType='iSCSI',
+ PortMode='Target',
+ PortName='iqn.2000-08.com.datacore:server-1-1',
+ HostId='server_id1',
+ PresenceStatus='Present',
+ ServerPortProperties=mock.Mock(Role="Frontend",
+ Authentication='None'),
+ IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+ PortConfigInfo=ISCSI_PORT_CONFIG_INFO,
+ iSCSINodes=mock.Mock(Node=[]))
+ ports = PORTS[:2]
+ ports.append(target_port)
+ self.mock_client.get_ports.return_value = ports
+ self.mock_client.get_logical_units.return_value = []
+ self.mock_client.get_target_domains.return_value = []
+ self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+ virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+ client = test_datacore_driver.CLIENTS[0]
+ config = self.setup_default_configuration()
+ config.use_chap_auth = True
+ config.chap_username = 'datacore'
+ config.chap_password = 'datacore123456'
+ driver = self.init_driver(config)
+ volume = self.volume_a
+ volume.provider_location = virtual_disk.Id
+ initiator_iqn = PORTS[0].PortName
+ connector = {
+ 'host': client.HostName,
+ 'initiator': initiator_iqn
+ }
+ result = driver.initialize_connection(volume, connector)
+ self.assertEqual('iscsi', result['driver_volume_type'])
+
+        target_iqns = [port.PortName for port
+                       in PORTS
+                       if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_iqn'], target_iqns)
+
+ target_iqn = result['data']['target_iqn']
+ target_port = next((
+ port for port
+ in PORTS
+ if port.PortName == target_iqn), None)
+ target_device_id = next((
+ device.Id for device
+ in TARGET_DEVICES
+ if device.TargetPortId == target_port.Id), None)
+ target_lun = next((
+ unit.Lun.Quad for unit
+ in LOGICAL_UNITS
+ if unit.VirtualTargetDeviceId == target_device_id), None)
+ self.assertEqual(target_lun, result['data']['target_lun'])
+
+ self.assertEqual('127.0.0.1:3260', result['data']['target_portal'])
+ self.assertFalse(result['data']['target_discovered'])
+ self.assertEqual(volume.id, result['data']['volume_id'])
+ self.assertEqual('rw', result['data']['access_mode'])
+ self.assertEqual('CHAP', result['data']['auth_method'])
+ self.assertEqual('datacore', result['data']['auth_username'])
+ self.assertEqual('datacore123456', result['data']['auth_password'])
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_passwd.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_passwd.py
new file mode 100644
index 00000000000..389ad3a56ed
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_passwd.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the password storage."""
+
+import collections
+import io
+import json
+import os
+import stat
+from unittest import mock
+
+from cinder.tests.unit import test
+from cinder.volume.drivers.datacore import passwd
+
+
+class FakeFileStorage(object):
+ """Mock FileStorage class."""
+ def __init__(self):
+ self._storage = {
+ 'resource1': {
+ 'user1': 'resource1-user1',
+ 'user2': 'resource1-user2',
+ },
+ 'resource2': {
+ 'user1': 'resource2-user1',
+ }
+ }
+
+ def open(self):
+ return self
+
+ def load(self):
+ return self._storage
+
+ def save(self, storage):
+ self._storage = storage
+
+ def close(self):
+ pass
+
+ def __enter__(self):
+ return self
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+ self.close()
+
+
+class PasswordFileStorageTestCase(test.TestCase):
+ """Tests for the password storage."""
+
+ def test_get_password(self):
+ fake_file_storage = FakeFileStorage()
+ passwords = fake_file_storage.load()
+ resource = next(iter(passwords.keys()))
+ user, expected = next(iter(passwords[resource].items()))
+
+ self._mock_file_storage(fake_file_storage)
+ password_storage = passwd.PasswordFileStorage('fake_file_path')
+
+ result = password_storage.get_password(resource, user)
+ self.assertEqual(expected, result)
+
+ result = password_storage.get_password(resource.upper(), user)
+ self.assertIsNone(result)
+
+ def test_set_password(self):
+ fake_file_storage = FakeFileStorage()
+ user = 'user3'
+ resource1 = 'resource2'
+ password1 = 'resource2-user3'
+ resource2 = 'resource3'
+ password2 = 'resource3-user3'
+
+ self._mock_file_storage(fake_file_storage)
+ password_storage = passwd.PasswordFileStorage('fake_file_path')
+
+ password_storage.set_password(resource1, user, password1)
+ passwords = fake_file_storage.load()
+ self.assertIn(resource1, passwords)
+ self.assertIn(user, passwords[resource1])
+ self.assertEqual(password1, passwords[resource1][user])
+
+ password_storage.set_password(resource2, user, password2)
+ passwords = fake_file_storage.load()
+ self.assertIn(resource2, passwords)
+ self.assertIn(user, passwords[resource2])
+ self.assertEqual(password2, passwords[resource2][user])
+
+ def test_delete_password(self):
+ fake_file_storage = FakeFileStorage()
+ passwords = fake_file_storage.load()
+ resource1, resource2 = 'resource1', 'resource2'
+        user1, _ = next(iter(passwords[resource1].items()))
+        user2, _ = next(iter(passwords[resource2].items()))
+
+ self._mock_file_storage(fake_file_storage)
+ password_storage = passwd.PasswordFileStorage('fake_file_path')
+
+ password_storage.delete_password(resource1, user1)
+ passwords = fake_file_storage.load()
+ self.assertIn(resource1, passwords)
+ self.assertNotIn(user1, passwords[resource1])
+
+ password_storage.delete_password(resource2, user2)
+ passwords = fake_file_storage.load()
+ self.assertNotIn(resource2, passwords)
+
+ def _mock_file_storage(self, fake_file_storage):
+ self.mock_object(passwd, 'FileStorage', return_value=fake_file_storage)
+
+
+class FileStorageTestCase(test.TestCase):
+ """Test for the file storage."""
+
+ def test_open(self):
+ fake_file_path = 'file_storage.data'
+ self.mock_object(passwd.os.path, 'isfile', return_value=True)
+ self.mock_object(passwd.os.path, 'isdir', return_value=True)
+ mock_open = self.mock_object(passwd, 'open', mock.mock_open())
+
+ file_storage = passwd.FileStorage(fake_file_path)
+ file_storage.open()
+ mock_open.assert_called_once_with(fake_file_path, 'r+')
+
+ def test_open_not_existing(self):
+ fake_file_path = '/fake_path/file_storage.data'
+ fake_dir_name = os.path.dirname(fake_file_path)
+ mock_chmod_calls = [
+ mock.call(fake_dir_name,
+ stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP),
+ mock.call(fake_file_path, stat.S_IRUSR | stat.S_IWUSR)
+ ]
+ mock_open_calls = [
+ mock.call(fake_file_path, 'w'),
+ mock.call(fake_file_path, 'r+'),
+ ]
+
+ self.mock_object(passwd.os.path, 'isfile', return_value=False)
+ self.mock_object(passwd.os.path, 'isdir', return_value=False)
+ mock_makedirs = self.mock_object(passwd.os, 'makedirs')
+ mock_chmod = self.mock_object(passwd.os, 'chmod')
+ mock_open = self.mock_object(
+ passwd, 'open', return_value=mock.MagicMock())
+
+ file_storage = passwd.FileStorage(fake_file_path)
+ file_storage.open()
+ mock_makedirs.assert_called_with(fake_dir_name)
+ mock_chmod.assert_has_calls(mock_chmod_calls, any_order=True)
+ mock_open.assert_has_calls(mock_open_calls, any_order=True)
+
+ def test_open_not_closed(self):
+ fake_file_path = 'file_storage.data'
+ fake_file = mock.MagicMock()
+ mock_open_calls = [
+ mock.call(fake_file_path, 'r+'),
+ mock.call(fake_file_path, 'r+'),
+ ]
+ self.mock_object(passwd.os.path, 'isfile', return_value=True)
+ self.mock_object(passwd.os.path, 'isdir', return_value=True)
+ mock_open = self.mock_object(passwd, 'open', return_value=fake_file)
+
+ file_storage = passwd.FileStorage(fake_file_path)
+ file_storage.open()
+ file_storage.open()
+ mock_open.assert_has_calls(mock_open_calls)
+ fake_file.close.assert_called_once_with()
+
+ def test_load(self):
+ passwords = {
+ 'resource1': {
+ 'user1': 'resource1-user1',
+ 'user2': 'resource1-user2',
+ },
+ 'resource2': {
+ 'user1': 'resource2-user1',
+ 'user2': 'resource2-user2'
+ }
+ }
+ fake_file_name = 'file_storage.data'
+ fake_file_content = json.dumps(passwords)
+ fake_file = self._get_fake_file(fake_file_content)
+ fake_os_stat = self._get_fake_os_stat(1)
+
+ self._mock_file_open(fake_file, fake_os_stat)
+
+ file_storage = passwd.FileStorage(fake_file_name)
+ file_storage.open()
+ result = file_storage.load()
+ self.assertEqual(passwords, result)
+
+ def test_load_empty_file(self):
+ fake_file_name = 'file_storage.data'
+ fake_file = self._get_fake_file()
+ fake_os_stat = self._get_fake_os_stat(0)
+
+ self._mock_file_open(fake_file, fake_os_stat)
+
+ file_storage = passwd.FileStorage(fake_file_name)
+ file_storage.open()
+ result = file_storage.load()
+ expected = {}
+ self.assertEqual(expected, result)
+
+ def test_load_malformed_file(self):
+ fake_file_name = 'file_storage.data'
+ fake_file = self._get_fake_file('[1, 2, 3]')
+ fake_os_stat = self._get_fake_os_stat(1)
+
+ self._mock_file_open(fake_file, fake_os_stat)
+
+ file_storage = passwd.FileStorage(fake_file_name)
+ file_storage.open()
+ self.assertRaises(ValueError, file_storage.load)
+
+ def test_save(self):
+ fake_file_name = 'file_storage.data'
+ fake_file = self._get_fake_file('')
+ fake_os_stat = self._get_fake_os_stat(0)
+
+ self._mock_file_open(fake_file, fake_os_stat)
+
+ passwords = {
+ 'resource1': {
+ 'user1': 'resource1-user1',
+ 'user2': 'resource1-user2',
+ },
+ 'resource2': {
+ 'user1': 'resource2-user1',
+ 'user2': 'resource2-user2'
+ }
+ }
+ fake_file_content = json.dumps(passwords)
+ file_storage = passwd.FileStorage(fake_file_name)
+ file_storage.open()
+ file_storage.save(passwords)
+ self.assertEqual(fake_file_content, fake_file.getvalue())
+
+ def test_save_not_dictionary(self):
+ fake_file_name = 'file_storage.data'
+ fake_file = self._get_fake_file('')
+ fake_os_stat = self._get_fake_os_stat(0)
+
+ self._mock_file_open(fake_file, fake_os_stat)
+
+ file_storage = passwd.FileStorage(fake_file_name)
+ file_storage.open()
+ self.assertRaises(TypeError, file_storage.save, [])
+
+ def test_close(self):
+ fake_file_name = 'file_storage.data'
+ fake_file = mock.MagicMock()
+
+ self.mock_object(passwd.os.path, 'isfile', return_value=True)
+ self.mock_object(passwd.os.path, 'isdir', return_value=True)
+ self.mock_object(passwd, 'open', return_value=fake_file)
+
+ file_storage = passwd.FileStorage(fake_file_name)
+ file_storage.open()
+ file_storage.close()
+ fake_file.close.assert_called_once_with()
+
+ def _mock_file_open(self, fake_file, fake_os_stat):
+ self.mock_object(passwd.os.path, 'isfile', return_value=True)
+ self.mock_object(passwd.os.path, 'isdir', return_value=True)
+ self.mock_object(passwd.os, 'stat', return_value=fake_os_stat)
+ self.mock_object(passwd, 'open', return_value=fake_file)
+
+ @staticmethod
+ def _get_fake_file(content=None):
+ return io.StringIO(content)
+
+ @staticmethod
+ def _get_fake_os_stat(st_size):
+ os_stat = collections.namedtuple('fake_os_stat', ['st_size'])
+        return os_stat(st_size)
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_utils.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_utils.py
new file mode 100644
index 00000000000..2b4b21f629b
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_utils.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for utilities and helper functions."""
+
+from cinder.tests.unit import test
+from cinder.volume.drivers.datacore import utils
+
+
+class GenericUtilsTestCase(test.TestCase):
+ """Tests for the generic utilities and helper functions."""
+
+ def test_build_network_address(self):
+ ipv4_address = '127.0.0.1'
+ ipv6_address = '::1'
+ host_name = 'localhost'
+ port = 3498
+ self.assertEqual('%s:%s' % (ipv4_address, port),
+ utils.build_network_address(ipv4_address, port))
+ self.assertEqual('[%s]:%s' % (ipv6_address, port),
+ utils.build_network_address(ipv6_address, port))
+ self.assertEqual('%s:%s' % (host_name, port),
+ utils.build_network_address(host_name, port))
+
+ def test_get_first(self):
+ disk_a = {'id': 'disk-a', 'type': 'Single', 'size': 5}
+ disk_b = {'id': 'disk-b', 'type': 'Single', 'size': 1}
+ disk_c = {'id': 'disk-c', 'type': 'Mirrored', 'size': 5}
+ disk_d = {'id': 'disk-d', 'type': 'Single', 'size': 10}
+ test_source = [disk_a, disk_b, disk_c, disk_d]
+
+ first = utils.get_first(lambda item: item['id'] == 'disk-c',
+ test_source)
+ self.assertEqual(disk_c, first)
+
+ self.assertRaises(StopIteration,
+ utils.get_first,
+ lambda item: item['type'] == 'Dual',
+ test_source)
+
+ def test_get_first_or_default(self):
+ disk_a = {'id': 'disk-a', 'type': 'Single', 'size': 5}
+ disk_b = {'id': 'disk-b', 'type': 'Single', 'size': 1}
+ disk_c = {'id': 'disk-c', 'type': 'Mirrored', 'size': 5}
+ disk_d = {'id': 'disk-d', 'type': 'Single', 'size': 10}
+ test_source = [disk_a, disk_b, disk_c, disk_d]
+
+ first = utils.get_first_or_default(lambda item: item['size'] == 1,
+ test_source,
+ None)
+ self.assertEqual(disk_b, first)
+
+ default = utils.get_first_or_default(lambda item: item['size'] == 15,
+ test_source,
+ None)
+ self.assertIsNone(default)
+
+ def test_get_distinct_by(self):
+ disk_a = {'id': 'disk-a', 'type': 'Single', 'size': 5}
+ disk_b = {'id': 'disk-b', 'type': 'Single', 'size': 1}
+ disk_c = {'id': 'disk-c', 'type': 'Mirrored', 'size': 5}
+ disk_d = {'id': 'disk-d', 'type': 'Single', 'size': 10}
+ test_source = [disk_a, disk_b, disk_c, disk_d]
+
+ distinct_values = utils.get_distinct_by(lambda item: item['type'],
+ test_source)
+ self.assertEqual([disk_a, disk_c], distinct_values)
diff --git a/cinder/volume/drivers/datacore/api.py b/cinder/volume/drivers/datacore/api.py
new file mode 100644
index 00000000000..baaa7a960b6
--- /dev/null
+++ b/cinder/volume/drivers/datacore/api.py
@@ -0,0 +1,1065 @@
+# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Classes to invoke DataCore SANsymphony API."""
+
+import copy
+import socket
+import sys
+import uuid
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import importutils
+import suds
+from suds import client as suds_client
+from suds import plugin
+from suds.sax import attribute
+from suds.sax import element
+from suds import wsdl
+from suds import wsse
+from suds import xsd
+from suds.xsd.doctor import Import
+from suds.xsd.doctor import ImportDoctor
+
+from cinder.i18n import _
+from cinder import utils as cinder_utils
+from cinder.volume.drivers.datacore import exception as datacore_exceptions
+from cinder.volume.drivers.datacore import utils as datacore_utils
+
+websocket = importutils.try_import('websocket')
+
+LOG = logging.getLogger(__name__)
+
+
+class FaultDefinitionsFilter(plugin.DocumentPlugin):
+ """Plugin to process the DataCore API WSDL document.
+
+ The document plugin removes fault definitions for callback operations
+ from the DataCore API WSDL.
+ """
+
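+    # suds runs parsed() over every WSDL document once the plugin is
+    # registered, which DataCoreClient.__init__ below does via
+    # Client(..., plugins=[FaultDefinitionsFilter()]).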
+ def parsed(self, context):
+ document = context.document
+ tns = self._get_tns(document)
+
+ message_qrefs = set()
+ for message in self._get_wsdl_messages(document):
+ message_qrefs.add((message.get('name'), tns[1]))
+
+ bindings = self._get_wsdl_operation_bindings(document)
+
+ for port_type in self._get_wsdl_port_types(document):
+ for operation in self._get_wsdl_operations(port_type):
+ self._filter_faults(
+ document, operation, bindings, message_qrefs, tns)
+
+ @staticmethod
+ def _get_tns(document):
+ target_namespace = document.get('targetNamespace')
+ prefix = document.findPrefix(target_namespace) or 'tns'
+ return prefix, target_namespace
+
+ @staticmethod
+ def _get_wsdl_port_types(document):
+ return document.getChildren('portType', wsdl.wsdlns)
+
+ @staticmethod
+ def _get_wsdl_operations(port_type):
+ return port_type.getChildren('operation', wsdl.wsdlns)
+
+ @staticmethod
+ def _get_wsdl_messages(document):
+ return document.getChildren('message', wsdl.wsdlns)
+
+ @staticmethod
+ def _get_wsdl_operation_bindings(document):
+ bindings = []
+ for binding in document.getChildren('binding', wsdl.wsdlns):
+ operations = {}
+ for operation in binding.getChildren('operation', wsdl.wsdlns):
+ operations[operation.get('name')] = operation
+ bindings.append(operations)
+ return bindings
+
+ @staticmethod
+ def _filter_faults(document, operation, operation_bindings,
+ message_qrefs, tns):
+ filtered_faults = {}
+ for fault in operation.getChildren('fault', wsdl.wsdlns):
+ fault_message = fault.get('message')
+ qref = xsd.qualify(fault_message, document, tns)
+ if qref not in message_qrefs:
+ filtered_faults[fault.get('name')] = fault
+ for fault in filtered_faults.values():
+ operation.remove(fault)
+ if filtered_faults:
+ for binding in operation_bindings:
+ filtered_binding_faults = []
+ faults = binding[operation.get('name')].getChildren(
+ 'fault', wsdl.wsdlns)
+ for binding_fault in faults:
+ if binding_fault.get('name') in filtered_faults:
+ filtered_binding_faults.append(binding_fault)
+ for binding_fault in filtered_binding_faults:
+ binding[operation.get('name')].remove(binding_fault)
+
+
+class DataCoreClient(object):
+ """DataCore SANsymphony client."""
+
+ API_RETRY_INTERVAL = 10
+
+ DATACORE_EXECUTIVE_PORT = '3794'
+
+ STORAGE_SERVICES = 'IStorageServices'
+ STORAGE_SERVICES_BINDING = 'CustomBinding_IStorageServices'
+
+ EXECUTIVE_SERVICE = 'IExecutiveServiceEx'
+ EXECUTIVE_SERVICE_BINDING = 'CustomBinding_IExecutiveServiceEx'
+
+ NS_WSA = ('wsa', 'http://www.w3.org/2005/08/addressing')
+ WSA_ANONYMOUS = 'http://www.w3.org/2005/08/addressing/anonymous'
+ MUST_UNDERSTAND = attribute.Attribute('SOAP-ENV:mustUnderstand', '1')
+
+ # Namespaces that are defined within DataCore API WSDL
+ NS_DATACORE_EXECUTIVE = ('http://schemas.datacontract.org/2004/07/'
+ 'DataCore.Executive')
+ NS_DATACORE_EXECUTIVE_SCSI = ('http://schemas.datacontract.org/2004/07/'
+ 'DataCore.Executive.Scsi')
+ NS_DATACORE_EXECUTIVE_ISCSI = ('http://schemas.datacontract.org/2004/07/'
+ 'DataCore.Executive.iSCSI')
+ NS_SERIALIZATION_ARRAYS = ('http://schemas.microsoft.com/2003/10/'
+ 'Serialization/Arrays')
+
+ # Fully qualified names of objects that are defined within
+ # DataCore API WSDL
+ O_ACCESS_TOKEN = '{%s}AccessToken' % NS_DATACORE_EXECUTIVE_ISCSI
+ O_ARRAY_OF_PERFORMANCE_TYPE = ('{%s}ArrayOfPerformanceType'
+ % NS_DATACORE_EXECUTIVE)
+ O_ARRAY_OF_STRING = '{%s}ArrayOfstring' % NS_SERIALIZATION_ARRAYS
+ O_CLIENT_MACHINE_TYPE = '{%s}ClientMachineType' % NS_DATACORE_EXECUTIVE
+ O_DATA_SIZE = '{%s}DataSize' % NS_DATACORE_EXECUTIVE
+ O_LOGICAL_DISK_ROLE = '{%s}LogicalDiskRole' % NS_DATACORE_EXECUTIVE
+ O_LOGICAL_UNIT_TYPE = '{%s}LogicalUnitType' % NS_DATACORE_EXECUTIVE
+ O_MIRROR_RECOVERY_PRIORITY = ('{%s}MirrorRecoveryPriority'
+ % NS_DATACORE_EXECUTIVE)
+ O_PATH_POLICY = '{%s}PathPolicy' % NS_DATACORE_EXECUTIVE
+ O_PERFORMANCE_TYPE = '{%s}PerformanceType' % NS_DATACORE_EXECUTIVE
+ O_POOL_VOLUME_TYPE = '{%s}PoolVolumeType' % NS_DATACORE_EXECUTIVE
+ O_SNAPSHOT_TYPE = '{%s}SnapshotType' % NS_DATACORE_EXECUTIVE
+ O_SCSI_MODE = '{%s}ScsiMode' % NS_DATACORE_EXECUTIVE_SCSI
+ O_SCSI_PORT_DATA = '{%s}ScsiPortData' % NS_DATACORE_EXECUTIVE
+ O_SCSI_PORT_NEXUS_DATA = '{%s}ScsiPortNexusData' % NS_DATACORE_EXECUTIVE
+ O_SCSI_PORT_TYPE = '{%s}ScsiPortType' % NS_DATACORE_EXECUTIVE_SCSI
+ O_VIRTUAL_DISK_DATA = '{%s}VirtualDiskData' % NS_DATACORE_EXECUTIVE
+ O_VIRTUAL_DISK_STATUS = '{%s}VirtualDiskStatus' % NS_DATACORE_EXECUTIVE
+ O_VIRTUAL_DISK_SUB_TYPE = '{%s}VirtualDiskSubType' % NS_DATACORE_EXECUTIVE
+ O_VIRTUAL_DISK_TYPE = '{%s}VirtualDiskType' % NS_DATACORE_EXECUTIVE
+
+ def __init__(self, host, username, password, timeout):
+ if websocket is None:
+ msg = _("Failed to import websocket-client python module."
+ " Please, ensure the module is installed.")
+ raise datacore_exceptions.DataCoreException(msg)
+
+ self.timeout = timeout
+
+ executive_service_net_addr = datacore_utils.build_network_address(
+ host, self.DATACORE_EXECUTIVE_PORT)
+ executive_service_endpoint = self._build_service_endpoint(
+ executive_service_net_addr, self.EXECUTIVE_SERVICE)
+
+ security_options = wsse.Security()
+ username_token = wsse.UsernameToken(username, password)
+ security_options.tokens.append(username_token)
+
+ imp = Import('http://www.w3.org/2001/XMLSchema',
+ location='http://www.w3.org/2001/XMLSchema.xsd')
+ imp.filter.add(
+ 'http://schemas.microsoft.com/2003/10/Serialization/Arrays')
+ imp.filter.add('http://schemas.microsoft.com/2003/10/Serialization/')
+ imp.filter.add(
+ 'http://schemas.datacontract.org/2004/07/DataCore.Executive')
+
+ self._executive_service_client = suds_client.Client(
+ executive_service_endpoint['http_endpoint'] + '?singlewsdl',
+ nosend=True,
+ timeout=self.timeout,
+ wsse=security_options,
+ plugins=[FaultDefinitionsFilter()],
+ doctor=ImportDoctor(imp))
+
+ self._update_storage_services_endpoint(executive_service_endpoint)
+
+ storage_services_endpoint = self._get_storage_services_endpoint()
+
+ self._storage_services_client = suds_client.Client(
+ storage_services_endpoint['http_endpoint'] + '?singlewsdl',
+ nosend=True,
+ timeout=self.timeout,
+ wsse=security_options,
+ plugins=[FaultDefinitionsFilter()], doctor=ImportDoctor(imp))
+
+ self._update_executive_service_endpoints(storage_services_endpoint)
+
+ @staticmethod
+ def _get_list_data(obj, attribute_name):
+ return getattr(obj, attribute_name, [])
+
+ @staticmethod
+ def _build_service_endpoint(network_address, path):
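+        # E.g. ('127.0.0.1:3794', 'IStorageServices') yields
+        # 'http://127.0.0.1:3794/IStorageServices' and the matching
+        # 'ws://127.0.0.1:3794/IStorageServices' WebSocket endpoint.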
+ return {
+ 'network_address': network_address,
+ 'http_endpoint': '%s://%s/%s' % ('http', network_address, path),
+ 'ws_endpoint': '%s://%s/%s' % ('ws', network_address, path),
+ }
+
+ @cinder_utils.synchronized('datacore-api-request_context')
+ def _get_soap_context(self, service_client, service_binding, method,
+ message_id, *args, **kwargs):
+ soap_action = (service_client.wsdl.services[0].port(service_binding)
+ .methods[method].soap.action)
+
+ soap_headers = self._get_soap_headers(soap_action, message_id)
+
+ service_client.set_options(soapheaders=soap_headers)
+ context = service_client.service[service_binding][method](
+ *args, **kwargs)
+
+ return context
+
+ def _get_soap_headers(self, soap_action, message_id):
+ headers = [
+ element.Element('Action', ns=self.NS_WSA).setText(
+ soap_action.replace('"', '')).append(self.MUST_UNDERSTAND),
+
+ element.Element('To', ns=self.NS_WSA).setText(
+ self.WSA_ANONYMOUS).append(self.MUST_UNDERSTAND),
+
+ element.Element('MessageID', ns=self.NS_WSA).setText(message_id),
+
+ element.Element('ReplyTo', ns=self.NS_WSA).insert(
+ element.Element('Address', ns=self.NS_WSA).setText(
+ self.WSA_ANONYMOUS)),
+ ]
+ return headers
+
+ def _process_request(self, service_client, service_binding,
+ service_endpoint, method, *args, **kwargs):
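+        # SANsymphony reports .NET DateTime.MaxValue timestamps with
+        # seven fractional digits; trim them below so the reply parses
+        # as a Python datetime, which only carries six digits.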
+ max_date = '9999-12-31T23:59:59.9999999'
+ r_date = '9999-12-31T23:59:59.9'
+ message_id = uuid.uuid4().urn
+
+ context = self._get_soap_context(
+ service_client, service_binding,
+ method, message_id, *args, **kwargs)
+
+ channel = None
+ try:
+ channel = websocket.create_connection(
+ service_endpoint,
+ timeout=self.timeout,
+ subprotocols=['soap'],
+ header=['soap-content-type: text/xml'])
+ channel.send(context.envelope)
+ response = channel.recv()
+ if not isinstance(response, str):
+ response = response.decode('utf-8')
+ response = response.replace(max_date, r_date)
+ response = response.encode('utf-8')
+ return context.process_reply(response)
+ except (socket.error, websocket.WebSocketException) as e:
+ error = datacore_exceptions.DataCoreConnectionException(reason=e)
+ raise error.with_traceback(sys.exc_info()[2])
+ except suds.WebFault as e:
+ fault = datacore_exceptions.DataCoreFaultException(reason=e)
+ raise fault.with_traceback(sys.exc_info()[2])
+ finally:
+ if channel and channel.connected:
+ try:
+ channel.close()
+ except (socket.error, websocket.WebSocketException) as e:
+ LOG.debug("Closing a connection to "
+ "DataCore server failed. %s", e)
+
+ def _invoke_storage_services(self, method, *args, **kwargs):
+
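+        # Retry transient connection failures; each failure first
+        # re-resolves the server group endpoints so a controller
+        # failover is picked up before the next attempt.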
+ @cinder_utils.retry(
+ datacore_exceptions.DataCoreConnectionException,
+ interval=self.API_RETRY_INTERVAL,
+ retries=10,
+ wait_random=True)
+ def retry_call():
+ storage_services_endpoint = self._get_storage_services_endpoint()
+ try:
+ result = self._process_request(
+ self._storage_services_client,
+ self.STORAGE_SERVICES_BINDING,
+ storage_services_endpoint['ws_endpoint'],
+ method, *args, **kwargs)
+ return result
+ except datacore_exceptions.DataCoreConnectionException:
+ with excutils.save_and_reraise_exception():
+ self._update_api_endpoints()
+
+ return retry_call()
+
+ def _update_api_endpoints(self):
+ executive_service_endpoints = self._get_executive_service_endpoints()
+ for endpoint in executive_service_endpoints:
+ try:
+ self._update_storage_services_endpoint(endpoint)
+ break
+ except datacore_exceptions.DataCoreConnectionException as e:
+ LOG.warning("Failed to update DataCore Server Group "
+ "endpoints. %s.", e)
+
+ storage_services_endpoint = self._get_storage_services_endpoint()
+ try:
+ self._update_executive_service_endpoints(
+ storage_services_endpoint)
+ except datacore_exceptions.DataCoreConnectionException as e:
+ LOG.warning("Failed to update DataCore Server Group "
+ "endpoints. %s.", e)
+
+ @cinder_utils.synchronized('datacore-api-storage_services_endpoint')
+ def _get_storage_services_endpoint(self):
+ if self._storage_services_endpoint:
+ return copy.copy(self._storage_services_endpoint)
+ return None
+
+ @cinder_utils.synchronized('datacore-api-storage_services_endpoint')
+ def _update_storage_services_endpoint(self, executive_service_endpoint):
+ controller_address = self._process_request(
+ self._executive_service_client,
+ self.EXECUTIVE_SERVICE_BINDING,
+ executive_service_endpoint['ws_endpoint'],
+ 'GetControllerAddress')
+
+ if not controller_address:
+ msg = _("Could not determine controller node.")
+ raise datacore_exceptions.DataCoreConnectionException(reason=msg)
+
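+        # The address comes back as 'host:port' (or '[v6-addr]:port');
+        # keep only the host part and rebuild the endpoint on the
+        # well-known executive port.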
+ controller_host = controller_address.rsplit(':', 1)[0].strip('[]')
+ controller_net_addr = datacore_utils.build_network_address(
+ controller_host,
+ self.DATACORE_EXECUTIVE_PORT)
+
+ self._storage_services_endpoint = self._build_service_endpoint(
+ controller_net_addr,
+ self.STORAGE_SERVICES)
+
+ @cinder_utils.synchronized('datacore-api-executive_service_endpoints')
+ def _get_executive_service_endpoints(self):
+ if self._executive_service_endpoints:
+ return self._executive_service_endpoints[:]
+ return []
+
+ @cinder_utils.synchronized('datacore-api-executive_service_endpoints')
+ def _update_executive_service_endpoints(self, storage_services_endpoint):
+ endpoints = []
+ nodes = self._get_list_data(
+ self._process_request(self._storage_services_client,
+ self.STORAGE_SERVICES_BINDING,
+ storage_services_endpoint['ws_endpoint'],
+ 'GetNodes'),
+ 'RegionNodeData')
+
+ if not nodes:
+ msg = _("Could not determine executive nodes.")
+ raise datacore_exceptions.DataCoreConnectionException(reason=msg)
+
+ for node in nodes:
+ host = node.HostAddress.rsplit(':', 1)[0].strip('[]')
+ endpoint = self._build_service_endpoint(
+ datacore_utils.build_network_address(
+ host, self.DATACORE_EXECUTIVE_PORT),
+ self.EXECUTIVE_SERVICE)
+ endpoints.append(endpoint)
+
+ self._executive_service_endpoints = endpoints
+
+ def get_server_groups(self):
+ """Get all the server groups in the configuration.
+
+ :return: A list of server group data.
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetServerGroups'),
+ 'ServerHostGroupData')
+
+ def get_servers(self):
+ """Get all the server hosts in the configuration.
+
+ :return: A list of server host data
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetServers'),
+ 'ServerHostData')
+
+ def get_disk_pools(self):
+ """Get all the pools in the server group.
+
+ :return: A list of disk pool data
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetDiskPools'),
+ 'DiskPoolData')
+
+ def get_logical_disks(self):
+ """Get all the logical disks defined in the system.
+
+ :return: A list of logical disks
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetLogicalDisks'),
+ 'LogicalDiskData')
+
+ def create_pool_logical_disk(self, pool_id, pool_volume_type, size,
+ min_quota=None, max_quota=None):
+ """Create the pool logical disk.
+
+ :param pool_id: Pool id
+ :param pool_volume_type: Type, either striped or spanned
+ :param size: Size
+ :param min_quota: Min quota
+ :param max_quota: Max quota
+ :return: New logical disk data
+ """
+
+ volume_type = getattr(self._storage_services_client.factory
+ .create(self.O_POOL_VOLUME_TYPE),
+ pool_volume_type)
+
+ data_size = (self._storage_services_client.factory
+ .create(self.O_DATA_SIZE))
+ data_size.Value = size
+
+ data_size_min_quota = None
+ if min_quota:
+ data_size_min_quota = (self._storage_services_client.factory
+ .create(self.O_DATA_SIZE))
+ data_size_min_quota.Value = min_quota
+
+ data_size_max_quota = None
+ if max_quota:
+ data_size_max_quota = (self._storage_services_client.factory
+ .create(self.O_DATA_SIZE))
+ data_size_max_quota.Value = max_quota
+
+ return self._invoke_storage_services('CreatePoolLogicalDisk',
+ poolId=pool_id,
+ type=volume_type,
+ size=data_size,
+ minQuota=data_size_min_quota,
+ maxQuota=data_size_max_quota)
+
+ def delete_logical_disk(self, logical_disk_id):
+ """Delete the logical disk.
+
+ :param logical_disk_id: Logical disk id
+ """
+
+ self._invoke_storage_services('DeleteLogicalDisk',
+ logicalDiskId=logical_disk_id)
+
+ def get_logical_disk_chunk_allocation_map(self, logical_disk_id):
+ """Get the logical disk chunk allocation map.
+
+ The logical disk allocation map details all the physical disk chunks
+ that are currently allocated to this logical disk.
+
+ :param logical_disk_id: Logical disk id
+ :return: A list of member allocation maps, restricted to chunks
+ allocated on to this logical disk
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetLogicalDiskChunkAllocationMap',
+ logicalDiskId=logical_disk_id),
+ 'MemberAllocationInfoData')
+
+ def get_next_virtual_disk_alias(self, base_alias):
+ """Get the next available (unused) virtual disk alias.
+
+ :param base_alias: Base string of the new alias
+ :return: New alias
+ """
+
+ return self._invoke_storage_services('GetNextVirtualDiskAlias',
+ baseAlias=base_alias)
+
+ def get_virtual_disks(self):
+ """Get all the virtual disks in the configuration.
+
+        :return: A list of virtual disk data
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetVirtualDisks'),
+ 'VirtualDiskData')
+
+ def build_virtual_disk_data(self, virtual_disk_alias, virtual_disk_type,
+ size, description, storage_profile_id):
+ """Create VirtualDiskData object.
+
+ :param virtual_disk_alias: User-visible alias of the virtual disk,
+ which must be unique
+ :param virtual_disk_type: Virtual disk type
+ :param size: Virtual disk size
+ :param description: A user-readable description of the virtual disk
+ :param storage_profile_id: Virtual disk storage profile
+ :return: VirtualDiskData object
+ """
+
+ vd_data = (self._storage_services_client.factory
+ .create(self.O_VIRTUAL_DISK_DATA))
+ vd_data.Size = (self._storage_services_client.factory
+ .create(self.O_DATA_SIZE))
+ vd_data.Size.Value = size
+ vd_data.Alias = virtual_disk_alias
+ vd_data.Description = description
+ vd_data.Type = getattr(self._storage_services_client.factory
+ .create(self.O_VIRTUAL_DISK_TYPE),
+ virtual_disk_type)
+ vd_data.SubType = getattr(self._storage_services_client.factory
+ .create(self.O_VIRTUAL_DISK_SUB_TYPE),
+ 'Standard')
+ vd_data.DiskStatus = getattr(self._storage_services_client.factory
+ .create(self.O_VIRTUAL_DISK_STATUS),
+ 'Online')
+ vd_data.RecoveryPriority = getattr(
+ self._storage_services_client.factory
+ .create(self.O_MIRROR_RECOVERY_PRIORITY),
+ 'Unset')
+ vd_data.StorageProfileId = storage_profile_id
+
+ return vd_data
+
+ def create_virtual_disk_ex2(self, virtual_disk_data, first_logical_disk_id,
+ second_logical_disk_id, add_redundancy):
+ """Create a virtual disk specifying both the logical disks.
+
+ :param virtual_disk_data: Virtual disk's properties
+ :param first_logical_disk_id: Id of the logical disk to use
+ :param second_logical_disk_id: Id of the second logical disk to use
+ :param add_redundancy: If True, the mirror has redundant mirror paths
+ :return: New virtual disk's data
+ """
+
+ return self._invoke_storage_services(
+ 'CreateVirtualDiskEx2',
+ virtualDisk=virtual_disk_data,
+ firstLogicalDiskId=first_logical_disk_id,
+ secondLogicalDiskId=second_logical_disk_id,
+ addRedundancy=add_redundancy)
+
+ def set_virtual_disk_size(self, virtual_disk_id, size):
+ """Change the size of a virtual disk.
+
+ :param virtual_disk_id: Id of the virtual disk
+ :param size: New size
+ :return: Virtual disk's data
+ """
+
+ data_size = (self._storage_services_client.factory
+ .create(self.O_DATA_SIZE))
+ data_size.Value = size
+
+ return self._invoke_storage_services('SetVirtualDiskSize',
+ virtualDiskId=virtual_disk_id,
+ size=data_size)
+
+ def delete_virtual_disk(self, virtual_disk_id, delete_logical_disks):
+ """Delete a virtual disk.
+
+ :param virtual_disk_id: Id of the virtual disk
+ :param delete_logical_disks: If True, delete the associated
+ logical disks
+ """
+
+ self._invoke_storage_services('DeleteVirtualDisk',
+ virtualDiskId=virtual_disk_id,
+ deleteLogicalDisks=delete_logical_disks)
+
+ def serve_virtual_disks_to_host(self, host_id, virtual_disks):
+ """Serve multiple virtual disks to a specified host.
+
+ :param host_id: Id of the host machine
+ :param virtual_disks: A list of virtual disks to serve
+ :return: A list of the virtual disks actually served to the host
+ """
+
+ virtual_disk_array = (self._storage_services_client.factory
+ .create(self.O_ARRAY_OF_STRING))
+ virtual_disk_array.string = virtual_disks
+
+ return self._get_list_data(
+ self._invoke_storage_services('ServeVirtualDisksToHost',
+ hostId=host_id,
+ virtualDisks=virtual_disk_array),
+ 'VirtualLogicalUnitData')
+
+ def unserve_virtual_disks_from_host(self, host_id, virtual_disks):
+ """Unserve multiple virtual disks from a specified host.
+
+ :param host_id: Id of the host machine
+ :param virtual_disks: A list of virtual disks to unserve
+ """
+
+ virtual_disk_array = (self._storage_services_client.factory
+ .create(self.O_ARRAY_OF_STRING))
+ virtual_disk_array.string = virtual_disks
+
+ self._invoke_storage_services('UnserveVirtualDisksFromHost',
+ hostId=host_id,
+ virtualDisks=virtual_disk_array)
+
+ def unserve_virtual_disks_from_port(self, port_id, virtual_disks):
+ """Unserve multiple virtual disks from a specified initiator port.
+
+ :param port_id: Id of the initiator port
+ :param virtual_disks: A list of virtual disks to unserve
+ """
+
+ virtual_disk_array = (self._storage_services_client.factory
+ .create(self.O_ARRAY_OF_STRING))
+ virtual_disk_array.string = virtual_disks
+
+ self._invoke_storage_services('UnserveVirtualDisksFromPort',
+ portId=port_id,
+ virtualDisks=virtual_disk_array)
+
+ def bind_logical_disk(self, virtual_disk_id, logical_disk_id, role,
+ create_mirror_mappings, create_client_mappings,
+ add_redundancy):
+ """Bind (add) a logical disk to a virtual disk.
+
+ :param virtual_disk_id: Id of the virtual disk to bind to
+ :param logical_disk_id: Id of the logical disk being bound
+        :param role: Logical disk's role
+ :param create_mirror_mappings: If True, automatically create the
+ mirror mappings to this disk, assuming
+ there is already another logical disk
+ bound
+ :param create_client_mappings: If True, automatically create mappings
+ from mapped hosts to the new disk
+ :param add_redundancy: If True, the mirror has redundant mirror paths
+ :return: Updated virtual disk data
+ """
+
+ logical_disk_role = getattr(self._storage_services_client.factory
+ .create(self.O_LOGICAL_DISK_ROLE),
+ role)
+
+ return self._invoke_storage_services(
+ 'BindLogicalDisk',
+ virtualDiskId=virtual_disk_id,
+ logicalDiskId=logical_disk_id,
+ role=logical_disk_role,
+ createMirrorMappings=create_mirror_mappings,
+ createClientMappings=create_client_mappings,
+ addRedundancy=add_redundancy)
+
+ def get_snapshots(self):
+ """Get all the snapshots on all the servers in the region.
+
+ :return: A list of snapshot data.
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetSnapshots'),
+ 'SnapshotData')
+
+ def create_snapshot(self, virtual_disk_id, name, description,
+ destination_pool_id, snapshot_type,
+ duplicate_disk_id, storage_profile_id):
+ """Create a snapshot relationship.
+
+ :param virtual_disk_id: Virtual disk id
+ :param name: Name of snapshot
+ :param description: Description
+ :param destination_pool_id: Destination pool id
+ :param snapshot_type: Type of snapshot
+ :param duplicate_disk_id: If set to True then the destination virtual
+ disk's SCSI id will be a duplicate of the
+ source's
+ :param storage_profile_id: Specifies the destination virtual disk's
+ storage profile
+ :return: New snapshot data
+ """
+
+ st_type = getattr(self._storage_services_client.factory
+ .create(self.O_SNAPSHOT_TYPE),
+ snapshot_type)
+
+ return self._invoke_storage_services(
+ 'CreateSnapshot',
+ virtualDiskId=virtual_disk_id,
+ name=name,
+ description=description,
+ destinationPoolId=destination_pool_id,
+ type=st_type,
+ duplicateDiskId=duplicate_disk_id,
+ storageProfileId=storage_profile_id)
+
+ def delete_snapshot(self, snapshot_id):
+ """Delete the snapshot.
+
+ :param snapshot_id: Snapshot id
+ """
+
+ self._invoke_storage_services('DeleteSnapshot', snapshotId=snapshot_id)
+
+ def get_storage_profiles(self):
+ """Get all the all the defined storage profiles.
+
+ :return: A list of storage profiles
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetStorageProfiles'),
+ 'StorageProfileData')
+
+ def designate_map_store(self, pool_id):
+ """Designate which pool the snapshot mapstore will be allocated from.
+
+ :param pool_id: Pool id
+ :return: Updated server host data, which includes the mapstore pool id
+ """
+
+ return self._invoke_storage_services('DesignateMapStore',
+ poolId=pool_id)
+
+ def get_performance_by_type(self, performance_types):
+ """Get performance data for specific types of performance counters.
+
+ :param performance_types: A list of performance counter types
+ :return: A list of performance data points
+ """
+
+ prfm_type_array = (self._storage_services_client.factory
+ .create(self.O_ARRAY_OF_PERFORMANCE_TYPE))
+ prfm_type_array.PerformanceType = list(
+ getattr(self._storage_services_client.factory
+ .create(self.O_PERFORMANCE_TYPE),
+ performance_type)
+ for performance_type in performance_types)
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetPerformanceByType',
+ types=prfm_type_array),
+ 'CollectionPointData')
+
+ def get_ports(self):
+ """Get all ports in the configuration.
+
+ :return: A list of SCSI ports
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetPorts'),
+ 'ScsiPortData')
+
+ def build_scsi_port_data(self, host_id, port_name, port_mode, port_type):
+ """Create ScsiPortData object that represents SCSI port, of any type.
+
+ :param host_id: Id of the port's host computer
+ :param port_name: Unique name of the port.
+ :param port_mode: Mode of port: initiator or target
+ :param port_type: Type of port, Fc, iSCSI or loopback
+ :return: ScsiPortData object
+ """
+
+ scsi_port_data = (self._storage_services_client.factory
+ .create(self.O_SCSI_PORT_DATA))
+ scsi_port_data.HostId = host_id
+ scsi_port_data.PortName = port_name
+ scsi_port_data.PortMode = getattr(self._storage_services_client.factory
+ .create(self.O_SCSI_MODE),
+ port_mode)
+ scsi_port_data.PortType = getattr(self._storage_services_client.factory
+ .create(self.O_SCSI_PORT_TYPE),
+ port_type)
+
+ return scsi_port_data
+
+ def register_port(self, scsi_port_data):
+ """Register a port in the configuration.
+
+ :param scsi_port_data: Port data
+ :return: Updated port data
+ """
+
+ return self._invoke_storage_services('RegisterPort',
+ port=scsi_port_data)
+
+ def assign_port(self, client_id, port_id):
+ """Assign a port to a client.
+
+ :param client_id: Client id
+ :param port_id: Port id
+ :return: Updated port data,
+ which will now have its host id set to the client id
+ """
+
+ return self._invoke_storage_services('AssignPort',
+ clientId=client_id,
+ portId=port_id)
+
+ def set_server_port_properties(self, port_id, properties):
+ """Set a server port's properties.
+
+ :param port_id: Port id
+ :param properties: New properties
+ :return: Updated port data
+ """
+
+ return self._invoke_storage_services('SetServerPortProperties',
+ portId=port_id,
+ properties=properties)
+
+ def build_access_token(self, initiator_node_name, initiator_username,
+ initiator_password, mutual_authentication,
+ target_username, target_password):
+ """Create an AccessToken object.
+
+ :param initiator_node_name: Initiator node name
+ :param initiator_username: Initiator user name
+ :param initiator_password: Initiator password
+ :param mutual_authentication: If True the target and the initiator
+ authenticate each other.
+ A separate secret is set for each target
+ and for each initiator in the storage
+ area network (SAN).
+ :param target_username: Target user name
+ :param target_password: Target password
+ :return: AccessToken object
+ """
+
+ access_token = (self._storage_services_client.factory
+ .create(self.O_ACCESS_TOKEN))
+ access_token.InitiatorNodeName = initiator_node_name
+ access_token.InitiatorUsername = initiator_username
+ access_token.InitiatorPassword = initiator_password
+ access_token.MutualAuthentication = mutual_authentication
+ access_token.TargetUsername = target_username
+ access_token.TargetPassword = target_password
+
+ return access_token
+
+ def set_access_token(self, iscsi_port_id, access_token):
+ """Set the access token.
+
+ The access token allows access to a specific network node
+ from a specific iSCSI port.
+
+ :param iscsi_port_id: Id of the initiator iSCSI port
+ :param access_token: Access token to be validated
+ :return: Port data
+ """
+
+ return self._invoke_storage_services('SetAccessToken',
+ iScsiPortId=iscsi_port_id,
+ inputToken=access_token)
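+
+ # Editorial sketch (not part of the patch): enabling mutual CHAP for a
+ # hypothetical initiator iSCSI port could chain the two calls above; all
+ # names and credentials here are made up.
+ #
+ #     token = client.build_access_token(
+ #         'iqn.1993-08.org.debian:01:abc123', 'chap_user', 'chap_secret',
+ #         True, 'target_user', 'target_secret')
+ #     client.set_access_token(iscsi_port_id, token)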
+
+ def get_clients(self):
+ """Get all the clients in the configuration.
+
+ :return: A list of client data
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetClients'),
+ 'ClientHostData')
+
+ def register_client(self, host_name, description, machine_type,
+ mode, preferred_server_ids):
+ """Register the client, creating a client object in the configuration.
+
+ :param host_name: Name of the client
+ :param description: Description
+ :param machine_type: Type of client
+ :param mode: Path policy mode of the client
+ :param preferred_server_ids: Preferred server ids
+ :return: New client data
+ """
+
+ client_machine_type = getattr(self._storage_services_client.factory
+ .create(self.O_CLIENT_MACHINE_TYPE),
+ machine_type)
+ client_mode = getattr(self._storage_services_client.factory
+ .create(self.O_PATH_POLICY),
+ mode)
+
+ return self._invoke_storage_services(
+ 'RegisterClient',
+ hostName=host_name,
+ description=description,
+ type=client_machine_type,
+ mode=client_mode,
+ preferredServerIds=preferred_server_ids)
+
+ def set_client_capabilities(self, client_id, mpio, alua):
+ """Set the client capabilities for MPIO and ALUA.
+
+ :param client_id: Client id
+ :param mpio: If set to True then MPIO-capable
+ :param alua: If set to True then ALUA-capable
+ :return: Updated client data
+ """
+
+ return self._invoke_storage_services('SetClientCapabilities',
+ clientId=client_id,
+ mpio=mpio,
+ alua=alua)
+
+ def get_target_domains(self):
+ """Get all the target domains in the configuration.
+
+ :return: A list of target domains
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetTargetDomains'),
+ 'VirtualTargetDomainData')
+
+ def create_target_domain(self, initiator_host_id, target_host_id):
+ """Create a target domain given a pair of hosts, target and initiator.
+
+ :param initiator_host_id: Id of the initiator host machine
+ :param target_host_id: Id of the target host server
+ :return: New target domain
+ """
+
+ return self._invoke_storage_services('CreateTargetDomain',
+ initiatorHostId=initiator_host_id,
+ targetHostId=target_host_id)
+
+ def delete_target_domain(self, target_domain_id):
+ """Delete a target domain.
+
+ :param target_domain_id: Target domain id
+ """
+
+ self._invoke_storage_services('DeleteTargetDomain',
+ targetDomainId=target_domain_id)
+
+ def get_target_devices(self):
+ """Get all the target devices in the configuration.
+
+ :return: A list of target devices
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetTargetDevices'),
+ 'VirtualTargetDeviceData')
+
+ def build_scsi_port_nexus_data(self, initiator_port_id, target_port_id):
+ """Create a ScsiPortNexusData object.
+
+ Nexus is a pair of ports that can communicate, one being the initiator,
+ the other the target.
+
+ :param initiator_port_id: Id of the initiator port
+ :param target_port_id: Id of the target port
+ :return: ScsiPortNexusData object
+ """
+
+ scsi_port_nexus_data = (self._storage_services_client.factory
+ .create(self.O_SCSI_PORT_NEXUS_DATA))
+ scsi_port_nexus_data.InitiatorPortId = initiator_port_id
+ scsi_port_nexus_data.TargetPortId = target_port_id
+
+ return scsi_port_nexus_data
+
+ def create_target_device(self, target_domain_id, nexus):
+ """Create a target device, given a target domain and a nexus.
+
+ :param target_domain_id: Target domain id
+ :param nexus: Nexus, or pair of ports
+ :return: New target device
+ """
+
+ return self._invoke_storage_services('CreateTargetDevice',
+ targetDomainId=target_domain_id,
+ nexus=nexus)
+
+ def delete_target_device(self, target_device_id):
+ """Delete a target device.
+
+ :param target_device_id: Target device id
+ """
+
+ self._invoke_storage_services('DeleteTargetDevice',
+ targetDeviceId=target_device_id)
+
+ def get_next_free_lun(self, target_device_id):
+ """Find the next unused LUN number for a specified target device.
+
+ :param target_device_id: Target device id
+ :return: LUN number
+ """
+
+ return self._invoke_storage_services('GetNextFreeLun',
+ targetDeviceId=target_device_id)
+
+ def get_logical_units(self):
+ """Get all the mappings configured in the system.
+
+ :return: A list of mappings
+ """
+
+ return self._get_list_data(
+ self._invoke_storage_services('GetLogicalUnits'),
+ 'VirtualLogicalUnitData')
+
+ def map_logical_disk(self, logical_disk_id, nexus, lun,
+ initiator_host_id, mapping_type):
+ """Map a logical disk to a host.
+
+ :param logical_disk_id: Id of the logical disk
+ :param nexus: Nexus, or pair of ports
+ :param lun: Logical Unit Number
+ :param initiator_host_id: Id of the initiator host machine
+ :param mapping_type: Type of mapping
+ :return: New mapping
+ """
+
+ logical_unit_type = getattr(self._storage_services_client.factory
+ .create(self.O_LOGICAL_UNIT_TYPE),
+ mapping_type)
+
+ return self._invoke_storage_services('MapLogicalDisk',
+ logicalDiskId=logical_disk_id,
+ nexus=nexus,
+ lun=lun,
+ initiatorHostId=initiator_host_id,
+ mappingType=logical_unit_type)
+
+ def unmap_logical_disk(self, logical_disk_id, nexus):
+ """Unmap a logical disk mapped with a specified nexus.
+
+ :param logical_disk_id: Id of the logical disk
+ :param nexus: Nexus, or pair of ports
+ """
+
+ self._invoke_storage_services('UnmapLogicalDisk',
+ logicalDiskId=logical_disk_id,
+ nexusData=nexus)
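+
+ # Editorial sketch (not part of the patch): a typical explicit mapping
+ # sequence with this client, using hypothetical ids, would be:
+ #
+ #     nexus = client.build_scsi_port_nexus_data(init_port_id, tgt_port_id)
+ #     lun = client.get_next_free_lun(target_device_id)
+ #     unit = client.map_logical_disk(logical_disk_id, nexus, lun,
+ #                                    initiator_host_id, 'Client')
+ #     client.unmap_logical_disk(logical_disk_id, nexus)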
diff --git a/cinder/volume/drivers/datacore/driver.py b/cinder/volume/drivers/datacore/driver.py
new file mode 100644
index 00000000000..0cb38b56a42
--- /dev/null
+++ b/cinder/volume/drivers/datacore/driver.py
@@ -0,0 +1,845 @@
+# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Base Driver for DataCore SANsymphony storage array."""
+
+import math
+import time
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_service import loopingcall
+from oslo_utils import excutils
+from oslo_utils import units
+
+from cinder import context as cinder_context
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder import interface
+from cinder import utils as cinder_utils
+from cinder.volume import configuration
+from cinder.volume import driver
+from cinder.volume.drivers.datacore import api
+from cinder.volume.drivers.datacore import exception as datacore_exception
+from cinder.volume.drivers.datacore import utils as datacore_utils
+from cinder.volume.drivers.san import san
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+datacore_opts = [
+ cfg.StrOpt('datacore_disk_type',
+ default='single',
+ choices=['single', 'mirrored'],
+ help='DataCore virtual disk type (single/mirrored). '
+ 'Mirrored virtual disks require two storage servers in '
+ 'the server group.'),
+ cfg.StrOpt('datacore_storage_profile',
+ default=None,
+ help='DataCore virtual disk storage profile.'),
+ cfg.ListOpt('datacore_disk_pools',
+ default=[],
+ help='List of DataCore disk pools that can be used '
+ 'by volume driver.'),
+ cfg.IntOpt('datacore_api_timeout',
+ default=300,
+ min=1,
+ help='Seconds to wait for a response from a '
+ 'DataCore API call.'),
+ cfg.IntOpt('datacore_disk_failed_delay',
+ default=300,
+ min=0,
+ help='Seconds to wait for DataCore virtual '
+ 'disk to come out of the "Failed" state.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(datacore_opts, group=configuration.SHARED_CONF_GROUP)
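+
+# Editorial example (not part of the patch): with the options above, a
+# hypothetical backend section in cinder.conf could look like the following;
+# all values are illustrative.
+#
+#     [datacore]
+#     datacore_disk_type = mirrored
+#     datacore_disk_pools = Disk pool 1, Disk pool 2
+#     datacore_api_timeout = 300
+#     datacore_disk_failed_delay = 300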
+
+
+@interface.volumedriver
+class DataCoreVolumeDriver(driver.VolumeDriver):
+ """DataCore SANsymphony base volume driver."""
+
+ STORAGE_PROTOCOL = 'N/A'
+
+ AWAIT_DISK_ONLINE_INTERVAL = 10
+ AWAIT_SNAPSHOT_ONLINE_INTERVAL = 10
+ AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY = 5
+
+ DATACORE_SINGLE_DISK = 'single'
+ DATACORE_MIRRORED_DISK = 'mirrored'
+ VOLUME_TYPE_STRIPED = 'Striped'
+ VOLUME_TYPE_SPANNED = 'Spanned'
+
+ DATACORE_DISK_TYPE_KEY = 'datacore:disk_type'
+ DATACORE_STORAGE_PROFILE_KEY = 'datacore:storage_profile'
+ DATACORE_DISK_POOLS_KEY = 'datacore:disk_pools'
+
+ VALID_VOLUME_TYPE_KEYS = (DATACORE_DISK_TYPE_KEY,
+ DATACORE_STORAGE_PROFILE_KEY,
+ DATACORE_DISK_POOLS_KEY,)
+
+ def __init__(self, *args, **kwargs):
+ super(DataCoreVolumeDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(san.san_opts)
+ self.configuration.append_config_values(datacore_opts)
+ self._api = None
+ self._default_volume_options = None
+
+ def do_setup(self, context):
+ """Perform validations and establish connection to server.
+
+ :param context: Context information
+ """
+
+ required_params = [
+ 'san_ip',
+ 'san_login',
+ 'san_password',
+ ]
+ for param in required_params:
+ if not getattr(self.configuration, param, None):
+ raise cinder_exception.InvalidInput(_("%s not set.") % param)
+
+ self._api = api.DataCoreClient(
+ self.configuration.san_ip,
+ self.configuration.san_login,
+ self.configuration.san_password,
+ self.configuration.datacore_api_timeout)
+
+ disk_type = self.configuration.datacore_disk_type
+ if disk_type:
+ disk_type = disk_type.lower()
+ storage_profile = self.configuration.datacore_storage_profile
+ if storage_profile:
+ storage_profile = storage_profile.lower()
+ disk_pools = self.configuration.datacore_disk_pools
+ if disk_pools:
+ disk_pools = [pool.lower() for pool in disk_pools]
+
+ self._default_volume_options = {
+ self.DATACORE_DISK_TYPE_KEY: disk_type,
+ self.DATACORE_STORAGE_PROFILE_KEY: storage_profile,
+ self.DATACORE_DISK_POOLS_KEY: disk_pools,
+ }
+
+ def check_for_setup_error(self):
+ pass
+
+ def get_volume_backend_name(self):
+ """Get volume backend name of the volume service.
+
+ :return: Volume backend name
+ """
+
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ return (backend_name or 'DataCore' + self.__class__.__name__)
+
+ def create_volume(self, volume):
+ """Creates a volume.
+
+ :param volume: Volume object
+ :return: Dictionary of changes to the volume object to be persisted
+ """
+
+ volume_options = self._get_volume_options(volume)
+
+ disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
+ if disk_type == self.DATACORE_MIRRORED_DISK:
+ logical_disk_count = 2
+ virtual_disk_type = 'MultiPathMirrored'
+ elif disk_type == self.DATACORE_SINGLE_DISK:
+ logical_disk_count = 1
+ virtual_disk_type = 'NonMirrored'
+ else:
+ msg = _("Virtual disk type '%s' is not valid.") % disk_type
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ profile_id = self._get_storage_profile_id(
+ volume_options[self.DATACORE_STORAGE_PROFILE_KEY])
+
+ pools = datacore_utils.get_distinct_by(
+ lambda pool: pool.ServerId,
+ self._get_available_disk_pools(
+ volume_options[self.DATACORE_DISK_POOLS_KEY]))
+
+ if len(pools) < logical_disk_count:
+ msg = _("Suitable disk pools were not found for "
+ "creating virtual disk.")
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ disk_size = self._get_size_in_bytes(volume.size)
+
+ logical_disks = []
+ virtual_disk = None
+ try:
+ for logical_disk_pool in pools[:logical_disk_count]:
+ logical_disks.append(
+ self._api.create_pool_logical_disk(
+ logical_disk_pool.Id,
+ self.VOLUME_TYPE_STRIPED, disk_size))
+
+ virtual_disk_data = self._api.build_virtual_disk_data(
+ volume.id,
+ virtual_disk_type,
+ disk_size,
+ volume.display_name,
+ profile_id)
+
+ virtual_disk = self._api.create_virtual_disk_ex2(
+ virtual_disk_data,
+ logical_disks[0].Id,
+ logical_disks[1].Id if logical_disk_count == 2 else None,
+ True)
+
+ virtual_disk = self._await_virtual_disk_online(virtual_disk.Id)
+
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception("Creation of volume %(volume)s failed.",
+ {'volume': volume.id})
+ try:
+ if virtual_disk:
+ self._api.delete_virtual_disk(virtual_disk.Id, True)
+ else:
+ for logical_disk in logical_disks:
+ self._api.delete_logical_disk(logical_disk.Id)
+ except datacore_exception.DataCoreException as e:
+ LOG.warning("An error occurred on a cleanup after failed "
+ "creation of volume %(volume)s: %(error)s.",
+ {'volume': volume.id, 'error': e})
+
+ return {'provider_location': virtual_disk.Id}
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot.
+
+ :param volume: Volume object
+ :param snapshot: Snapshot object
+ :return: Dictionary of changes to the volume object to be persisted
+ """
+ return self._create_volume_from(volume, snapshot)
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Creates volume clone.
+
+ :param volume: New Volume object
+ :param src_vref: Volume object that must be cloned
+ :return: Dictionary of changes to the volume object to be persisted
+ """
+
+ return self._create_volume_from(volume, src_vref)
+
+ def extend_volume(self, volume, new_size):
+ """Extend an existing volume's size.
+
+ :param volume: Volume object
+ :param new_size: new size in GB to extend this volume to
+ """
+
+ virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True)
+ self._set_virtual_disk_size(virtual_disk,
+ self._get_size_in_bytes(new_size))
+ virtual_disk = self._await_virtual_disk_online(virtual_disk.Id)
+
+ def delete_volume(self, volume):
+ """Deletes a volume.
+
+ :param volume: Volume object
+ """
+
+ virtual_disk = self._get_virtual_disk_for(volume)
+ if virtual_disk:
+ if virtual_disk.IsServed:
+ logical_disks = self._api.get_logical_disks()
+ logical_units = self._api.get_logical_units()
+ target_devices = self._api.get_target_devices()
+ logical_disks = [disk.Id for disk in logical_disks
+ if disk.VirtualDiskId == virtual_disk.Id]
+ logical_unit_devices = [unit.VirtualTargetDeviceId
+ for unit in logical_units
+ if unit.LogicalDiskId in logical_disks]
+ initiator_ports = set(device.InitiatorPortId
+ for device in target_devices
+ if device.Id in logical_unit_devices)
+ for port in initiator_ports:
+ self._api.unserve_virtual_disks_from_port(
+ port, [virtual_disk.Id])
+ self._api.delete_virtual_disk(virtual_disk.Id, True)
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot.
+
+ :param snapshot: Snapshot object
+ :return: Dictionary of changes to the snapshot object to be persisted
+ """
+
+ src_virtual_disk = self._get_virtual_disk_for(snapshot.volume,
+ raise_not_found=True)
+
+ volume_options = self._get_volume_options(snapshot.volume)
+ profile_name = volume_options[self.DATACORE_STORAGE_PROFILE_KEY]
+ profile_id = self._get_storage_profile_id(profile_name)
+ pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY]
+
+ if src_virtual_disk.DiskStatus != 'Online':
+ LOG.warning("Attempting to make a snapshot from virtual disk "
+ "%(disk)s that is in %(state)s state.",
+ {'disk': src_virtual_disk.Id,
+ 'state': src_virtual_disk.DiskStatus})
+
+ snapshot_virtual_disk = self._create_virtual_disk_copy(
+ src_virtual_disk,
+ snapshot.id,
+ snapshot.display_name,
+ profile_id=profile_id,
+ pool_names=pool_names)
+ snapshot_virtual_disk = self._await_virtual_disk_online(
+ snapshot_virtual_disk.Id)
+
+ return {'provider_location': snapshot_virtual_disk.Id}
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a snapshot.
+
+ :param snapshot: Snapshot object
+ """
+
+ snapshot_virtual_disk = self._get_virtual_disk_for(snapshot)
+ if snapshot_virtual_disk:
+ self._api.delete_virtual_disk(snapshot_virtual_disk.Id, True)
+
+ def ensure_export(self, context, volume):
+ pass
+
+ def create_export(self, context, volume, connector):
+ pass
+
+ def remove_export(self, context, volume):
+ pass
+
+ def unserve_virtual_disks_from_host(self, volume, connector):
+ virtual_disk = self._get_virtual_disk_for(volume)
+ if virtual_disk:
+ if connector is None:
+ clients = self._api.get_clients()
+ else:
+ clients = [self._get_client(connector['host'],
+ create_new=False)]
+
+ server_group = self._get_our_server_group()
+
+ @cinder_utils.synchronized(
+ 'datacore-backend-%s' % server_group.Id, external=True)
+ def unserve_virtual_disk(client_id):
+ self._api.unserve_virtual_disks_from_host(
+ client_id, [virtual_disk.Id])
+
+ for client in clients:
+ unserve_virtual_disk(client.Id)
+
+ def terminate_connection(self, volume, connector, **kwargs):
+ """Disallow connection from connector.
+
+ :param volume: Volume object
+ :param connector: Connector information
+ """
+ self.unserve_virtual_disks_from_host(volume, connector)
+
+ def manage_existing(self, volume, existing_ref):
+ return self.manage_existing_object(volume, existing_ref, "volume")
+
+ def manage_existing_get_size(self, volume, existing_ref):
+ return self.manage_existing_object_get_size(volume, existing_ref,
+ "volume")
+
+ def manage_existing_snapshot(self, snapshot, existing_ref):
+ return self.manage_existing_object(snapshot, existing_ref, "snapshot")
+
+ def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
+ return self.manage_existing_object_get_size(snapshot, existing_ref,
+ "snapshot")
+
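+ # Editorial note (not part of the patch): the reference consumed by the two
+ # methods below identifies the virtual disk by its SANsymphony alias, e.g.
+ # {'source-name': 'cinder-managed-disk'} (the alias is hypothetical).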
+ def manage_existing_object(self, existing_object, existing_ref,
+ object_type):
+ if 'source-name' not in existing_ref:
+ reason = _('Reference must contain source-name element.')
+ raise cinder_exception.ManageExistingInvalidReference(
+ existing_ref=existing_ref, reason=reason)
+
+ vd_alias = existing_ref['source-name']
+ virtual_disk = datacore_utils.get_first_or_default(
+ lambda disk: disk.Alias == vd_alias,
+ self._api.get_virtual_disks(),
+ None)
+
+ if not virtual_disk:
+ kwargs = {'existing_ref': vd_alias,
+ 'reason': 'Specified virtual disk does not exist.'}
+ raise cinder_exception.ManageExistingInvalidReference(**kwargs)
+
+ return {'provider_location': virtual_disk.Id}
+
+ def manage_existing_object_get_size(self, existing_object, existing_ref,
+ object_type):
+ if 'source-name' not in existing_ref:
+ reason = _('Reference must contain source-name element.')
+ raise cinder_exception.ManageExistingInvalidReference(
+ existing_ref=existing_ref, reason=reason)
+
+ vd_alias = existing_ref['source-name']
+ virtual_disk = datacore_utils.get_first_or_default(
+ lambda disk: disk.Alias == vd_alias,
+ self._api.get_virtual_disks(),
+ None)
+
+ if not virtual_disk:
+ kwargs = {'existing_ref': vd_alias,
+ 'reason': 'Specified virtual disk does not exist.'}
+ raise cinder_exception.ManageExistingInvalidReference(**kwargs)
+ return self._get_size_in_gigabytes(virtual_disk.Size.Value)
+
+ def _update_volume_stats(self):
+ performance_data = self._api.get_performance_by_type(
+ ['DiskPoolPerformance'])
+ total = 0
+ available = 0
+ reserved = 0
+ for performance in performance_data:
+ missing_perf_data = []
+
+ if hasattr(performance.PerformanceData, 'BytesTotal'):
+ total += performance.PerformanceData.BytesTotal
+ else:
+ missing_perf_data.append('BytesTotal')
+
+ if hasattr(performance.PerformanceData, 'BytesAvailable'):
+ available += performance.PerformanceData.BytesAvailable
+ else:
+ missing_perf_data.append('BytesAvailable')
+
+ if hasattr(performance.PerformanceData, 'BytesReserved'):
+ reserved += performance.PerformanceData.BytesReserved
+ else:
+ missing_perf_data.append('BytesReserved')
+
+ if missing_perf_data:
+ LOG.warning("Performance data %(data)s is missing for "
+ "disk pool %(pool)s",
+ {'data': missing_perf_data,
+ 'pool': performance.ObjectId})
+ provisioned = 0
+ logical_disks = self._api.get_logical_disks()
+ for disk in logical_disks:
+ if getattr(disk, 'PoolId', None):
+ provisioned += disk.Size.Value
+ total_capacity_gb = self._get_size_in_gigabytes(total)
+ free = available + reserved
+ free_capacity_gb = self._get_size_in_gigabytes(free)
+ provisioned_capacity_gb = self._get_size_in_gigabytes(provisioned)
+ reserved_percentage = 100.0 * reserved / total if total else 0.0
+ reserved_percentage = math.ceil(reserved_percentage)
+ ratio = self.configuration.max_over_subscription_ratio
+ stats_data = {
+ 'vendor_name': 'DataCore',
+ 'QoS_support': False,
+ 'volume_backend_name': self.get_volume_backend_name(),
+ 'driver_version': self.get_version(),
+ 'storage_protocol': self.STORAGE_PROTOCOL,
+ 'total_capacity_gb': total_capacity_gb,
+ 'free_capacity_gb': free_capacity_gb,
+ 'provisioned_capacity_gb': provisioned_capacity_gb,
+ 'reserved_percentage': reserved_percentage,
+ 'max_over_subscription_ratio': ratio,
+ 'thin_provisioning_support': True,
+ 'thick_provisioning_support': False,
+ 'online_extend_support': False,
+ }
+ self._stats = stats_data
+
+ def _get_our_server_group(self):
+ server_group = datacore_utils.get_first(lambda group: group.OurGroup,
+ self._api.get_server_groups())
+
+ return server_group
+
+ def _get_volume_options_from_type(self, type_id, default_options):
+ options = dict(default_options.items())
+ if type_id:
+ admin_context = cinder_context.get_admin_context()
+ volume_type = volume_types.get_volume_type(admin_context, type_id)
+ specs = dict(volume_type).get('extra_specs')
+
+ for key, value in specs.items():
+ if key in self.VALID_VOLUME_TYPE_KEYS:
+ if key == self.DATACORE_DISK_POOLS_KEY:
+ options[key] = [v.strip().lower()
+ for v in value.split(',')]
+ else:
+ options[key] = value.lower()
+
+ return options
+
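+ # Editorial sketch (not part of the patch): extra specs on a volume type
+ # override the defaults merged above; a hypothetical type could carry:
+ #
+ #     {'datacore:disk_type': 'mirrored',
+ #      'datacore:storage_profile': 'High Performance',
+ #      'datacore:disk_pools': 'Disk pool 1,Disk pool 2'}
+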
+ def _get_volume_options(self, volume):
+ type_id = volume.volume_type_id
+
+ volume_options = self._get_volume_options_from_type(
+ type_id, self._default_volume_options)
+
+ return volume_options
+
+ def _get_online_servers(self):
+ servers = self._api.get_servers()
+ online_servers = [server for server in servers
+ if server.State == 'Online']
+ return online_servers
+
+ def _get_available_disk_pools(self, disk_pool_names=None):
+ online_servers = [server.Id for server in self._get_online_servers()]
+
+ pool_performance = {
+ performance.ObjectId: performance.PerformanceData for performance
+ in self._api.get_performance_by_type(['DiskPoolPerformance'])}
+
+ disk_pools = self._api.get_disk_pools()
+
+ lower_disk_pool_names = ([name.lower() for name in disk_pool_names]
+ if disk_pool_names else [])
+
+ available_disk_pools = [
+ pool for pool in disk_pools
+ if (self._is_pool_healthy(pool, pool_performance,
+ online_servers) and
+ (not lower_disk_pool_names or
+ pool.Caption.lower() in lower_disk_pool_names))]
+
+ available_disk_pools.sort(
+ key=lambda p: pool_performance[p.Id].BytesAvailable, reverse=True)
+
+ return available_disk_pools
+
+ def _get_virtual_disk_for(self, obj, raise_not_found=False):
+ disk_id = obj.get('provider_location')
+
+ virtual_disk = datacore_utils.get_first_or_default(
+ lambda disk: disk.Id == disk_id,
+ self._api.get_virtual_disks(),
+ None)
+ if not virtual_disk:
+ msg = (_("Virtual disk not found for %(object)s %(object_id)s.")
+ % {'object': obj.__class__.__name__.lower(),
+ 'object_id': obj['id']})
+ if raise_not_found:
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+ else:
+ LOG.warning(msg)
+
+ return virtual_disk
+
+ def _set_virtual_disk_size(self, virtual_disk, new_size):
+ return self._api.set_virtual_disk_size(virtual_disk.Id, new_size)
+
+ def _get_storage_profile(self, profile_name, raise_not_found=False):
+ profiles = self._api.get_storage_profiles()
+ profile = datacore_utils.get_first_or_default(
+ lambda p: p.Caption.lower() == profile_name.lower(),
+ profiles,
+ None)
+ if not profile and raise_not_found:
+ msg = (_("Specified storage profile %s not found.")
+ % profile_name)
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ return profile
+
+ def _get_storage_profile_id(self, profile_name):
+ profile_id = None
+ if profile_name:
+ profile = self._get_storage_profile(profile_name,
+ raise_not_found=True)
+ profile_id = profile.Id
+ return profile_id
+
+ def _await_virtual_disk_online(self, virtual_disk_id):
+ def inner(start_time):
+ disk_failed_delay = self.configuration.datacore_disk_failed_delay
+ virtual_disk = datacore_utils.get_first(
+ lambda disk: disk.Id == virtual_disk_id,
+ self._api.get_virtual_disks())
+ if virtual_disk.DiskStatus == 'Online':
+ raise loopingcall.LoopingCallDone(virtual_disk)
+ elif (
+ virtual_disk.DiskStatus != 'FailedRedundancy' and
+ time.time() - start_time >= disk_failed_delay):
+ msg = (_("Virtual disk %(disk)s did not come out of the "
+ "%(state)s state after %(timeout)s seconds.")
+ % {'disk': virtual_disk.Id,
+ 'state': virtual_disk.DiskStatus,
+ 'timeout': disk_failed_delay})
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time())
+ return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait()
+
+ def _create_volume_from(self, volume, src_obj):
+ src_virtual_disk = self._get_virtual_disk_for(src_obj,
+ raise_not_found=True)
+ if src_virtual_disk.DiskStatus != 'Online':
+ LOG.warning("Attempting to create a volume from virtual disk "
+ "%(disk)s that is in %(state)s state.",
+ {'disk': src_virtual_disk.Id,
+ 'state': src_virtual_disk.DiskStatus})
+
+ volume_options = self._get_volume_options(volume)
+ profile_id = self._get_storage_profile_id(
+ volume_options[self.DATACORE_STORAGE_PROFILE_KEY])
+ pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY]
+
+ volume_virtual_disk = self._create_virtual_disk_copy(
+ src_virtual_disk,
+ volume.id,
+ volume.display_name,
+ profile_id=profile_id,
+ pool_names=pool_names)
+
+ volume_logical_disk = datacore_utils.get_first(
+ lambda disk: disk.VirtualDiskId == volume_virtual_disk.Id,
+ self._api.get_logical_disks())
+
+ try:
+ disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
+ if disk_type == self.DATACORE_MIRRORED_DISK:
+ pools = self._get_available_disk_pools(pool_names)
+ selected_pool = datacore_utils.get_first_or_default(
+ lambda pool: (pool.ServerId !=
+ volume_logical_disk.ServerHostId and
+ pool.Id != volume_logical_disk.PoolId),
+ pools, None)
+ if selected_pool:
+ logical_disk = self._api.create_pool_logical_disk(
+ selected_pool.Id,
+ self.VOLUME_TYPE_STRIPED,
+ volume_virtual_disk.Size.Value)
+ self._api.bind_logical_disk(volume_virtual_disk.Id,
+ logical_disk.Id,
+ 'Second',
+ True,
+ False,
+ True)
+ else:
+ msg = _("Can not create mirrored virtual disk. "
+ "Suitable disk pools not found.")
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+ volume_virtual_disk = self._await_virtual_disk_online(
+ volume_virtual_disk.Id)
+ try:
+ source_size = src_obj.size
+ except AttributeError:
+ source_size = src_obj.volume_size
+ if volume.size > source_size:
+ self._set_virtual_disk_size(volume_virtual_disk,
+ self._get_size_in_bytes(
+ volume.size))
+ volume_virtual_disk = datacore_utils.get_first(
+ lambda disk: disk.Id == volume_virtual_disk.Id,
+ self._api.get_virtual_disks())
+ volume_virtual_disk = self._await_virtual_disk_size_change(
+ volume_virtual_disk.Id,
+ self._get_size_in_bytes(source_size))
+
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception("Creation of volume %(volume)s failed.",
+ {'volume': volume.id})
+ try:
+ self._api.delete_virtual_disk(volume_virtual_disk.Id, True)
+ except datacore_exception.DataCoreException as e:
+ LOG.warning("An error occurred on a cleanup after failed "
+ "creation of volume %(volume)s: %(error)s.",
+ {'volume': volume.id, 'error': e})
+
+ return {'provider_location': volume_virtual_disk.Id}
+
+ def _create_full_snapshot(self, description, name, pool_names, profile_id,
+ src_virtual_disk):
+ pools = self._get_available_disk_pools(pool_names)
+ destination_pool = datacore_utils.get_first_or_default(
+ lambda pool: (pool.ServerId == src_virtual_disk.FirstHostId or
+ pool.ServerId == src_virtual_disk.SecondHostId),
+ pools, None)
+
+ if not destination_pool:
+ msg = _("Suitable snapshot destination disk pool not found for "
+ "virtual disk %s.") % src_virtual_disk.Id
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+ server = datacore_utils.get_first(
+ lambda srv: srv.Id == destination_pool.ServerId,
+ self._api.get_servers())
+ if not server.SnapshotMapStorePoolId:
+ self._api.designate_map_store(destination_pool.Id)
+ snapshot = self._api.create_snapshot(src_virtual_disk.Id,
+ name,
+ description,
+ destination_pool.Id,
+ 'Full',
+ False,
+ profile_id)
+ return snapshot
+
+ def _await_snapshot_migrated(self, snapshot_id):
+ def inner():
+ snapshot_data = datacore_utils.get_first(
+ lambda snapshot: snapshot.Id == snapshot_id,
+ self._api.get_snapshots())
+ if snapshot_data.State == 'Migrated':
+ raise loopingcall.LoopingCallDone(snapshot_data)
+ elif (snapshot_data.State != 'Healthy' and
+ snapshot_data.Failure != 'NoFailure'):
+ msg = (_("Full migration of snapshot %(snapshot)s failed. "
+ "Snapshot is in %(state)s state.")
+ % {'snapshot': snapshot_data.Id,
+ 'state': snapshot_data.State})
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ loop = loopingcall.FixedIntervalLoopingCall(inner)
+ time.sleep(self.AWAIT_SNAPSHOT_ONLINE_INTERVAL)
+ return loop.start(self.AWAIT_SNAPSHOT_ONLINE_INTERVAL,
+ self.AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY).wait()
+
+ def _create_virtual_disk_copy(self, src_virtual_disk, name, description,
+ profile_id=None, pool_names=None):
+ snapshot = self._create_full_snapshot(
+ description, name, pool_names, profile_id, src_virtual_disk)
+
+ try:
+ snapshot = self._await_snapshot_migrated(snapshot.Id)
+ self._api.delete_snapshot(snapshot.Id)
+ self._await_snapshot_split_state_change(snapshot)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception("Split operation failed for snapshot "
+ "%(snapshot)s.", {'snapshot': snapshot.Id})
+ try:
+ logical_disk_copy = datacore_utils.get_first(
+ lambda disk: (disk.Id ==
+ snapshot.DestinationLogicalDiskId),
+ self._api.get_logical_disks())
+
+ virtual_disk_copy = datacore_utils.get_first(
+ lambda disk: (disk.Id ==
+ logical_disk_copy.VirtualDiskId),
+ self._api.get_virtual_disks())
+
+ self._api.delete_virtual_disk(virtual_disk_copy.Id, True)
+ except datacore_exception.DataCoreException as e:
+ LOG.warning("An error occurred on a cleanup after failed "
+ "split of snapshot %(snapshot)s: %(error)s.",
+ {'snapshot': snapshot.Id, 'error': e})
+
+ logical_disk_copy = datacore_utils.get_first(
+ lambda disk: disk.Id == snapshot.DestinationLogicalDiskId,
+ self._api.get_logical_disks())
+
+ virtual_disk_copy = datacore_utils.get_first(
+ lambda disk: disk.Id == logical_disk_copy.VirtualDiskId,
+ self._api.get_virtual_disks())
+
+ return virtual_disk_copy
+
+ def _get_client(self, name, create_new=False):
+ client_hosts = self._api.get_clients()
+
+ client = datacore_utils.get_first_or_default(
+ lambda host: host.HostName.split('.')[0] == name.split('.')[0],
+ client_hosts, None)
+
+ if create_new:
+ if not client:
+ client = self._api.register_client(
+ name, None, 'Other', 'PreferredServer', None)
+ self._api.set_client_capabilities(client.Id, True, True)
+
+ return client
+
+ @staticmethod
+ def _is_pool_healthy(pool, pool_performance, online_servers):
+ if (pool.PoolStatus == 'Running' and
+ hasattr(pool_performance[pool.Id], 'BytesAvailable') and
+ pool.ServerId in online_servers):
+ return True
+ return False
+
+ @staticmethod
+ def _get_size_in_bytes(size_in_gigabytes):
+ return size_in_gigabytes * units.Gi
+
+ @staticmethod
+ def _get_size_in_gigabytes(size_in_bytes):
+ return size_in_bytes / float(units.Gi)
+
+ def _await_virtual_disk_size_change(self, virtual_disk_id, old_size):
+ def inner(start_time):
+ disk_failed_delay = self.configuration.datacore_disk_failed_delay
+ virtual_disk = datacore_utils.get_first(
+ lambda disk: disk.Id == virtual_disk_id,
+ self._api.get_virtual_disks())
+ if (virtual_disk.DiskStatus == 'Online' and
+ virtual_disk.Size.Value > old_size):
+ raise loopingcall.LoopingCallDone(virtual_disk)
+ elif (virtual_disk.DiskStatus != 'FailedRedundancy' and
+ time.time() - start_time >= disk_failed_delay):
+ msg = (_("Virtual disk %(disk)s did not come out of the "
+ "%(state)s state after %(timeout)s seconds.")
+ % {'disk': virtual_disk.Id,
+ 'state': virtual_disk.DiskStatus,
+ 'timeout': disk_failed_delay})
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time())
+ time.sleep(self.AWAIT_DISK_ONLINE_INTERVAL)
+ return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait()
+
+ def _await_snapshot_split_state_change(self, split_snapshot):
+ def inner(start_time):
+ disk_failed_delay = self.configuration.datacore_disk_failed_delay
+ snapshot_found = False
+ snapshot_list = self._api.get_snapshots()
+ if not snapshot_list:
+ raise loopingcall.LoopingCallDone()
+ for entry in snapshot_list:
+ if entry.Caption == split_snapshot.Caption:
+ snapshot_found = True
+ break
+ if not snapshot_found:
+ raise loopingcall.LoopingCallDone()
+ if time.time() - start_time >= disk_failed_delay:
+ msg = (_("Split of snapshot %(disk)s did not complete "
+ "after %(timeout)s seconds.")
+ % {'disk': split_snapshot.Caption,
+ 'timeout': disk_failed_delay})
+ LOG.error(msg)
+ raise loopingcall.LoopingCallDone()
+
+ inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time())
+ return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait()
diff --git a/cinder/volume/drivers/datacore/exception.py b/cinder/volume/drivers/datacore/exception.py
new file mode 100644
index 00000000000..d70cda047c5
--- /dev/null
+++ b/cinder/volume/drivers/datacore/exception.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Exception definitions."""
+
+from cinder import exception
+from cinder.i18n import _
+
+
+class DataCoreException(exception.VolumeBackendAPIException):
+ """Base DataCore Exception."""
+
+ message = _('DataCore exception.')
+
+
+class DataCoreConnectionException(DataCoreException):
+ """Thrown when there are connection problems during a DataCore API call."""
+
+ message = _('Failed to connect to DataCore Server Group: %(reason)s.')
+
+
+class DataCoreFaultException(DataCoreException):
+ """Thrown when there are faults during a DataCore API call."""
+
+ message = _('DataCore Server Group reported an error: %(reason)s.')
diff --git a/cinder/volume/drivers/datacore/fc.py b/cinder/volume/drivers/datacore/fc.py
new file mode 100644
index 00000000000..1ead6c95942
--- /dev/null
+++ b/cinder/volume/drivers/datacore/fc.py
@@ -0,0 +1,376 @@
+# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Fibre Channel Driver for DataCore SANsymphony storage array."""
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from cinder.common import constants
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder import interface
+from cinder import utils as cinder_utils
+from cinder.volume import configuration
+from cinder.volume.drivers.datacore import driver
+from cinder.volume.drivers.datacore import exception as datacore_exception
+from cinder.volume.drivers.datacore import utils as datacore_utils
+from cinder.zonemanager import utils as fczm_utils
+
+LOG = logging.getLogger(__name__)
+
+datacore_fc_opts = [
+ cfg.ListOpt('datacore_fc_unallowed_targets',
+ default=[],
+ help='List of FC targets that cannot be used to attach '
+ 'volumes. To prevent the DataCore FibreChannel '
+ 'volume driver from using some front-end targets '
+ 'in volume attachment, specify this option and list '
+ 'the wwpn and target machine for each target as '
+ 'the value, such as '
+ '<wwpns:target name>, <wwpns:target name>, '
+ '<wwpns:target name>.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(datacore_fc_opts, group=configuration.SHARED_CONF_GROUP)
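+
+# Editorial example (not part of the patch): a hypothetical exclusion in
+# cinder.conf, where the value is the port name of a front-end FC target
+# (the WWPN shown is made up):
+#
+#     [datacore-fc]
+#     datacore_fc_unallowed_targets = 21-00-00-24-FF-40-28-81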
+
+
+@interface.volumedriver
+class FibreChannelVolumeDriver(driver.DataCoreVolumeDriver):
+ """DataCore SANsymphony Fibre Channel volume driver.
+
+ Version history:
+
+ .. code-block:: none
+
+ 1.0.0 - Initial driver
+ 2.0.0 - Reintroduce the driver
+
+ """
+
+ VERSION = '2.0.0'
+ STORAGE_PROTOCOL = constants.FC
+ CI_WIKI_NAME = 'DataCore_CI'
+
+ def __init__(self, *args, **kwargs):
+ super(FibreChannelVolumeDriver, self).__init__(*args, **kwargs)
+ self.configuration = kwargs.get('configuration', None)
+ if self.configuration:
+ self.configuration.append_config_values(datacore_fc_opts)
+
+ @classmethod
+ def get_driver_options(cls):
+ additional_opts = cls._get_oslo_driver_opts(
+ 'san_ip', 'san_login', 'san_password')
+ return driver.datacore_opts + datacore_fc_opts + additional_opts
+
+ def validate_connector(self, connector):
+ """Fail if connector doesn't contain all the data needed by the driver.
+
+ :param connector: Connector information
+ """
+
+ required_data = ['host', 'wwpns']
+ for required in required_data:
+ if required not in connector:
+ LOG.error("The volume driver requires %(data)s "
+ "in the connector.", {'data': required})
+ raise cinder_exception.InvalidConnectorException(
+ missing=required)
+
+ def _build_initiator_target_map(self, connector):
+ target_wwns = []
+ init_targ_map = {}
+ initiator_wwns = []
+
+ if connector:
+ initiator_wwns = connector['wwpns']
+ fc_target_ports = self._get_frontend_fc_target_ports(
+ self._api.get_ports())
+ for target_port in fc_target_ports:
+ target_wwns.append(
+ target_port.PortName.replace('-', '').lower())
+ for initiator in initiator_wwns:
+ init_targ_map[initiator] = target_wwns
+
+ return init_targ_map, target_wwns
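+
+ # Editorial note (not part of the patch): the method above returns a pair
+ # like ({'10000090fa0b96a1': ['2100002480a36de5']}, ['2100002480a36de5'])
+ # with hypothetical initiator and target WWNs.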
+
+ def initialize_connection(self, volume, connector):
+ """Allow connection to connector and return connection info.
+
+ :param volume: Volume object
+ :param connector: Connector information
+ :return: Connection information
+ """
+
+ LOG.debug("Initialize connection for volume %(volume)s for "
+ "connector %(connector)s.",
+ {'volume': volume.id, 'connector': connector})
+
+ virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True)
+
+ if virtual_disk.DiskStatus != 'Online':
+ LOG.warning("Attempting to attach virtual disk %(disk)s "
+ "that is in %(state)s state.",
+ {'disk': virtual_disk.Id,
+ 'state': virtual_disk.DiskStatus})
+
+ server_group = self._get_our_server_group()
+
+ @cinder_utils.synchronized(
+ 'datacore-backend-%s' % server_group.Id, external=True)
+ def serve_virtual_disk():
+ available_ports = self._api.get_ports()
+
+ connector_wwpns = list(wwpn.replace('-', '').lower()
+ for wwpn in connector['wwpns'])
+
+ fc_initiator = self._get_initiator(connector['host'],
+ connector_wwpns,
+ available_ports)
+ if not fc_initiator:
+ msg = (_("Suitable initiator not found for "
+ "virtual disk %(disk)s for volume %(volume)s.")
+ % {'disk': virtual_disk.Id, 'volume': volume.id})
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ fc_targets = self._get_targets(virtual_disk, available_ports)
+ if not fc_targets:
+ msg = (_("Suitable targets not found for "
+ "virtual disk %(disk)s for volume %(volume)s.")
+ % {'disk': virtual_disk.Id, 'volume': volume.id})
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ virtual_logical_units = self._map_virtual_disk(
+ virtual_disk, fc_targets, fc_initiator)
+ return fc_targets, virtual_logical_units
+
+ targets, logical_units = serve_virtual_disk()
+
+ init_targ_map, target_wwns = self._build_initiator_target_map(
+ connector)
+ info_backend = {'driver_volume_type': 'fibre_channel',
+ 'data': {
+ 'target_discovered': False,
+ 'target_lun': logical_units[targets[0]].Lun.Quad,
+ 'target_wwn': target_wwns,
+ 'volume_id': volume.id,
+ 'access_mode': 'rw',
+ 'initiator_target_map': init_targ_map}}
+
+ fczm_utils.add_fc_zone(info_backend)
+
+ LOG.debug("Connection data: %s", info_backend)
+
+ return info_backend
+
+ def terminate_connection(self, volume, connector, **kwargs):
+
+ init_targ_map, target_wwns = self._build_initiator_target_map(
+ connector)
+ info = {'driver_volume_type': 'fibre_channel', 'data': {}}
+ info['data'] = {'target_wwn': target_wwns,
+ 'initiator_target_map': init_targ_map}
+
+ # First unserve the virtual disk from the host.
+ super().unserve_virtual_disks_from_host(volume, connector)
+
+ fczm_utils.remove_fc_zone(info)
+
+ return info
+
+ def _get_initiator(self, host, connector_wwpns, available_ports):
+ wwpn_list = []
+ for wwp in connector_wwpns:
+ wwpn_list.append('-'.join(
+ a + b for a, b in zip(*[iter(wwp.upper())] * 2)))
+
+ client = self._get_client(host, create_new=True)
+ valid_initiator = self._valid_fc_initiator(wwpn_list, available_ports)
+ if not valid_initiator:
+ return []
+
+ fc_initiator_ports = self._get_host_fc_initiator_ports(
+ client, available_ports)
+ fc_initiator = datacore_utils.get_first_or_default(
+ lambda port: port.PortName in wwpn_list,
+ fc_initiator_ports,
+ None)
+
+ if not fc_initiator:
+ for wwn in wwpn_list:
+ for port in available_ports:
+ if (port.PortName == wwn and
+ port.PortType == 'FibreChannel' and
+ port.PortMode == 'Initiator' and
+ port.Connected):
+ scsi_port_data = self._api.build_scsi_port_data(
+ client.Id, wwn, 'Initiator', 'FibreChannel')
+ fc_initiator = self._api.register_port(scsi_port_data)
+ return fc_initiator
+ return fc_initiator
+
+ @staticmethod
+ def _get_host_fc_initiator_ports(host, ports):
+ return [port for port in ports if
+ port.PortType == 'FibreChannel' and port.PortMode ==
+ 'Initiator' and port.HostId == host.Id]
+
+ def _get_targets(self, virtual_disk, available_ports):
+ unallowed_targets = self.configuration.datacore_fc_unallowed_targets
+ fc_target_ports = self._get_frontend_fc_target_ports(
+ available_ports)
+ server_port_map = {}
+
+ for target_port in fc_target_ports:
+ if target_port.HostId in server_port_map:
+ server_port_map[target_port.HostId].append(target_port)
+ else:
+ server_port_map[target_port.HostId] = [target_port]
+ fc_targets = []
+ if virtual_disk.FirstHostId in server_port_map:
+ fc_targets += server_port_map[virtual_disk.FirstHostId]
+ if virtual_disk.SecondHostId in server_port_map:
+ fc_targets += server_port_map[virtual_disk.SecondHostId]
+
+ return [target for target in fc_targets
+ if target.PortName not in unallowed_targets]
+
+ @staticmethod
+ def _is_fc_frontend_port(port):
+ if (port.PortType == 'FibreChannel' and
+ port.PortMode == 'Target' and
+ port.HostId):
+ if port.PresenceStatus == 'Present':
+ port_roles = port.ServerPortProperties.Role.split()
+ port_state = port.StateInfo.State
+ if 'Frontend' in port_roles and port_state == 'LoopLinkUp':
+ return True
+ return False
+
+ def _get_frontend_fc_target_ports(self, ports):
+ return [target_port for target_port in ports
+ if self._is_fc_frontend_port(target_port)]
+
+ def _map_virtual_disk(self, virtual_disk, targets, initiator):
+ logical_disks = self._api.get_logical_disks()
+
+ logical_units = {}
+ created_mapping = {}
+ created_devices = []
+ created_domains = []
+ try:
+ for target in targets:
+ target_domain = self._get_target_domain(target, initiator)
+ if not target_domain:
+ target_domain = self._api.create_target_domain(
+ initiator.HostId, target.HostId)
+ created_domains.append(target_domain)
+
+ nexus = self._api.build_scsi_port_nexus_data(
+ initiator.Id, target.Id)
+
+ target_device = self._get_target_device(
+ target_domain, target, initiator)
+ if not target_device:
+ target_device = self._api.create_target_device(
+ target_domain.Id, nexus)
+ created_devices.append(target_device)
+
+ logical_disk = self._get_logical_disk_on_host(
+ virtual_disk.Id, target.HostId, logical_disks)
+ logical_unit = self._get_logical_unit(
+ logical_disk, target_device)
+ if not logical_unit:
+ logical_unit = self._create_logical_unit(
+ logical_disk, nexus, target_device)
+ created_mapping[logical_unit] = target_device
+ logical_units[target] = logical_unit
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception("Mapping operation for virtual disk %(disk)s "
+ "failed with error.",
+ {'disk': virtual_disk.Id})
+ try:
+ for logical_unit in created_mapping:
+ nexus = self._api.build_scsi_port_nexus_data(
+ created_mapping[logical_unit].InitiatorPortId,
+ created_mapping[logical_unit].TargetPortId)
+ self._api.unmap_logical_disk(
+ logical_unit.LogicalDiskId, nexus)
+ for target_device in created_devices:
+ self._api.delete_target_device(target_device.Id)
+ for target_domain in created_domains:
+ self._api.delete_target_domain(target_domain.Id)
+ except datacore_exception.DataCoreException as e:
+ LOG.warning("An error occurred on a cleanup after "
+ "failed mapping operation: %s.", e)
+
+ return logical_units
+
+ def _get_target_domain(self, target, initiator):
+ target_domains = self._api.get_target_domains()
+ target_domain = datacore_utils.get_first_or_default(
+ lambda domain: (domain.InitiatorHostId == initiator.HostId and
+ domain.TargetHostId == target.HostId),
+ target_domains, None)
+ return target_domain
+
+ def _get_target_device(self, target_domain, target, initiator):
+ target_devices = self._api.get_target_devices()
+ target_device = datacore_utils.get_first_or_default(
+ lambda device: (device.TargetDomainId == target_domain.Id and
+ device.InitiatorPortId == initiator.Id and
+ device.TargetPortId == target.Id),
+ target_devices, None)
+ return target_device
+
+ def _get_logical_unit(self, logical_disk, target_device):
+ logical_units = self._api.get_logical_units()
+ logical_unit = datacore_utils.get_first_or_default(
+ lambda unit: (unit.LogicalDiskId == logical_disk.Id and
+ unit.VirtualTargetDeviceId == target_device.Id),
+ logical_units, None)
+ return logical_unit
+
+ def _create_logical_unit(self, logical_disk, nexus, target_device):
+ free_lun = self._api.get_next_free_lun(target_device.Id)
+ logical_unit = self._api.map_logical_disk(logical_disk.Id,
+ nexus,
+ free_lun,
+ logical_disk.ServerHostId,
+ 'Client')
+ return logical_unit
+
+ @staticmethod
+ def _get_logical_disk_on_host(virtual_disk_id,
+ host_id, logical_disks):
+ logical_disk = datacore_utils.get_first(
+ lambda disk: (disk.ServerHostId == host_id and
+ disk.VirtualDiskId == virtual_disk_id),
+ logical_disks)
+ return logical_disk
+
+ @staticmethod
+ def _valid_fc_initiator(wwpn_list, available_ports):
+ for port in available_ports:
+ if (port.PortType == 'FibreChannel' and
+ port.PortMode == 'Initiator' and
+ port.PortName in wwpn_list):
+ return True
+ return False
diff --git a/cinder/volume/drivers/datacore/iscsi.py b/cinder/volume/drivers/datacore/iscsi.py
new file mode 100644
index 00000000000..c95ac8b1d98
--- /dev/null
+++ b/cinder/volume/drivers/datacore/iscsi.py
@@ -0,0 +1,446 @@
+# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""iSCSI Driver for DataCore SANsymphony storage array."""
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from cinder.common import constants
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder import interface
+from cinder import utils as cinder_utils
+from cinder.volume import configuration
+from cinder.volume.drivers.datacore import driver
+from cinder.volume.drivers.datacore import exception as datacore_exception
+from cinder.volume.drivers.datacore import passwd
+from cinder.volume.drivers.datacore import utils as datacore_utils
+from cinder.volume import volume_utils
+
+LOG = logging.getLogger(__name__)
+
+datacore_iscsi_opts = [
+ cfg.ListOpt('datacore_iscsi_unallowed_targets',
+ default=[],
+ help='List of iSCSI targets that cannot be used to attach '
+ 'volumes. To prevent the DataCore iSCSI volume driver '
+ 'from using some front-end targets in volume attachment, '
+ 'specify this option and list the iqn and target machine '
+ 'for each target as the value, such as '
+ '<iqn:target name>, <iqn:target name>, '
+ '<iqn:target name>.'),
+ cfg.StrOpt('datacore_iscsi_chap_storage',
+ default='$state_path/.datacore_chap',
+ help='Fully qualified file name where dynamically generated '
+ 'iSCSI CHAP secrets are stored.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(datacore_iscsi_opts, group=configuration.SHARED_CONF_GROUP)
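+
+# Editorial example (not part of the patch): hypothetical cinder.conf values
+# for the two options above (the IQN and path are made up; the default CHAP
+# store lives under $state_path):
+#
+#     [datacore-iscsi]
+#     datacore_iscsi_unallowed_targets = iqn.2000-08.com.datacore:server1-1
+#     datacore_iscsi_chap_storage = /var/lib/cinder/.datacore_chap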
+
+
+@interface.volumedriver
+class ISCSIVolumeDriver(driver.DataCoreVolumeDriver):
+ """DataCore SANsymphony iSCSI volume driver.
+
+ Version history:
+
+ .. code-block:: none
+
+ 1.0.0 - Initial driver
+ 2.0.0 - Reintroduce the driver
+
+ """
+
+ VERSION = '2.0.0'
+ STORAGE_PROTOCOL = constants.ISCSI
+ CI_WIKI_NAME = 'DataCore_CI'
+
+ def __init__(self, *args, **kwargs):
+ super(ISCSIVolumeDriver, self).__init__(*args, **kwargs)
+ self.configuration = kwargs.get('configuration', None)
+ if self.configuration:
+ self.configuration.append_config_values(datacore_iscsi_opts)
+ self._password_storage = None
+
+ @classmethod
+ def get_driver_options(cls):
+ additional_opts = cls._get_oslo_driver_opts(
+ 'san_ip', 'san_login', 'san_password')
+ return driver.datacore_opts + datacore_iscsi_opts + additional_opts
+
+ def do_setup(self, context):
+ """Perform validations and establish connection to server.
+
+ :param context: Context information
+ """
+
+ super(ISCSIVolumeDriver, self).do_setup(context)
+
+ self._password_storage = passwd.PasswordFileStorage(
+ self.configuration.datacore_iscsi_chap_storage)
+
+ def validate_connector(self, connector):
+ """Fail if connector doesn't contain all the data needed by the driver.
+
+ :param connector: Connector information
+ """
+
+ required_data = ['host', 'initiator']
+ for required in required_data:
+ if required not in connector:
+ LOG.error("The volume driver requires %(data)s "
+ "in the connector.", {'data': required})
+ raise cinder_exception.InvalidConnectorException(
+ missing=required)
+
+ def initialize_connection(self, volume, connector):
+ """Allow connection to connector and return connection info.
+
+ :param volume: Volume object
+ :param connector: Connector information
+ :return: Connection information
+ """
+
+ LOG.debug("Initialize connection for volume %(volume)s for "
+ "connector %(connector)s.",
+ {'volume': volume.id, 'connector': connector})
+
+ virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True)
+
+ if virtual_disk.DiskStatus != 'Online':
+ LOG.warning("Attempting to attach virtual disk %(disk)s "
+ "that is in %(state)s state.",
+ {'disk': virtual_disk.Id,
+ 'state': virtual_disk.DiskStatus})
+
+ server_group = self._get_our_server_group()
+
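+ # Serialize serving operations per server group with an external
+ # (file-based) lock so concurrent attach requests from multiple
+ # cinder-volume processes do not race on target and LUN creation.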
+ @cinder_utils.synchronized(
+ 'datacore-backend-%s' % server_group.Id, external=True)
+ def serve_virtual_disk():
+ available_ports = self._api.get_ports()
+
+ iscsi_initiator = self._get_initiator(connector['host'],
+ connector['initiator'],
+ available_ports)
+ iscsi_targets = self._get_targets(virtual_disk, available_ports)
+
+ if not iscsi_targets:
+ msg = (_("Suitable targets not found for "
+ "virtual disk %(disk)s for volume %(volume)s.")
+ % {'disk': virtual_disk.Id, 'volume': volume.id})
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ auth_params = self._setup_iscsi_chap_authentication(
+ iscsi_targets, iscsi_initiator)
+
+ virtual_logical_units = self._map_virtual_disk(
+ virtual_disk, iscsi_targets, iscsi_initiator)
+
+ return iscsi_targets, virtual_logical_units, auth_params
+
+ targets, logical_units, chap_params = serve_virtual_disk()
+
+ target_portal = datacore_utils.build_network_address(
+ targets[0].PortConfigInfo.PortalsConfig.iScsiPortalConfigInfo[
+ 0].Address.Address,
+ targets[0].PortConfigInfo.PortalsConfig.iScsiPortalConfigInfo[
+ 0].TcpPort)
+
+ connection_data = {}
+
+ if chap_params:
+ connection_data['auth_method'] = 'CHAP'
+ connection_data['auth_username'] = chap_params[0]
+ connection_data['auth_password'] = chap_params[1]
+
+ connection_data['target_discovered'] = False
+ connection_data['target_iqn'] = targets[0].PortName
+ connection_data['target_portal'] = target_portal
+ connection_data['target_lun'] = logical_units[targets[0]].Lun.Quad
+ connection_data['volume_id'] = volume.id
+ connection_data['access_mode'] = 'rw'
+
+ LOG.debug("Connection data: %s", connection_data)
+
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': connection_data,
+ }
+
+ def _map_virtual_disk(self, virtual_disk, targets, initiator):
+ logical_disks = self._api.get_logical_disks()
+
+ logical_units = {}
+ created_mapping = {}
+ created_devices = []
+ created_domains = []
+ try:
+ for target in targets:
+ target_domain = self._get_target_domain(target, initiator)
+ if not target_domain:
+ target_domain = self._api.create_target_domain(
+ initiator.HostId, target.HostId)
+ created_domains.append(target_domain)
+
+ nexus = self._api.build_scsi_port_nexus_data(
+ initiator.Id, target.Id)
+
+ target_device = self._get_target_device(
+ target_domain, target, initiator)
+ if not target_device:
+ target_device = self._api.create_target_device(
+ target_domain.Id, nexus)
+ created_devices.append(target_device)
+
+ logical_disk = self._get_logical_disk_on_host(
+ virtual_disk.Id, target.HostId, logical_disks)
+
+ logical_unit = self._get_logical_unit(
+ logical_disk, target_device)
+ if not logical_unit:
+ logical_unit = self._create_logical_unit(
+ logical_disk, nexus, target_device)
+ created_mapping[logical_unit] = target_device
+ logical_units[target] = logical_unit
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception("Mapping operation for virtual disk %(disk)s "
+ "failed with error.",
+ {'disk': virtual_disk.Id})
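+ # Roll back only the objects created by this call: unmap the
+ # new logical units first, then delete the target devices and
+ # target domains created above.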
+ try:
+ for logical_unit in created_mapping:
+ nexus = self._api.build_scsi_port_nexus_data(
+ created_mapping[logical_unit].InitiatorPortId,
+ created_mapping[logical_unit].TargetPortId)
+ self._api.unmap_logical_disk(
+ logical_unit.LogicalDiskId, nexus)
+ for target_device in created_devices:
+ self._api.delete_target_device(target_device.Id)
+ for target_domain in created_domains:
+ self._api.delete_target_domain(target_domain.Id)
+ except datacore_exception.DataCoreException as e:
+ LOG.warning("An error occurred on a cleanup after "
+ "failed mapping operation: %s.", e)
+
+ return logical_units
+
+ def terminate_connection(self, volume, connector, **kwargs):
+ """Disallow connection to the connector by unserving the volume."""
+ super().unserve_virtual_disks_from_host(volume, connector)
+
+ def _get_target_domain(self, target, initiator):
+ target_domains = self._api.get_target_domains()
+ target_domain = datacore_utils.get_first_or_default(
+ lambda domain: (domain.InitiatorHostId == initiator.HostId and
+ domain.TargetHostId == target.HostId),
+ target_domains, None)
+ return target_domain
+
+ def _get_target_device(self, target_domain, target, initiator):
+ target_devices = self._api.get_target_devices()
+ target_device = datacore_utils.get_first_or_default(
+ lambda device: (device.TargetDomainId == target_domain.Id and
+ device.InitiatorPortId == initiator.Id and
+ device.TargetPortId == target.Id),
+ target_devices, None)
+ return target_device
+
+ def _get_logical_unit(self, logical_disk, target_device):
+ logical_units = self._api.get_logical_units()
+ logical_unit = datacore_utils.get_first_or_default(
+ lambda unit: (unit.LogicalDiskId == logical_disk.Id and
+ unit.VirtualTargetDeviceId == target_device.Id),
+ logical_units, None)
+ return logical_unit
+
+ def _create_logical_unit(self, logical_disk, nexus, target_device):
+ free_lun = self._api.get_next_free_lun(target_device.Id)
+ logical_unit = self._api.map_logical_disk(logical_disk.Id,
+ nexus,
+ free_lun,
+ logical_disk.ServerHostId,
+ 'Client')
+ return logical_unit
+
+ def _check_iscsi_chap_configuration(self, chap, targets):
+ logical_units = self._api.get_logical_units()
+ target_devices = self._api.get_target_devices()
+
+ for logical_unit in logical_units:
+ target_device_id = logical_unit.VirtualTargetDeviceId
+ target_device = datacore_utils.get_first(
+ lambda device, key=target_device_id: device.Id == key,
+ target_devices)
+ target_port_id = target_device.TargetPortId
+ target = datacore_utils.get_first_or_default(
+ lambda target_port, key=target_port_id: target_port.Id == key,
+ targets,
+ None)
+ # The requested CHAP state must match the authentication already
+ # configured on a target that is serving logical units; both 'None'
+ # and 'Default' mean CHAP is currently disabled on the target port.
+ if target and chap == (
+ target.ServerPortProperties.Authentication in
+ ('None', 'Default')):
+ msg = _("iSCSI CHAP authentication can't be configured for "
+ "target %s. A device is already served through "
+ "this target.") % target.PortName
+ LOG.error(msg)
+ raise cinder_exception.VolumeDriverException(message=msg)
+
+ def _setup_iscsi_chap_authentication(self, targets, initiator):
+ iscsi_chap_enabled = self.configuration.use_chap_auth
+
+ self._check_iscsi_chap_configuration(iscsi_chap_enabled, targets)
+
+ server_group = self._get_our_server_group()
+ update_access_token = False
+ access_token = None
+ chap_secret = None
+ chap_username = initiator.PortName
+ if iscsi_chap_enabled:
+ authentication = 'CHAP'
+ chap_username = self.configuration.chap_username
+ if not chap_username:
+ chap_username = initiator.PortName
+ chap_secret = (self.configuration.chap_password or
+ self._password_storage.get_password(
+ server_group.Id, initiator.PortName))
+ if not chap_secret:
+ chap_secret = volume_utils.generate_password(length=15)
+ self._password_storage.set_password(
+ server_group.Id, initiator.PortName, chap_secret)
+ update_access_token = True
+ access_token = self._api.build_access_token(
+ initiator.PortName,
+ None,
+ None,
+ False,
+ chap_username,
+ chap_secret)
+ else:
+ authentication = 'None'
+ if self._password_storage:
+ try:
+ self._password_storage.delete_password(server_group.Id,
+ initiator.PortName)
+ except Exception:
+ pass
+
+ changed_targets = {}
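+ # Remember each target's previous Authentication setting so it can
+ # be restored if configuring a later target fails.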
+ try:
+ for target in targets:
+ if iscsi_chap_enabled:
+ target_iscsi_nodes = getattr(target.iSCSINodes, 'Node', [])
+ iscsi_node = datacore_utils.get_first_or_default(
+ lambda node: node.Name == initiator.PortName,
+ target_iscsi_nodes,
+ None)
+ if (not iscsi_node or
+ not iscsi_node.AccessToken.TargetUsername or
+ update_access_token):
+ self._api.set_access_token(target.Id, access_token)
+ properties = target.ServerPortProperties
+ if properties.Authentication != authentication:
+ changed_targets[target] = properties.Authentication
+ properties.Authentication = authentication
+ self._api.set_server_port_properties(
+ target.Id, properties)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception("Configuring of iSCSI CHAP authentication for "
+ "initiator %(initiator)s failed.",
+ {'initiator': initiator.PortName})
+ try:
+ for target in changed_targets:
+ properties = target.ServerPortProperties
+ properties.Authentication = changed_targets[target]
+ self._api.set_server_port_properties(
+ target.Id, properties)
+ except datacore_exception.DataCoreException as e:
+ LOG.warning("An error occurred on a cleanup after failed "
+ "configuration of iSCSI CHAP authentication "
+ "on initiator %(initiator)s: %(error)s.",
+ {'initiator': initiator.PortName, 'error': e})
+ if iscsi_chap_enabled:
+ return chap_username, chap_secret
+
+ def _get_initiator(self, host, iqn, available_ports):
+ client = self._get_client(host, create_new=True)
+
+ iscsi_initiator_ports = self._get_host_iscsi_initiator_ports(
+ client, available_ports)
+
+ iscsi_initiator = datacore_utils.get_first_or_default(
+ lambda port: port.PortName == iqn,
+ iscsi_initiator_ports,
+ None)
+ if not iscsi_initiator:
+ scsi_port_data = self._api.build_scsi_port_data(
+ client.Id, iqn, 'Initiator', 'iSCSI')
+ iscsi_initiator = self._api.register_port(scsi_port_data)
+ return iscsi_initiator
+
+ def _get_targets(self, virtual_disk, available_ports):
+ unallowed_targets = self.configuration.datacore_iscsi_unallowed_targets
+ iscsi_target_ports = self._get_frontend_iscsi_target_ports(
+ available_ports)
+ server_port_map = {}
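+ # Group candidate target ports by the DataCore server that owns
+ # them, then keep only ports on the servers backing this virtual
+ # disk.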
+ for target_port in iscsi_target_ports:
+ if target_port.HostId in server_port_map:
+ server_port_map[target_port.HostId].append(target_port)
+ else:
+ server_port_map[target_port.HostId] = [target_port]
+ iscsi_targets = []
+ if virtual_disk.FirstHostId in server_port_map:
+ iscsi_targets += server_port_map[virtual_disk.FirstHostId]
+ if virtual_disk.SecondHostId in server_port_map:
+ iscsi_targets += server_port_map[virtual_disk.SecondHostId]
+ iscsi_targets = [target for target in iscsi_targets
+ if target.PortName not in unallowed_targets]
+ return iscsi_targets
+
+ @staticmethod
+ def _get_logical_disk_on_host(virtual_disk_id,
+ host_id, logical_disks):
+ logical_disk = datacore_utils.get_first(
+ lambda disk: (disk.ServerHostId == host_id and
+ disk.VirtualDiskId == virtual_disk_id),
+ logical_disks)
+ return logical_disk
+
+ @staticmethod
+ def _is_iscsi_frontend_port(port):
+ if (port.PortType == 'iSCSI' and port.PortMode == 'Target' and
+ port.HostId and port.PresenceStatus == 'Present' and
+ hasattr(port, 'IScsiPortStateInfo')):
+ port_roles = port.ServerPortProperties.Role.split()
+ port_state = (port.IScsiPortStateInfo.PortalsState
+ .PortalStateInfo[0].State)
+ if 'Frontend' in port_roles and port_state == 'Ready':
+ return True
+ return False
+
+ @staticmethod
+ def _get_frontend_iscsi_target_ports(ports):
+ return [target_port for target_port in ports
+ if ISCSIVolumeDriver._is_iscsi_frontend_port(target_port)]
+
+ @staticmethod
+ def _get_host_iscsi_initiator_ports(host, ports):
+ return [port for port in ports if
+ port.PortType == 'iSCSI' and port.PortMode == 'Initiator' and
+ port.HostId == host.Id]
diff --git a/cinder/volume/drivers/datacore/passwd.py b/cinder/volume/drivers/datacore/passwd.py
new file mode 100644
index 00000000000..15ad7b710dd
--- /dev/null
+++ b/cinder/volume/drivers/datacore/passwd.py
@@ -0,0 +1,165 @@
+# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Password storage."""
+
+import contextlib
+import json
+import os
+import stat
+
+from oslo_log import log as logging
+
+from cinder.i18n import _
+from cinder import utils as cinder_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class FileStorage(object):
+ """Represents a file as a dictionary."""
+
+ def __init__(self, file_path):
+ self._file_path = file_path
+ self._file = None
+ self._is_open = False
+
+ def open(self):
+ """Open a file for simultaneous reading and writing.
+
+ If the specified file does not exist, it will be created
+ with the 0600 access permissions for the current user, if needed
+ the appropriate directories will be created with the 0750 access
+ permissions for the current user.
+ """
+
+ file_dir = os.path.dirname(self._file_path)
+ if file_dir and not os.path.isdir(file_dir):
+ os.makedirs(file_dir)
+ os.chmod(file_dir, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)
+ if not os.path.isfile(self._file_path):
+ open(self._file_path, 'w').close()
+ os.chmod(self._file_path, stat.S_IRUSR | stat.S_IWUSR)
+
+ if self._file:
+ self.close()
+ self._file = open(self._file_path, 'r+')
+ return self
+
+ def load(self):
+ """Reads the file and returns corresponded dictionary object.
+
+ :return: The dictionary that represents the file content.
+ """
+
+ storage = {}
+ if os.stat(self._file_path).st_size != 0:
+ storage = json.load(self._file)
+ if not isinstance(storage, dict):
+ msg = _('File %s is malformed.') % self._file_path
+ raise ValueError(msg)
+ return storage
+
+ def save(self, storage):
+ """Writes the specified dictionary to the file.
+
+ :param storage: Dictionary that should be written to the file.
+ """
+
+ if not isinstance(storage, dict):
+ msg = _('%s is not a dict.') % repr(storage)
+ raise TypeError(msg)
+
+ self._file.seek(0)
+ self._file.truncate()
+ json.dump(storage, self._file)
+
+ def close(self):
+ """Close the file."""
+
+ if self._file:
+ self._file.close()
+ self._file = None
+
+
+class PasswordFileStorage(object):
+ """Password storage implementation.
+
+ It stores passwords in a file in a clear text. The password file must be
+ secured by setting up file permissions.
+ """
+
+ def __init__(self, file_path):
+ self._file_path = file_path
+ self._file_storage = FileStorage(file_path)
+
+ def set_password(self, resource, username, password):
+ """Store the credential for the resource.
+
+ :param resource: Resource name for which credential will be stored
+ :param username: User name
+ :param password: Password
+ """
+
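+ # The inner function is wrapped with an external (file-based) lock
+ # keyed on the storage path, so concurrent cinder-volume processes
+ # cannot corrupt the password file.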
+ @cinder_utils.synchronized(
+ 'datacore-password_storage-' + self._file_path, external=True)
+ def _set_password():
+ with contextlib.closing(self._file_storage.open()) as storage:
+ passwords = storage.load()
+ if resource not in passwords:
+ passwords[resource] = {}
+ passwords[resource][username] = password
+ storage.save(passwords)
+
+ _set_password()
+
+ def get_password(self, resource, username):
+ """Returns the stored password for the resource.
+
+ If the password does not exist, None is returned.
+
+ :param resource: Resource name for which credential was stored
+ :param username: User name
+ :return: Password, or None if not found
+ """
+
+ @cinder_utils.synchronized(
+ 'datacore-password_storage-' + self._file_path, external=True)
+ def _get_password():
+ with contextlib.closing(self._file_storage.open()) as storage:
+ passwords = storage.load()
+ if resource in passwords:
+ return passwords[resource].get(username)
+
+ return _get_password()
+
+ def delete_password(self, resource, username):
+ """Delete the stored credential for the resource.
+
+ :param resource: Resource name for which credential was stored
+ :param username: User name
+ """
+
+ @cinder_utils.synchronized(
+ 'datacore-password_storage-' + self._file_path, external=True)
+ def _delete_password():
+ with contextlib.closing(self._file_storage.open()) as storage:
+ passwords = storage.load()
+ if resource in passwords and username in passwords[resource]:
+ del passwords[resource][username]
+ if not passwords[resource].keys():
+ del passwords[resource]
+ storage.save(passwords)
+
+ _delete_password()
diff --git a/cinder/volume/drivers/datacore/utils.py b/cinder/volume/drivers/datacore/utils.py
new file mode 100644
index 00000000000..d71b19a8ae0
--- /dev/null
+++ b/cinder/volume/drivers/datacore/utils.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utilities and helper functions."""
+
+from oslo_utils import netutils
+
+
+def build_network_address(host, port):
+ """Combines the specified host name or IP address with the specified port.
+
+ :param host: Host name or IP address in presentation (string) format
+ :param port: Port number
+ :return: The host name or IP address and port combination;
+ IPv6 addresses are enclosed in the square brackets
+ """
+ if netutils.is_valid_ipv6(host):
+ return '[%s]:%s' % (host, port)
+ else:
+ return '%s:%s' % (host, port)
+
+
+def get_first(predicate, source):
+ """Searches for an item that matches the conditions.
+
+ :param predicate: Defines the conditions of the item to search for
+ :param source: Iterable collection of items
+ :return: The first item that matches the conditions defined by the
+ specified predicate, if found; otherwise StopIteration is raised
+ """
+
+ return next(item for item in source if predicate(item))
+
+
+def get_first_or_default(predicate, source, default):
+ """Searches for an item that matches the conditions.
+
+ :param predicate: Defines the conditions of the item to search for
+ :param source: Iterable collection of items
+ :param default: Value that is returned if the iterator is exhausted
+ :return: The first item that matches the conditions defined by the
+ specified predicate, if found; otherwise the default value
+ """
+
+ try:
+ return get_first(predicate, source)
+ except StopIteration:
+ return default
+
+
+def get_distinct_by(key, source):
+ """Finds distinct items for the key and returns the result in a list.
+
+ :param key: Function computing a key value for each item
+ :param source: Iterable collection of items
+ :return: The list of distinct by the key value items
+ """
+
+ seen_keys = set()
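+ # set.add() returns None, so "not seen_keys.add(...)" is always True;
+ # short-circuiting means it is evaluated only for unseen keys,
+ # recording each key as a side effect of the filter.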
+ return [item for item in source
+ if key(item) not in seen_keys and not seen_keys.add(key(item))]
diff --git a/doc/source/configuration/block-storage/drivers/datacore-volume-driver.rst b/doc/source/configuration/block-storage/drivers/datacore-volume-driver.rst
new file mode 100644
index 00000000000..28d1fd9add4
--- /dev/null
+++ b/doc/source/configuration/block-storage/drivers/datacore-volume-driver.rst
@@ -0,0 +1,370 @@
+==================================
+DataCore SANsymphony volume driver
+==================================
+
+The DataCore SANsymphony volume driver provides OpenStack Compute instances with
+access to the SANsymphony(TM) Software-defined Storage Platform.
+
+When volumes are created in OpenStack, the driver creates corresponding
+virtual disks in the SANsymphony server group. When a volume is attached
+to an instance in OpenStack, a Linux host is registered and the
+corresponding virtual disk is served to the host in the SANsymphony
+server group.
+
+Requirements
+-------------
+
+* DataCore server group running SANsymphony software version 10 PSP6
+ or later.
+
+* OpenStack integration has been tested with the OpenStack environment
+ installed on Ubuntu 20.04. For the list of qualified Linux host operating
+ system types, refer to the `Linux Host Configuration Guide `_
+ on the `DataCore Technical Support Web page `_.
+
+* If using multipath I/O, ensure that iSCSI ports are logged in on all
+ OpenStack Compute nodes. (All Fibre Channel ports will be logged in
+ automatically.)
+
+Python dependencies
+~~~~~~~~~~~~~~~~~~~
+
+* ``websocket-client>=0.32.0``
+
+ Install this package using pip:
+
+ .. code-block:: console
+
+ $ sudo pip install "websocket-client>=0.32.0"
+
+
+Configuration
+-------------
+
+The volume driver can be configured by editing the ``cinder.conf`` file.
+The options below can be configured either per server group or as extra
+specifications in a volume type configuration.
+
+Configuration options and default values:
+
+* ``datacore_disk_pools = None``
+
+ Sets the pools to use for the DataCore OpenStack Cinder Volume Driver. This
+ option acts as a filter and any number of pools may be specified. The list
+ of specified pools is used to select the storage sources needed for
+ virtual disks: one for a single disk or two for a mirrored disk. The pools
+ with the most free space are selected first (see the sketch after this
+ list).
+
+ This option may also be specified as an extra specification of a volume
+ type.
+
+* ``datacore_disk_type = single``
+
+ Sets the SANsymphony virtual disk type (single or mirrored). **Single**
+ virtual disks are created by default. Specify **mirrored** to override this
+ behavior. Mirrored virtual disks require two DataCore Servers in the server
+ group.
+
+ This option may also be specified as an extra specification of a volume
+ type.
+
+* ``datacore_storage_profile = Normal``
+
+ Sets the storage profile of the virtual disk. The default setting is Normal.
+ Other valid values include the standard storage profiles (Critical, High,
+ Low, and Archive) and the names of custom profiles that have been created.
+
+ This option may also be specified as an extra specification of a volume
+ type.
+
+* ``datacore_api_timeout = 300``
+
+ Sets the number of seconds to wait for a response from a DataCore API call.
+
+ This option is used in the server group back-end configuration only.
+
+* ``datacore_disk_failed_delay = 300``
+
+ Sets the number of seconds to wait for the SANsymphony virtual disk to come
+ out of the "Failed" state.
+
+ This option is used in the server group back-end configuration only.
+
+* ``datacore_iscsi_unallowed_targets = []``
+
+ Sets a list of iSCSI targets that cannot be used to attach volumes.
+ By default, the DataCore iSCSI volume driver attaches a volume through all
+ target ports with the Front-end role enabled, unlike the DataCore Fibre
+ Channel volume driver that attaches a volume only through target ports
+ connected to the initiator.
+
+ To prevent the DataCore iSCSI volume driver from using some front-end
+ targets in volume attachment, specify this option and list the iqn and
+ target machine for each target as the value, such as ``<iqn:target name>,
+ <iqn:target name>, <iqn:target name>``. For example,
+ ``<iqn.2000-08.com.datacore:mns-ssv-10-1,
+ iqn.2000-08.com.datacore:mns-ssvdev-01-1>``.
+
+ This option is used in the server group back-end configuration only.
+
+* ``use_chap_auth = False``
+
+ Sets the CHAP authentication for the iSCSI targets that are used to serve
+ the volume. This option is disabled by default and will allow hosts
+ (OpenStack Compute nodes) to connect to iSCSI storage back-ends without
+ authentication. To enable CHAP authentication, which will prevent hosts
+ (OpenStack Compute nodes) from connecting to back-ends without
+ authentication, set this option to **True**.
+
+ In addition, specify the location where the DataCore volume driver will
+ store dynamically created CHAP secrets by setting the
+ **datacore_iscsi_chap_storage** option.
+
+ This option is used in the server group back-end configuration only.
+ The driver will enable CHAP only for involved target ports, therefore, not
+ all DataCore Servers may have CHAP configured. *Before enabling CHAP, ensure
+ that there are no SANsymphony volumes attached to any instances.*
+
+* ``datacore_iscsi_chap_storage = /var/lib/cinder/.datacore_chap``
+
+ Sets the path to the iSCSI CHAP authentication password storage file.
+ **datacore_iscsi_chap_storage** is only used when **use_chap_auth = True**
+ and **chap_password** is not set. The default
+ **datacore_iscsi_chap_storage** value is ``$state_path/.datacore_chap``.
+
+ *CHAP secrets are passed from OpenStack Block Storage to compute in clear
+ text. This communication should be secured to ensure that CHAP secrets are
+ not compromised. This can be done by setting up file permissions. Before
+ changing the CHAP configuration, ensure that there are no SANsymphony
+ volumes attached to any instances.*
+
+ This option is used in the server group back-end configuration only.
+
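+The pool selection rule described for ``datacore_disk_pools`` can be
+summarized with the following sketch (illustrative only, not the driver's
+actual code; the ``name`` and ``free_space`` attributes are simplified
+stand-ins for the SANsymphony pool properties):
+
+.. code-block:: python
+
+ def select_pools(pools, allowed_names, disk_type):
+ # Keep only the configured pools (an empty filter allows all).
+ candidates = [p for p in pools
+ if not allowed_names or p.name in allowed_names]
+ # Prefer the pools with the most free space.
+ candidates.sort(key=lambda p: p.free_space, reverse=True)
+ # One storage source for a single disk, two for a mirrored disk.
+ count = 2 if disk_type == 'mirrored' else 1
+ return candidates[:count]
+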
+Configuration Examples
+~~~~~~~~~~~~~~~~~~~~~~
+
+Examples of option configuration in the ``cinder.conf`` file.
+
+* An example using **datacore_disk_pools**, **datacore_disk_type**, and
+ **datacore_storage_profile** to create a mirrored virtual disk with a High
+ priority storage profile using specific pools:
+
+ .. code-block:: ini
+
+ volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver
+
+ san_ip = <ip_address>
+
+ san_login = <user_name>
+
+ san_password = <password>
+
+ datacore_disk_type = mirrored
+
+ datacore_disk_pools = Disk pool 1, Disk pool 2
+
+ datacore_storage_profile = High
+
+* An example using **datacore_iscsi_unallowed_targets** to prevent the volume
+ from using the specified targets:
+
+ .. code-block:: ini
+
+ volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver
+
+ san_ip = <ip_address>
+
+ san_login = <user_name>
+
+ san_password = <password>
+
+ datacore_iscsi_unallowed_targets = iqn.2000-08.com.datacore:mns-ssv-10-1,iqn.2000-08.com.datacore:mns-ssvdev-01-1
+
+* An example using **use_chap_auth**, **chap_username**, and
+ **chap_password** to enable CHAP authentication:
+
+ .. code-block:: ini
+
+ volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver
+
+ use_chap_auth = True
+
+ chap_username = user1
+
+ chap_password = user1_password
+
+* An example using **use_chap_auth** and
+ **datacore_iscsi_chap_storage** to enable CHAP authentication and provide
+ the path to the CHAP password storage file:
+
+ .. code-block:: ini
+
+ volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver
+
+ use_chap_auth = True
+
+ datacore_iscsi_chap_storage = /var/lib/cinder/.datacore_chap
+
+ The DataCore volume driver stores CHAP secrets in clear text, so the
+ password file must be secured by setting up file permissions. The following
+ example shows how to create a password file and set up permissions. It
+ assumes that the cinder-volume service is running under the user
+ ``cinder``. Note that the following steps are only required if you want to
+ change the default **datacore_iscsi_chap_storage** location.
+
+
+ .. code-block:: console
+
+ $ sudo mkdir /opt/user_dir/cinder -p
+
+ $ sudo /bin/sh -c "> /opt/user_dir/cinder/.datacore_chap"
+
+ $ sudo chown cinder:cinder /opt/user_dir/cinder
+
+ $ sudo chown cinder:cinder /opt/user_dir/cinder/.datacore_chap
+
+ $ sudo chmod -v 600 /opt/user_dir/cinder/.datacore_chap
+
+
+ CHAP will be enabled in SANsymphony after setting **use_chap_auth = True**.
+ If **chap_username** and **chap_password** are specified, they will be
+ used; otherwise, the iSCSI initiator PortName will be used as the CHAP
+ user name together with a randomly generated password, and the credentials
+ will be stored in the **datacore_iscsi_chap_storage** location.
+
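+The credential resolution order can be summarized with the following sketch
+(illustrative only, not the driver's actual code; ``conf``,
+``initiator_port_name``, and ``stored_secret`` are simplified stand-ins for
+the back-end configuration, the initiator's iSCSI port name, and a secret
+previously saved in the password storage file):
+
+.. code-block:: python
+
+ def resolve_chap_credentials(conf, initiator_port_name, stored_secret):
+ # Explicit settings win; otherwise fall back to the initiator
+ # name and a stored or freshly generated secret.
+ username = conf.chap_username or initiator_port_name
+ secret = conf.chap_password or stored_secret
+ if not secret:
+ # Stand-in for volume_utils.generate_password(); the new
+ # secret is then persisted to datacore_iscsi_chap_storage.
+ secret = generate_password()
+ return username, secret
+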
+Creating Volume Types
+---------------------
+
+Volume types can be created with the DataCore disk type specified in
+the ``datacore:disk_type`` extra specification. In the following example, a
+volume type named ``mirrored_disk`` is created and the disk type is set to
+``mirrored``.
+
+.. code-block:: console
+
+ $ cinder type-create mirrored_disk
+
+ $ cinder type-key mirrored_disk set datacore:disk_type=mirrored
+
+In addition, volume specifications can be declared as extra specifications
+for volume types. The example below sets additional configuration options for
+the volume type ``mirrored_disk``: the storage profile will be set to High and
+virtual disks will be created from Disk pool 1, Disk pool 2, or Disk pool 3.
+
+.. code-block:: console
+
+ $ cinder type-key mirrored_disk set datacore:storage_profile=High
+
+ $ cinder type-key mirrored_disk set "datacore:disk_pools=Disk pool 1, Disk pool 2, Disk pool 3"
+
+Configuring Multiple Storage Back Ends
+--------------------------------------
+
+OpenStack Block Storage can be configured to use several back-end storage
+solutions. Multiple back-end configuration allows you to configure different
+storage configurations for SANsymphony server groups. The configuration options
+for a group must be defined in the group.
+
+To enable multiple back ends:
+
+1. In the ``cinder.conf`` file, set the **enabled_backends** option to identify
+ the groups. One name is associated with each server group back-end
+ configuration. In the example below there are two groups, ``datacore-1``
+ and ``datacore-2``:
+
+ .. code-block:: ini
+
+ [DEFAULT]
+
+ enabled_backends = datacore-1, datacore-2
+
+2. Define the back-end storage used by each server group in a separate section
+ (for example ``[datacore-1]``):
+
+ .. code-block:: ini
+
+ [datacore-1]
+
+ volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver
+
+ volume_backend_name = DataCore_iSCSI
+
+ san_ip = <ip_address>
+
+ san_login = <user_name>
+
+ san_password = <password>
+
+ use_chap_auth = True
+
+ chap_username = <chap_username>
+
+ chap_password = <chap_password>
+
+ datacore_iscsi_chap_storage = /var/lib/cinder/.datacore_chap
+
+ datacore_iscsi_unallowed_targets = iqn.2000-08.com.datacore:mns-ssv-10-1
+
+ datacore_disk_type = mirrored
+
+ [datacore-2]
+
+ volume_driver = cinder.volume.drivers.datacore.fc.FibreChannelVolumeDriver
+
+ volume_backend_name = DataCore_FibreChannel
+
+ san_ip = <ip_address>
+
+ san_login = <user_name>
+
+ san_password = <password>
+
+ datacore_disk_type = mirrored
+
+ datacore_disk_pools = Disk pool 1, Disk pool 2
+
+ datacore_storage_profile = High
+
+3. Create the volume types:
+
+ .. code-block:: console
+
+ $ cinder type-create datacore_iscsi
+
+ $ cinder type-create datacore_fc
+
+4. Add an extra specification to link the volume type to a back-end name:
+
+ .. code-block:: console
+
+ $ cinder type-key datacore_iscsi set volume_backend_name=DataCore_iSCSI
+
+ $ cinder type-key datacore_fc set volume_backend_name=DataCore_FibreChannel
+
+See `Configure multiple-storage back ends
+`__
+for additional information.
+
+Detaching Volumes and Terminating Instances
+-------------------------------------------
+
+Notes about the expected behavior of SANsymphony software when detaching
+volumes and terminating instances in OpenStack:
+
+1. When a volume is detached from a host in OpenStack, the virtual disk will be
+ unserved from the host in SANsymphony, but the virtual disk will not be
+ deleted.
+
+2. If all volumes are detached from a host in OpenStack, the host will remain
+ registered and all virtual disks will be unserved from that host in
+ SANsymphony. The virtual disks will not be deleted.
+
+3. If an instance is terminated in OpenStack, the virtual disk for the instance
+ will be unserved from the host and will either be deleted or remain as an
+ unserved virtual disk, depending on the option selected when terminating.
+
+Support
+-------
+
+In the event that a support bundle is needed, the administrator should save
+the files from the ``/var/log`` folder on the Linux host and attach them to
+the DataCore Technical Support incident manually.
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index a379139b964..904b55a85ec 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -15,6 +15,9 @@
#####################################################################
# Drivers:
+[driver.datacore]
+title=DataCore Storage Driver (FC, iSCSI)
+
[driver.datera]
title=Datera Storage Driver (iSCSI)
@@ -223,6 +226,7 @@ notes=A vendor driver is considered supported if the vendor is
accurate results. If a vendor doesn't meet this requirement
the driver is marked unsupported and is removed if the problem
isn't resolved before the end of the subsequent release.
+driver.datacore=complete
driver.datera=complete
driver.dell_emc_powermax=complete
driver.dell_emc_powerstore=complete
@@ -295,6 +299,7 @@ title=Extend an Attached Volume
status=optional
notes=Cinder supports the ability to extend a volume that is attached to
an instance, but not all drivers are able to do this.
+driver.datacore=missing
driver.datera=complete
driver.dell_emc_powermax=complete
driver.dell_emc_powerstore=complete
@@ -370,6 +375,7 @@ notes=Vendor drivers that support Quality of Service (QoS) at the
with volume extra specs to control QoS settings at the storage
device on a per volume basis. Drivers that don't support this can
utilize frontend QoS via libvirt.
+driver.datacore=missing
driver.datera=complete
driver.dell_emc_powermax=complete
driver.dell_emc_powerstore=missing
@@ -444,6 +450,7 @@ notes=Vendor drivers that support volume replication can report this
capability to be utilized by the scheduler allowing users to request
replicated volumes via extra specs. Such drivers are also then able
to take advantage of Cinder's failover and failback commands.
+driver.datacore=missing
driver.datera=missing
driver.dell_emc_powermax=complete
driver.dell_emc_powerstore=complete
@@ -519,6 +526,7 @@ notes=Vendor drivers that support consistency groups are able to
deletion. Grouping the volumes ensures that operations are only
completed on the group of volumes, not individually, enabling the
creation of consistent snapshots across a group.
+driver.datacore=missing
driver.datera=missing
driver.dell_emc_powermax=complete
driver.dell_emc_powerstore=complete
@@ -593,6 +601,7 @@ notes=If a volume driver supports thin provisioning it means that it
will allow the scheduler to provision more storage space
than physically exists on the backend. This may also be called
'oversubscription'.
+driver.datacore=missing
driver.datera=missing
driver.dell_emc_powermax=complete
driver.dell_emc_powerstore=complete
@@ -668,6 +677,7 @@ notes=Storage assisted volume migration is like host assisted volume
assistance of the Cinder host. Vendor drivers that implement this
can migrate volumes completely through the storage backend's
functionality.
+driver.datacore=missing
driver.datera=missing
driver.dell_emc_powermax=complete
driver.dell_emc_powerstore=missing
@@ -743,6 +753,7 @@ notes=Vendor drivers that report multi-attach support are able
It is important to note that a clustered file system that
supports multi-attach functionality is required to use multi-
attach functionality otherwise data corruption may occur.
+driver.datacore=missing
driver.datera=missing
driver.dell_emc_powermax=complete
driver.dell_emc_powerstore=complete
@@ -815,6 +826,7 @@ title=Revert to Snapshot
status=optional
notes=Vendor drivers that implement the driver assisted function to revert a
volume to the last snapshot taken.
+driver.datacore=complete
driver.datera=missing
driver.dell_emc_powermax=complete
driver.dell_emc_powerstore=complete
@@ -891,6 +903,7 @@ notes=Vendor drivers that support running in an active/active
that may impact an active/active configuration and that
the driver has been tested to function properly in such
a configuration.
+driver.datacore=missing
driver.datera=missing
driver.dell_emc_powermax=missing
driver.dell_emc_powerstore=missing
diff --git a/driver-requirements.txt b/driver-requirements.txt
index cc8da8eb53a..0240e7e7845 100644
--- a/driver-requirements.txt
+++ b/driver-requirements.txt
@@ -45,3 +45,6 @@ storpool.spopenstack>=2.2.1 # Apache-2.0
# Datera
dfs_sdk>=1.2.25 # Apache-2.0
+
+# DataCore SANsymphony
+websocket-client>=1.3.2 # LGPLv2+
diff --git a/releasenotes/notes/add-datacore-volume-driver-5c1802798425acc1.yaml b/releasenotes/notes/add-datacore-volume-driver-5c1802798425acc1.yaml
new file mode 100644
index 00000000000..c7213ddb32a
--- /dev/null
+++ b/releasenotes/notes/add-datacore-volume-driver-5c1802798425acc1.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Added iSCSI and Fibre Channel volume drivers for DataCore's
+ SANsymphony and Hyper-converged Virtual SAN storage.
diff --git a/setup.cfg b/setup.cfg
index e943862c3c5..6aed385bb82 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -77,7 +77,7 @@ wsgi_scripts =
[extras]
all =
- websocket-client>=0.32.0 # LGPLv2+
+ websocket-client>=1.3.2 # LGPLv2+
pyOpenSSL>=17.5.0 # Apache-2.0
storops>=0.5.10 # Apache-2.0
pywbem>=0.7.0 #LGPLv2.1+
@@ -91,7 +91,7 @@ all =
dfs-sdk>=1.2.25 # Apache-2.0
rbd-iscsi-client>=0.1.8 # Apache-2.0
datacore =
- websocket-client>=0.32.0 # LGPLv2+
+ websocket-client>=1.3.2 # LGPLv2+
powermax =
pyOpenSSL>=17.5.0 # Apache-2.0
vnx =