diff --git a/cinder/opts.py b/cinder/opts.py index 3e3228c45cc..6e25fc590f3 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -244,6 +244,7 @@ def list_opts(): cinder_volume_driver.scst_opts, cinder_volume_driver.backup_opts, cinder_volume_driver.image_opts, + cinder_volume_drivers_datera_dateraiscsi.d_opts, cinder_volume_drivers_fusionstorage_dsware.volume_opts, cinder_volume_drivers_infortrend_raidcmd_cli_commoncli. infortrend_opts, @@ -279,7 +280,6 @@ def list_opts(): cinder_volume_driver.nvmet_opts, cinder_volume_driver.scst_opts, cinder_volume_driver.image_opts, - cinder_volume_drivers_datera_dateraiscsi.d_opts, cinder_volume_drivers_dell_emc_powermax_common.powermax_opts, cinder_volume_drivers_dell_emc_sc_storagecentercommon. common_opts, diff --git a/cinder/tests/unit/volume/drivers/test_datera.py b/cinder/tests/unit/volume/drivers/test_datera.py index 1260c32eafe..00fd2b67851 100644 --- a/cinder/tests/unit/volume/drivers/test_datera.py +++ b/cinder/tests/unit/volume/drivers/test_datera.py @@ -1,4 +1,4 @@ -# Copyright 2017 Datera +# Copyright 2020 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,98 +13,85 @@ # License for the specific language governing permissions and limitations # under the License. +import sys from unittest import mock - -import six +import uuid from cinder import context from cinder import exception from cinder import test +from cinder import version from cinder.volume import configuration as conf -from cinder.volume.drivers.datera import datera_common as datc -from cinder.volume.drivers.datera import datera_iscsi as datera from cinder.volume import volume_types +sys.modules['dfs_sdk'] = mock.MagicMock() -datc.DEFAULT_SI_SLEEP = 0 -datc.DEFAULT_SI_SLEEP_API_2 = 0 -datc.DEFAULT_SNAP_SLEEP = 0 -URL_TEMPLATES = datera.datc.URL_TEMPLATES +from cinder.volume.drivers.datera import datera_iscsi as datera # noqa + +datera.datc.DEFAULT_SI_SLEEP = 0 +datera.datc.DEFAULT_SNAP_SLEEP = 0 OS_PREFIX = datera.datc.OS_PREFIX UNMANAGE_PREFIX = datera.datc.UNMANAGE_PREFIX +DateraAPIException = datera.datc.DateraAPIException -class DateraVolumeTestCasev2(test.TestCase): +class DateraVolumeTestCasev22(test.TestCase): def setUp(self): - super(DateraVolumeTestCasev2, self).setUp() - self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.san_ip = '127.0.0.1' - self.cfg.san_is_local = True - self.cfg.datera_api_token = 'secret' self.cfg.datera_api_port = '7717' - self.cfg.datera_num_replicas = '2' + self.cfg.san_is_local = True + self.cfg.datera_num_replicas = 1 self.cfg.datera_503_timeout = 0.01 self.cfg.datera_503_interval = 0.001 - self.cfg.datera_acl_allow_all = False self.cfg.datera_debug = False self.cfg.san_login = 'user' self.cfg.san_password = 'pass' - self.cfg.datera_tenant_id = 'test-tenant' + self.cfg.datera_tenant_id = '/root/test-tenant' self.cfg.driver_client_cert = None self.cfg.driver_client_cert_key = None self.cfg.datera_disable_profiler = False - self.cfg.driver_use_ssl = False + self.cfg.datera_ldap_server = "" + self.cfg.datera_volume_type_defaults = {} + self.cfg.datera_disable_template_override = False + self.cfg.datera_disable_extended_metadata = False + self.cfg.datera_enable_image_cache = False + self.cfg.datera_image_cache_volume_type_id = "" + self.cfg.filter_function = lambda: None + self.cfg.goodness_function = lambda: None + self.cfg.use_chap_auth = False + self.cfg.chap_username = "" + self.cfg.chap_password = "" + super(DateraVolumeTestCasev22, self).setUp() mock_exec = mock.Mock() 
mock_exec.return_value = ('', '') self.driver = datera.DateraDriver(execute=mock_exec, configuration=self.cfg) - self.driver.set_initialized() - self.driver.configuration.get = _config_getter - self.volume = _stub_volume() - self.driver._request = mock.Mock() - m = mock.Mock() - m.json.return_value = {'api_versions': ['v2']} - self.driver._request.return_value = m - self.mock_api = mock.Mock() - self.driver._issue_api_request = self.mock_api - self._apiv = "2" - self._tenant = None + self.driver.api = mock.MagicMock() + self.driver.apiv = "2.2" + self.driver.set_initialized() + # No-op config getter + self.driver.configuration.get = lambda *args, **kwargs: {} # self.addCleanup(self.api_patcher.stop) + self.driver.datera_version = "3.3.3" def test_volume_create_success(self): - self.mock_api.return_value = stub_single_ai - self.assertIsNone(self.driver.create_volume(self.volume)) + testvol = _stub_volume() + self.assertIsNone(self.driver.create_volume(testvol)) def test_volume_create_fails(self): - self.mock_api.side_effect = datc.DateraAPIException - self.assertRaises(datc.DateraAPIException, - self.driver.create_volume, self.volume) - - def test_volume_create_delay(self): - """Verify after 1st retry volume becoming available is a success.""" - - def _progress_api_return(mock_api): - if mock_api.retry_count == 1: - _bad_vol_ai = stub_single_ai.copy() - _bad_vol_ai['storage_instances']['storage-1'][ - 'volumes']['volume-1']['op_status'] = 'unavailable' - return _bad_vol_ai - else: - self.mock_api.retry_count += 1 - return stub_single_ai - self.mock_api.retry_count = 0 - self.mock_api.return_value = _progress_api_return(self.mock_api) - self.assertEqual(1, self.mock_api.retry_count) - self.assertIsNone(self.driver.create_volume(self.volume)) + testvol = _stub_volume() + self.driver.api.app_instances.create.side_effect = DateraAPIException + self.assertRaises(DateraAPIException, + self.driver.create_volume, + testvol) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_with_extra_specs(self, mock_get_type): - self.mock_api.return_value = stub_single_ai mock_get_type.return_value = { 'name': u'The Best', 'qos_specs_id': None, @@ -131,1089 +118,354 @@ class DateraVolumeTestCasev2(test.TestCase): self.assertTrue(mock_get_type.called) def test_create_cloned_volume_success(self): - source_volume = _stub_volume( - id='7f91abfa-7964-41ed-88fc-207c3a290b4f', - display_name='foo' - ) - self.assertIsNone(self.driver.create_cloned_volume(self.volume, - source_volume)) + testvol = _stub_volume() + ref = _stub_volume(id=str(uuid.uuid4())) + self.assertIsNone(self.driver.create_cloned_volume(testvol, ref)) def test_create_cloned_volume_success_larger(self): - cloned_volume = _stub_volume( - id='7f91abfa-7964-41ed-88fc-207c3a290b4f', - display_name='foo', - size=2 - ) - - mock_extend = mock.Mock() - if self._apiv == '2': - self.driver._extend_volume_2 = mock_extend - self.driver.create_cloned_volume(cloned_volume, self.volume) - mock_extend.assert_called_with( - cloned_volume, cloned_volume['size']) - else: - self.driver._extend_volume_2_1 = mock_extend - self.driver.create_cloned_volume(cloned_volume, self.volume) - mock_extend.assert_called_with( - cloned_volume, cloned_volume['size']) + newsize = 2 + testvol = _stub_volume(size=newsize) + ref = _stub_volume(id=str(uuid.uuid4())) + mock_extend = mock.MagicMock() + self.driver._extend_volume_2_2 = mock_extend + self.driver._extend_volume_2_1 = mock_extend + self.driver.create_cloned_volume(testvol, ref) + 
mock_extend.assert_called_once_with(testvol, newsize) def test_create_cloned_volume_fails(self): - self.mock_api.side_effect = datc.DateraAPIException - source_volume = _stub_volume( - id='7f91abfa-7964-41ed-88fc-207c3a290b4f', - display_name='foo' - ) - self.assertRaises(datc.DateraAPIException, - self.driver.create_cloned_volume, self.volume, - source_volume) + testvol = _stub_volume() + ref = _stub_volume(id=str(uuid.uuid4())) + self.driver.api.app_instances.create.side_effect = DateraAPIException + self.assertRaises(DateraAPIException, + self.driver.create_cloned_volume, + testvol, + ref) def test_delete_volume_success(self): - if self._apiv == '2': - self.mock_api.side_effect = [ - {}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}, - {}] - else: - self.mock_api.side_effect = [ - {}, - {}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}, - {}] - self.assertIsNone(self.driver.delete_volume(self.volume)) + testvol = _stub_volume() + self.driver.api.app_instances.delete.return_value = {} + self.assertIsNone(self.driver.delete_volume(testvol)) def test_delete_volume_not_found(self): - if self._apiv == '2': - self.mock_api.side_effect = exception.NotFound - else: - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - {}, - exception.NotFound, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}, - ] - self.assertIsNone(self.driver.delete_volume(self.volume)) + testvol = _stub_volume() + self.driver.api.app_instances.list.side_effect = exception.NotFound + self.assertIsNone(self.driver.delete_volume(testvol)) def test_delete_volume_fails(self): - self.mock_api.side_effect = datc.DateraAPIException - self.assertRaises(datc.DateraAPIException, - self.driver.delete_volume, self.volume) + testvol = _stub_volume() + self.driver.api.app_instances.list.side_effect = DateraAPIException + self.assertRaises(DateraAPIException, + self.driver.delete_volume, testvol) def test_ensure_export_success(self): - self.mock_api.side_effect = self._generate_fake_api_request() + testvol = _stub_volume() ctxt = context.get_admin_context() - self.assertIsNone(self.driver.ensure_export(ctxt, - self.volume, - None)) + self.assertIsNone(self.driver.ensure_export(ctxt, testvol, None)) def test_ensure_export_fails(self): - self.mock_api.side_effect = datc.DateraAPIException + # This can't fail because it's a no-op + testvol = _stub_volume() ctxt = context.get_admin_context() - self.assertRaises(datc.DateraAPIException, - self.driver.ensure_export, ctxt, self.volume, None) + self.assertIsNone(self.driver.ensure_export(ctxt, testvol, None)) def test_create_export_target_does_not_exist_success(self): - self.mock_api.side_effect = self._generate_fake_api_request( - targets_exist=False) - ctxt = context.get_admin_context() - self.assertIsNone(self.driver.create_export(ctxt, - self.volume, - None)) + testvol = _stub_volume() + aimock = mock.MagicMock() + simock = mock.MagicMock() + simock.reload.return_value = simock + aimock.storage_instances.list.return_value = [simock] + simock.op_state = "available" 
+ self.driver.cvol_to_ai = mock.Mock() + self.driver.cvol_to_ai.return_value = aimock + self.assertIsNone(self.driver.create_export(None, testvol, None)) def test_create_export_fails(self): - self.mock_api.side_effect = datc.DateraAPIException - ctxt = context.get_admin_context() - self.assertRaises(datc.DateraAPIException, + testvol = _stub_volume() + aimock = mock.MagicMock() + simock = mock.MagicMock() + simock.reload.return_value = simock + aimock.storage_instances.list.side_effect = DateraAPIException + simock.op_state = "available" + self.driver.cvol_to_ai = mock.Mock() + self.driver.cvol_to_ai.return_value = aimock + self.assertRaises(DateraAPIException, self.driver.create_export, - ctxt, - self.volume, + None, + testvol, None) def test_initialize_connection_success(self): - self.mock_api.side_effect = self._generate_fake_api_request() - connector = {} - - expected = { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': False, - 'volume_id': self.volume['id'], - 'target_iqn': ('iqn.2013-05.com.daterainc:tc:01:sn:' - '3bbb080aab7d9abc'), - 'target_portal': '172.28.41.63:3260', - 'target_lun': 0, - 'discard': False}} - self.assertEqual(expected, - self.driver.initialize_connection(self.volume, - connector)) + testvol = _stub_volume() + aimock = mock.MagicMock() + simock = mock.MagicMock() + simock.access = {"ips": ["test-ip"], "iqn": "test-iqn"} + simock.reload.return_value = simock + aimock.storage_instances.list.return_value = [simock] + self.driver.cvol_to_ai = mock.Mock() + self.driver.cvol_to_ai.return_value = aimock + self.assertEqual(self.driver.initialize_connection(testvol, {}), + {'data': {'discard': False, + 'target_discovered': False, + 'target_iqn': 'test-iqn', + 'target_lun': 0, + 'target_portal': 'test-ip:3260', + 'volume_id': testvol['id']}, + 'driver_volume_type': 'iscsi'}) def test_initialize_connection_fails(self): - self.mock_api.side_effect = datc.DateraAPIException - connector = {} - self.assertRaises(datc.DateraAPIException, + testvol = _stub_volume() + aimock = mock.MagicMock() + simock = mock.MagicMock() + simock.access = {"ips": ["test-ip"], "iqn": "test-iqn"} + simock.reload.return_value = simock + aimock.storage_instances.list.side_effect = DateraAPIException + self.driver.cvol_to_ai = mock.Mock() + self.driver.cvol_to_ai.return_value = aimock + self.assertRaises(DateraAPIException, self.driver.initialize_connection, - self.volume, - connector) + testvol, + {}) def test_detach_volume_success(self): - if self._apiv == '2': - self.mock_api.side_effect = [ - {}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}] - else: - self.mock_api.side_effect = [ - {}, - {}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}] + testvol = _stub_volume() + self.driver.cvol_to_ai = mock.MagicMock() + aimock = mock.MagicMock() + aimock.set.return_value = {} + self.driver.cvol_to_ai.return_value = aimock ctxt = context.get_admin_context() - volume = _stub_volume(status='in-use') - self.assertIsNone(self.driver.detach_volume(ctxt, volume)) + self.assertIsNone(self.driver.detach_volume(ctxt, testvol)) def test_detach_volume_fails(self): - self.mock_api.side_effect = datc.DateraAPIException + testvol = _stub_volume() + 
self.driver.cvol_to_ai = mock.MagicMock() + aimock = mock.MagicMock() + aimock.set.side_effect = DateraAPIException + self.driver.cvol_to_ai.return_value = aimock ctxt = context.get_admin_context() - volume = _stub_volume(status='in-use') - self.assertRaises(datc.DateraAPIException, - self.driver.detach_volume, ctxt, volume) + self.assertRaises(DateraAPIException, + self.driver.detach_volume, + ctxt, testvol) def test_detach_volume_not_found(self): - if self._apiv == '2': - self.mock_api.side_effect = exception.NotFound - else: - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - exception.NotFound, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}] + testvol = _stub_volume() + self.driver.cvol_to_ai = mock.MagicMock() + aimock = mock.MagicMock() + aimock.set.side_effect = exception.NotFound + self.driver.cvol_to_ai.return_value = aimock ctxt = context.get_admin_context() - volume = _stub_volume(status='in-use') - self.assertIsNone(self.driver.detach_volume(ctxt, volume)) + self.assertIsNone(self.driver.detach_volume(ctxt, testvol)) def test_create_snapshot_success(self): - snapshot = _stub_snapshot(volume_id=self.volume['id']) - self.mock_api.side_effect = self._generate_fake_api_request() - self.assertIsNone(self.driver.create_snapshot(snapshot)) + testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) + volmock = mock.MagicMock() + snapmock = mock.MagicMock() + snapmock.reload.return_value = snapmock + snapmock.uuid = testsnap['id'] + snapmock.op_state = "available" + volmock.snapshots.create.return_value = snapmock + self.driver.cvol_to_dvol = mock.MagicMock() + self.driver.cvol_to_dvol.return_value = volmock + self.assertIsNone(self.driver.create_snapshot(testsnap)) def test_create_snapshot_fails(self): - self.mock_api.side_effect = datc.DateraAPIException - snapshot = _stub_snapshot(volume_id=self.volume['id']) - self.assertRaises(datc.DateraAPIException, - self.driver.create_snapshot, snapshot) + testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) + self.driver.api.app_instances.list.side_effect = DateraAPIException + self.assertRaises(DateraAPIException, + self.driver.create_snapshot, + testsnap) def test_delete_snapshot_success(self): - if self._apiv == '2': - self.mock_api.return_value = stub_return_snapshots - else: - self.mock_api.return_value = stub_return_snapshots_21 - snapshot = _stub_snapshot(volume_id=self.volume['id']) - self.assertIsNone(self.driver.delete_snapshot(snapshot)) + testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) + self.assertIsNone(self.driver.delete_snapshot(testsnap)) def test_delete_snapshot_not_found(self): - if self._apiv == '2': - self.mock_api.side_effect = [ - stub_return_snapshots, - exception.NotFound] - else: - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - stub_return_snapshots_21, - exception.NotFound] - snapshot = _stub_snapshot(self.volume['id'], volume_id="test") - self.assertIsNone(self.driver.delete_snapshot(snapshot)) + testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) + self.driver.cvol_to_dvol = mock.MagicMock() + aimock = mock.MagicMock() + aimock.snapshots.list.side_effect = exception.NotFound + self.driver.cvol_to_dvol.return_value = aimock + self.assertIsNone(self.driver.delete_snapshot(testsnap)) def test_delete_snapshot_fails(self): - self.mock_api.side_effect = datc.DateraAPIException - 
snapshot = _stub_snapshot(volume_id=self.volume['id']) - self.assertRaises(datc.DateraAPIException, - self.driver.delete_snapshot, snapshot) + testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) + self.driver.cvol_to_dvol = mock.MagicMock() + aimock = mock.MagicMock() + aimock.snapshots.list.side_effect = DateraAPIException + self.driver.cvol_to_dvol.return_value = aimock + self.assertRaises(DateraAPIException, + self.driver.delete_snapshot, + testsnap) def test_create_volume_from_snapshot_success(self): - snapshot = _stub_snapshot(volume_id=self.volume['id']) - if self._apiv == '2': - self.mock_api.side_effect = [ - stub_return_snapshots, - list(stub_return_snapshots.values())[0], - None] - else: - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - stub_return_snapshots_21, - {'data': stub_return_snapshots_21['data'][0]}, - None] - self.assertIsNone( - self.driver.create_volume_from_snapshot(self.volume, snapshot)) - - @mock.patch.object(datera.DateraDriver, 'extend_volume') - def test_create_volume_from_snapshot_success_larger(self, mock_extend): - snapshot = _stub_snapshot(volume_id=self.volume['id']) - extend_volume = _stub_volume(size=2) - - mock_extend = mock.Mock() - if self._apiv == '2': - self.driver._extend_volume_2 = mock_extend - self.mock_api.side_effect = [ - stub_return_snapshots, - list(stub_return_snapshots.values())[0], - None] - self.driver.create_volume_from_snapshot(extend_volume, snapshot) - mock_extend.assert_called_once_with(extend_volume, - extend_volume['size']) - else: - self.driver._extend_volume_2_1 = mock_extend - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - stub_return_snapshots_21, - {'data': stub_return_snapshots_21['data'][0]}, - None] - self.driver.create_volume_from_snapshot(extend_volume, snapshot) - mock_extend.assert_called_once_with(extend_volume, - extend_volume['size']) + testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) + testvol = _stub_volume() + volmock = mock.MagicMock() + snapmock = mock.MagicMock() + snapmock.reload.return_value = snapmock + snapmock.uuid = testsnap['id'] + snapmock.op_state = "available" + self.driver.cvol_to_dvol = mock.MagicMock() + self.driver.cvol_to_dvol.return_value = volmock + volmock.snapshots.list.return_value = [snapmock] + self.assertIsNone(self.driver.create_volume_from_snapshot( + testvol, testsnap)) def test_create_volume_from_snapshot_fails(self): - self.mock_api.side_effect = datc.DateraAPIException - snapshot = _stub_snapshot(volume_id=self.volume['id']) - self.assertRaises(datc.DateraAPIException, - self.driver.create_volume_from_snapshot, self.volume, - snapshot) + testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) + testvol = _stub_volume() + self.driver.cvol_to_dvol = mock.MagicMock() + aimock = mock.MagicMock() + aimock.snapshots.list.side_effect = DateraAPIException + self.driver.cvol_to_dvol.return_value = aimock + self.assertRaises(DateraAPIException, + self.driver.create_volume_from_snapshot, + testvol, + testsnap) def test_extend_volume_success(self): - volume = _stub_volume(size=1) - self.mock_api.side_effect = [ - stub_get_export, - {'data': stub_get_export}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - {}, {}, {}, {}, {}, {}, stub_get_export, - {'data': stub_get_export}] - 
self.assertIsNone(self.driver.extend_volume(volume, 2)) + newsize = 2 + testvol = _stub_volume() + mockvol = mock.MagicMock() + mockvol.size = newsize + self.driver.cvol_to_dvol = mock.MagicMock() + self.driver.cvol_to_dvol.return_value = mockvol + self.driver._offline_flip_2_2 = mock.MagicMock() + self.driver._offline_flip_2_1 = mock.MagicMock() + self.assertIsNone(self.driver.extend_volume(testvol, newsize)) def test_extend_volume_fails(self): - self.mock_api.side_effect = datc.DateraAPIException - volume = _stub_volume(size=1) - self.assertRaises(datc.DateraAPIException, - self.driver.extend_volume, volume, 2) - - def test_login_successful(self): - self.mock_api.return_value = { - 'key': 'dd2469de081346c28ac100e071709403' - } - self.assertIsNone(self.driver.login()) - self.assertEqual(1, self.mock_api.call_count) - - def test_login_unsuccessful(self): - self.mock_api.side_effect = exception.NotAuthorized - self.assertRaises(exception.NotAuthorized, self.driver.login) - self.assertEqual(1, self.mock_api.call_count) + newsize = 2 + testvol = _stub_volume() + mockvol = mock.MagicMock() + mockvol.size = newsize + mockvol.set.side_effect = DateraAPIException + self.driver.cvol_to_dvol = mock.MagicMock() + self.driver.cvol_to_dvol.return_value = mockvol + self.driver._offline_flip_2_2 = mock.MagicMock() + self.driver._offline_flip_2_1 = mock.MagicMock() + self.assertRaises(DateraAPIException, + self.driver.extend_volume, + testvol, + newsize) def test_manage_existing(self): - self.mock_api.return_value = {} - if self._apiv == '2': - test_name = {"source-name": "test-app:test-si:test-vol"} - self.assertIsNone( - self.driver.manage_existing( - _stub_volume(), - test_name)) - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - test_name["source-name"].split(":")[0]), - method='put', - body={'name': OS_PREFIX + _stub_volume()['id']}, - api_version=self._apiv) - else: - tenant = 'tenant' - test_name = {"source-name": "{}:test-app:test-si:test-vol".format( - tenant)} - self.assertIsNone( - self.driver.manage_existing( - _stub_volume(), - test_name)) - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - test_name["source-name"].split(":")[1]), - method='put', - body={'name': OS_PREFIX + _stub_volume()['id']}, - api_version=self._apiv, - tenant='tenant') + existing_ref = {'source-name': "A:B:C:D"} + testvol = _stub_volume() + self.driver.cvol_to_ai = mock.MagicMock() + self.assertIsNone(self.driver.manage_existing(testvol, existing_ref)) def test_manage_existing_wrong_ref(self): - TEST_NAME = {"source-name": "incorrect-reference"} - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing, - _stub_volume(), - TEST_NAME) + existing_ref = {'source-name': "ABCD"} + testvol = _stub_volume() + self.driver.cvol_to_ai = mock.MagicMock() + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing, + testvol, + existing_ref) def test_manage_existing_get_size(self): - TEST_NAME = {"source-name": "test-app:storage-1:volume-1"} - self.mock_api.side_effect = self._generate_fake_api_request() - self.assertEqual( - self.driver.manage_existing_get_size( - _stub_volume(), - TEST_NAME), 500) - if self._apiv == '2': - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - TEST_NAME["source-name"].split(":")[0]), - api_version=self._apiv) - else: - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - TEST_NAME["source-name"].split(":")[0]), - api_version=self._apiv, - 
tenant=self._tenant) + existing_ref = {'source-name': "A:B:C:D"} + testvol = _stub_volume() + volmock = mock.MagicMock() + volmock.size = testvol['size'] + self.driver.cvol_to_dvol = mock.MagicMock() + self.driver.cvol_to_dvol.return_value = volmock + self.assertEqual(self.driver.manage_existing_get_size( + testvol, existing_ref), testvol['size']) def test_manage_existing_get_size_wrong_ref(self): - TEST_NAME = {"source-name": "incorrect-reference"} - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - _stub_volume(), - TEST_NAME) + existing_ref = {'source-name': "ABCD"} + testvol = _stub_volume() + self.driver.cvol_to_ai = mock.MagicMock() + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing_get_size, + testvol, + existing_ref) def test_get_manageable_volumes(self): - if self._apiv == '2': - self.mock_api.return_value = non_cinder_ais - six.assertCountEqual( - self, + testvol = _stub_volume() + v1 = {'reference': {'source-name': 'some-ai:storage-1:volume-1'}, + 'size': 1, + 'safe_to_manage': True, + 'reason_not_safe': '', + 'cinder_id': None, + 'extra_info': {'snapshots': '[]'}} + v2 = {'reference': {'source-name': 'some-other-ai:storage-1:volume-1'}, + 'size': 2, + 'safe_to_manage': True, + 'reason_not_safe': '', + 'cinder_id': None, + 'extra_info': {'snapshots': '[]'}} + + mock1 = mock.MagicMock() + mock1.__getitem__.side_effect = ['some-ai'] + mock1.name = 'some-ai' + mocksi1 = mock.MagicMock() + mocksi1.name = "storage-1" + mocksi1.__getitem__.side_effect = [[mock.MagicMock()]] + mock1.storage_instances.list.return_value = [mocksi1] + mockvol1 = mock.MagicMock() + mockvol1.name = "volume-1" + mockvol1.size = v1['size'] + mocksi1.volumes.list.return_value = [mockvol1] + + mock2 = mock.MagicMock() + mock2.__getitem__.side_effect = ['some-other-ai'] + mock2.name = 'some-other-ai' + mocksi2 = mock.MagicMock() + mocksi2.name = "storage-1" + mocksi2.__getitem__.side_effect = [[mock.MagicMock()]] + mock2.storage_instances.list.return_value = [mocksi2] + mockvol2 = mock.MagicMock() + mockvol2.name = "volume-1" + mockvol2.size = v2['size'] + mocksi2.volumes.list.return_value = [mockvol2] + + listmock = mock.MagicMock() + listmock.return_value = [mock1, mock2] + self.driver.api.app_instances.list = listmock + + marker = mock.MagicMock() + limit = mock.MagicMock() + offset = mock.MagicMock() + sort_keys = mock.MagicMock() + sort_dirs = mock.MagicMock() + if (version.version_string() >= '15.0.0'): + with mock.patch( + 'cinder.volume.volume_utils.paginate_entries_list') \ + as mpage: self.driver.get_manageable_volumes( - {}, "", 10, 0, "", ""), - [{'cinder_id': None, - 'extra_info': None, - 'reason_not_safe': None, - 'reference': { - "source-name": 'test-app-inst:storage-1:volume-1'}, - 'safe_to_manage': True, - 'size': 50}, - {'cinder_id': 'c20aba21-6ef6-446b-b374-45733b4883ba', - 'extra_info': None, - 'reason_not_safe': None, - 'reference': None, - 'safe_to_manage': False, - 'size': None}]) + [testvol], marker, limit, offset, sort_keys, sort_dirs) + mpage.assert_called_once_with( + [v1, v2], marker, limit, offset, sort_keys, sort_dirs) else: - self.mock_api.return_value = non_cinder_ais_21 - self.assertEqual( + with mock.patch( + 'cinder.volume.utils.paginate_entries_list') as mpage: self.driver.get_manageable_volumes( - {}, "", 10, 0, "", ""), - [{'cinder_id': None, - 'extra_info': None, - 'reason_not_safe': '', - 'reference': { - "source-name": 'test-app-inst:storage-1:volume-1'}, - 'safe_to_manage': 
True, - 'size': 50}, - {'cinder_id': 'c20aba21-6ef6-446b-b374-45733b4883ba', - 'extra_info': None, - 'reason_not_safe': '', - 'reference': None, - 'safe_to_manage': False, - 'size': None}]) + [testvol], marker, limit, offset, sort_keys, sort_dirs) + mpage.assert_called_once_with( + [v1, v2], marker, limit, offset, sort_keys, sort_dirs) def test_unmanage(self): - self.mock_api.return_value = {} - self.assertIsNone(self.driver.unmanage(_stub_volume())) - if self._apiv == '2': - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - OS_PREFIX + _stub_volume()['id']), - method='put', - body={'name': UNMANAGE_PREFIX + _stub_volume()['id']}, - api_version=self._apiv) - else: - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - OS_PREFIX + _stub_volume()['id']), - method='put', - body={'name': UNMANAGE_PREFIX + _stub_volume()['id']}, - api_version=self._apiv, - tenant=self._tenant) - - def _generate_fake_api_request(self, targets_exist=True): - def _fake_api_request(resource_type, *args, **kwargs): - if 'api_version' not in kwargs: - raise ValueError("Fix me dummy") - result = None - if resource_type.split('/')[-1] == 'storage-1': - result = stub_get_export - elif (resource_type.split('/')[-1] == - 'c20aba21-6ef6-446b-b374-45733b4883ba'): - result = stub_app_instance[ - 'c20aba21-6ef6-446b-b374-45733b4883ba'] - elif resource_type == 'acl_policy': - result = stub_acl if self._apiv == '2' else stub_acl_21 - elif resource_type == 'ig_group': - result = stub_ig if self._apiv == '2' else stub_ig_21 - elif resource_type.split('/')[-1] == 'snapshots': - result = {'timestamp': 'test_ts'} - elif resource_type.split('/')[-1] == 'test_ts': - result = {'op_state': 'available'} - elif resource_type == 'tenant': - return {} - else: - if kwargs.get('api_version') == '2': - result = list(stub_app_instance.values())[0] - else: - result = stub_app_instance_21['data'] - - if kwargs.get('api_version') == '2': - return result - else: - return {'data': result} - return _fake_api_request + testvol = _stub_volume() + self.assertIsNone(self.driver.unmanage(testvol)) -class DateraVolumeTestCasev21(DateraVolumeTestCasev2): +class DateraVolumeTestCasev21(DateraVolumeTestCasev22): def setUp(self): super(DateraVolumeTestCasev21, self).setUp() - - m = mock.Mock() - m.json.return_value = {'api_versions': ['v2.1']} - self.driver._request.return_value = m - self.driver._store_metadata = mock.Mock() - self._apiv = '2.1' - self._tenant = self.cfg.datera_tenant_id - - -stub_acl = { - 'initiator_groups': [ - '/initiator_groups/IG-8739f309-dae9-4534-aa02-5b8e9e96eefd'], - 'initiators': [], - 'path': ('/app_instances/8739f309-dae9-4534-aa02-5b8e9e96eefd/' - 'storage_instances/storage-1/acl_policy')} - -stub_acl_21 = { - 'initiator_groups': [ - {'path': '/initiator_groups/IG-8739f309-dae9-4534-aa02-5b8e9e96eefd'}], - 'initiators': [], - 'path': ('/app_instances/8739f309-dae9-4534-aa02-5b8e9e96eefd/' - 'storage_instances/storage-1/acl_policy')} - -stub_ig = { - 'members': ['/initiators/iqn.1993-08.org.debian:01:ed22de8d75c0'], - 'name': 'IG-21e08155-8b95-4108-b148-089f64623963', - 'path': '/initiator_groups/IG-21e08155-8b95-4108-b148-089f64623963'} - -stub_ig_21 = { - 'members': [ - {'path': '/initiators/iqn.1993-08.org.debian:01:ed22de8d75c0'}], - 'name': 'IG-21e08155-8b95-4108-b148-089f64623963', - 'path': '/initiator_groups/IG-21e08155-8b95-4108-b148-089f64623963'} - -stub_create_export = { - "_ipColl": ["172.28.121.10", "172.28.120.10"], - "acls": {}, - "activeServers": 
{"4594953e-f97f-e111-ad85-001e6738c0f0": "1"}, - "ctype": "TC_BLOCK_ISCSI", - "endpointsExt1": { - "4594953e-f97f-e111-ad85-001e6738c0f0": { - "ipHigh": 0, - "ipLow": "192421036", - "ipStr": "172.28.120.11", - "ipV": 4, - "name": "", - "network": 24 - } - }, - "endpointsExt2": { - "4594953e-f97f-e111-ad85-001e6738c0f0": { - "ipHigh": 0, - "ipLow": "192486572", - "ipStr": "172.28.121.11", - "ipV": 4, - "name": "", - "network": 24 - } - }, - "inodes": {"c20aba21-6ef6-446b-b374-45733b4883ba": "1"}, - "name": "", - "networkPort": 0, - "serverAllocation": "TS_ALLOC_COMPLETED", - "servers": {"4594953e-f97f-e111-ad85-001e6738c0f0": "1"}, - "targetAllocation": "TS_ALLOC_COMPLETED", - "targetIds": { - "4594953e-f97f-e111-ad85-001e6738c0f0": { - "ids": [{ - "dev": None, - "id": "iqn.2013-05.com.daterainc::01:sn:fc372bc0490b2dbe" - }] - } - }, - "typeName": "TargetIscsiConfig", - "uuid": "7071efd7-9f22-4996-8f68-47e9ab19d0fd" -} - - -stub_app_instance = { - "c20aba21-6ef6-446b-b374-45733b4883ba": { - "admin_state": "online", - "clone_src": {}, - "create_mode": "openstack", - "descr": "", - "health": "ok", - "name": "c20aba21-6ef6-446b-b374-45733b4883ba", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba", - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.63" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "3bbb080aab7d9abc", - "path": "/app_instances/c20aba21-6ef6-446b-b374" - "-45733b4883ba/storage_instances/storage-1/access" - }, - "access_control": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/c20aba21-6ef6-446b-b374-" - "45733b4883ba/storage_instances/storage-1" - "/access_control" - }, - "access_control_mode": "allow_all", - "active_initiators": [], - "active_storage_nodes": [ - "/storage_nodes/1c4feac4-17c7-478b-8928-c76e8ec80b72" - ], - "admin_state": "online", - "auth": { - "initiator_pswd": "", - "initiator_user_name": "", - "path": "/app_instances/c20aba21-6ef6-446b-b374-" - "45733b4883ba/storage_instances/storage-1/auth", - "target_pswd": "", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "descr": "c20aba21-6ef6-446b-b374-45733b4883ba__ST__storage-1", - "op_state": "available", - "name": "storage-1", - "path": "/app_instances/c20aba21-6ef6-446b-b374-" - "45733b4883ba/storage_instances/storage-1", - "uuid": "b9897b84-149f-43c7-b19c-27d6af8fa815", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-" - "45733b4883ba/storage_instances/storage-1" - "/volumes/volume-1", - "replica_count": 3, - "size": 500, - "snapshot_policies": {}, - "snapshots": { - "1445384931.322468627": { - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b" - "-b374-45733b4883ba/storage_instances" - "/storage-1/volumes/volume-1/snapshots" - "/1445384931.322468627", - "uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c" - } - }, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } - } - } - }, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } -} -stub_app_instance_21 = { - "tenant": "/root", - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-b46e7236a8eb", - "version": "v2.1", - "data": { - "tenant": "/root", - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-b46e7236a8eb", - "name": "OS-9b0216bc-8aab-47f2-b746-843f497cb7a6", - "id": "1e52946a-5c77-45ed-8b4e-b46e7236a8eb", - "health": "ok", - "app_template": { - "path": "", - "resolved_path": "", - "resolved_tenant": "" - }, - "descr": "", - 
"admin_state": "online", - "storage_instances": [ - { - "health": "ok", - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-b46e7236a8eb/" - "storage_instances/storage-1", - "name": "storage-1", - "admin_state": "online", - "op_state": "available", - "volumes": [ - { - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-" - "b46e7236a8eb/" - "storage_instances/storage-1/volumes/volume-1", - "name": "volume-1", - "replica_count": 1, - "uuid": "9b0216bc-8aab-47f2-b746-843f497cb7a6", - "size": 500, - "capacity_in_use": 0, - "snapshot_policies": [], - "snapshots": [], - "placement_mode": "hybrid", - "op_state": "available", - "active_storage_nodes": [ - { - "path": "/storage_nodes/75f2cae4-68fb-4236-" - "a90c-b6c480b68816" - } - ], - "health": "ok" - } - ], - "access_control_mode": "deny_all", - "acl_policy": { - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-" - "b46e7236a8eb/" - "storage_instances/storage-1/acl_policy", - "initiators": [], - "initiator_groups": [] - }, - "ip_pool": { - "path": "/access_network_ip_pools/default", - "resolved_path": "/access_network_ip_pools/default", - "resolved_tenant": "/root" - }, - "access": { - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-" - "b46e7236a8eb/" - "storage_instances/storage-1/access", - "ips": [ - "172.28.41.63", - "172.29.41.29" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "3bbb080aab7d9abc" - }, - "auth": { - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-" - "b46e7236a8eb/" - "storage_instances/storage-1/auth", - "type": "none", - "initiator_user_name": "", - "initiator_pswd": "(hidden)", - "target_user_name": "", - "target_pswd": "(hidden)" - }, - "active_initiators": [], - "active_storage_nodes": [ - { - "path": "/storage_nodes/75f2cae4-68fb-4236-a90c-" - "b6c480b68816" - } - ], - "uuid": "eb3d7b07-b520-4cc2-b365-90135b84c356" - } - ], - "create_mode": "openstack", - "uuid": "9b0216bc-8aab-47f2-b746-843f497cb7a6", - "snapshots": [], - "snapshot_policies": [] - } -} - -stub_get_export = stub_app_instance[ - 'c20aba21-6ef6-446b-b374-45733b4883ba']['storage_instances']['storage-1'] - -stub_single_ai = stub_app_instance['c20aba21-6ef6-446b-b374-45733b4883ba'] - -stub_return_snapshots = \ - { - "1446076293.118600738": { - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" - "/storage_instances/storage-1/volumes/volume-1/snapshots/" - "1446076293.118600738", - "uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c" - }, - "1446076384.00607846": { - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" - "/storage_instances/storage-1/volumes/volume-1/snapshots/" - "1446076384.00607846", - "uuid": "25b4b959-c30a-45f2-a90c-84a40f34f0a1" - } - } - -stub_return_snapshots_21 = { - 'data': [ - { - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" - "/storage_instances/storage-1/volumes/volume-1/snapshots/", - "timestamp": "1446076293.118600738", - "utc_ts": "1446076293.118600738", - "uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c" - }, - { - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" - "/storage_instances/storage-1/volumes/volume-1/snapshots/", - "timestamp": "1446076384.00607846", - "utc_ts": "1446076384.00607846", - "uuid": "25b4b959-c30a-45f2-a90c-84a40f34f0a1" - }] -} - -non_cinder_ais = { - "75bc1c69-a399-4acb-aade-3514caf13c5e": { - "admin_state": "online", - "create_mode": "normal", - "descr": "", - "health": "ok", - "id": "75bc1c69-a399-4acb-aade-3514caf13c5e", - "name": "test-app-inst", - 
"path": "/app_instances/75bc1c69-a399-4acb-aade-3514caf13c5e", - "snapshot_policies": {}, - "snapshots": {}, - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.93" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "29036682e2d37b98", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1/access" - }, - "access_control_mode": "deny_all", - "acl_policy": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-" - "1/acl_policy" - }, - "active_initiators": [], - "active_storage_nodes": [ - "/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" - ], - "admin_state": "online", - "auth": { - "initiator_pswd": "(hidden)", - "initiator_user_name": "", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1/auth", - "target_pswd": "(hidden)", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "ip_pool": "/access_network_ip_pools/default", - "name": "storage-1", - "op_state": "available", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1", - "uuid": "6421237d-e4fc-433a-b535-148d5b6d8586", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-" - "1/volumes/volume-1", - "replica_count": 1, - "size": 50, - "snapshot_policies": {}, - "snapshots": {}, - "uuid": "e674d29c-a672-40d1-9577-abe3a504ffe9" - } - } - } - }, - "uuid": "00000000-0000-0000-0000-000000000000" - }, - "dfdaf8d1-8976-4c13-a829-3345e03cf810": { - "admin_state": "offline", - "create_mode": "openstack", - "descr": "", - "health": "ok", - "id": "dfdaf8d1-8976-4c13-a829-3345e03cf810", - "name": "OS-c20aba21-6ef6-446b-b374-45733b4883ba", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810", - "snapshot_policies": {}, - "snapshots": {}, - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.57" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "56cd59e754ad02b6", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-1/access" - }, - "access_control_mode": "deny_all", - "acl_policy": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-" - "1/acl_policy" - }, - "active_initiators": [], - "active_storage_nodes": [ - "/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" - ], - "admin_state": "offline", - "auth": { - "initiator_pswd": "(hidden)", - "initiator_user_name": "", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-1/auth", - "target_pswd": "(hidden)", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "ip_pool": "/access_network_ip_pools/default", - "name": "storage-1", - "op_state": "unavailable", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810" - "/storage_instances/storage-1", - "uuid": "5620a673-9985-464e-9616-e325a50eac60", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-" - "1/volumes/volume-1", - "replica_count": 1, - "size": 5, - "snapshot_policies": {}, - "snapshots": {}, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } 
- } - } - }, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } -} - -non_cinder_ais_21 = { - 'data': [{ - "admin_state": "online", - "create_mode": "normal", - "descr": "", - "health": "ok", - "id": "75bc1c69-a399-4acb-aade-3514caf13c5e", - "name": "test-app-inst", - "path": "/app_instances/75bc1c69-a399-4acb-aade-3514caf13c5e", - "snapshot_policies": {}, - "snapshots": {}, - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.93" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "29036682e2d37b98", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1/access" - }, - "access_control_mode": "deny_all", - "acl_policy": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-" - "1/acl_policy" - }, - "active_initiators": [], - "active_storage_nodes": [ - "/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" - ], - "admin_state": "online", - "auth": { - "initiator_pswd": "(hidden)", - "initiator_user_name": "", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1/auth", - "target_pswd": "(hidden)", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "ip_pool": "/access_network_ip_pools/default", - "name": "storage-1", - "op_state": "available", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1", - "uuid": "6421237d-e4fc-433a-b535-148d5b6d8586", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-" - "1/volumes/volume-1", - "replica_count": 1, - "size": 50, - "snapshot_policies": {}, - "snapshots": {}, - "uuid": "e674d29c-a672-40d1-9577-abe3a504ffe9" - } - } - } - }, - "uuid": "00000000-0000-0000-0000-000000000000" - }, - { - "admin_state": "offline", - "create_mode": "openstack", - "descr": "", - "health": "ok", - "id": "dfdaf8d1-8976-4c13-a829-3345e03cf810", - "name": "OS-c20aba21-6ef6-446b-b374-45733b4883ba", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810", - "snapshot_policies": {}, - "snapshots": {}, - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.57" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "56cd59e754ad02b6", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-1/access" - }, - "access_control_mode": "deny_all", - "acl_policy": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-" - "1/acl_policy" - }, - "active_initiators": [], - "active_storage_nodes": [ - "/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" - ], - "admin_state": "offline", - "auth": { - "initiator_pswd": "(hidden)", - "initiator_user_name": "", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-1/auth", - "target_pswd": "(hidden)", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "ip_pool": "/access_network_ip_pools/default", - "name": "storage-1", - "op_state": "unavailable", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810" - "/storage_instances/storage-1", - "uuid": "5620a673-9985-464e-9616-e325a50eac60", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": 
"/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-" - "1/volumes/volume-1", - "replica_count": 1, - "size": 5, - "snapshot_policies": {}, - "snapshots": {}, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } - } - } - }, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - }] -} - - -def _stub_datera_volume(*args, **kwargs): - return { - "status": "available", - "name": "test", - "num_replicas": "2", - "parent": "00000000-0000-0000-0000-000000000000", - "size": "1024", - "sub_type": "IS_ORIGINAL", - "uuid": "10305aa4-1343-4363-86fe-f49eb421a48c", - "snapshots": [], - "snapshot_configs": [], - "targets": [ - kwargs.get('targets', "744e1bd8-d741-4919-86cd-806037d98c8a"), - ] - } + self.driver.api = mock.MagicMock() + self.driver.apiv = '2.1' def _stub_volume(*args, **kwargs): - uuid = u'c20aba21-6ef6-446b-b374-45733b4883ba' - name = u'volume-00000001' + uuid = 'c20aba21-6ef6-446b-b374-45733b4883ba' + name = 'volume-00000001' size = 1 volume = {} volume['id'] = kwargs.get('id', uuid) + volume['project_id'] = "test-project" volume['display_name'] = kwargs.get('display_name', name) volume['size'] = kwargs.get('size', size) volume['provider_location'] = kwargs.get('provider_location', None) @@ -1222,16 +474,13 @@ def _stub_volume(*args, **kwargs): def _stub_snapshot(*args, **kwargs): - uuid = u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c' - name = u'snapshot-00000001' - size = 1 - volume = {} - volume['id'] = kwargs.get('id', uuid) - volume['display_name'] = kwargs.get('display_name', name) - volume['volume_size'] = kwargs.get('size', size) - volume['volume_id'] = kwargs.get('volume_id', None) - return volume - - -def _config_getter(*args, **kwargs): - return {} + uuid = '0bb34f0c-fea4-48e0-bf96-591120ac7e3c' + name = 'snapshot-00000001' + volume_size = 1 + snap = {} + snap['id'] = kwargs.get('id', uuid) + snap['project_id'] = "test-project" + snap['display_name'] = kwargs.get('display_name', name) + snap['volume_id'] = kwargs.get('volume_id', None) + snap['volume_size'] = kwargs.get('volume_size', volume_size) + return snap diff --git a/cinder/volume/drivers/datera/datera_api2.py b/cinder/volume/drivers/datera/datera_api2.py deleted file mode 100644 index d7d9c243469..00000000000 --- a/cinder/volume/drivers/datera/datera_api2.py +++ /dev/null @@ -1,763 +0,0 @@ -# Copyright 2017 Datera -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ipaddress -import re -import uuid - -import eventlet -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -import cinder.volume.drivers.datera.datera_common as datc -from cinder.volume import volume_utils - -LOG = logging.getLogger(__name__) - - -class DateraApi(object): - - # ================= - # = Create Volume = - # ================= - - def _create_volume_2(self, volume): - # Generate App Instance, Storage Instance and Volume - # Volume ID will be used as the App Instance Name - # Storage Instance and Volumes will have standard names - policies = self._get_policies_for_resource(volume) - num_replicas = int(policies['replica_count']) - storage_name = policies['default_storage_name'] - volume_name = policies['default_volume_name'] - template = policies['template'] - - if template: - app_params = ( - { - 'create_mode': "openstack", - # 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'app_template': '/app_templates/{}'.format(template) - }) - else: - - app_params = ( - { - 'create_mode': "openstack", - 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'access_control_mode': 'deny_all', - 'storage_instances': { - storage_name: { - 'name': storage_name, - 'volumes': { - volume_name: { - 'name': volume_name, - 'size': volume['size'], - 'replica_count': num_replicas, - 'snapshot_policies': { - } - } - } - } - } - }) - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), - 'post', - body=app_params, - api_version='2') - self._update_qos(volume, policies) - - # ================= - # = Extend Volume = - # ================= - - def _extend_volume_2(self, volume, new_size): - # Current product limitation: - # If app_instance is bound to template resizing is not possible - # Once policies are implemented in the product this can go away - policies = self._get_policies_for_resource(volume) - template = policies['template'] - if template: - LOG.warning("Volume size not extended due to template binding:" - " volume: %(volume)s, template: %(template)s", - volume=volume, template=template) - return - - # Offline App Instance, if necessary - reonline = False - app_inst = self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])), - api_version='2') - if app_inst['admin_state'] == 'online': - reonline = True - self._detach_volume_2(None, volume) - # Change Volume Size - app_inst = datc._get_name(volume['id']) - data = { - 'size': new_size - } - store_name, vol_name = self._scrape_template(policies) - self._issue_api_request( - datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name).format(app_inst), - method='put', - body=data, - api_version='2') - # Online Volume, if it was online before - if reonline: - self._create_export_2(None, volume, None) - - # ================= - # = Cloned Volume = - # ================= - - def _create_cloned_volume_2(self, volume, src_vref): - policies = self._get_policies_for_resource(volume) - - store_name, vol_name = self._scrape_template(policies) - - src = "/" + datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name).format(datc._get_name(src_vref['id'])) - data = { - 'create_mode': 'openstack', - 'name': datc._get_name(volume['id']), - 'uuid': str(volume['id']), - 'clone_src': src, - } - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), 'post', body=data, api_version='2') - - if volume['size'] > src_vref['size']: - self._extend_volume_2(volume, volume['size']) - 
- # ================= - # = Delete Volume = - # ================= - - def _delete_volume_2(self, volume): - self.detach_volume(None, volume) - app_inst = datc._get_name(volume['id']) - try: - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - app_inst), - method='delete', - api_version='2') - except exception.NotFound: - LOG.info("Tried to delete volume %s, but it was not found in the " - "Datera cluster. Continuing with delete.", - datc._get_name(volume['id'])) - - # ================= - # = Ensure Export = - # ================= - - def _ensure_export_2(self, context, volume, connector): - return self._create_export_2(context, volume, connector) - - # ========================= - # = Initialize Connection = - # ========================= - - def _initialize_connection_2(self, volume, connector): - # Now online the app_instance (which will online all storage_instances) - multipath = connector.get('multipath', False) - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'online' - } - app_inst = self._issue_api_request( - url, method='put', body=data, api_version='2') - storage_instances = app_inst["storage_instances"] - si_names = list(storage_instances.keys()) - - portal = storage_instances[si_names[0]]['access']['ips'][0] + ':3260' - iqn = storage_instances[si_names[0]]['access']['iqn'] - if multipath: - portals = [p + ':3260' for p in - storage_instances[si_names[0]]['access']['ips']] - iqns = [iqn for _ in - storage_instances[si_names[0]]['access']['ips']] - lunids = [self._get_lunid() for _ in - storage_instances[si_names[0]]['access']['ips']] - - return { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': False, - 'target_iqn': iqn, - 'target_iqns': iqns, - 'target_portal': portal, - 'target_portals': portals, - 'target_lun': self._get_lunid(), - 'target_luns': lunids, - 'volume_id': volume['id'], - 'discard': False}} - else: - return { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': False, - 'target_iqn': iqn, - 'target_portal': portal, - 'target_lun': self._get_lunid(), - 'volume_id': volume['id'], - 'discard': False}} - - # ================= - # = Create Export = - # ================= - - def _create_export_2(self, context, volume, connector): - # Online volume in case it hasn't been already - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'online' - } - self._issue_api_request(url, method='put', body=data, api_version='2') - # Check if we've already setup everything for this volume - url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id']))) - storage_instances = self._issue_api_request(url, api_version='2') - # Handle adding initiator to product if necessary - # Then add initiator to ACL - policies = self._get_policies_for_resource(volume) - - store_name, _ = self._scrape_template(policies) - - if (connector and - connector.get('initiator') and - not policies['acl_allow_all']): - initiator_name = "OpenStack_{}_{}".format( - self.driver_prefix, str(uuid.uuid4())[:4]) - initiator_group = datc.INITIATOR_GROUP_PREFIX + volume['id'] - found = False - initiator = connector['initiator'] - current_initiators = self._issue_api_request( - 'initiators', api_version='2') - for iqn, values in current_initiators.items(): - if initiator == iqn: - found = True - break - # If we didn't find a matching initiator, create one - if not found: - data = {'id': initiator, 'name': initiator_name} - # Try and create the initiator - 
# If we get a conflict, ignore it because race conditions - self._issue_api_request("initiators", - method="post", - body=data, - conflict_ok=True, - api_version='2') - # Create initiator group with initiator in it - initiator_path = "/initiators/{}".format(initiator) - initiator_group_path = "/initiator_groups/{}".format( - initiator_group) - ig_data = {'name': initiator_group, 'members': [initiator_path]} - self._issue_api_request("initiator_groups", - method="post", - body=ig_data, - conflict_ok=True, - api_version='2') - # Create ACL with initiator group as reference for each - # storage_instance in app_instance - # TODO(_alastor_): We need to avoid changing the ACLs if the - # template already specifies an ACL policy. - for si_name in storage_instances.keys(): - acl_url = (datc.URL_TEMPLATES['si']() + - "/{}/acl_policy").format( - datc._get_name(volume['id']), si_name) - existing_acl = self._issue_api_request(acl_url, - method="get", - api_version='2') - data = {} - data['initiators'] = existing_acl['initiators'] - data['initiator_groups'] = existing_acl['initiator_groups'] - data['initiator_groups'].append(initiator_group_path) - self._issue_api_request(acl_url, - method="put", - body=data, - api_version='2') - - if connector and connector.get('ip'): - try: - # Case where volume_type has non default IP Pool info - if policies['ip_pool'] != 'default': - initiator_ip_pool_path = self._issue_api_request( - "access_network_ip_pools/{}".format( - policies['ip_pool']), api_version='2')['path'] - # Fallback to trying reasonable IP based guess - else: - initiator_ip_pool_path = self._get_ip_pool_for_string_ip( - connector['ip']) - - ip_pool_url = datc.URL_TEMPLATES['si_inst']( - store_name).format(datc._get_name(volume['id'])) - ip_pool_data = {'ip_pool': initiator_ip_pool_path} - self._issue_api_request(ip_pool_url, - method="put", - body=ip_pool_data, - api_version='2') - except exception.DateraAPIException: - # Datera product 1.0 support - pass - - # Check to ensure we're ready for go-time - self._si_poll(volume, policies) - - # ================= - # = Detach Volume = - # ================= - - def _detach_volume_2(self, context, volume, attachment=None): - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'offline', - 'force': True - } - try: - self._issue_api_request(url, method='put', body=data, - api_version='2') - except exception.NotFound: - msg = ("Tried to detach volume %s, but it was not found in the " - "Datera cluster. 
Continuing with detach.") - LOG.info(msg, volume['id']) - # TODO(_alastor_): Make acl cleaning multi-attach aware - self._clean_acl_2(volume) - - def _check_for_acl_2(self, initiator_path): - """Returns True if an acl is found for initiator_path """ - # TODO(_alastor_) when we get a /initiators/:initiator/acl_policies - # endpoint use that instead of this monstrosity - initiator_groups = self._issue_api_request("initiator_groups", - api_version='2') - for ig, igdata in initiator_groups.items(): - if initiator_path in igdata['members']: - LOG.debug("Found initiator_group: %s for initiator: %s", - ig, initiator_path) - return True - LOG.debug("No initiator_group found for initiator: %s", initiator_path) - return False - - def _clean_acl_2(self, volume): - policies = self._get_policies_for_resource(volume) - - store_name, _ = self._scrape_template(policies) - - acl_url = (datc.URL_TEMPLATES["si_inst"]( - store_name) + "/acl_policy").format(datc._get_name(volume['id'])) - try: - initiator_group = self._issue_api_request( - acl_url, api_version='2')['initiator_groups'][0] - initiator_iqn_path = self._issue_api_request( - initiator_group.lstrip("/"))["members"][0] - # Clear out ACL and delete initiator group - self._issue_api_request(acl_url, - method="put", - body={'initiator_groups': []}, - api_version='2') - self._issue_api_request(initiator_group.lstrip("/"), - method="delete", - api_version='2') - if not self._check_for_acl_2(initiator_iqn_path): - self._issue_api_request(initiator_iqn_path.lstrip("/"), - method="delete", - api_version='2') - except (IndexError, exception.NotFound): - LOG.debug("Did not find any initiator groups for volume: %s", - volume) - - # =================== - # = Create Snapshot = - # =================== - - def _create_snapshot_2(self, snapshot): - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - url_template = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - url = url_template.format(datc._get_name(snapshot['volume_id'])) - - snap_params = { - 'uuid': snapshot['id'], - } - snap = self._issue_api_request(url, method='post', body=snap_params, - api_version='2') - snapu = "/".join((url, snap['timestamp'])) - self._snap_poll(snapu) - - # =================== - # = Delete Snapshot = - # =================== - - def _delete_snapshot_2(self, snapshot): - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - snap_temp = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - snapu = snap_temp.format(datc._get_name(snapshot['volume_id'])) - snapshots = self._issue_api_request(snapu, method='get', - api_version='2') - - try: - for ts, snap in snapshots.items(): - if snap['uuid'] == snapshot['id']: - url_template = snapu + '/{}' - url = url_template.format(ts) - self._issue_api_request(url, method='delete', - api_version='2') - break - else: - raise exception.NotFound - except exception.NotFound: - msg = ("Tried to delete snapshot %s, but was not found in " - "Datera cluster. 
Continuing with delete.") - LOG.info(msg, datc._get_name(snapshot['id'])) - - # ======================== - # = Volume From Snapshot = - # ======================== - - def _create_volume_from_snapshot_2(self, volume, snapshot): - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - snap_temp = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - snapu = snap_temp.format(datc._get_name(snapshot['volume_id'])) - snapshots = self._issue_api_request(snapu, method='get', - api_version='2') - for ts, snap in snapshots.items(): - if snap['uuid'] == snapshot['id']: - found_ts = ts - break - else: - raise exception.NotFound - - snap_url = (snap_temp + '/{}').format( - datc._get_name(snapshot['volume_id']), found_ts) - - self._snap_poll(snap_url) - - src = "/" + snap_url - app_params = ( - { - 'create_mode': 'openstack', - 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'clone_src': src, - }) - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), - method='post', - body=app_params, - api_version='2') - - if (volume['size'] > snapshot['volume_size']): - self._extend_volume_2(volume, volume['size']) - - # ========== - # = Manage = - # ========== - - def _manage_existing_2(self, volume, existing_ref): - existing_ref = existing_ref['source-name'] - if existing_ref.count(":") != 2: - raise exception.ManageExistingInvalidReference( - _("existing_ref argument must be of this format:" - "app_inst_name:storage_inst_name:vol_name")) - app_inst_name = existing_ref.split(":")[0] - LOG.debug("Managing existing Datera volume %s. " - "Changing name to %s", - datc._get_name(volume['id']), - existing_ref) - data = {'name': datc._get_name(volume['id'])} - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - app_inst_name), method='put', body=data, api_version='2') - - # =================== - # = Manage Get Size = - # =================== - - def _manage_existing_get_size_2(self, volume, existing_ref): - existing_ref = existing_ref['source-name'] - if existing_ref.count(":") != 2: - raise exception.ManageExistingInvalidReference( - _("existing_ref argument must be of this format:" - "app_inst_name:storage_inst_name:vol_name")) - app_inst_name, si_name, vol_name = existing_ref.split(":") - app_inst = self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format(app_inst_name), - api_version='2') - return self._get_size_2(volume, app_inst, si_name, vol_name) - - def _get_size_2(self, volume, app_inst=None, si_name=None, vol_name=None): - """Helper method for getting the size of a backend object - - If app_inst is provided, we'll just parse the dict to get - the size instead of making a separate http request - """ - policies = self._get_policies_for_resource(volume) - si_name = si_name if si_name else policies['default_storage_name'] - vol_name = vol_name if vol_name else policies['default_volume_name'] - if not app_inst: - vol_url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - app_inst = self._issue_api_request(vol_url) - size = app_inst[ - 'storage_instances'][si_name]['volumes'][vol_name]['size'] - return size - - # ========================= - # = Get Manageable Volume = - # ========================= - - def _get_manageable_volumes_2(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - LOG.debug("Listing manageable Datera volumes") - app_instances = self._issue_api_request( - datc.URL_TEMPLATES['ai'](), api_version='2').values() - - results = [] - - 
cinder_volume_ids = [vol['id'] for vol in cinder_volumes] - - for ai in app_instances: - ai_name = ai['name'] - reference = None - size = None - safe_to_manage = False - reason_not_safe = None - cinder_id = None - extra_info = None - if re.match(datc.UUID4_RE, ai_name): - cinder_id = ai_name.lstrip(datc.OS_PREFIX) - if (not cinder_id and - ai_name.lstrip(datc.OS_PREFIX) not in cinder_volume_ids): - safe_to_manage = self._is_manageable(ai) - if safe_to_manage: - si = list(ai['storage_instances'].values())[0] - si_name = si['name'] - vol = list(si['volumes'].values())[0] - vol_name = vol['name'] - size = vol['size'] - reference = {"source-name": "{}:{}:{}".format( - ai_name, si_name, vol_name)} - - results.append({ - 'reference': reference, - 'size': size, - 'safe_to_manage': safe_to_manage, - 'reason_not_safe': reason_not_safe, - 'cinder_id': cinder_id, - 'extra_info': extra_info}) - - page_results = volume_utils.paginate_entries_list( - results, marker, limit, offset, sort_keys, sort_dirs) - - return page_results - - # ============ - # = Unmanage = - # ============ - - def _unmanage_2(self, volume): - LOG.debug("Unmanaging Cinder volume %s. Changing name to %s", - volume['id'], datc._get_unmanaged(volume['id'])) - data = {'name': datc._get_unmanaged(volume['id'])} - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])), - method='put', - body=data, - api_version='2') - - # ================ - # = Volume Stats = - # ================ - - def _get_volume_stats_2(self, refresh=False): - if refresh or not self.cluster_stats: - try: - LOG.debug("Updating cluster stats info.") - - results = self._issue_api_request('system', api_version='2') - - if 'uuid' not in results: - LOG.error( - 'Failed to get updated stats from Datera Cluster.') - - backend_name = self.configuration.safe_get( - 'volume_backend_name') - stats = { - 'volume_backend_name': backend_name or 'Datera', - 'vendor_name': 'Datera', - 'driver_version': self.VERSION, - 'storage_protocol': 'iSCSI', - 'total_capacity_gb': ( - int(results['total_capacity']) / units.Gi), - 'free_capacity_gb': ( - int(results['available_capacity']) / units.Gi), - 'reserved_percentage': 0, - } - - self.cluster_stats = stats - except exception.DateraAPIException: - LOG.error('Failed to get updated stats from Datera cluster.') - return self.cluster_stats - - def _is_manageable(self, app_inst): - if len(app_inst['storage_instances']) == 1: - si = list(app_inst['storage_instances'].values())[0] - if len(si['volumes']) == 1: - return True - return False - - # ========= - # = Login = - # ========= - - def _login_2(self): - """Use the san_login and san_password to set token.""" - body = { - 'name': self.username, - 'password': self.password - } - - # Unset token now, otherwise potential expired token will be sent - # along to be used for authorization when trying to login. - self.datera_api_token = None - - try: - LOG.debug('Getting Datera auth token.') - results = self._issue_api_request('login', 'put', body=body, - sensitive=True, api_version='2') - self.datera_api_token = results['key'] - except exception.NotAuthorized: - with excutils.save_and_reraise_exception(): - LOG.error('Logging into the Datera cluster failed. 
Please ' - 'check your username and password set in the ' - 'cinder.conf and start the cinder-volume ' - 'service again.') - - # =========== - # = Polling = - # =========== - - def _snap_poll(self, url): - eventlet.sleep(datc.DEFAULT_SNAP_SLEEP) - TIMEOUT = 10 - retry = 0 - poll = True - while poll and retry < TIMEOUT: - retry += 1 - snap = self._issue_api_request(url, api_version='2') - if snap['op_state'] == 'available': - poll = False - else: - eventlet.sleep(1) - if retry >= TIMEOUT: - raise exception.VolumeDriverException( - message=_('Snapshot not ready.')) - - def _si_poll(self, volume, policies): - # Initial 4 second sleep required for some Datera versions - eventlet.sleep(datc.DEFAULT_SI_SLEEP_API_2) - TIMEOUT = 10 - retry = 0 - check_url = datc.URL_TEMPLATES['si_inst']( - policies['default_storage_name']).format( - datc._get_name(volume['id'])) - poll = True - while poll and retry < TIMEOUT: - retry += 1 - si = self._issue_api_request(check_url, api_version='2') - if si['op_state'] == 'available': - poll = False - else: - eventlet.sleep(1) - if retry >= TIMEOUT: - raise exception.VolumeDriverException( - message=_('Resource not ready.')) - - # ============ - # = IP Pools = - # ============ - - def _get_ip_pool_for_string_ip(self, ip): - """Takes a string ipaddress and return the ip_pool API object dict """ - pool = 'default' - ip_obj = ipaddress.ip_address(six.text_type(ip)) - ip_pools = self._issue_api_request('access_network_ip_pools', - api_version='2') - for ip_pool, ipdata in ip_pools.items(): - for access, adata in ipdata['network_paths'].items(): - if not adata.get('start_ip'): - continue - pool_if = ipaddress.ip_interface( - "/".join((adata['start_ip'], str(adata['netmask'])))) - if ip_obj in pool_if.network: - pool = ip_pool - return self._issue_api_request( - "access_network_ip_pools/{}".format(pool), api_version='2')['path'] - - # ============= - # = Templates = - # ============= - - def _scrape_template(self, policies): - sname = policies['default_storage_name'] - vname = policies['default_volume_name'] - - template = policies['template'] - if template: - result = self._issue_api_request( - datc.URL_TEMPLATES['at']().format(template), api_version='2') - sname, st = list(result['storage_templates'].items())[0] - vname = list(st['volume_templates'].keys())[0] - return sname, vname - - # ======= - # = QoS = - # ======= - - def _update_qos(self, resource, policies): - url = datc.URL_TEMPLATES['vol_inst']( - policies['default_storage_name'], - policies['default_volume_name']) + '/performance_policy' - url = url.format(datc._get_name(resource['id'])) - type_id = resource.get('volume_type_id', None) - if type_id is not None: - # Filter for just QOS policies in result. All of their keys - # should end with "max" - fpolicies = {k: int(v) for k, v in - policies.items() if k.endswith("max")} - # Filter all 0 values from being passed - fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items())) - if fpolicies: - self._issue_api_request(url, 'post', body=fpolicies, - api_version='2') diff --git a/cinder/volume/drivers/datera/datera_api21.py b/cinder/volume/drivers/datera/datera_api21.py index eaba6fe34bf..75a7a6c6fa4 100644 --- a/cinder/volume/drivers/datera/datera_api21.py +++ b/cinder/volume/drivers/datera/datera_api21.py @@ -1,4 +1,4 @@ -# Copyright 2017 Datera +# Copyright 2020 Datera # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,25 +13,39 @@ # License for the specific language governing permissions and limitations # under the License. +import contextlib import ipaddress +import math import random -import re +import time import uuid import eventlet +from os_brick import exception as brick_exception from oslo_log import log as logging -from oslo_utils import excutils +from oslo_serialization import jsonutils as json +from oslo_utils import importutils from oslo_utils import units import six from cinder import exception from cinder.i18n import _ +from cinder.image import image_utils +from cinder import utils import cinder.volume.drivers.datera.datera_common as datc -from cinder.volume import volume_utils +from cinder.volume import volume_types +from cinder.volume import volume_utils as volutils LOG = logging.getLogger(__name__) +dexceptions = importutils.try_import('dfs_sdk.exceptions') +API_VERSION = "2.1" + + +# The DateraAPI classes (2.1, 2.2) are enhanced by datera_common's lookup() +# decorator which generates members run-time. Therefore on the class we disable +# pylint's no-member check pylint: disable=no-member class DateraApi(object): # ================= @@ -39,34 +53,38 @@ class DateraApi(object): # ================= def _create_volume_2_1(self, volume): - tenant = self._create_tenant(volume) policies = self._get_policies_for_resource(volume) num_replicas = int(policies['replica_count']) - storage_name = policies['default_storage_name'] - volume_name = policies['default_volume_name'] + storage_name = 'storage-1' + volume_name = 'volume-1' template = policies['template'] placement = policies['placement_mode'] + ip_pool = policies['ip_pool'] + + name = datc.get_name(volume) if template: app_params = ( { - 'create_mode': "openstack", + 'create_mode': 'openstack', # 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'app_template': '/app_templates/{}'.format(template) + 'name': name, + 'app_template': {'path': '/app_templates/{}'.format( + template)} }) - else: app_params = ( { - 'create_mode': "openstack", + 'create_mode': 'openstack', 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), + 'name': name, 'access_control_mode': 'deny_all', 'storage_instances': [ { 'name': storage_name, + 'ip_pool': {'path': ('/access_network_ip_pools/' + '{}'.format(ip_pool))}, 'volumes': [ { 'name': volume_name, @@ -80,103 +98,92 @@ class DateraApi(object): } ] }) - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), - 'post', - body=app_params, - api_version='2.1', - tenant=tenant) - self._update_qos_2_1(volume, policies, tenant) + + tenant = self.create_tenant(volume['project_id']) + self.api.app_instances.create(tenant=tenant, **app_params) + self._update_qos_2_1(volume, policies) + self._add_vol_meta_2_1(volume) # ================= # = Extend Volume = # ================= def _extend_volume_2_1(self, volume, new_size): - tenant = self._create_tenant(volume) + if volume['size'] >= new_size: + LOG.warning("Volume size not extended due to original size being " + "greater or equal to new size. Originial: " + "%(original)s, New: %(new)s", { + 'original': volume['size'], + 'new': new_size}) + return policies = self._get_policies_for_resource(volume) template = policies['template'] if template: - LOG.warning("Volume size not extended due to template binding:" + LOG.warning("Volume size not extended due to template binding." 
" volume: %(volume)s, template: %(template)s", - volume=volume, template=template) + {'volume': volume, 'template': template}) return - # Offline App Instance, if necessary - reonline = False - app_inst = self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])), - api_version='2.1', tenant=tenant) - if app_inst['data']['admin_state'] == 'online': - reonline = True - self._detach_volume_2_1(None, volume) - # Change Volume Size - app_inst = datc._get_name(volume['id']) - data = { - 'size': new_size - } - store_name, vol_name = self._scrape_template(policies) - self._issue_api_request( - datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name).format(app_inst), - method='put', - body=data, - api_version='2.1', - tenant=tenant) - # Online Volume, if it was online before - if reonline: - self._create_export_2_1(None, volume, None) + with self._offline_flip_2_1(volume): + # Change Volume Size + tenant = self.get_tenant(volume['project_id']) + dvol = self.cvol_to_dvol(volume, tenant=tenant) + dvol.set(tenant=tenant, size=new_size) # ================= # = Cloned Volume = # ================= def _create_cloned_volume_2_1(self, volume, src_vref): - policies = self._get_policies_for_resource(volume) - tenant = self._create_tenant(volume) - store_name, vol_name = self._scrape_template(policies) - - src = "/" + datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name).format(datc._get_name(src_vref['id'])) + tenant = self.get_tenant(volume['project_id']) + sdvol = self.cvol_to_dvol(src_vref, tenant=tenant) + src = sdvol.path data = { 'create_mode': 'openstack', - 'name': datc._get_name(volume['id']), + 'name': datc.get_name(volume), 'uuid': str(volume['id']), 'clone_volume_src': {'path': src}, } - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), 'post', body=data, api_version='2.1', - tenant=tenant) + tenant = self.get_tenant(volume['project_id']) + self.api.app_instances.create(tenant=tenant, **data) if volume['size'] > src_vref['size']: self._extend_volume_2_1(volume, volume['size']) + self._add_vol_meta_2_1(volume) # ================= # = Delete Volume = # ================= def _delete_volume_2_1(self, volume): - self._detach_volume_2_1(None, volume) - tenant = self._create_tenant(volume) - app_inst = datc._get_name(volume['id']) try: - self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format(app_inst), - method='delete', - api_version='2.1', - tenant=tenant) + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + si = ai.storage_instances.list(tenant=tenant)[0] + + # Clear out ACL + acl = si.acl_policy.get(tenant=tenant) + acl.set(tenant=tenant, initiators=[]) + + # Bring volume offline + data = { + 'admin_state': 'offline', + 'force': True + } + ai.set(tenant=tenant, **data) + + ai.delete(tenant=tenant, force=True) except exception.NotFound: msg = ("Tried to delete volume %s, but it was not found in the " "Datera cluster. 
Continuing with delete.") - LOG.info(msg, datc._get_name(volume['id'])) + LOG.info(msg, datc.get_name(volume)) # ================= # = Ensure Export = # ================= def _ensure_export_2_1(self, context, volume, connector=None): - self.create_export(context, volume, connector) + pass # ========================= # = Initialize Connection = @@ -185,29 +192,25 @@ class DateraApi(object): def _initialize_connection_2_1(self, volume, connector): # Now online the app_instance (which will online all storage_instances) multipath = connector.get('multipath', False) - tenant = self._create_tenant(volume) - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) data = { 'admin_state': 'online' } - app_inst = self._issue_api_request( - url, method='put', body=data, api_version='2.1', tenant=tenant)[ - 'data'] - storage_instances = app_inst["storage_instances"] - si = storage_instances[0] + ai.set(tenant=tenant, **data) + si = ai.storage_instances.list(tenant=tenant)[0] # randomize portal chosen choice = 0 policies = self._get_policies_for_resource(volume) if policies["round_robin"]: choice = random.randint(0, 1) - portal = si['access']['ips'][choice] + ':3260' - iqn = si['access']['iqn'] + portal = si.access['ips'][choice] + ':3260' + iqn = si.access['iqn'] if multipath: - portals = [p + ':3260' for p in si['access']['ips']] - iqns = [iqn for _ in si['access']['ips']] - lunids = [self._get_lunid() for _ in si['access']['ips']] + portals = [p + ':3260' for p in si.access['ips']] + iqns = [iqn for _ in si.access['ips']] + lunids = [self._get_lunid() for _ in si.access['ips']] result = { 'driver_volume_type': 'iscsi', @@ -232,6 +235,12 @@ class DateraApi(object): 'volume_id': volume['id'], 'discard': False}} + if self.use_chap_auth: + result['data'].update( + auth_method="CHAP", + auth_username=self.chap_username, + auth_password=self.chap_password) + return result # ================= @@ -239,291 +248,224 @@ class DateraApi(object): # ================= def _create_export_2_1(self, context, volume, connector): - tenant = self._create_tenant(volume) - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) data = { 'admin_state': 'offline', 'force': True } - self._issue_api_request( - url, method='put', body=data, api_version='2.1', tenant=tenant) + ai.set(tenant=tenant, **data) + si = ai.storage_instances.list(tenant=tenant)[0] policies = self._get_policies_for_resource(volume) - store_name, _ = self._scrape_template(policies) if connector and connector.get('ip'): # Case where volume_type has non default IP Pool info if policies['ip_pool'] != 'default': - initiator_ip_pool_path = self._issue_api_request( - "access_network_ip_pools/{}".format( - policies['ip_pool']), - api_version='2.1', - tenant=tenant)['path'] + initiator_ip_pool_path = self.api.access_network_ip_pools.get( + policies['ip_pool']).path # Fallback to trying reasonable IP based guess else: initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_1( - connector['ip']) + connector['ip'], tenant) - ip_pool_url = datc.URL_TEMPLATES['si_inst']( - store_name).format(datc._get_name(volume['id'])) ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}} - self._issue_api_request(ip_pool_url, - method="put", - body=ip_pool_data, - api_version='2.1', - tenant=tenant) - url = 
datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) + si.set(tenant=tenant, **ip_pool_data) data = { 'admin_state': 'online' } - self._issue_api_request( - url, method='put', body=data, api_version='2.1', tenant=tenant) + ai.set(tenant=tenant, **data) # Check if we've already setup everything for this volume - url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id']))) - storage_instances = self._issue_api_request( - url, api_version='2.1', tenant=tenant) + storage_instances = ai.storage_instances.list(tenant=tenant) # Handle adding initiator to product if necessary # Then add initiator to ACL - if (connector and - connector.get('initiator') and - not policies['acl_allow_all']): - initiator_name = "OpenStack_{}_{}".format( - self.driver_prefix, str(uuid.uuid4())[:4]) - initiator_group = datc.INITIATOR_GROUP_PREFIX + str(uuid.uuid4()) - found = False + if connector and connector.get('initiator'): + initiator_name = "OpenStack-{}".format(str(uuid.uuid4())[:8]) initiator = connector['initiator'] - if not found: - data = {'id': initiator, 'name': initiator_name} - # Try and create the initiator - # If we get a conflict, ignore it - self._issue_api_request("initiators", - method="post", - body=data, - conflict_ok=True, - api_version='2.1', - tenant=tenant) - # Create initiator group with initiator in it - initiator_path = "/initiators/{}".format(initiator) - initiator_group_path = "/initiator_groups/{}".format( - initiator_group) - ig_data = {'name': initiator_group, - 'members': [{'path': initiator_path}]} - self._issue_api_request("initiator_groups", - method="post", - body=ig_data, - conflict_ok=True, - api_version='2.1', - tenant=tenant) + dinit = None + data = {'id': initiator, 'name': initiator_name} + # Try and create the initiator + # If we get a conflict, ignore it + try: + dinit = self.api.initiators.create(tenant=tenant, **data) + except dexceptions.ApiConflictError: + dinit = self.api.initiators.get(initiator, tenant=tenant) + initiator_path = dinit['path'] # Create ACL with initiator group as reference for each # storage_instance in app_instance # TODO(_alastor_): We need to avoid changing the ACLs if the # template already specifies an ACL policy. - for si in storage_instances['data']: - acl_url = (datc.URL_TEMPLATES['si']() + - "/{}/acl_policy").format( - datc._get_name(volume['id']), si['name']) - existing_acl = self._issue_api_request(acl_url, - method="get", - api_version='2.1', - tenant=tenant)['data'] + for si in storage_instances: + existing_acl = si.acl_policy.get(tenant=tenant) data = {} - data['initiators'] = existing_acl['initiators'] - data['initiator_groups'] = existing_acl['initiator_groups'] - data['initiator_groups'].append({"path": initiator_group_path}) - self._issue_api_request(acl_url, - method="put", - body=data, - api_version='2.1', - tenant=tenant) + # Grabbing only the 'path' key from each existing initiator + # within the existing acl. eacli --> existing acl initiator + eacli = [] + for acl in existing_acl['initiators']: + nacl = {} + nacl['path'] = acl['path'] + eacli.append(nacl) + data['initiators'] = eacli + data['initiators'].append({"path": initiator_path}) + # Grabbing only the 'path' key from each existing initiator + # group within the existing acl. 
eaclig --> existing + # acl initiator group + eaclig = [] + for acl in existing_acl['initiator_groups']: + nacl = {} + nacl['path'] = acl['path'] + eaclig.append(nacl) + data['initiator_groups'] = eaclig + si.acl_policy.set(tenant=tenant, **data) + if self.use_chap_auth: + for si in storage_instances: + data = {'type': 'chap', + 'target_user_name': self.chap_username, + 'target_pswd': self.chap_password} + si.auth.set(tenant=tenant, **data) # Check to ensure we're ready for go-time - self._si_poll_2_1(volume, policies, tenant) + self._si_poll_2_1(volume, si, tenant) + self._add_vol_meta_2_1(volume, connector=connector) # ================= # = Detach Volume = # ================= def _detach_volume_2_1(self, context, volume, attachment=None): - tenant = self._create_tenant(volume) - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'offline', - 'force': True - } try: - self._issue_api_request(url, method='put', body=data, - api_version='2.1', tenant=tenant) + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + # Clear out ACL for this specific attachment + si = ai.storage_instances.list(tenant=tenant)[0] + existing_acl = si.acl_policy.get(tenant=tenant) + data = {} + # Grabbing only the 'path' key from each existing initiator + # within the existing acl. eacli --> existing acl initiator + eacli = [] + for acl in existing_acl['initiators']: + if ( + attachment is not None + and attachment.connector is not None + and acl['path'].split('/')[-1] + == attachment.connector['initiator'] + ): + continue + nacl = {} + nacl['path'] = acl['path'] + eacli.append(nacl) + data['initiators'] = eacli + data['initiator_groups'] = existing_acl['initiator_groups'] + si.acl_policy.set(tenant=tenant, **data) + + if not eacli: + # bring the application instance offline if there + # are no initiators left. + data = { + 'admin_state': 'offline', + 'force': True + } + ai.set(tenant=tenant, **data) + except exception.NotFound: msg = ("Tried to detach volume %s, but it was not found in the " "Datera cluster. 
Continuing with detach.") LOG.info(msg, volume['id']) - # TODO(_alastor_): Make acl cleaning multi-attach aware - self._clean_acl_2_1(volume, tenant) - - def _check_for_acl_2_1(self, initiator_path): - """Returns True if an acl is found for initiator_path """ - # TODO(_alastor_) when we get a /initiators/:initiator/acl_policies - # endpoint use that instead of this monstrosity - initiator_groups = self._issue_api_request("initiator_groups", - api_version='2.1') - for ig, igdata in initiator_groups.items(): - if initiator_path in igdata['members']: - LOG.debug("Found initiator_group: %s for initiator: %s", - ig, initiator_path) - return True - LOG.debug("No initiator_group found for initiator: %s", initiator_path) - return False - - def _clean_acl_2_1(self, volume, tenant): - policies = self._get_policies_for_resource(volume) - - store_name, _ = self._scrape_template(policies) - - acl_url = (datc.URL_TEMPLATES["si_inst"]( - store_name) + "/acl_policy").format(datc._get_name(volume['id'])) - try: - initiator_group = self._issue_api_request( - acl_url, api_version='2.1', tenant=tenant)['data'][ - 'initiator_groups'][0]['path'] - # TODO(_alastor_): Re-enable this when we get a force-delete - # option on the /initiators endpoint - # initiator_iqn_path = self._issue_api_request( - # initiator_group.lstrip("/"), api_version='2.1', - # tenant=tenant)[ - # "data"]["members"][0]["path"] - # Clear out ACL and delete initiator group - self._issue_api_request(acl_url, - method="put", - body={'initiator_groups': []}, - api_version='2.1', - tenant=tenant) - self._issue_api_request(initiator_group.lstrip("/"), - method="delete", - api_version='2.1', - tenant=tenant) - # TODO(_alastor_): Re-enable this when we get a force-delete - # option on the /initiators endpoint - # if not self._check_for_acl_2_1(initiator_iqn_path): - # self._issue_api_request(initiator_iqn_path.lstrip("/"), - # method="delete", - # api_version='2.1', - # tenant=tenant) - except (IndexError, exception.NotFound): - LOG.debug("Did not find any initiator groups for volume: %s", - volume) # =================== # = Create Snapshot = # =================== def _create_snapshot_2_1(self, snapshot): - tenant = self._create_tenant(snapshot) - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - url_template = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - url = url_template.format(datc._get_name(snapshot['volume_id'])) + dummy_vol = {'id': snapshot['volume_id'], + 'project_id': snapshot['project_id']} + tenant = self.get_tenant(dummy_vol['project_id']) + dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) snap_params = { 'uuid': snapshot['id'], } - snap = self._issue_api_request(url, method='post', body=snap_params, - api_version='2.1', tenant=tenant) - snapu = "/".join((url, snap['data']['timestamp'])) - self._snap_poll_2_1(snapu, tenant) + snap = dvol.snapshots.create(tenant=tenant, **snap_params) + self._snap_poll_2_1(snap, tenant) # =================== # = Delete Snapshot = # =================== def _delete_snapshot_2_1(self, snapshot): - tenant = self._create_tenant(snapshot) - policies = self._get_policies_for_resource(snapshot) + # Handle case where snapshot is "managed" + dummy_vol = {'id': snapshot['volume_id'], + 'project_id': snapshot['project_id']} + tenant = self.get_tenant(dummy_vol['project_id']) + dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) - store_name, vol_name = self._scrape_template(policies) + snapshots = None - snap_temp = 
datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - snapu = snap_temp.format(datc._get_name(snapshot['volume_id'])) - snapshots = [] + # Shortcut if this is a managed snapshot + provider_location = snapshot.get('provider_location') + if provider_location: + snap = dvol.snapshots.get(provider_location, tenant=tenant) + snap.delete(tenant=tenant) + return + + # Long-way. UUID identification try: - snapshots = self._issue_api_request(snapu, - method='get', - api_version='2.1', - tenant=tenant) + snapshots = dvol.snapshots.list(tenant=tenant) except exception.NotFound: msg = ("Tried to delete snapshot %s, but parent volume %s was " "not found in Datera cluster. Continuing with delete.") LOG.info(msg, - datc._get_name(snapshot['id']), - datc._get_name(snapshot['volume_id'])) + datc.get_name(snapshot), + datc.get_name({'id': snapshot['volume_id']})) return try: - for snap in snapshots['data']: - if snap['uuid'] == snapshot['id']: - url_template = snapu + '/{}' - url = url_template.format(snap['timestamp']) - self._issue_api_request( - url, - method='delete', - api_version='2.1', - tenant=tenant) + for snap in snapshots: + if snap.uuid == snapshot['id']: + snap.delete(tenant=tenant) break else: raise exception.NotFound except exception.NotFound: msg = ("Tried to delete snapshot %s, but was not found in " "Datera cluster. Continuing with delete.") - LOG.info(msg, datc._get_name(snapshot['id'])) + LOG.info(msg, datc.get_name(snapshot)) # ======================== # = Volume From Snapshot = # ======================== def _create_volume_from_snapshot_2_1(self, volume, snapshot): - tenant = self._create_tenant(volume) - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - snap_temp = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - snapu = snap_temp.format(datc._get_name(snapshot['volume_id'])) - snapshots = self._issue_api_request( - snapu, method='get', api_version='2.1', tenant=tenant) - - for snap in snapshots['data']: - if snap['uuid'] == snapshot['id']: - found_ts = snap['utc_ts'] - break + # Handle case where snapshot is "managed" + dummy_vol = {'id': snapshot['volume_id'], + 'project_id': snapshot['project_id']} + tenant = self.get_tenant(dummy_vol['project_id']) + dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) + found_snap = None + provider_location = snapshot.get('provider_location') + if provider_location: + found_snap = dvol.snapshots.get(provider_location, tenant=tenant) else: - raise exception.NotFound + snapshots = dvol.snapshots.list(tenant=tenant) + for snap in snapshots: + if snap.uuid == snapshot['id']: + found_snap = snap + break + else: + raise exception.SnapshotNotFound(snapshot_id=snapshot['id']) - snap_url = (snap_temp + '/{}').format( - datc._get_name(snapshot['volume_id']), found_ts) + self._snap_poll_2_1(found_snap, tenant) - self._snap_poll_2_1(snap_url, tenant) - - src = "/" + snap_url + src = found_snap.path app_params = ( { 'create_mode': 'openstack', 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), + 'name': datc.get_name(volume), 'clone_snapshot_src': {'path': src}, }) - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), - method='post', - body=app_params, - api_version='2.1', - tenant=tenant) + self.api.app_instances.create(tenant=tenant, **app_params) if (volume['size'] > snapshot['volume_size']): self._extend_volume_2_1(volume, volume['size']) + self._add_vol_meta_2_1(volume) # ========== # = Retype = @@ -540,7 +482,7 @@ class 
DateraApi(object): # And that backend matches this driver old_pol = self._get_policies_for_resource(volume) new_pol = self._get_policies_for_volume_type(new_type) - if (host['capabilities']['vendor_name'].lower() == + if (host['capabilities']['volume_backend_name'].lower() == self.backend_name.lower()): LOG.debug("Starting fast volume retype") @@ -550,19 +492,26 @@ class DateraApi(object): "unsupported. Type1: %s, Type2: %s", volume['volume_type_id'], new_type) - tenant = self._create_tenant(volume) - self._update_qos_2_1(volume, new_pol, tenant) - vol_params = ( - { - 'placement_mode': new_pol['placement_mode'], - 'replica_count': new_pol['replica_count'], - }) - url = datc.URL_TEMPLATES['vol_inst']( - old_pol['default_storage_name'], - old_pol['default_volume_name']).format( - datc._get_name(volume['id'])) - self._issue_api_request(url, method='put', body=vol_params, - api_version='2.1', tenant=tenant) + self._update_qos_2_1(volume, new_pol, clear_old=True) + tenant = self.get_tenant(volume['project_id']) + dvol = self.cvol_to_dvol(volume, tenant=tenant) + # Only replica_count ip_pool requires offlining the app_instance + if (new_pol['replica_count'] != old_pol['replica_count'] or + new_pol['ip_pool'] != old_pol['ip_pool']): + with self._offline_flip_2_1(volume): + vol_params = ( + { + 'placement_mode': new_pol['placement_mode'], + 'replica_count': new_pol['replica_count'], + }) + dvol.set(tenant=tenant, **vol_params) + elif new_pol['placement_mode'] != old_pol['placement_mode']: + vol_params = ( + { + 'placement_mode': new_pol['placement_mode'], + }) + dvol.set(tenant=tenant, **vol_params) + self._add_vol_meta_2_1(volume) return True else: @@ -575,99 +524,54 @@ class DateraApi(object): def _manage_existing_2_1(self, volume, existing_ref): # Only volumes created under the requesting tenant can be managed in - # the v2.1 API. Eg. If tenant A is the tenant for the volume to be + # the v2.1+ API. Eg. If tenant A is the tenant for the volume to be # managed, it must also be tenant A that makes this request. 
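        # For illustration, the 'source-name' reference handled below is
        # expected to take one of these shapes (names are made up; the exact
        # grammar is owned by datc._parse_vol_ref() in datera_common):
        #
        #     my-app-inst:storage-1:volume-1
        #     tenant-a:my-app-inst:storage-1:volume-1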
# This will be fixed in a later API update - tenant = self._create_tenant(volume) existing_ref = existing_ref['source-name'] - if existing_ref.count(":") not in (2, 3): - raise exception.ManageExistingInvalidReference( - _("existing_ref argument must be of this format: " - "tenant:app_inst_name:storage_inst_name:vol_name or " - "app_inst_name:storage_inst_name:vol_name")) - app_inst_name = existing_ref.split(":")[0] - try: - (tenant, app_inst_name, storage_inst_name, - vol_name) = existing_ref.split(":") - except TypeError: - app_inst_name, storage_inst_name, vol_name = existing_ref.split( - ":") - tenant = None + app_inst_name, __, __, __ = datc._parse_vol_ref(existing_ref) LOG.debug("Managing existing Datera volume %s " "Changing name to %s", - datc._get_name(volume['id']), existing_ref) - data = {'name': datc._get_name(volume['id'])} - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - app_inst_name), method='put', body=data, api_version='2.1', - tenant=tenant) + datc.get_name(volume), existing_ref) + # Rename AppInstance + dummy_vol = {'id': app_inst_name, + 'project_id': volume['project_id']} + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(dummy_vol, tenant=tenant) + data = {'name': datc.get_name(volume)} + ai.set(tenant=tenant, **data) + self._add_vol_meta_2_1(volume) # =================== # = Manage Get Size = # =================== def _manage_existing_get_size_2_1(self, volume, existing_ref): - tenant = self._create_tenant(volume) existing_ref = existing_ref['source-name'] - if existing_ref.count(":") != 2: - raise exception.ManageExistingInvalidReference( - _("existing_ref argument must be of this format:" - "app_inst_name:storage_inst_name:vol_name")) - app_inst_name, si_name, vol_name = existing_ref.split(":") - app_inst = self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format(app_inst_name), - api_version='2.1', tenant=tenant) - return self._get_size_2_1( - volume, tenant, app_inst, si_name, vol_name) - - def _get_size_2_1(self, volume, tenant=None, app_inst=None, si_name=None, - vol_name=None): - """Helper method for getting the size of a backend object - - If app_inst is provided, we'll just parse the dict to get - the size instead of making a separate http request - """ - policies = self._get_policies_for_resource(volume) - si_name = si_name if si_name else policies['default_storage_name'] - vol_name = vol_name if vol_name else policies['default_volume_name'] - if not app_inst: - vol_url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - app_inst = self._issue_api_request( - vol_url, api_version='2.1', tenant=tenant)['data'] - if 'data' in app_inst: - app_inst = app_inst['data'] - sis = app_inst['storage_instances'] - found_si = None - for si in sis: - if si['name'] == si_name: - found_si = si - break - found_vol = None - for vol in found_si['volumes']: - if vol['name'] == vol_name: - found_vol = vol - size = found_vol['size'] - return size + app_inst_name, storage_inst_name, vol_name, __ = datc._parse_vol_ref( + existing_ref) + dummy_vol = {'id': app_inst_name, + 'project_id': volume['project_id']} + dvol = self.cvol_to_dvol(dummy_vol) + return dvol.size # ========================= # = Get Manageable Volume = # ========================= - def _get_manageable_volumes_2_1(self, cinder_volumes, marker, limit, - offset, sort_keys, sort_dirs): + def _list_manageable_2_1(self, cinder_volumes): # Use the first volume to determine the tenant we're working under if cinder_volumes: - tenant = 
self._create_tenant(cinder_volumes[0]) + tenant = self.get_tenant(cinder_volumes[0]['project_id']) else: tenant = None - LOG.debug("Listing manageable Datera volumes") - app_instances = self._issue_api_request( - datc.URL_TEMPLATES['ai'](), api_version='2.1', - tenant=tenant)['data'] + app_instances = self.api.app_instances.list(tenant=tenant) results = [] - cinder_volume_ids = [vol['id'] for vol in cinder_volumes] + if cinder_volumes and 'volume_id' in cinder_volumes[0]: + cinder_volume_ids = [vol['volume_id'] for vol in cinder_volumes] + else: + cinder_volume_ids = [vol['id'] for vol in cinder_volumes] for ai in app_instances: ai_name = ai['name'] @@ -676,20 +580,20 @@ class DateraApi(object): safe_to_manage = False reason_not_safe = "" cinder_id = None - extra_info = None - if re.match(datc.UUID4_RE, ai_name): - cinder_id = ai_name.lstrip(datc.OS_PREFIX) - if (not cinder_id and - ai_name.lstrip(datc.OS_PREFIX) not in cinder_volume_ids): - safe_to_manage, reason_not_safe = self._is_manageable_2_1(ai) - if safe_to_manage: - si = list(ai['storage_instances'].values())[0] - si_name = si['name'] - vol = list(si['volumes'].values())[0] - vol_name = vol['name'] - size = vol['size'] - reference = {"source-name": "{}:{}:{}".format( - ai_name, si_name, vol_name)} + extra_info = {} + (safe_to_manage, reason_not_safe, + cinder_id) = self._is_manageable_2_1( + ai, cinder_volume_ids, tenant) + si = ai.storage_instances.list(tenant=tenant)[0] + si_name = si.name + vol = si.volumes.list(tenant=tenant)[0] + vol_name = vol.name + size = vol.size + snaps = [(snap.utc_ts, snap.uuid) + for snap in vol.snapshots.list(tenant=tenant)] + extra_info["snapshots"] = json.dumps(snaps) + reference = {"source-name": "{}:{}:{}".format( + ai_name, si_name, vol_name)} results.append({ 'reference': reference, @@ -698,112 +602,335 @@ class DateraApi(object): 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info}) + return results - page_results = volume_utils.paginate_entries_list( + def _get_manageable_volumes_2_1(self, cinder_volumes, marker, limit, + offset, sort_keys, sort_dirs): + LOG.debug("Listing manageable Datera volumes") + results = self._list_manageable_2_1(cinder_volumes) + page_results = volutils.paginate_entries_list( results, marker, limit, offset, sort_keys, sort_dirs) return page_results - def _is_manageable_2_1(self, app_inst): - if len(app_inst['storage_instances']) == 1: - si = list(app_inst['storage_instances'].values())[0] + def _is_manageable_2_1(self, ai, cinder_volume_ids, tenant): + cinder_id = None + ai_name = ai.name + match = datc.UUID4_RE.match(ai_name) + if match: + cinder_id = match.group(1) + if cinder_id and cinder_id in cinder_volume_ids: + return (False, + "App Instance already managed by Cinder", + cinder_id) + if len(ai.storage_instances.list(tenant=tenant)) == 1: + si = ai.storage_instances.list(tenant=tenant)[0] if len(si['volumes']) == 1: - return (True, "") + return (True, "", cinder_id) return (False, - "App Instance has more than one storage instance or volume") + "App Instance has more than one storage instance or volume", + cinder_id) # ============ # = Unmanage = # ============ def _unmanage_2_1(self, volume): - tenant = self._create_tenant(volume) LOG.debug("Unmanaging Cinder volume %s. 
Changing name to %s", - volume['id'], datc._get_unmanaged(volume['id'])) - data = {'name': datc._get_unmanaged(volume['id'])} - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])), - method='put', - body=data, - api_version='2.1', - tenant=tenant) + volume['id'], datc.get_unmanaged(volume['id'])) + data = {'name': datc.get_unmanaged(volume['id'])} + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + ai.set(tenant=tenant, **data) - # ================ - # = Volume Stats = - # ================ + # =================== + # = Manage Snapshot = + # =================== - # ========= - # = Login = - # ========= + def _manage_existing_snapshot_2_1(self, snapshot, existing_ref): + existing_ref = existing_ref['source-name'] + datc._check_snap_ref(existing_ref) + LOG.debug("Managing existing Datera volume snapshot %s for volume %s", + existing_ref, datc.get_name({'id': snapshot['volume_id']})) + return {'provider_location': existing_ref} - # =========== - # = Tenancy = - # =========== + def _manage_existing_snapshot_get_size_2_1(self, snapshot, existing_ref): + existing_ref = existing_ref['source-name'] + datc._check_snap_ref(existing_ref) + dummy_vol = {'id': snapshot['volume_id'], + 'project_id': snapshot['project_id']} + dvol = self.cvol_to_dvol(dummy_vol) + return dvol.size - def _create_tenant(self, volume=None): - # Create the Datera tenant if specified in the config - # Otherwise use the tenant provided - if self.tenant_id is None: - tenant = None - elif self.tenant_id.lower() == "map" and volume: - # Convert dashless uuid to uuid with dashes - # Eg: 0e33e95a9b154d348c675a1d8ea5b651 --> - # 0e33e95a-9b15-4d34-8c67-5a1d8ea5b651 - tenant = datc._get_name(str(uuid.UUID(volume["project_id"]))) - elif self.tenant_id.lower() == "map" and not volume: - tenant = None - else: - tenant = self.tenant_id + def _get_manageable_snapshots_2_1(self, cinder_snapshots, marker, limit, + offset, sort_keys, sort_dirs): + LOG.debug("Listing manageable Datera snapshots") + results = self._list_manageable_2_1(cinder_snapshots) + snap_results = [] + snapids = set((snap['id'] for snap in cinder_snapshots)) + snaprefs = set((snap.get('provider_location') + for snap in cinder_snapshots)) + for volume in results: + snaps = json.loads(volume["extra_info"]["snapshots"]) + for snapshot in snaps: + reference = snapshot[0] + uuid = snapshot[1] + size = volume["size"] + safe_to_manage = True + reason_not_safe = "" + cinder_id = "" + extra_info = {} + source_reference = volume["reference"] + if uuid in snapids or reference in snaprefs: + safe_to_manage = False + reason_not_safe = _("already managed by Cinder") + elif not volume['safe_to_manage'] and not volume['cinder_id']: + safe_to_manage = False + reason_not_safe = _("parent volume not safe to manage") + snap_results.append({ + 'reference': {'source-name': reference}, + 'size': size, + 'safe_to_manage': safe_to_manage, + 'reason_not_safe': reason_not_safe, + 'cinder_id': cinder_id, + 'extra_info': extra_info, + 'source_reference': source_reference}) + page_results = volutils.paginate_entries_list( + snap_results, marker, limit, offset, sort_keys, sort_dirs) - if tenant: - params = {'name': tenant} - self._issue_api_request( - 'tenants', method='post', body=params, conflict_ok=True, - api_version='2.1') - return tenant + return page_results - # ========= - # = Login = - # ========= + def _unmanage_snapshot_2_1(self, snapshot): + return {'provider_location': None} - def _login_2_1(self): - 
"""Use the san_login and san_password to set token.""" - body = { - 'name': self.username, - 'password': self.password - } - - # Unset token now, otherwise potential expired token will be sent - # along to be used for authorization when trying to login. - self.datera_api_token = None + # ==================== + # = Fast Image Clone = + # ==================== + def _clone_image_2_1(self, context, volume, image_location, image_meta, + image_service): + # We're not going to fast image clone if the feature is not enabled + # and/or we can't reach the image being requested + if (not self.image_cache or + not self._image_accessible(context, volume, image_meta)): + return None, False + # Check to make sure we're working with a valid volume type try: - LOG.debug('Getting Datera auth token.') - results = self._issue_api_request( - 'login', 'put', body=body, sensitive=True, api_version='2.1', - tenant=None) - self.datera_api_token = results['key'] - except exception.NotAuthorized: - with excutils.save_and_reraise_exception(): - LOG.error('Logging into the Datera cluster failed. Please ' - 'check your username and password set in the ' - 'cinder.conf and start the cinder-volume ' - 'service again.') + found = volume_types.get_volume_type(context, self.image_type) + except (exception.VolumeTypeNotFound, exception.InvalidVolumeType): + found = None + if not found: + msg = "Invalid volume type: %s" + LOG.error(msg, self.image_type) + raise ValueError(_("Option datera_image_cache_volume_type_id must" + " be set to a valid volume_type id")) + # Check image format + fmt = image_meta.get('disk_format', '') + if fmt.lower() != 'raw': + LOG.debug("Image format is not RAW, image requires conversion " + "before clone. Image format: [%s]", fmt) + return None, False + + LOG.debug("Starting fast image clone") + # TODO(_alastor_): determine if Datera is already an image backend + # for this request and direct clone instead of caching + + # Dummy volume, untracked by Cinder + src_vol = {'id': image_meta['id'], + 'volume_type_id': self.image_type, + 'size': volume['size'], + 'project_id': volume['project_id']} + + # Determine if we have a cached version of the image + cached = self._vol_exists_2_1(src_vol) + + if cached: + tenant = self.get_tenant(src_vol['project_id']) + ai = self.cvol_to_ai(src_vol, tenant=tenant) + metadata = ai.metadata.get(tenant=tenant) + # Check to see if the master image has changed since we created + # The cached version + ts = self._get_vol_timestamp_2_1(src_vol) + mts = time.mktime(image_meta['updated_at'].timetuple()) + LOG.debug("Original image timestamp: %s, cache timestamp %s", + mts, ts) + # If the image is created by Glance, we'll trust that even if the + # timestamps don't match up, the data is ok to clone as it's not + # managed by this driver + if metadata.get('type') == 'image': + LOG.debug("Found Glance volume-backed image for %s", + src_vol['id']) + # If the master image time is greater than the volume creation + # time, we invalidate the cache and delete the volume. The + # exception is if the cached volume was created by Glance. We + # NEVER want to delete this volume. 
It's annotated with + # 'type': 'image' in the metadata, so we'll check for that + elif mts > ts and metadata.get('type') != 'image': + LOG.debug("Cache is older than original image, deleting cache") + cached = False + self._delete_volume_2_1(src_vol) + + # If we don't have the image, we'll cache it + if not cached: + LOG.debug("No image cache found for: %s, caching image", + image_meta['id']) + self._cache_vol_2_1(context, src_vol, image_meta, image_service) + + # Now perform the clone of the found image or newly cached image + self._create_cloned_volume_2_1(volume, src_vol) + # Force volume resize + vol_size = volume['size'] + volume['size'] = 0 + self._extend_volume_2_1(volume, vol_size) + volume['size'] = vol_size + # Determine if we need to retype the newly created volume + vtype_id = volume.get('volume_type_id') + if vtype_id and self.image_type and vtype_id != self.image_type: + vtype = volume_types.get_volume_type(context, vtype_id) + LOG.debug("Retyping newly cloned volume from type: %s to type: %s", + self.image_type, vtype_id) + diff, discard = volume_types.volume_types_diff( + context, self.image_type, vtype_id) + host = {'capabilities': {'vendor_name': self.backend_name}} + self._retype_2_1(context, volume, vtype, diff, host) + return None, True + + def _cache_vol_2_1(self, context, vol, image_meta, image_service): + image_id = image_meta['id'] + # Pull down image and determine if valid + with image_utils.TemporaryImages.fetch(image_service, + context, + image_id) as tmp_image: + data = image_utils.qemu_img_info(tmp_image) + fmt = data.file_format + if fmt is None: + raise exception.ImageUnacceptable( + reason=_("'qemu-img info' parsing failed."), + image_id=image_id) + + backing_file = data.backing_file + if backing_file is not None: + raise exception.ImageUnacceptable( + image_id=image_id, + reason=_("fmt=%(fmt)s backed by:%(backing_file)s") + % {'fmt': fmt, 'backing_file': backing_file, }) + + vsize = int( + math.ceil(float(data.virtual_size) / units.Gi)) + vol['size'] = vsize + vtype = vol['volume_type_id'] + LOG.info("Creating cached image with volume type: %(vtype)s and " + "size %(size)s", {'vtype': vtype, 'size': vsize}) + self._create_volume_2_1(vol) + with self._connect_vol(context, vol) as device: + LOG.debug("Moving image %s to volume %s", + image_meta['id'], datc.get_name(vol)) + image_utils.convert_image(tmp_image, + device, + 'raw', + run_as_root=True) + LOG.debug("Finished moving image %s to volume %s", + image_meta['id'], datc.get_name(vol)) + data = image_utils.qemu_img_info(device, run_as_root=True) + if data.file_format != 'raw': + raise exception.ImageUnacceptable( + image_id=image_id, + reason=_( + "Converted to %(vol_format)s, but format is " + "now %(file_format)s") % { + 'vol_format': 'raw', + 'file_format': data.file_format}) + # TODO(_alastor_): Remove this snapshot creation when we fix + # "created_at" attribute in the frontend + # We don't actually care about the snapshot uuid, we just want + # a single snapshot + snapshot = {'id': str(uuid.uuid4()), + 'volume_id': vol['id'], + 'project_id': vol['project_id']} + self._create_snapshot_2_1(snapshot) + metadata = {'type': 'cached_image'} + tenant = self.get_tenant(vol['project_id']) + ai = self.cvol_to_ai(vol, tenant=tenant) + ai.metadata.set(tenant=tenant, **metadata) + # Cloning offline AI is ~4 seconds faster than cloning online AI + self._detach_volume_2_1(None, vol) + + def _get_vol_timestamp_2_1(self, volume): + tenant = self.get_tenant(volume['project_id']) + dvol = self.cvol_to_dvol(volume, 
tenant=tenant) + snapshots = dvol.snapshots.list(tenant=tenant) + if len(snapshots) == 1: + return float(snapshots[0].utc_ts) + else: + # We'll return 0 if we find no snapshots (or the incorrect number) + # to ensure the timestamp comparison with the master copy fails + # since the master copy will always have a timestamp > 0. + LOG.debug("Number of snapshots found: %s", len(snapshots)) + return 0 + + def _vol_exists_2_1(self, volume): + LOG.debug("Checking if volume %s exists", volume['id']) + try: + ai = self.cvol_to_ai(volume) + LOG.debug("Volume %s exists", volume['id']) + return ai + except exception.NotFound: + LOG.debug("Volume %s not found", volume['id']) + return None + + @contextlib.contextmanager + def _connect_vol(self, context, vol): + connector = None + try: + # Start connection, get the connector object and create the + # export (ACL, IP-Pools, etc) + conn = self._initialize_connection_2_1( + vol, {'multipath': False}) + connector = utils.brick_get_connector( + conn['driver_volume_type'], + use_multipath=False, + device_scan_attempts=10, + conn=conn) + connector_info = {'initiator': connector.get_initiator()} + self._create_export_2_1(None, vol, connector_info) + retries = 10 + attach_info = conn['data'] + while True: + try: + attach_info.update( + connector.connect_volume(conn['data'])) + break + except brick_exception.FailedISCSITargetPortalLogin: + retries -= 1 + if not retries: + LOG.error("Could not log into portal before end of " + "polling period") + raise + LOG.debug("Failed to login to portal, retrying") + eventlet.sleep(2) + device_path = attach_info['path'] + yield device_path + finally: + # Close target connection + if connector: + # Best effort disconnection + try: + connector.disconnect_volume(attach_info, attach_info) + except Exception: + pass # =========== # = Polling = # =========== - def _snap_poll_2_1(self, url, tenant): + def _snap_poll_2_1(self, snap, tenant): eventlet.sleep(datc.DEFAULT_SNAP_SLEEP) TIMEOUT = 20 retry = 0 poll = True - while poll and retry < TIMEOUT: + while poll and not retry >= TIMEOUT: retry += 1 - snap = self._issue_api_request(url, - api_version='2.1', - tenant=tenant)['data'] - if snap['op_state'] == 'available': + snap = snap.reload(tenant=tenant) + if snap.op_state == 'available': poll = False else: eventlet.sleep(1) @@ -811,21 +938,16 @@ class DateraApi(object): raise exception.VolumeDriverException( message=_('Snapshot not ready.')) - def _si_poll_2_1(self, volume, policies, tenant): + def _si_poll_2_1(self, volume, si, tenant): # Initial 4 second sleep required for some Datera versions eventlet.sleep(datc.DEFAULT_SI_SLEEP) TIMEOUT = 10 retry = 0 - check_url = datc.URL_TEMPLATES['si_inst']( - policies['default_storage_name']).format( - datc._get_name(volume['id'])) poll = True - while poll and retry < TIMEOUT: + while poll and not retry >= TIMEOUT: retry += 1 - si = self._issue_api_request(check_url, - api_version='2.1', - tenant=tenant)['data'] - if si['op_state'] == 'available': + si = si.reload(tenant=tenant) + if si.op_state == 'available': poll = False else: eventlet.sleep(1) @@ -838,12 +960,13 @@ class DateraApi(object): # ================ def _get_volume_stats_2_1(self, refresh=False): + # cluster_stats is defined by datera_iscsi + # pylint: disable=access-member-before-definition if refresh or not self.cluster_stats: try: LOG.debug("Updating cluster stats info.") - results = self._issue_api_request( - 'system', api_version='2.1')['data'] + results = self.api.system.get() if 'uuid' not in results: LOG.error( @@ -855,9 
+978,9 @@ class DateraApi(object): 'driver_version': self.VERSION, 'storage_protocol': 'iSCSI', 'total_capacity_gb': ( - int(results['total_capacity']) / units.Gi), + int(results.total_capacity) / units.Gi), 'free_capacity_gb': ( - int(results['available_capacity']) / units.Gi), + int(results.available_capacity) / units.Gi), 'reserved_percentage': 0, 'QoS_support': True, } @@ -871,43 +994,105 @@ class DateraApi(object): # = QoS = # ======= - def _update_qos_2_1(self, resource, policies, tenant): - url = datc.URL_TEMPLATES['vol_inst']( - policies['default_storage_name'], - policies['default_volume_name']) + '/performance_policy' - url = url.format(datc._get_name(resource['id'])) - type_id = resource.get('volume_type_id', None) + def _update_qos_2_1(self, volume, policies, clear_old=False): + tenant = self.get_tenant(volume['project_id']) + dvol = self.cvol_to_dvol(volume, tenant=tenant) + type_id = volume.get('volume_type_id', None) if type_id is not None: + iops_per_gb = int(policies.get('iops_per_gb', 0)) + bandwidth_per_gb = int(policies.get('bandwidth_per_gb', 0)) # Filter for just QOS policies in result. All of their keys # should end with "max" fpolicies = {k: int(v) for k, v in policies.items() if k.endswith("max")} # Filter all 0 values from being passed - fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items())) + fpolicies = {k: int(v) for k, v in + fpolicies.items() if v > 0} + # Calculate and set iops/gb and bw/gb, but only if they don't + # exceed total_iops_max and total_bw_max aren't set since they take + # priority + if iops_per_gb: + ipg = iops_per_gb * volume['size'] + # Not using zero, because zero means unlimited + im = fpolicies.get('total_iops_max', 1) + r = ipg + if ipg > im: + r = im + fpolicies['total_iops_max'] = r + if bandwidth_per_gb: + bpg = bandwidth_per_gb * volume['size'] + # Not using zero, because zero means unlimited + bm = fpolicies.get('total_bandwidth_max', 1) + r = bpg + if bpg > bm: + r = bm + fpolicies['total_bandwidth_max'] = r + if fpolicies or clear_old: + try: + pp = dvol.performance_policy.get(tenant=tenant) + pp.delete(tenant=tenant) + except dexceptions.ApiNotFoundError: + LOG.debug("No existing performance policy found") if fpolicies: - self._issue_api_request(url, 'delete', api_version='2.1', - tenant=tenant) - self._issue_api_request(url, 'post', body=fpolicies, - api_version='2.1', tenant=tenant) + dvol.performance_policy.create(tenant=tenant, **fpolicies) # ============ # = IP Pools = # ============ - def _get_ip_pool_for_string_ip_2_1(self, ip): + def _get_ip_pool_for_string_ip_2_1(self, ip, tenant): """Takes a string ipaddress and return the ip_pool API object dict """ pool = 'default' ip_obj = ipaddress.ip_address(six.text_type(ip)) - ip_pools = self._issue_api_request('access_network_ip_pools', - api_version='2.1') - for ipdata in ip_pools['data']: + ip_pools = self.api.access_network_ip_pools.list(tenant=tenant) + for ipdata in ip_pools: for adata in ipdata['network_paths']: if not adata.get('start_ip'): continue pool_if = ipaddress.ip_interface( "/".join((adata['start_ip'], str(adata['netmask'])))) if ip_obj in pool_if.network: - pool = ipdata['name'] - return self._issue_api_request( - "access_network_ip_pools/{}".format(pool), - api_version='2.1')['path'] + pool = ipdata.name + return self.api.access_network_ip_pools.get(pool, tenant=tenant).path + # ==================== + # = Volume Migration = + # ==================== + + def _update_migrated_volume_2_1(self, context, volume, new_volume, + volume_status): + """Rename 
the newly created volume to the original volume. + + So we can find it correctly. + """ + tenant = self.get_tenant(new_volume['project_id']) + ai = self.cvol_to_ai(new_volume, tenant=tenant) + data = {'name': datc.get_name(volume)} + ai.set(tenant=tenant, **data) + return {'_name_id': None} + + @contextlib.contextmanager + def _offline_flip_2_1(self, volume): + reonline = False + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + if ai.admin_state == 'online': + reonline = True + ai.set(tenant=tenant, admin_state='offline') + yield + if reonline: + ai.set(tenant=tenant, admin_state='online') + + def _add_vol_meta_2_1(self, volume, connector=None): + if not self.do_metadata: + return + metadata = {'host': volume.get('host', ''), + 'display_name': datc.filter_chars( + volume.get('display_name', '')), + 'bootable': str(volume.get('bootable', False)), + 'availability_zone': volume.get('availability_zone', '')} + if connector: + metadata.update(connector) + LOG.debug("Adding volume metadata: %s", metadata) + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + ai.metadata.set(tenant=tenant, **metadata) diff --git a/cinder/volume/drivers/datera/datera_api22.py b/cinder/volume/drivers/datera/datera_api22.py new file mode 100644 index 00000000000..cfd6c21af18 --- /dev/null +++ b/cinder/volume/drivers/datera/datera_api22.py @@ -0,0 +1,1175 @@ +# Copyright 2020 Datera +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import ipaddress +import math +import random +import time +import uuid + +import eventlet +from os_brick import exception as brick_exception +from oslo_log import log as logging +from oslo_serialization import jsonutils as json +from oslo_utils import importutils +from oslo_utils import units +import six + +from cinder import exception +from cinder.i18n import _ +from cinder.image import image_utils +from cinder import utils +import cinder.volume.drivers.datera.datera_common as datc +from cinder.volume import volume_types +from cinder.volume import volume_utils as volutils + +LOG = logging.getLogger(__name__) + +dexceptions = importutils.try_import('dfs_sdk.exceptions') + +API_VERSION = "2.2" + + +# The DateraAPI classes (2.1, 2.2) are enhanced by datera_common's lookup() +# decorator which generates members run-time. 
Therefore on the class we disable +# pylint's no-member check pylint: disable=no-member +class DateraApi(object): + + # ================= + # = Create Volume = + # ================= + + def _create_volume_2_2(self, volume): + policies = self._get_policies_for_resource(volume) + num_replicas = int(policies['replica_count']) + storage_name = 'storage-1' + volume_name = 'volume-1' + template = policies['template'] + placement = policies['placement_mode'] + ppolicy = policies['placement_policy'] + ip_pool = datc.get_ip_pool(policies) + + name = datc.get_name(volume) + + if template: + app_params = ( + { + 'create_mode': 'openstack', + # 'uuid': str(volume['id']), + 'name': name, + 'app_template': {'path': '/app_templates/{}'.format( + template)} + }) + if self._support_template_override_2_2(): + app_params['template_override'] = { + 'storage_instances': { + storage_name: { + 'volumes': { + volume_name: { + 'size': str(volume['size'])}}}}} + + else: + + app_params = ( + { + 'create_mode': 'openstack', + 'uuid': str(volume['id']), + 'name': name, + 'access_control_mode': 'deny_all', + 'storage_instances': [ + { + 'name': storage_name, + 'ip_pool': {'path': ('/access_network_ip_pools/' + '{}'.format(ip_pool))}, + 'volumes': [ + { + 'name': volume_name, + 'size': volume['size'], + 'replica_count': num_replicas, + 'snapshot_policies': [ + ] + } + ] + } + ] + }) + create_vol = app_params['storage_instances'][0]['volumes'][0] + if datc.dat_version_gte(self.datera_version, '3.3.0.0'): + create_vol['placement_policy'] = { + 'path': '/placement_policies/{}'.format(ppolicy)} + else: + create_vol['placement_mode'] = placement + + tenant = self.create_tenant(volume['project_id']) + self.api.app_instances.create(tenant=tenant, **app_params) + self._update_qos_2_2(volume, policies) + self._add_vol_meta_2_2(volume) + + # ================= + # = Extend Volume = + # ================= + + def _extend_volume_2_2(self, volume, new_size): + if volume['size'] >= new_size: + LOG.warning("Volume size not extended due to original size being " + "greater or equal to new size. Original: " + "%(original)s, New: %(new)s", + {'original': volume['size'], + 'new': new_size}) + return + policies = self._get_policies_for_resource(volume) + template = policies['template'] + if template and not self._support_template_override_2_2(): + LOG.warning("Volume size not extended due to template binding. 
" + "Template override is supported in product versions " + "3.3.X+: volume: %(volume)s, template: %(template)s", + {'volume': volume, 'template': template}) + return + + with self._offline_flip_2_2(volume): + # Change Volume Size + tenant = self.get_tenant(volume['project_id']) + dvol = self.cvol_to_dvol(volume, tenant) + dvol.set(tenant=tenant, size=new_size) + + # ================= + # = Cloned Volume = + # ================= + + def _create_cloned_volume_2_2(self, volume, src_vref): + tenant = self.get_tenant(volume['project_id']) + sdvol = self.cvol_to_dvol(src_vref, tenant=tenant) + src = sdvol.path + data = { + 'create_mode': 'openstack', + 'name': datc.get_name(volume), + 'uuid': str(volume['id']), + 'clone_volume_src': {'path': src}, + } + tenant = self.get_tenant(volume['project_id']) + self.api.app_instances.create(tenant=tenant, **data) + + if volume['size'] > src_vref['size']: + self._extend_volume_2_2(volume, volume['size']) + self._add_vol_meta_2_2(volume) + + # ================= + # = Delete Volume = + # ================= + + def _delete_volume_2_2(self, volume): + try: + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + si = ai.storage_instances.list(tenant=tenant)[0] + + # Clear out ACL + acl = si.acl_policy.get(tenant=tenant) + acl.set(tenant=tenant, initiators=[]) + + # Bring volume offline + data = { + 'admin_state': 'offline', + 'force': True + } + ai.set(tenant=tenant, **data) + + ai.delete(tenant=tenant, force=True) + except exception.NotFound: + msg = ("Tried to delete volume %s, but it was not found in the " + "Datera cluster. Continuing with delete.") + LOG.info(msg, datc.get_name(volume)) + + # ================= + # = Ensure Export = + # ================= + + def _ensure_export_2_2(self, context, volume, connector=None): + pass + + # ========================= + # = Initialize Connection = + # ========================= + + def _initialize_connection_2_2(self, volume, connector): + # Now online the app_instance (which will online all storage_instances) + multipath = connector.get('multipath', False) + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + data = { + 'admin_state': 'online' + } + ai.set(tenant=tenant, **data) + si = ai.storage_instances.list(tenant=tenant)[0] + + # randomize portal chosen + choice = 0 + policies = self._get_policies_for_resource(volume) + if policies["round_robin"]: + choice = random.randint(0, 1) + portal = si.access['ips'][choice] + ':3260' + iqn = si.access['iqn'] + if multipath: + portals = [p + ':3260' for p in si.access['ips']] + iqns = [iqn for _ in si.access['ips']] + lunids = [self._get_lunid() for _ in si.access['ips']] + + result = { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': False, + 'target_iqn': iqn, + 'target_iqns': iqns, + 'target_portal': portal, + 'target_portals': portals, + 'target_lun': self._get_lunid(), + 'target_luns': lunids, + 'volume_id': volume['id'], + 'discard': False}} + else: + result = { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': False, + 'target_iqn': iqn, + 'target_portal': portal, + 'target_lun': self._get_lunid(), + 'volume_id': volume['id'], + 'discard': False}} + + if self.use_chap_auth: + result['data'].update( + auth_method="CHAP", + auth_username=self.chap_username, + auth_password=self.chap_password) + + return result + + # ================= + # = Create Export = + # ================= + + def _create_export_2_2(self, context, volume, connector): + tenant = 
self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + data = { + 'admin_state': 'offline', + 'force': True + } + ai.set(tenant=tenant, **data) + si = ai.storage_instances.list(tenant=tenant)[0] + policies = self._get_policies_for_resource(volume) + if connector and connector.get('ip'): + # Case where volume_type has non default IP Pool info + ip_pool = datc.get_ip_pool(policies) + if ip_pool != 'default': + initiator_ip_pool_path = self.api.access_network_ip_pools.get( + ip_pool).path + # Fallback to trying reasonable IP based guess + else: + initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_2( + connector['ip'], tenant) + + ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}} + if not ai.app_template["path"]: + si.set(tenant=tenant, **ip_pool_data) + data = { + 'admin_state': 'online' + } + ai.set(tenant=tenant, **data) + # Check if we've already setup everything for this volume + storage_instances = ai.storage_instances.list(tenant=tenant) + # Handle adding initiator to product if necessary + # Then add initiator to ACL + if connector and connector.get('initiator'): + initiator_name = "OpenStack-{}".format(str(uuid.uuid4())[:8]) + initiator = connector['initiator'] + dinit = None + try: + # We want to make sure the initiator is created under the + # current tenant rather than using the /root one + dinit = self.api.initiators.get(initiator, tenant=tenant) + if dinit.tenant != tenant: + raise dexceptions.ApiNotFoundError( + "Initiator {} was not found under tenant {} " + "[{} != {}]".format( + initiator, tenant, dinit.tenant, tenant)) + except dexceptions.ApiNotFoundError: + # TODO(_alastor_): Take out the 'force' flag when we fix + # DAT-15931 + data = {'id': initiator, 'name': initiator_name, 'force': True} + # Try and create the initiator + # If we get a conflict, ignore it + try: + dinit = self.api.initiators.create(tenant=tenant, **data) + except dexceptions.ApiConflictError: + pass + initiator_path = dinit['path'] + # Create ACL with initiator group as reference for each + # storage_instance in app_instance + # TODO(_alastor_): We need to avoid changing the ACLs if the + # template already specifies an ACL policy. + for si in storage_instances: + existing_acl = si.acl_policy.get(tenant=tenant) + data = {} + # Grabbing only the 'path' key from each existing initiator + # within the existing acl. eacli --> existing acl initiator + eacli = [] + for acl in existing_acl['initiators']: + nacl = {} + nacl['path'] = acl['path'] + eacli.append(nacl) + data['initiators'] = eacli + data['initiators'].append({"path": initiator_path}) + # Grabbing only the 'path' key from each existing initiator + # group within the existing acl. 
eaclig --> existing + # acl initiator group + eaclig = [] + for acl in existing_acl['initiator_groups']: + nacl = {} + nacl['path'] = acl['path'] + eaclig.append(nacl) + data['initiator_groups'] = eaclig + si.acl_policy.set(tenant=tenant, **data) + if self.use_chap_auth: + for si in storage_instances: + data = {'type': 'chap', + 'target_user_name': self.chap_username, + 'target_pswd': self.chap_password} + si.auth.set(tenant=tenant, **data) + # Check to ensure we're ready for go-time + self._si_poll_2_2(volume, si, tenant) + self._add_vol_meta_2_2(volume, connector=connector) + + # ================= + # = Detach Volume = + # ================= + + def _detach_volume_2_2(self, context, volume, attachment=None): + try: + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + # Clear out ACL for this specific attachment + si = ai.storage_instances.list(tenant=tenant)[0] + existing_acl = si.acl_policy.get(tenant=tenant) + data = {} + # Grabbing only the 'path' key from each existing initiator + # within the existing acl. eacli --> existing acl initiator + eacli = [] + for acl in existing_acl['initiators']: + if ( + attachment is not None + and attachment.connector is not None + and acl['path'].split('/')[-1] + == attachment.connector['initiator'] + ): + continue + nacl = {} + nacl['path'] = acl['path'] + eacli.append(nacl) + data['initiators'] = eacli + data['initiator_groups'] = existing_acl['initiator_groups'] + si.acl_policy.set(tenant=tenant, **data) + + if not eacli: + # bring the application instance offline if there + # are no initiators left. + data = { + 'admin_state': 'offline', + 'force': True + } + ai.set(tenant=tenant, **data) + + except exception.NotFound: + msg = ("Tried to detach volume %s, but it was not found in the " + "Datera cluster. Continuing with detach.") + LOG.info(msg, volume['id']) + + # =================== + # = Create Snapshot = + # =================== + + def _create_snapshot_2_2(self, snapshot): + + dummy_vol = {'id': snapshot['volume_id'], + 'project_id': snapshot['project_id']} + tenant = self.get_tenant(dummy_vol['project_id']) + dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) + snap_params = { + 'uuid': snapshot['id'], + } + snap = dvol.snapshots.create(tenant=tenant, **snap_params) + self._snap_poll_2_2(snap, tenant) + + # =================== + # = Delete Snapshot = + # =================== + + def _delete_snapshot_2_2(self, snapshot): + # Handle case where snapshot is "managed" + dummy_vol = {'id': snapshot['volume_id'], + 'project_id': snapshot['project_id']} + tenant = self.get_tenant(dummy_vol['project_id']) + dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) + + snapshots = None + + # Shortcut if this is a managed snapshot + provider_location = snapshot.get('provider_location') + if provider_location: + snap = dvol.snapshots.get(provider_location, tenant=tenant) + snap.delete(tenant=tenant) + return + + # Long-way. UUID identification + try: + snapshots = dvol.snapshots.list(tenant=tenant) + except exception.NotFound: + msg = ("Tried to delete snapshot %s, but parent volume %s was " + "not found in Datera cluster. Continuing with delete.") + LOG.info(msg, + datc.get_name(snapshot), + datc.get_name({'id': snapshot['volume_id']})) + return + + try: + for snap in snapshots: + if snap.uuid == snapshot['id']: + snap.delete(tenant=tenant) + break + else: + raise exception.NotFound + except exception.NotFound: + msg = ("Tried to delete snapshot %s, but was not found in " + "Datera cluster. 
Continuing with delete.") + LOG.info(msg, datc.get_name(snapshot)) + + # ======================== + # = Volume From Snapshot = + # ======================== + + def _create_volume_from_snapshot_2_2(self, volume, snapshot): + # Handle case where snapshot is "managed" + dummy_vol = {'id': snapshot['volume_id'], + 'project_id': snapshot['project_id']} + tenant = self.get_tenant(dummy_vol['project_id']) + dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) + found_snap = None + provider_location = snapshot.get('provider_location') + if provider_location: + found_snap = dvol.snapshots.get(provider_location, tenant=tenant) + else: + snapshots = dvol.snapshots.list(tenant=tenant) + for snap in snapshots: + if snap.uuid == snapshot['id']: + found_snap = snap + break + else: + raise exception.SnapshotNotFound(snapshot_id=snapshot['id']) + + self._snap_poll_2_2(found_snap, tenant) + + src = found_snap.path + app_params = ( + { + 'create_mode': 'openstack', + 'uuid': str(volume['id']), + 'name': datc.get_name(volume), + 'clone_snapshot_src': {'path': src}, + }) + + self.api.app_instances.create(tenant=tenant, **app_params) + if (volume['size'] > snapshot['volume_size']): + self._extend_volume_2_2(volume, volume['size']) + self._add_vol_meta_2_2(volume) + + # ========== + # = Retype = + # ========== + + def _retype_2_2(self, ctxt, volume, new_type, diff, host): + LOG.debug("Retype called\n" + "Volume: %(volume)s\n" + "NewType: %(new_type)s\n" + "Diff: %(diff)s\n" + "Host: %(host)s\n", {'volume': volume, 'new_type': new_type, + 'diff': diff, 'host': host}) + # We'll take the fast route only if the types share the same backend + # And that backend matches this driver + old_pol = self._get_policies_for_resource(volume) + new_pol = self._get_policies_for_volume_type(new_type) + if (host['capabilities']['volume_backend_name'].lower() == + self.backend_name.lower()): + LOG.debug("Starting fast volume retype") + + if old_pol.get('template') or new_pol.get('template'): + LOG.warning( + "Fast retyping between template-backed volume-types " + "unsupported. 
Type1: %s, Type2: %s", + volume['volume_type_id'], new_type) + + self._update_qos_2_2(volume, new_pol, clear_old=True) + tenant = self.get_tenant(volume['project_id']) + dvol = self.cvol_to_dvol(volume, tenant=tenant) + # Only replica_count ip_pool requires offlining the app_instance + if (new_pol['replica_count'] != old_pol['replica_count'] or + new_pol['ip_pool'] != old_pol['ip_pool']): + with self._offline_flip_2_2(volume): + # ip_pool is Storage Instance level + ai = self.cvol_to_ai(volume, tenant=tenant) + si = ai.storage_instances.list(tenant=tenant)[0] + ip_pool = datc.get_ip_pool(new_pol) + si_params = ( + { + 'ip_pool': {'path': ('/access_network_ip_pools/' + '{}'.format(ip_pool))}, + }) + si.set(tenant=tenant, **si_params) + # placement_mode and replica_count are Volume level + vol_params = ( + { + 'placement_mode': new_pol['placement_mode'], + 'replica_count': new_pol['replica_count'], + }) + if datc.dat_version_gte(self.datera_version, '3.3.0.0'): + ppolicy = {'path': '/placement_policies/{}'.format( + new_pol.get('placement_policy'))} + vol_params['placement_policy'] = ppolicy + dvol.set(tenant=tenant, **vol_params) + elif (new_pol['placement_mode'] != old_pol[ + 'placement_mode'] or new_pol[ + 'placement_policy'] != old_pol['placement_policy']): + vol_params = ( + { + 'placement_mode': new_pol['placement_mode'], + }) + if datc.dat_version_gte(self.datera_version, '3.3.0.0'): + ppolicy = {'path': '/placement_policies/{}'.format( + new_pol.get('placement_policy'))} + vol_params['placement_policy'] = ppolicy + dvol.set(tenant=tenant, **vol_params) + self._add_vol_meta_2_2(volume) + return True + + else: + LOG.debug("Couldn't fast-retype volume between specified types") + return False + + # ========== + # = Manage = + # ========== + + def _manage_existing_2_2(self, volume, existing_ref): + # Only volumes created under the requesting tenant can be managed in + # the v2.1+ API. Eg. If tenant A is the tenant for the volume to be + # managed, it must also be tenant A that makes this request. 
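+ # For illustration only (the names below are hypothetical): an existing_ref
+ # such as {'source-name': 'tenant-a:my-app-inst:storage-1:volume-1'} or
+ # 'my-app-inst:storage-1:volume-1' is accepted; see datc._parse_vol_ref()
+ # for the exact formats. The tenant used for the lookup further down is
+ # resolved from volume['project_id'].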
+ # This will be fixed in a later API update + existing_ref = existing_ref['source-name'] + app_inst_name, __, __, __ = datc._parse_vol_ref(existing_ref) + LOG.debug("Managing existing Datera volume %s " + "Changing name to %s", + datc.get_name(volume), existing_ref) + # Rename AppInstance + dummy_vol = {'id': app_inst_name, + 'project_id': volume['project_id']} + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(dummy_vol, tenant=tenant) + data = {'name': datc.get_name(volume)} + ai.set(tenant=tenant, **data) + self._add_vol_meta_2_2(volume) + + # =================== + # = Manage Get Size = + # =================== + + def _manage_existing_get_size_2_2(self, volume, existing_ref): + existing_ref = existing_ref['source-name'] + app_inst_name, storage_inst_name, vol_name, __ = datc._parse_vol_ref( + existing_ref) + dummy_vol = {'id': app_inst_name, + 'project_id': volume['project_id']} + dvol = self.cvol_to_dvol(dummy_vol) + return dvol.size + + # ========================= + # = Get Manageable Volume = + # ========================= + + def _list_manageable_2_2(self, cinder_volumes): + # Use the first volume to determine the tenant we're working under + if cinder_volumes: + tenant = self.get_tenant(cinder_volumes[0]['project_id']) + else: + tenant = None + app_instances = self.api.app_instances.list(tenant=tenant) + + results = [] + + if cinder_volumes and 'volume_id' in cinder_volumes[0]: + cinder_volume_ids = [vol['volume_id'] for vol in cinder_volumes] + else: + cinder_volume_ids = [vol['id'] for vol in cinder_volumes] + + for ai in app_instances: + ai_name = ai['name'] + reference = None + size = None + safe_to_manage = False + reason_not_safe = "" + cinder_id = None + extra_info = {} + (safe_to_manage, reason_not_safe, + cinder_id) = self._is_manageable_2_2( + ai, cinder_volume_ids, tenant) + si = ai.storage_instances.list(tenant=tenant)[0] + si_name = si.name + vol = si.volumes.list(tenant=tenant)[0] + vol_name = vol.name + size = vol.size + snaps = [(snap.utc_ts, snap.uuid) + for snap in vol.snapshots.list(tenant=tenant)] + extra_info["snapshots"] = json.dumps(snaps) + reference = {"source-name": "{}:{}:{}".format( + ai_name, si_name, vol_name)} + + results.append({ + 'reference': reference, + 'size': size, + 'safe_to_manage': safe_to_manage, + 'reason_not_safe': reason_not_safe, + 'cinder_id': cinder_id, + 'extra_info': extra_info}) + return results + + def _get_manageable_volumes_2_2(self, cinder_volumes, marker, limit, + offset, sort_keys, sort_dirs): + LOG.debug("Listing manageable Datera volumes") + results = self._list_manageable_2_2(cinder_volumes) + page_results = volutils.paginate_entries_list( + results, marker, limit, offset, sort_keys, sort_dirs) + + return page_results + + def _is_manageable_2_2(self, ai, cinder_volume_ids, tenant): + cinder_id = None + ai_name = ai.name + match = datc.UUID4_RE.match(ai_name) + if match: + cinder_id = match.group(1) + if cinder_id and cinder_id in cinder_volume_ids: + return (False, + "App Instance already managed by Cinder", + cinder_id) + if len(ai.storage_instances.list(tenant=tenant)) == 1: + si = ai.storage_instances.list(tenant=tenant)[0] + if len(si['volumes']) == 1: + return (True, "", cinder_id) + return (False, + "App Instance has more than one storage instance or volume", + cinder_id) + # ============ + # = Unmanage = + # ============ + + def _unmanage_2_2(self, volume): + LOG.debug("Unmanaging Cinder volume %s. 
Changing name to %s", + volume['id'], datc.get_unmanaged(volume['id'])) + data = {'name': datc.get_unmanaged(volume['id'])} + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + ai.set(tenant=tenant, **data) + + # =================== + # = Manage Snapshot = + # =================== + + def _manage_existing_snapshot_2_2(self, snapshot, existing_ref): + existing_ref = existing_ref['source-name'] + datc._check_snap_ref(existing_ref) + LOG.debug("Managing existing Datera volume snapshot %s for volume %s", + existing_ref, datc.get_name({'id': snapshot['volume_id']})) + return {'provider_location': existing_ref} + + def _manage_existing_snapshot_get_size_2_2(self, snapshot, existing_ref): + existing_ref = existing_ref['source-name'] + datc._check_snap_ref(existing_ref) + dummy_vol = {'id': snapshot['volume_id'], + 'project_id': snapshot['project_id']} + dvol = self.cvol_to_dvol(dummy_vol) + return dvol.size + + def _get_manageable_snapshots_2_2(self, cinder_snapshots, marker, limit, + offset, sort_keys, sort_dirs): + LOG.debug("Listing manageable Datera snapshots") + results = self._list_manageable_2_2(cinder_snapshots) + snap_results = [] + snapids = set((snap['id'] for snap in cinder_snapshots)) + snaprefs = set((snap.get('provider_location') + for snap in cinder_snapshots)) + for volume in results: + snaps = json.loads(volume["extra_info"]["snapshots"]) + for snapshot in snaps: + reference = snapshot[0] + uuid = snapshot[1] + size = volume["size"] + safe_to_manage = True + reason_not_safe = "" + cinder_id = "" + extra_info = {} + source_reference = volume["reference"] + if uuid in snapids or reference in snaprefs: + safe_to_manage = False + reason_not_safe = _("already managed by Cinder") + elif not volume['safe_to_manage'] and not volume['cinder_id']: + safe_to_manage = False + reason_not_safe = _("parent volume not safe to manage") + snap_results.append({ + 'reference': {'source-name': reference}, + 'size': size, + 'safe_to_manage': safe_to_manage, + 'reason_not_safe': reason_not_safe, + 'cinder_id': cinder_id, + 'extra_info': extra_info, + 'source_reference': source_reference}) + page_results = volutils.paginate_entries_list( + snap_results, marker, limit, offset, sort_keys, sort_dirs) + + return page_results + + def _unmanage_snapshot_2_2(self, snapshot): + return {'provider_location': None} + + # ==================== + # = Fast Image Clone = + # ==================== + + def _clone_image_2_2(self, context, volume, image_location, image_meta, + image_service): + # We're not going to fast image clone if the feature is not enabled + # and/or we can't reach the image being requested + if (not self.image_cache or + not self._image_accessible(context, volume, image_meta)): + return None, False + # Check to make sure we're working with a valid volume type + try: + found = volume_types.get_volume_type(context, self.image_type) + except (exception.VolumeTypeNotFound, exception.InvalidVolumeType): + found = None + if not found: + msg = "Invalid volume type: %s" + LOG.error(msg, self.image_type) + raise ValueError(_("Option datera_image_cache_volume_type_id must" + " be set to a valid volume_type id")) + # Check image format + fmt = image_meta.get('disk_format', '') + if fmt.lower() != 'raw': + LOG.debug("Image format is not RAW, image requires conversion " + "before clone. 
Image format: [%s]", fmt) + return None, False + + LOG.debug("Starting fast image clone") + # TODO(_alastor_): determine if Datera is already an image backend + # for this request and direct clone instead of caching + + # Dummy volume, untracked by Cinder + src_vol = {'id': image_meta['id'], + 'volume_type_id': self.image_type, + 'size': volume['size'], + 'project_id': volume['project_id']} + + # Determine if we have a cached version of the image + cached = self._vol_exists_2_2(src_vol) + + if cached: + tenant = self.get_tenant(src_vol['project_id']) + ai = self.cvol_to_ai(src_vol, tenant=tenant) + metadata = ai.metadata.get(tenant=tenant) + # Check to see if the master image has changed since we created + # The cached version + ts = self._get_vol_timestamp_2_2(src_vol) + mts = time.mktime(image_meta['updated_at'].timetuple()) + LOG.debug("Original image timestamp: %s, cache timestamp %s", + mts, ts) + # If the image is created by Glance, we'll trust that even if the + # timestamps don't match up, the data is ok to clone as it's not + # managed by this driver + if metadata.get('type') == 'image': + LOG.debug("Found Glance volume-backed image for %s", + src_vol['id']) + # If the master image time is greater than the volume creation + # time, we invalidate the cache and delete the volume. The + # exception is if the cached volume was created by Glance. We + # NEVER want to delete this volume. It's annotated with + # 'type': 'image' in the metadata, so we'll check for that + elif mts > ts and metadata.get('type') != 'image': + LOG.debug("Cache is older than original image, deleting cache") + cached = False + self._delete_volume_2_2(src_vol) + + # If we don't have the image, we'll cache it + if not cached: + LOG.debug("No image cache found for: %s, caching image", + image_meta['id']) + self._cache_vol_2_2(context, src_vol, image_meta, image_service) + + # Now perform the clone of the found image or newly cached image + self._create_cloned_volume_2_2(volume, src_vol) + # Force volume resize + vol_size = volume['size'] + volume['size'] = 0 + self._extend_volume_2_2(volume, vol_size) + volume['size'] = vol_size + # Determine if we need to retype the newly created volume + vtype_id = volume.get('volume_type_id') + if vtype_id and self.image_type and vtype_id != self.image_type: + vtype = volume_types.get_volume_type(context, vtype_id) + LOG.debug("Retyping newly cloned volume from type: %s to type: %s", + self.image_type, vtype_id) + diff, discard = volume_types.volume_types_diff( + context, self.image_type, vtype_id) + host = {'capabilities': {'vendor_name': self.backend_name}} + self._retype_2_2(context, volume, vtype, diff, host) + return None, True + + def _cache_vol_2_2(self, context, vol, image_meta, image_service): + image_id = image_meta['id'] + # Pull down image and determine if valid + with image_utils.TemporaryImages.fetch(image_service, + context, + image_id) as tmp_image: + data = image_utils.qemu_img_info(tmp_image) + fmt = data.file_format + if fmt is None: + raise exception.ImageUnacceptable( + reason=_("'qemu-img info' parsing failed."), + image_id=image_id) + + backing_file = data.backing_file + if backing_file is not None: + raise exception.ImageUnacceptable( + image_id=image_id, + reason=_("fmt=%(fmt)s backed by:%(backing_file)s") + % {'fmt': fmt, 'backing_file': backing_file, }) + + vsize = int( + math.ceil(float(data.virtual_size) / units.Gi)) + vol['size'] = vsize + vtype = vol['volume_type_id'] + LOG.info("Creating cached image with volume type: %(vtype)s and " + "size 
%(size)s", {'vtype': vtype, 'size': vsize}) + self._create_volume_2_2(vol) + with self._connect_vol(context, vol) as device: + LOG.debug("Moving image %s to volume %s", + image_meta['id'], datc.get_name(vol)) + image_utils.convert_image(tmp_image, + device, + 'raw', + run_as_root=True) + LOG.debug("Finished moving image %s to volume %s", + image_meta['id'], datc.get_name(vol)) + data = image_utils.qemu_img_info(device, run_as_root=True) + if data.file_format != 'raw': + raise exception.ImageUnacceptable( + image_id=image_id, + reason=_( + "Converted to %(vol_format)s, but format is " + "now %(file_format)s") % { + 'vol_format': 'raw', + 'file_format': data.file_format}) + # TODO(_alastor_): Remove this snapshot creation when we fix + # "created_at" attribute in the frontend + # We don't actually care about the snapshot uuid, we just want + # a single snapshot + snapshot = {'id': str(uuid.uuid4()), + 'volume_id': vol['id'], + 'project_id': vol['project_id']} + self._create_snapshot_2_2(snapshot) + metadata = {'type': 'cached_image'} + tenant = self.get_tenant(vol['project_id']) + ai = self.cvol_to_ai(vol, tenant=tenant) + ai.metadata.set(tenant=tenant, **metadata) + # Cloning offline AI is ~4 seconds faster than cloning online AI + self._detach_volume_2_2(None, vol) + + def _get_vol_timestamp_2_2(self, volume): + tenant = self.get_tenant(volume['project_id']) + dvol = self.cvol_to_dvol(volume, tenant=tenant) + snapshots = dvol.snapshots.list(tenant=tenant) + if len(snapshots) == 1: + return float(snapshots[0].utc_ts) + else: + # We'll return 0 if we find no snapshots (or the incorrect number) + # to ensure the timestamp comparison with the master copy fails + # since the master copy will always have a timestamp > 0. + LOG.debug("Number of snapshots found: %s", len(snapshots)) + return 0 + + def _vol_exists_2_2(self, volume): + LOG.debug("Checking if volume %s exists", volume['id']) + try: + ai = self.cvol_to_ai(volume) + LOG.debug("Volume %s exists", volume['id']) + return ai + except exception.NotFound: + LOG.debug("Volume %s not found", volume['id']) + return None + + @contextlib.contextmanager + def _connect_vol(self, context, vol): + connector = None + try: + # Start connection, get the connector object and create the + # export (ACL, IP-Pools, etc) + conn = self._initialize_connection_2_2( + vol, {'multipath': False}) + connector = utils.brick_get_connector( + conn['driver_volume_type'], + use_multipath=False, + device_scan_attempts=10, + conn=conn) + connector_info = {'initiator': connector.get_initiator()} + self._create_export_2_2(None, vol, connector_info) + retries = 10 + attach_info = conn['data'] + while True: + try: + attach_info.update( + connector.connect_volume(conn['data'])) + break + except brick_exception.FailedISCSITargetPortalLogin: + retries -= 1 + if not retries: + LOG.error("Could not log into portal before end of " + "polling period") + raise + LOG.debug("Failed to login to portal, retrying") + eventlet.sleep(2) + device_path = attach_info['path'] + yield device_path + finally: + # Close target connection + if connector: + # Best effort disconnection + try: + connector.disconnect_volume(attach_info, attach_info) + except Exception: + pass + + # =========== + # = Polling = + # =========== + + def _snap_poll_2_2(self, snap, tenant): + eventlet.sleep(datc.DEFAULT_SNAP_SLEEP) + TIMEOUT = 20 + retry = 0 + poll = True + while poll and not retry >= TIMEOUT: + retry += 1 + snap = snap.reload(tenant=tenant) + if snap.op_state == 'available': + poll = False + else: + 
eventlet.sleep(1) + if retry >= TIMEOUT: + raise exception.VolumeDriverException( + message=_('Snapshot not ready.')) + + def _si_poll_2_2(self, volume, si, tenant): + # Initial 4 second sleep required for some Datera versions + eventlet.sleep(datc.DEFAULT_SI_SLEEP) + TIMEOUT = 10 + retry = 0 + poll = True + while poll and not retry >= TIMEOUT: + retry += 1 + si = si.reload(tenant=tenant) + if si.op_state == 'available': + poll = False + else: + eventlet.sleep(1) + if retry >= TIMEOUT: + raise exception.VolumeDriverException( + message=_('Resource not ready.')) + + # ================ + # = Volume Stats = + # ================ + + def _get_volume_stats_2_2(self, refresh=False): + # cluster_stats is defined by datera_iscsi + # pylint: disable=access-member-before-definition + if refresh or not self.cluster_stats: + try: + LOG.debug("Updating cluster stats info.") + + results = self.api.system.get() + self.datera_version = results.sw_version + + if 'uuid' not in results: + LOG.error( + 'Failed to get updated stats from Datera Cluster.') + + stats = { + 'volume_backend_name': self.backend_name, + 'vendor_name': 'Datera', + 'driver_version': self.VERSION, + 'storage_protocol': 'iSCSI', + 'total_capacity_gb': ( + int(results.total_capacity) / units.Gi), + 'free_capacity_gb': ( + int(results.available_capacity) / units.Gi), + 'total_flash_capacity_gb': ( + int(results.all_flash_total_capacity) / units.Gi), + 'total_hybrid_capacity_gb': ( + int(results.hybrid_total_capacity) / units.Gi), + 'free_flash_capacity_gb': ( + int(results.all_flash_available_capacity) / units.Gi), + 'free_hybrid_capacity_gb': ( + int(results.hybrid_available_capacity) / units.Gi), + 'reserved_percentage': 0, + 'QoS_support': True, + 'compression': results.get('compression_enabled', False), + 'compression_ratio': results.get('compression_ratio', '0'), + 'l3_enabled': results.get('l3_enabled', False), + 'filter_function': self.filterf, + 'goodness_function': self.goodnessf + } + + self.cluster_stats = stats + except exception.DateraAPIException: + LOG.error('Failed to get updated stats from Datera cluster.') + return self.cluster_stats + + # ======= + # = QoS = + # ======= + + def _update_qos_2_2(self, volume, policies, clear_old=False): + tenant = self.get_tenant(volume['project_id']) + dvol = self.cvol_to_dvol(volume, tenant=tenant) + type_id = volume.get('volume_type_id', None) + if type_id is not None: + iops_per_gb = int(policies.get('iops_per_gb', 0)) + bandwidth_per_gb = int(policies.get('bandwidth_per_gb', 0)) + # Filter for just QOS policies in result. 
All of their keys + # should end with "max" + fpolicies = {k: int(v) for k, v in + policies.items() if k.endswith("max")} + # Filter all 0 values from being passed + fpolicies = {k: int(v) for k, v in + fpolicies.items() if v > 0} + # Calculate and set iops/gb and bw/gb, capping them at + # total_iops_max and total_bandwidth_max when those are set, since + # the explicit maximums take priority + if iops_per_gb: + ipg = iops_per_gb * volume['size'] + # Not using zero, because zero means unlimited + im = fpolicies.get('total_iops_max', 1) + r = ipg + if ipg > im: + r = im + fpolicies['total_iops_max'] = r + if bandwidth_per_gb: + bpg = bandwidth_per_gb * volume['size'] + # Not using zero, because zero means unlimited + bm = fpolicies.get('total_bandwidth_max', 1) + r = bpg + if bpg > bm: + r = bm + fpolicies['total_bandwidth_max'] = r + if fpolicies or clear_old: + try: + pp = dvol.performance_policy.get(tenant=tenant) + pp.delete(tenant=tenant) + except dexceptions.ApiNotFoundError: + LOG.debug("No existing performance policy found") + if fpolicies: + dvol.performance_policy.create(tenant=tenant, **fpolicies) + + # ============ + # = IP Pools = + # ============ + + def _get_ip_pool_for_string_ip_2_2(self, ip, tenant): + """Takes a string IP address and returns the matching ip_pool API path.""" + pool = 'default' + ip_obj = ipaddress.ip_address(six.text_type(ip)) + ip_pools = self.api.access_network_ip_pools.list(tenant=tenant) + for ipdata in ip_pools: + for adata in ipdata['network_paths']: + if not adata.get('start_ip'): + continue + pool_if = ipaddress.ip_interface( + "/".join((adata['start_ip'], str(adata['netmask'])))) + if ip_obj in pool_if.network: + pool = ipdata.name + return self.api.access_network_ip_pools.get(pool, tenant=tenant).path + # ==================== + # = Volume Migration = + # ==================== + + def _update_migrated_volume_2_2(self, context, volume, new_volume, + volume_status): + """Rename the newly created volume to the original volume. + + So we can find it correctly. + """ + tenant = self.get_tenant(new_volume['project_id']) + ai = self.cvol_to_ai(new_volume, tenant=tenant) + data = {'name': datc.get_name(volume)} + ai.set(tenant=tenant, **data) + return {'_name_id': None} + + @contextlib.contextmanager + def _offline_flip_2_2(self, volume): + reonline = False + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + if ai.admin_state == 'online': + reonline = True + ai.set(tenant=tenant, admin_state='offline') + yield + if reonline: + ai.set(tenant=tenant, admin_state='online') + + def _add_vol_meta_2_2(self, volume, connector=None): + if not self.do_metadata: + return + metadata = {'host': volume.get('host', ''), + 'display_name': datc.filter_chars( + volume.get('display_name', '')), + 'bootable': str(volume.get('bootable', False)), + 'availability_zone': volume.get('availability_zone', '')} + if connector: + metadata.update(connector) + LOG.debug("Adding volume metadata: %s", metadata) + tenant = self.get_tenant(volume['project_id']) + ai = self.cvol_to_ai(volume, tenant=tenant) + ai.metadata.set(tenant=tenant, **metadata) + + def _support_template_override_2_2(self): + # Getting the whole api schema is expensive + # so we only want to do this once per driver + # instantiation.
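+ # The result is memoized on the driver instance (self._to_22 below), so
+ # the API schema is fetched at most once per driver instantiation and
+ # later calls just return the cached boolean.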
+ if not self.template_override: + return False + if not hasattr(self, '_to_22'): + api = self.api.api.get() + prop = api['/app_instances']['create']['bodyParamSchema'][ + 'properties'] + self._to_22 = 'template_override' in prop + return self._to_22 diff --git a/cinder/volume/drivers/datera/datera_common.py b/cinder/volume/drivers/datera/datera_common.py index bd008dd3806..a9ec14065be 100644 --- a/cinder/volume/drivers/datera/datera_common.py +++ b/cinder/volume/drivers/datera/datera_common.py @@ -1,4 +1,4 @@ -# Copyright 2017 Datera +# Copyright 2020 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,228 +14,138 @@ # under the License. import functools -import json +import random import re +import string import time import types import uuid -import eventlet +from glanceclient import exc as glance_exc from oslo_log import log as logging -import requests -import six -from six.moves import http_client +from oslo_utils import importutils from cinder import context from cinder import exception from cinder.i18n import _ +from cinder.image import glance from cinder.volume import qos_specs from cinder.volume import volume_types - LOG = logging.getLogger(__name__) -OS_PREFIX = "OS-" -UNMANAGE_PREFIX = "UNMANAGED-" + +dfs_sdk = importutils.try_import('dfs_sdk') + +OS_PREFIX = "OS" +UNMANAGE_PREFIX = "UNMANAGED" # Taken from this SO post : # http://stackoverflow.com/a/18516125 # Using old-style string formatting because of the nature of the regex # conflicting with new-style curly braces -UUID4_STR_RE = ("%s[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]" - "[a-f0-9]{3}-?[a-f0-9]{12}") +UUID4_STR_RE = ("%s.*([a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]" + "[a-f0-9]{3}-?[a-f0-9]{12})") UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX) +SNAP_RE = re.compile(r"\d{10,}\.\d+") # Recursive dict to assemble basic url structure for the most common # API URL endpoints. Most others are constructed from these -URL_TEMPLATES = { - 'ai': lambda: 'app_instances', - 'ai_inst': lambda: (URL_TEMPLATES['ai']() + '/{}'), - 'si': lambda: (URL_TEMPLATES['ai_inst']() + '/storage_instances'), - 'si_inst': lambda storage_name: ( - (URL_TEMPLATES['si']() + '/{}').format( - '{}', storage_name)), - 'vol': lambda storage_name: ( - (URL_TEMPLATES['si_inst'](storage_name) + '/volumes')), - 'vol_inst': lambda storage_name, volume_name: ( - (URL_TEMPLATES['vol'](storage_name) + '/{}').format( - '{}', volume_name)), - 'at': lambda: 'app_templates/{}'} - DEFAULT_SI_SLEEP = 1 DEFAULT_SI_SLEEP_API_2 = 5 DEFAULT_SNAP_SLEEP = 1 -INITIATOR_GROUP_PREFIX = "IG-" -API_VERSIONS = ["2", "2.1"] +API_VERSIONS = ["2.1", "2.2"] API_TIMEOUT = 20 -############### -# METADATA KEYS -############### - -M_TYPE = 'cinder_volume_type' -M_CALL = 'cinder_calls' -M_CLONE = 'cinder_clone_from' -M_MANAGED = 'cinder_managed' - -M_KEYS = [M_TYPE, M_CALL, M_CLONE, M_MANAGED] +VALID_CHARS = set(string.ascii_letters + string.digits + "-_.") class DateraAPIException(exception.VolumeBackendAPIException): message = _("Bad response from Datera API") -def _get_name(name): - return "".join((OS_PREFIX, name)) +def get_name(resource): + dn = resource.get('display_name') + cid = resource.get('id') + if dn: + dn = filter_chars(dn) + # Check to ensure the name is short enough to fit. 
Prioritize + # the prefix and Cinder ID, strip all invalid characters + nl = len(OS_PREFIX) + len(dn) + len(cid) + 2 + if nl >= 64: + dn = dn[:-(nl - 63)] + return "-".join((OS_PREFIX, dn, cid)) + return "-".join((OS_PREFIX, cid)) -def _get_unmanaged(name): - return "".join((UNMANAGE_PREFIX, name)) +def get_unmanaged(name): + return "-".join((UNMANAGE_PREFIX, name)) -def _authenticated(func): - """Ensure the driver is authenticated to make a request. - - In do_setup() we fetch an auth token and store it. If that expires when - we do API request, we'll fetch a new one. - """ - @functools.wraps(func) - def func_wrapper(driver, *args, **kwargs): - try: - return func(driver, *args, **kwargs) - except exception.NotAuthorized: - # Prevent recursion loop. After the driver arg is the - # resource_type arg from _issue_api_request(). If attempt to - # login failed, we should just give up. - if args[0] == 'login': - raise - - # Token might've expired, get a new one, try again. - driver.login() - return func(driver, *args, **kwargs) - return func_wrapper +def filter_chars(s): + if s: + return ''.join([c for c in s if c in VALID_CHARS]) + return s -def _api_lookup(func): - """Perform a dynamic API implementation lookup for a call - - Naming convention follows this pattern: - - # original_func(args) --> _original_func_X_?Y?(args) - # where X and Y are the major and minor versions of the latest - # supported API version - - # From the Datera box we've determined that it supports API - # versions ['2', '2.1'] - # This is the original function call - @_api_lookup - def original_func(arg1, arg2): - print("I'm a shim, this won't get executed!") - pass - - # This is the function that is actually called after determining - # the correct API version to use - def _original_func_2_1(arg1, arg2): - some_version_2_1_implementation_here() - - # This is the function that would be called if the previous function - # did not exist: - def _original_func_2(arg1, arg2): - some_version_2_implementation_here() - - # This function would NOT be called, because the connected Datera box - # does not support the 1.5 version of the API - def _original_func_1_5(arg1, arg2): - some_version_1_5_implementation_here() - """ +def lookup(func): @functools.wraps(func) def wrapper(*args, **kwargs): obj = args[0] - api_versions = _get_supported_api_versions(obj) - api_version = None - index = -1 - while True: - try: - api_version = api_versions[index] - except (IndexError, KeyError): - msg = _("No compatible API version found for this product: " - "api_versions -> %(api_version)s, %(func)s") - LOG.error(msg, api_version=api_version, func=func) - raise DateraAPIException(msg % { - 'api_version': api_version, 'func': func}) - # Py27 - try: - name = "_" + "_".join( - (func.func_name, api_version.replace(".", "_"))) - # Py3+ - except AttributeError: - name = "_" + "_".join( - (func.__name__, api_version.replace(".", "_"))) - try: - if obj.do_profile: - LOG.info("Trying method: %s", name) - call_id = uuid.uuid4() - LOG.debug("Profiling method: %s, id %s", name, call_id) - t1 = time.time() - obj.thread_local.trace_id = call_id - result = getattr(obj, name)(*args[1:], **kwargs) - if obj.do_profile: - t2 = time.time() - timedelta = round(t2 - t1, 3) - LOG.debug("Profile for method %s, id %s: %ss", - name, call_id, timedelta) - return result - except AttributeError as e: - # If we find the attribute name in the error message - # then we continue otherwise, raise to prevent masking - # errors - if name not in six.text_type(e): - raise - else: - 
LOG.info(e) - index -= 1 - except DateraAPIException as e: - if "UnsupportedVersionError" in six.text_type(e): - index -= 1 - else: - raise - + name = "_" + func.__name__ + "_" + obj.apiv.replace(".", "_") + LOG.debug("Trying method: %s", name) + call_id = uuid.uuid4() + if obj.do_profile: + LOG.debug("Profiling method: %s, id %s", name, call_id) + t1 = time.time() + obj.thread_local.trace_id = call_id + result = getattr(obj, name)(*args[1:], **kwargs) + if obj.do_profile: + t2 = time.time() + timedelta = round(t2 - t1, 3) + LOG.debug("Profile for method %s, id %s: %ss", + name, call_id, timedelta) + return result return wrapper -def _get_supported_api_versions(driver): - t = time.time() - if driver.api_cache and driver.api_timeout - t < API_TIMEOUT: - return driver.api_cache - driver.api_timeout = t + API_TIMEOUT - results = [] - host = driver.configuration.san_ip - port = driver.configuration.datera_api_port - client_cert = driver.configuration.driver_client_cert - client_cert_key = driver.configuration.driver_client_cert_key - cert_data = None - header = {'Content-Type': 'application/json; charset=utf-8', - 'Datera-Driver': 'OpenStack-Cinder-{}'.format(driver.VERSION)} - protocol = 'http' - if client_cert: - protocol = 'https' - cert_data = (client_cert, client_cert_key) +def _parse_vol_ref(ref): + if ref.count(":") not in (2, 3): + raise exception.ManageExistingInvalidReference( + _("existing_ref argument must be of this format: " + "tenant:app_inst_name:storage_inst_name:vol_name or " + "app_inst_name:storage_inst_name:vol_name")) try: - url = '%s://%s:%s/api_versions' % (protocol, host, port) - resp = driver._request(url, "get", None, header, cert_data) - data = resp.json() - results = [elem.strip("v") for elem in data['api_versions']] - except (DateraAPIException, KeyError): - # Fallback to pre-endpoint logic - for version in API_VERSIONS[0:-1]: - url = '%s://%s:%s/v%s' % (protocol, host, port, version) - resp = driver._request(url, "get", None, header, cert_data) - if ("api_req" in resp.json() or - str(resp.json().get("code")) == "99"): - results.append(version) - else: - LOG.error("No supported API versions available, " - "Please upgrade your Datera EDF software") - return results + (tenant, app_inst_name, storage_inst_name, + vol_name) = ref.split(":") + if tenant == "root": + tenant = None + except (TypeError, ValueError): + app_inst_name, storage_inst_name, vol_name = ref.split( + ":") + tenant = None + return app_inst_name, storage_inst_name, vol_name, tenant + + +def _check_snap_ref(ref): + if not SNAP_RE.match(ref): + raise exception.ManageExistingInvalidReference( + _("existing_ref argument must be of this format: " + "1234567890.12345678")) + return True + + +def _get_size(app_inst): + """Helper method for getting the size of a backend object + + If app_inst is provided, we'll just parse the dict to get + the size instead of making a separate http request + """ + if 'data' in app_inst: + app_inst = app_inst['data'] + sis = app_inst['storage_instances'] + found_si = sis[0] + found_vol = found_si['volumes'][0] + return found_vol['size'] def _get_volume_type_obj(driver, resource): @@ -251,16 +161,20 @@ def _get_volume_type_obj(driver, resource): def _get_policies_for_resource(driver, resource): + volume_type = driver._get_volume_type_obj(resource) + return driver._get_policies_for_volume_type(volume_type) + + +def _get_policies_for_volume_type(driver, volume_type): """Get extra_specs and qos_specs of a volume_type. This fetches the scoped keys from the volume type. 
Anything set from qos_specs will override key/values set from extra_specs. """ - volume_type = driver._get_volume_type_obj(resource) # Handle case of volume with no type. We still want the # specified defaults from above if volume_type: - specs = volume_type.get('extra_specs') + specs = volume_type.get('extra_specs', {}) else: specs = {} @@ -269,19 +183,19 @@ def _get_policies_for_resource(driver, resource): in driver._init_vendor_properties()[0].items()} if volume_type: - # Populate updated value - for key, value in specs.items(): - if ':' in key: - fields = key.split(':') - key = fields[1] - policies[key] = value qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is not None: ctxt = context.get_admin_context() qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] if qos_kvs: - policies.update(qos_kvs) + specs.update(qos_kvs) + # Populate updated value + for key, value in specs.items(): + if ':' in key: + fields = key.split(':') + key = fields[1] + policies[key] = value # Cast everything except booleans int that can be cast for k, v in policies.items(): # Handle String Boolean case @@ -296,199 +210,152 @@ def _get_policies_for_resource(driver, resource): return policies -# ================ -# = API Requests = -# ================ - -def _request(driver, connection_string, method, payload, header, cert_data): - LOG.debug("Endpoint for Datera API call: %s", connection_string) - LOG.debug("Payload for Datera API call: %s", payload) - try: - response = getattr(requests, method)(connection_string, - data=payload, headers=header, - verify=False, cert=cert_data) - return response - except requests.exceptions.RequestException as ex: - msg = _( - 'Failed to make a request to Datera cluster endpoint due ' - 'to the following reason: %s') % six.text_type( - ex.message) - LOG.error(msg) - raise DateraAPIException(msg) +def _image_accessible(driver, context, volume, image_meta): + # Determine if image is accessible by current project + pid = volume.get('project_id', '') + public = False + visibility = image_meta.get('visibility', None) + LOG.debug("Image %(image)s visibility: %(vis)s", + {"image": image_meta['id'], "vis": visibility}) + if visibility and visibility in ['public', 'community']: + public = True + elif visibility and visibility in ['shared', 'private']: + # Do membership check. 
Newton and before didn't have a 'shared' + # visibility option, so we have to do this check for 'private' + # as well + gclient = glance.get_default_image_service() + members = [] + # list_members is only available in Rocky+ + try: + members = gclient.list_members(context, image_meta['id']) + except AttributeError: + # This is the fallback method for the same query + try: + members = gclient._client.call(context, + 'list', + controller='image_members', + image_id=image_meta['id']) + except glance_exc.HTTPForbidden as e: + LOG.warning(e) + except glance_exc.HTTPForbidden as e: + LOG.warning(e) + members = list(members) + LOG.debug("Shared image %(image)s members: %(members)s", + {"image": image_meta['id'], "members": members}) + for member in members: + if (member['member_id'] == pid and + member['status'] == 'accepted'): + public = True + break + if image_meta.get('is_public', False): + public = True + else: + if image_meta.get('owner', '') == pid: + public = True + if not public: + LOG.warning("Requested image is not " + "accessible by current Project.") + return public -def _raise_response(driver, response): - msg = _('Request to Datera cluster returned bad status:' - ' %(status)s | %(reason)s') % { - 'status': response.status_code, - 'reason': response.reason} - LOG.error(msg) - raise DateraAPIException(msg) +def _format_tenant(tenant): + if tenant == "all" or (tenant and ('/root' in tenant or 'root' in tenant)): + return '/root' + elif tenant and ('/root' not in tenant and 'root' not in tenant): + return "/" + "/".join(('root', tenant)).strip('/') + return tenant -def _handle_bad_status(driver, - response, - connection_string, - method, - payload, - header, - cert_data, - sensitive=False, - conflict_ok=False): - if (response.status_code == http_client.BAD_REQUEST and - connection_string.endswith("api_versions")): - # Raise the exception, but don't log any error. We'll just fall - # back to the old style of determining API version. 
We make this - # request a lot, so logging it is just noise - raise DateraAPIException - if response.status_code == http_client.NOT_FOUND: - raise exception.NotFound(response.json()['message']) - elif response.status_code in [http_client.FORBIDDEN, - http_client.UNAUTHORIZED]: - raise exception.NotAuthorized() - elif response.status_code == http_client.CONFLICT and conflict_ok: - # Don't raise, because we're expecting a conflict - pass - elif response.status_code == http_client.SERVICE_UNAVAILABLE: - current_retry = 0 - while current_retry <= driver.retry_attempts: - LOG.debug("Datera 503 response, trying request again") - eventlet.sleep(driver.interval) - resp = driver._request(connection_string, - method, - payload, - header, - cert_data) - if resp.ok: - return response.json() - elif resp.status_code != http_client.SERVICE_UNAVAILABLE: - driver._raise_response(resp) +def get_ip_pool(policies): + ip_pool = policies['ip_pool'] + if ',' in ip_pool: + ip_pools = ip_pool.split(',') + ip_pool = random.choice(ip_pools) + return ip_pool + + +def create_tenant(driver, project_id): + if driver.tenant_id.lower() == 'map': + name = get_name({'id': project_id}) + elif driver.tenant_id: + name = driver.tenant_id.replace('root', '').strip('/') else: - driver._raise_response(response) + name = 'root' + if name: + try: + driver.api.tenants.create(name=name) + except dfs_sdk.exceptions.ApiConflictError: + LOG.debug("Tenant {} already exists".format(name)) + return _format_tenant(name) -@_authenticated -def _issue_api_request(driver, resource_url, method='get', body=None, - sensitive=False, conflict_ok=False, - api_version='2', tenant=None): - """All API requests to Datera cluster go through this method. +def get_tenant(driver, project_id): + if driver.tenant_id.lower() == 'map': + return _format_tenant(get_name({'id': project_id})) + elif not driver.tenant_id: + return _format_tenant('root') + return _format_tenant(driver.tenant_id) - :param resource_url: the url of the resource - :param method: the request verb - :param body: a dict with options for the action_type - :param sensitive: Bool, whether request should be obscured from logs - :param conflict_ok: Bool, True to suppress ConflictError exceptions - during this request - :param api_version: The Datera api version for the request - :param tenant: The tenant header value for the request (only applicable - to 2.1 product versions and later) - :returns: a dict of the response from the Datera cluster - """ - host = driver.configuration.san_ip - port = driver.configuration.datera_api_port - api_token = driver.datera_api_token - payload = json.dumps(body, ensure_ascii=False) - payload.encode('utf-8') +def cvol_to_ai(driver, resource, tenant=None): + if not tenant: + tenant = get_tenant(driver, resource['project_id']) + try: + # api.tenants.get needs a non '/'-prefixed tenant id + driver.api.tenants.get(tenant.strip('/')) + except dfs_sdk.exceptions.ApiNotFoundError: + create_tenant(driver, resource['project_id']) + cid = resource.get('id', None) + if not cid: + raise ValueError('Unsure what id key to use for object', resource) + ais = driver.api.app_instances.list( + filter='match(name,.*{}.*)'.format(cid), + tenant=tenant) + if not ais: + raise exception.VolumeNotFound(volume_id=cid) + return ais[0] - header = {'Content-Type': 'application/json; charset=utf-8'} - header.update(driver.HEADER_DATA) - protocol = 'http' - if driver.configuration.driver_use_ssl: - protocol = 'https' +def cvol_to_dvol(driver, resource, tenant=None): + if not tenant: + tenant = 
get_tenant(driver, resource['project_id']) + ai = cvol_to_ai(driver, resource, tenant=tenant) + si = ai.storage_instances.list(tenant=tenant)[0] + vol = si.volumes.list(tenant=tenant)[0] + return vol - if api_token: - header['Auth-Token'] = api_token - if tenant == "all": - header['tenant'] = tenant - elif tenant and '/root' not in tenant: - header['tenant'] = "".join(("/root/", tenant)) - elif tenant and '/root' in tenant: - header['tenant'] = tenant - elif driver.tenant_id and driver.tenant_id.lower() != "map": - header['tenant'] = driver.tenant_id +def _version_to_int(ver): + # Using a factor of 100 per digit so up to 100 versions are supported + # per major/minor/patch/subpatch digit in this calculation + # Example: + # In [2]: _version_to_int("3.3.0.0") + # Out[2]: 303000000 + # In [3]: _version_to_int("2.2.7.1") + # Out[3]: 202070100 + VERSION_DIGITS = 4 + factor = pow(10, VERSION_DIGITS * 2) + div = pow(10, 2) + val = 0 + for c in ver.split("."): + val += int(int(c) * factor) + factor /= div + return val - client_cert = driver.configuration.driver_client_cert - client_cert_key = driver.configuration.driver_client_cert_key - cert_data = None - if client_cert: - protocol = 'https' - cert_data = (client_cert, client_cert_key) - - connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port, - api_version, resource_url) - - request_id = uuid.uuid4() - - if driver.do_profile: - t1 = time.time() - if not sensitive: - LOG.debug("\nDatera Trace ID: %(tid)s\n" - "Datera Request ID: %(rid)s\n" - "Datera Request URL: /v%(api)s/%(url)s\n" - "Datera Request Method: %(method)s\n" - "Datera Request Payload: %(payload)s\n" - "Datera Request Headers: %(header)s\n", - {'tid': driver.thread_local.trace_id, - 'rid': request_id, - 'api': api_version, - 'url': resource_url, - 'method': method, - 'payload': payload, - 'header': header}) - response = driver._request(connection_string, - method, - payload, - header, - cert_data) - - data = response.json() - - timedelta = "Profiling disabled" - if driver.do_profile: - t2 = time.time() - timedelta = round(t2 - t1, 3) - if not sensitive: - LOG.debug("\nDatera Trace ID: %(tid)s\n" - "Datera Response ID: %(rid)s\n" - "Datera Response TimeDelta: %(delta)ss\n" - "Datera Response URL: %(url)s\n" - "Datera Response Payload: %(payload)s\n" - "Datera Response Object: %(obj)s\n", - {'tid': driver.thread_local.trace_id, - 'rid': request_id, - 'delta': timedelta, - 'url': response.url, - 'payload': payload, - 'obj': vars(response)}) - if not response.ok: - driver._handle_bad_status(response, - connection_string, - method, - payload, - header, - cert_data, - conflict_ok=conflict_ok) - - return data +def dat_version_gte(version_a, version_b): + return _version_to_int(version_a) >= _version_to_int(version_b) def register_driver(driver): - for func in [_get_supported_api_versions, - _get_volume_type_obj, + for func in [_get_volume_type_obj, _get_policies_for_resource, - _request, - _raise_response, - _handle_bad_status, - _issue_api_request]: - # PY27 + _get_policies_for_volume_type, + _image_accessible, + get_tenant, + create_tenant, + cvol_to_ai, + cvol_to_dvol]: f = types.MethodType(func, driver) - try: - setattr(driver, func.func_name, f) - # PY3+ - except AttributeError: - setattr(driver, func.__name__, f) + setattr(driver, func.__name__, f) diff --git a/cinder/volume/drivers/datera/datera_iscsi.py b/cinder/volume/drivers/datera/datera_iscsi.py index 9364a613585..b7ea0c92b74 100644 --- a/cinder/volume/drivers/datera/datera_iscsi.py +++ 
b/cinder/volume/drivers/datera/datera_iscsi.py @@ -1,4 +1,4 @@ -# Copyright 2017 Datera +# Copyright 2020 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -19,28 +19,34 @@ import uuid from eventlet.green import threading from oslo_config import cfg from oslo_log import log as logging +from oslo_utils import importutils import six from cinder import exception from cinder.i18n import _ +from cinder import interface from cinder import utils -from cinder.volume import configuration -import cinder.volume.drivers.datera.datera_api2 as api2 import cinder.volume.drivers.datera.datera_api21 as api21 +import cinder.volume.drivers.datera.datera_api22 as api22 import cinder.volume.drivers.datera.datera_common as datc from cinder.volume.drivers.san import san - LOG = logging.getLogger(__name__) +dfs_sdk = importutils.try_import('dfs_sdk') + d_opts = [ cfg.StrOpt('datera_api_port', default='7717', + deprecated_for_removal=True, help='Datera API port.'), cfg.StrOpt('datera_api_version', - default='2', + default='2.2', deprecated_for_removal=True, help='Datera API version.'), + cfg.StrOpt('datera_ldap_server', + default=None, + help='LDAP authentication server'), cfg.IntOpt('datera_503_timeout', default='120', help='Timeout for HTTP 503 retry messages'), @@ -58,25 +64,51 @@ d_opts = [ default=None, help="If set to 'Map' --> OpenStack project ID will be mapped " "implicitly to Datera tenant ID\n" - "If set to 'None' --> Datera tenant ID will not be used " + "If set to None --> Datera tenant ID will not be used " "during volume provisioning\n" "If set to anything else --> Datera tenant ID will be the " "provided value"), + cfg.BoolOpt('datera_enable_image_cache', + default=False, + help="Set to True to enable Datera backend image caching"), + cfg.StrOpt('datera_image_cache_volume_type_id', + default=None, + help="Cinder volume type id to use for cached volumes"), cfg.BoolOpt('datera_disable_profiler', default=False, help="Set to True to disable profiling in the Datera driver"), + cfg.BoolOpt('datera_disable_extended_metadata', + default=False, + help="Set to True to disable sending additional metadata to " + "the Datera backend"), + cfg.BoolOpt('datera_disable_template_override', + default=False, + help="Set to True to disable automatic template override of " + "the size attribute when creating from a template"), + cfg.DictOpt('datera_volume_type_defaults', + default={}, + help="Settings here will be used as volume-type defaults if " + "the volume-type setting is not provided. This can be " + "used, for example, to set a very low total_iops_max " + "value if none is specified in the volume-type to " + "prevent accidental overusage. Options are specified " + "via the following format, WITHOUT ANY 'DF:' PREFIX: " + "'datera_volume_type_defaults=" + "iops_per_gb:100,bandwidth_per_gb:200...etc'."), ] CONF = cfg.CONF CONF.import_opt('driver_use_ssl', 'cinder.volume.driver') -CONF.register_opts(d_opts, group=configuration.SHARED_CONF_GROUP) +CONF.register_opts(d_opts) @six.add_metaclass(utils.TraceWrapperWithABCMetaclass) -class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): +@interface.volumedriver +class DateraDriver(san.SanISCSIDriver, api21.DateraApi, api22.DateraApi): + """The OpenStack Datera iSCSI volume driver. - """The OpenStack Datera Driver + .. 
code-block:: none Version history: * 1.0 - Initial driver @@ -91,21 +123,85 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): * 2.3.1 - Scalability bugfixes * 2.3.2 - Volume Placement, ACL multi-attach bugfix * 2.4.0 - Fast Retype Support + * 2.5.0 - Glance Image Caching, retyping/QoS bugfixes + * 2.6.0 - Api 2.2 support + * 2.6.1 - Glance interoperability fix + * 2.7.0 - IOPS/GB and BW/GB settings, driver level overrides + (API 2.1+ only) + * 2.7.2 - Allowing DF: QoS Spec prefix, QoS type leak bugfix + * 2.7.3 - Fixed bug in clone_image where size was not set correctly + * 2.7.4 - Fix for create_tenant incorrect API call + Temporary fix for DAT-15931 + * 2.7.5 - Removed "force" parameter from /initiators v2.1 API requests + * 2.8.0 - iops_per_gb and bandwidth_per_gb are now limited by + total_iops_max and total_bandwidth_max (API 2.1+ only) + Bugfix for cinder retype with online volume + * 2.8.1 - Bugfix for missing default dict during retype + * 2.8.2 - Updated most retype operations to not detach volume + * 2.8.3 - Bugfix for not allowing fast clones for shared/community + volumes + * 2.8.4 - Fixed missing API version pinning in _offline_flip + * 2.8.5 - Membership check for fast image cloning. Metadata API pinning + * 2.8.6 - Added LDAP support and CHAP support + * 2.8.7 - Bugfix for missing tenancy calls in offline_flip + * 2.9.0 - Volumes now correctly renamed during backend migration. + Implemented update_migrated_volume (API 2.1+ only), + Prevent non-raw image cloning + * 2.9.1 - Added extended metadata attributes during volume creation + and attachment. Added datera_disable_extended_metadata + option to disable it. + * 2.9.2 - Made ensure_export a no-op. Removed usage of + initiator-groups + * 2018.4.5.0 - Switch to new date-based versioning scheme. Removed v2 + API support + * 2018.4.17.1 - Bugfixes to IP Pools, Templates and Initiators + * 2018.4.25.0 - Snapshot Manage. List Manageable Snapshots support + * 2018.4.27.0 - Major driver revamp/restructure, no functionality + change + * 2018.5.1.0 - Bugfix for Map tenant auto-creation + * 2018.5.18.0 - Bugfix for None tenant handling + * 2018.6.7.0 - Bugfix for missing project_id during image clone + * 2018.7.13.0 - Massive update porting to use the Datera Python-SDK + * 2018.7.20.0 - Driver now includes display_name in created backend + app_instances. + * 2018.9.17.0 - Requirements and doc changes + * 2018.10.8.0 - Added extra_headers to Python-SDK constructor call. + This allows for the SDK to send the type of driver + performing each request along with the request. This + functionality existed before the Python-SDK revamp, so + this change adds the functionality back in. + * 2018.10.8.1 - Adding thread_local to Python-SDK constructor call. + This preserves trace_id in the logs + * 2018.10.30.0 - Adding template_override support. Added + datera_disable_template_override cfgOpt to disable + this feature. Updated required requests version to + >=2.20.0 because of a security vulnerability in + <=2.19.X. Added support for filter_function and + goodness_function. 
+ * 2018.11.1.0 - Adding flash and hybrid capacity info to + get_volume_stats + * 2018.11.8.0 - Fixing bug that broke 2.2.X support + * 2018.11.14.0 - Bugfixes for v2.1 API support and unicode character + support + * 2019.1.24.0 - Python-SDK requirements update, README updates + * 2019.2.25.0 - Scalability fixes and utility script updates + * 2019.6.4.1 - Added Pypi packaging installation support + * 2019.12.10.0 - Python 3.x support, tox tests, CI ready, live + migration support, image cache, bugfixes. """ - VERSION = '2.4.0' + + VERSION = '2019.12.10.0' CI_WIKI_NAME = "datera-ci" HEADER_DATA = {'Datera-Driver': 'OpenStack-Cinder-{}'.format(VERSION)} - # TODO(jsbryant) Remove driver in the 'U' release if CI is not fixed. - SUPPORTED = False - def __init__(self, *args, **kwargs): super(DateraDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(d_opts) self.username = self.configuration.san_login self.password = self.configuration.san_password + self.ldap = self.configuration.datera_ldap_server self.cluster_stats = {} self.datera_api_token = None self.interval = self.configuration.datera_503_interval @@ -113,23 +209,39 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): self.interval) self.driver_prefix = str(uuid.uuid4())[:4] self.datera_debug = self.configuration.datera_debug - self.datera_api_versions = [] if self.datera_debug: utils.setup_tracing(['method']) self.tenant_id = self.configuration.datera_tenant_id + if self.tenant_id is None: + self.tenant_id = '' + self.defaults = self.configuration.datera_volume_type_defaults if self.tenant_id and self.tenant_id.lower() == 'none': self.tenant_id = None + self.template_override = ( + not self.configuration.datera_disable_template_override) self.api_check = time.time() self.api_cache = [] self.api_timeout = 0 self.do_profile = not self.configuration.datera_disable_profiler - self.thread_local = threading.local() + self.do_metadata = ( + not self.configuration.datera_disable_extended_metadata) + self.image_cache = self.configuration.datera_enable_image_cache + self.image_type = self.configuration.datera_image_cache_volume_type_id + self.thread_local = threading.local() # pylint: disable=no-member + self.datera_version = None + self.apiv = None + self.api = None + self.filterf = self.get_filter_function() + self.goodnessf = self.get_goodness_function() + + self.use_chap_auth = self.configuration.use_chap_auth + self.chap_username = self.configuration.chap_username + self.chap_password = self.configuration.chap_password backend_name = self.configuration.safe_get( 'volume_backend_name') self.backend_name = backend_name or 'Datera' - datc.register_driver(self) def do_setup(self, context): @@ -142,8 +254,25 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): LOG.error(msg) raise exception.InvalidInput(msg) - self.login() - self._create_tenant() + # Try each valid api version starting with the latest until we find + # one that works + for apiv in reversed(datc.API_VERSIONS): + try: + api = dfs_sdk.get_api(self.configuration.san_ip, + self.username, + self.password, + 'v{}'.format(apiv), + disable_log=True, + extra_headers=self.HEADER_DATA, + thread_local=self.thread_local, + ldap_server=self.ldap) + system = api.system.get() + LOG.debug('Connected successfully to cluster: %s', system.name) + self.api = api + self.apiv = apiv + break + except Exception as e: + LOG.warning(e) # ================= @@ -151,7 +280,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, 
api21.DateraApi): # = Create Volume = # ================= - @datc._api_lookup + @datc.lookup def create_volume(self, volume): """Create a logical volume.""" pass @@ -160,7 +289,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Extend Volume = # ================= - @datc._api_lookup + @datc.lookup def extend_volume(self, volume, new_size): pass @@ -170,7 +299,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Cloned Volume = # ================= - @datc._api_lookup + @datc.lookup def create_cloned_volume(self, volume, src_vref): pass @@ -178,7 +307,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Delete Volume = # ================= - @datc._api_lookup + @datc.lookup def delete_volume(self, volume): pass @@ -186,7 +315,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Ensure Export = # ================= - @datc._api_lookup + @datc.lookup def ensure_export(self, context, volume, connector=None): """Gets the associated account, retrieves CHAP info and updates.""" @@ -194,7 +323,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Initialize Connection = # ========================= - @datc._api_lookup + @datc.lookup def initialize_connection(self, volume, connector): pass @@ -202,7 +331,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Create Export = # ================= - @datc._api_lookup + @datc.lookup def create_export(self, context, volume, connector): pass @@ -210,7 +339,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Detach Volume = # ================= - @datc._api_lookup + @datc.lookup def detach_volume(self, context, volume, attachment=None): pass @@ -218,7 +347,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Create Snapshot = # =================== - @datc._api_lookup + @datc.lookup def create_snapshot(self, snapshot): pass @@ -226,7 +355,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Delete Snapshot = # =================== - @datc._api_lookup + @datc.lookup def delete_snapshot(self, snapshot): pass @@ -234,7 +363,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Volume From Snapshot = # ======================== - @datc._api_lookup + @datc.lookup def create_volume_from_snapshot(self, volume, snapshot): pass @@ -242,12 +371,11 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Retype = # ========== - @datc._api_lookup + @datc.lookup def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. 
- :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to @@ -262,7 +390,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Manage = # ========== - @datc._api_lookup + @datc.lookup def manage_existing(self, volume, existing_ref): """Manage an existing volume on the Datera backend @@ -276,7 +404,6 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): (existing_ref['source-name'] == tenant:app_inst_name:storage_inst_name:vol_name) - if using Datera 2.1 API or @@ -292,11 +419,41 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): """ pass + @datc.lookup + def manage_existing_snapshot(self, snapshot, existing_ref): + """Brings an existing backend storage object under Cinder management. + + existing_ref is passed straight through from the API request's + manage_existing_ref value, and it is up to the driver how this should + be interpreted. It should be sufficient to identify a storage object + that the driver should somehow associate with the newly-created cinder + snapshot structure. + + There are two ways to do this: + + 1. Rename the backend storage object so that it matches the + snapshot['name'] which is how drivers traditionally map between a + cinder snapshot and the associated backend storage object. + + 2. Place some metadata on the snapshot, or somewhere in the backend, + that allows other driver requests (e.g. delete) to locate the + backend storage object when required. + + If the existing_ref doesn't make sense, or doesn't refer to an existing + backend storage object, raise a ManageExistingInvalidReference + exception. + + :param snapshot: Cinder volume snapshot to manage + :param existing_ref: Driver-specific information used to identify a + volume snapshot + """ + pass + # =================== # = Manage Get Size = # =================== - @datc._api_lookup + @datc.lookup def manage_existing_get_size(self, volume, existing_ref): """Get the size of an unmanaged volume on the Datera backend @@ -316,20 +473,32 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): """ pass + @datc.lookup + def manage_existing_snapshot_get_size(self, snapshot, existing_ref): + """Return size of snapshot to be managed by manage_existing. + + When calculating the size, round up to the next GB. + + :param snapshot: Cinder volume snapshot to manage + :param existing_ref: Driver-specific information used to identify a + volume snapshot + :returns size: Volume snapshot size in GiB (integer) + """ + pass + # ========================= # = Get Manageable Volume = # ========================= - @datc._api_lookup + @datc.lookup def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a volume in the host, with the following keys: - - reference (dictionary): The reference for a volume, which can be - passed to 'manage_existing'. + passed to "manage_existing". - size (int): The size of the volume according to the storage backend, rounded up to the nearest GB. 
- safe_to_manage (boolean): Whether or not this volume is safe to @@ -353,11 +522,50 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): """ pass + # ============================ + # = Get Manageable Snapshots = + # ============================ + + @datc.lookup + def get_manageable_snapshots(self, cinder_snapshots, marker, limit, + offset, sort_keys, sort_dirs): + """List snapshots on the backend available for management by Cinder. + + Returns a list of dictionaries, each specifying a snapshot in the host, + with the following keys: + - reference (dictionary): The reference for a snapshot, which can be + passed to "manage_existing_snapshot". + - size (int): The size of the snapshot according to the storage + backend, rounded up to the nearest GB. + - safe_to_manage (boolean): Whether or not this snapshot is safe to + manage according to the storage backend. For example, is the snapshot + in use or invalid for any reason. + - reason_not_safe (string): If safe_to_manage is False, the reason why. + - cinder_id (string): If already managed, provide the Cinder ID. + - extra_info (string): Any extra information to return to the user + - source_reference (string): Similar to "reference", but for the + snapshot's source volume. + + :param cinder_snapshots: A list of snapshots in this host that Cinder + currently manages, used to determine if + a snapshot is manageable or not. + :param marker: The last item of the previous page; we return the + next results after this value (after sorting) + :param limit: Maximum number of items to return + :param offset: Number of items to skip after marker + :param sort_keys: List of keys to sort results by (valid keys are + 'identifier' and 'size') + :param sort_dirs: List of directions to sort by, corresponding to + sort_keys (valid directions are 'asc' and 'desc') + + """ + pass + # ============ # = Unmanage = # ============ - @datc._api_lookup + @datc.lookup def unmanage(self, volume): """Unmanage a currently managed volume in Cinder @@ -365,11 +573,43 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): """ pass + # ==================== + # = Fast Image Clone = + # ==================== + + @datc.lookup + def clone_image(self, context, volume, image_location, image_meta, + image_service): + """Clone an existing image volume.""" + pass + + # ==================== + # = Volume Migration = + # ==================== + + @datc.lookup + def update_migrated_volume(self, context, volume, new_volume, + volume_status): + """Return model update for migrated volume. + + Each driver implementing this method needs to be responsible for the + values of _name_id and provider_location. If None is returned or either + key is not set, it means the volume table does not need to change the + value(s) for the key(s). + The return format is {"_name_id": value, "provider_location": value}. + :param volume: The original volume that was migrated to this backend + :param new_volume: The migration volume object that was created on + this backend as part of the migration process + :param volume_status: The status of the original volume + :returns: model_update to update DB with any needed changes + """ + pass + # ================ # = Volume Stats = # ================ - @datc._api_lookup + @datc.lookup def get_volume_stats(self, refresh=False): """Get volume stats. 
@@ -384,31 +624,10 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): # = Login = # ========= - @datc._api_lookup + @datc.lookup def login(self): pass - # ======= - # = QoS = - # ======= - - def _update_qos(self, resource, policies): - url = datc.URL_TEMPLATES['vol_inst']( - policies['default_storage_name'], - policies['default_volume_name']) + '/performance_policy' - url = url.format(datc._get_name(resource['id'])) - type_id = resource.get('volume_type_id', None) - if type_id is not None: - # Filter for just QOS policies in result. All of their keys - # should end with "max" - fpolicies = {k: int(v) for k, v in - policies.items() if k.endswith("max")} - # Filter all 0 values from being passed - fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items())) - if fpolicies: - self._issue_api_request(url, 'post', body=fpolicies, - api_version='2') - def _get_lunid(self): return 0 @@ -447,18 +666,62 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): prefix: DF --> Datera Fabric """ + LOG.debug("Using the following volume-type defaults: %s", + self.defaults) properties = {} + self._set_property( + properties, + "DF:iops_per_gb", + "Datera IOPS Per GB Setting", + _("Setting this value will calculate IOPS for each volume of " + "this type based on their size. Eg. A setting of 100 will " + "give a 1 GB volume 100 IOPS, but a 10 GB volume 1000 IOPS. " + "A setting of '0' is unlimited. This value is applied to " + "total_iops_max and will be overridden by total_iops_max if " + "iops_per_gb is set and a large enough volume is provisioned " + "which would exceed total_iops_max"), + "integer", + minimum=0, + default=int(self.defaults.get('iops_per_gb', 0))) + + self._set_property( + properties, + "DF:bandwidth_per_gb", + "Datera Bandwidth Per GB Setting", + _("Setting this value will calculate bandwidth for each volume of " + "this type based on their size in KiB/s. Eg. A setting of 100 " + "will give a 1 GB volume 100 KiB/s bandwidth, but a 10 GB " + "volume 1000 KiB/s bandwidth. A setting of '0' is unlimited. " + "This value is applied to total_bandwidth_max and will be " + "overridden by total_bandwidth_max if set and a large enough " + "volume is provisioned which would exceed total_bandwidth_max"), + "integer", + minimum=0, + default=int(self.defaults.get('bandwidth_per_gb', 0))) + self._set_property( properties, "DF:placement_mode", - "Datera Volume Placement", - _("'single_flash' for single-flash-replica placement, " + "Datera Volume Placement Mode (deprecated)", + _("DEPRECATED: PLEASE USE 'placement_policy' on 3.3.X+ versions " + "of the Datera product. 'single_flash' for " + "single-flash-replica placement, " "'all_flash' for all-flash-replica placement, " "'hybrid' for hybrid placement"), "string", - default="hybrid") + default=self.defaults.get('placement_mode', 'hybrid')) + + self._set_property( + properties, + "DF:placement_policy", + "Datera Volume Placement Policy", + _("Valid path to a media placement policy. 
Example: " + "/placement_policies/all-flash"), + "string", + default=self.defaults.get('placement_policy', + 'default')) self._set_property( properties, @@ -466,7 +729,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): "Datera Round Robin Portals", _("True to round robin the provided portals for a target"), "boolean", - default=False) + default="True" == self.defaults.get('round_robin', "False")) if self.configuration.get('datera_debug_replica_count_override'): replica_count = 1 @@ -480,24 +743,20 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): "increased once volume is created"), "integer", minimum=1, - default=replica_count) - - self._set_property( - properties, - "DF:acl_allow_all", - "Datera ACL Allow All", - _("True to set acl 'allow_all' on volumes created. Cannot be " - "changed on volume once set"), - "boolean", - default=False) + default=int(self.defaults.get('replica_count', replica_count))) self._set_property( properties, "DF:ip_pool", "Datera IP Pool", - _("Specifies IP pool to use for volume"), + _("Specifies IP pool to use for volume. If provided string " + "contains commas, it will be split on the commas and each " + "substring will be uses as a separate IP pool and the volume's " + "IP pool will be chosen randomly from the list. Example: " + "'my-ip-pool1,my-ip-pool2,my-ip-pool3', next attach " + "my-ip-pool2 was chosen randomly as the volume IP pool"), "string", - default="default") + default=self.defaults.get('ip_pool', 'default')) self._set_property( properties, @@ -505,7 +764,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): "Datera Template", _("Specifies Template to use for volume provisioning"), "string", - default="") + default=self.defaults.get('template', '')) # ###### QoS Settings ###### # self._set_property( @@ -516,23 +775,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): "use 0 for unlimited"), "integer", minimum=0, - default=0) - - self._set_property( - properties, - "DF:default_storage_name", - "Datera Default Storage Instance Name", - _("The name to use for storage instances created"), - "string", - default="storage-1") - - self._set_property( - properties, - "DF:default_volume_name", - "Datera Default Volume Name", - _("The name to use for volumes created"), - "string", - default="volume-1") + default=int(self.defaults.get('read_bandwidth_max', 0))) self._set_property( properties, @@ -542,7 +785,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): "use 0 for unlimited"), "integer", minimum=0, - default=0) + default=int(self.defaults.get('write_bandwidth_max', 0))) self._set_property( properties, @@ -552,7 +795,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): "use 0 for unlimited"), "integer", minimum=0, - default=0) + default=int(self.defaults.get('total_bandwidth_max', 0))) self._set_property( properties, @@ -562,7 +805,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): "use 0 for unlimited"), "integer", minimum=0, - default=0) + default=int(self.defaults.get('read_iops_max', 0))) self._set_property( properties, @@ -572,7 +815,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): "use 0 for unlimited"), "integer", minimum=0, - default=0) + default=int(self.defaults.get('write_iops_max', 0))) self._set_property( properties, @@ -582,7 +825,7 @@ class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): "use 0 for unlimited"), 
"integer", minimum=0, - default=0) + default=int(self.defaults.get('total_iops_max', 0))) # ###### End QoS Settings ###### # return properties, 'DF' diff --git a/driver-requirements.txt b/driver-requirements.txt index 0ecc04bad6f..578c6db5ff8 100644 --- a/driver-requirements.txt +++ b/driver-requirements.txt @@ -39,3 +39,6 @@ infi.dtypes.iqn # PSF # Storpool storpool>=4.0.0 # Apache-2.0 storpool.spopenstack>=2.2.1 # Apache-2.0 + +# Datera +dfs_sdk>=1.2.25 # Apache-2.0 diff --git a/lower-constraints.txt b/lower-constraints.txt index c9add1a5b81..2c736528923 100644 --- a/lower-constraints.txt +++ b/lower-constraints.txt @@ -175,3 +175,4 @@ purestorage==1.6.0 rsd-lib==1.1.0 storpool==4.0.0 storpool.spopenstack==2.2.1 +dfs_sdk==1.2.25 diff --git a/setup.cfg b/setup.cfg index 29ea5f314f1..60acb86dbe1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -132,4 +132,6 @@ rsd = storpool = storpool>=4.0.0 # Apache-2.0 storpool.spopenstack>=2.2.1 # Apache-2.0 +datera = + dfs_sdk>=1.2.25 # Apache-2.0