Merge "Initial commit for Yadro Tatlin.UNIFIED driver"

Zuul 2022-07-28 10:26:46 +00:00 committed by Gerrit Code Review
commit 4e7d338b9a
16 changed files with 3305 additions and 0 deletions

@@ -186,6 +186,8 @@ from cinder.volume.drivers.windows import iscsi as \
cinder_volume_drivers_windows_iscsi
from cinder.volume.drivers.windows import smbfs as \
cinder_volume_drivers_windows_smbfs
from cinder.volume.drivers.yadro import tatlin_common as \
cinder_volume_drivers_yadro_tatlincommon
from cinder.volume.drivers.zadara import zadara as \
cinder_volume_drivers_zadara_zadara
from cinder.volume import manager as cinder_volume_manager
@@ -411,6 +413,7 @@ def list_opts():
cinder_volume_drivers_vzstorage.vzstorage_opts,
cinder_volume_drivers_windows_iscsi.windows_opts,
cinder_volume_drivers_windows_smbfs.volume_opts,
cinder_volume_drivers_yadro_tatlincommon.tatlin_opts,
cinder_volume_drivers_zadara_zadara.common.zadara_opts,
cinder_volume_manager.volume_backend_opts,
cinder_volume_targets_spdknvmf.spdk_opts,
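
The two hunks above wire the new driver into Cinder's option discovery: the import exposes cinder.volume.drivers.yadro.tatlin_common under a generated alias, and list_opts() adds its tatlin_opts to the aggregated option list. A minimal sketch of inspecting those options, assuming tatlin_opts is the usual list of oslo.config Opt objects (illustrative only, not part of this change):

# Illustrative sketch, not part of this change.
from cinder.volume.drivers.yadro import tatlin_common

for opt in tatlin_common.tatlin_opts:
    # Each entry is an oslo.config Opt; show its name and default value.
    print(opt.name, opt.default)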

@@ -0,0 +1,452 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest import TestCase
import requests
from requests import codes
from cinder.exception import NotAuthorized
from cinder.exception import VolumeBackendAPIException
from cinder.tests.unit.fake_constants import VOLUME_NAME
from cinder.tests.unit.volume.drivers.yadro.test_tatlin_common import \
DummyVolume
from cinder.tests.unit.volume.drivers.yadro.test_tatlin_common import \
MockResponse
from cinder.volume.drivers.yadro import tatlin_api
from cinder.volume.drivers.yadro.tatlin_client import InitTatlinClient
from cinder.volume.drivers.yadro.tatlin_client import TatlinAccessAPI
from cinder.volume.drivers.yadro.tatlin_client import TatlinClientCommon
from cinder.volume.drivers.yadro.tatlin_client import TatlinClientV23
from cinder.volume.drivers.yadro.tatlin_client import TatlinClientV25
from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException
RES_PORTS_RESP = [
{
"port": "fc20",
"port_status": "healthy",
"port_status_desc": "resource is available",
"running": [
"sp-0",
"sp-1"
],
"wwn": [
"10:00:14:52:90:00:03:10",
"10:00:14:52:90:00:03:90"
],
"lun": "scsi-lun-fc20-5",
"volume": "pty-vol-0d9627cb-c52e-49f1-878c-57c9bc3010c9",
"lun_index": "5"
}
]
ALL_HOSTS_RESP = [
{
"version": "d6a2d310d9adb16f0d24d5352b5c4837",
"id": "5e37d335-8fff-4aee-840a-34749301a16a",
"name": "victoria-fc",
"port_type": "fc",
"initiators": [
"21:00:34:80:0d:6b:aa:e3",
"21:00:34:80:0d:6b:aa:e2"
],
"tags": [],
"comment": "",
"auth": {}
}
]
RES_MAPPING_RESP = [
{
"resource_id": "62bbb941-ba4a-4101-927d-e527ce5ee011",
"host_id": "5e37d335-8fff-4aee-840a-34749301a16a",
"mapped_lun_id": 1
}
]
POOL_LIST_RESPONCE = [
{
"id": "7e259486-deb8-4d11-8cb0-e2c5874aaa5e",
"name": "cinder-pool",
"status": "ready"
}
]
VOL_ID = 'cinder-volume-id'
ERROR_VOLUME = [
{
"ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"type": "block",
"poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d",
"size": 16106127360,
"maxModifySize": 95330557231104,
"status": "error",
}
]
READY_VOLUME = [
{
"ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"type": "block",
"poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d",
"size": 16106127360,
"maxModifySize": 95330557231104,
"status": "ready",
}
]
RESOURCE_INFORMATION = {
"ptyId": "62bbb941-ba4a-4101-927d-e527ce5ee011",
"id": "62bbb941-ba4a-4101-927d-e527ce5ee011",
"name": "res1",
"type": "block",
"poolId": "c46584c5-3113-4cc7-8a72-f9262f32c508",
"size": 1073741824,
"maxModifySize": 5761094647808,
"status": "ready",
"stat": {
"used_capacity": 1073741824,
"mapped_blocks": 0,
"dedup_count": 0,
"reduction_ratio": 0
},
"lbaFormat": "4kn",
"volume_id": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011",
"wwid": "naa.614529011650000c4000800000000004",
"lun_id": "4",
"cached": "true",
"rCacheMode": "enabled",
"wCacheMode": "enabled",
"ports": [
{
"port": "fc21",
"port_status": "healthy",
"port_status_desc":
"resource is available on all storage controllers",
"running": [
"sp-1",
"sp-0"
],
"wwn": [
"10:00:14:52:90:00:03:91",
"10:00:14:52:90:00:03:11"
],
"lun": "scsi-lun-fc21-4",
"volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011",
"lun_index": "4"
},
{
"port": "fc20",
"port_status": "healthy",
"port_status_desc":
"resource is available on all storage controllers",
"running": [
"sp-1",
"sp-0"
],
"wwn": [
"10:00:14:52:90:00:03:10",
"10:00:14:52:90:00:03:90"
],
"lun": "scsi-lun-fc20-4",
"volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011",
"lun_index": "4"
}
],
"volume_path": "/dev/mapper/dmc-89382c6c-7cf9-4ff8-bdbb-f438d20c960a",
"blockSize": "4kn",
"replication": {
"is_enabled": False
}
}
ALL_HOST_GROUP_RESP = [
{
"version": "20c28d21549fb7ec5777637f72f50043",
"id": "314b5546-45da-4c8f-a24c-b615265fbc32",
"name": "cinder-group",
"host_ids": [
"5e37d335-8fff-4aee-840a-34749301a16a"
],
"tags": None,
"comment": ""
}
]
class TatlinClientTest(TestCase):
@mock.patch.object(TatlinAccessAPI, '_authenticate_access')
def setUp(self, auth_access):
self.access_api = TatlinAccessAPI('127.0.0.1', 443,
'user', 'passwd', False)
self.client = TatlinClientV25(self.access_api,
api_retry_count=1,
wait_interval=1,
wait_retry_count=1)
@mock.patch.object(TatlinAccessAPI, '_authenticate_access')
@mock.patch.object(TatlinAccessAPI, 'get_tatlin_version')
def test_different_client_versions(self, version, auth):
version.side_effect = [(2, 2), (2, 3), (2, 4), (2, 5), (3, 0)]
args = ['1.2.3.4', 443, 'username', 'password', True, 1, 1, 1]
self.assertIsInstance(InitTatlinClient(*args), TatlinClientV23)
self.assertIsInstance(InitTatlinClient(*args), TatlinClientV23)
self.assertIsInstance(InitTatlinClient(*args), TatlinClientV25)
self.assertIsInstance(InitTatlinClient(*args), TatlinClientV25)
self.assertIsInstance(InitTatlinClient(*args), TatlinClientV25)
@mock.patch.object(requests, 'packages')
@mock.patch.object(requests, 'session')
def test_authenticate_success(self, session, packages):
session().post.return_value = MockResponse({'token': 'ABC'},
codes.ok)
TatlinAccessAPI('127.0.0.1', 443, 'user', 'passwd', False)
session().post.assert_called_once_with(
'https://127.0.0.1:443/auth/login',
data={'user': 'user', 'secret': 'passwd'},
verify=False
)
session().headers.update.assert_any_call({'X-Auth-Token': 'ABC'})
TatlinAccessAPI('127.0.0.1', 443, 'user', 'passwd', True)
session().headers.update.assert_any_call({'X-Auth-Token': 'ABC'})
@mock.patch.object(requests, 'session')
def test_authenticate_fail(self, session):
session().post.return_value = MockResponse(
{}, codes.unauthorized)
self.assertRaises(NotAuthorized,
TatlinAccessAPI,
'127.0.0.1', 443, 'user', 'passwd', False)
@mock.patch.object(TatlinAccessAPI, '_authenticate_access')
@mock.patch.object(requests, 'session')
def test_send_request(self, session, auth):
session().request.side_effect = [
MockResponse({}, codes.ok),
MockResponse({}, codes.unauthorized),
MockResponse({}, codes.ok)]
access_api = TatlinAccessAPI('127.0.0.1', 443, 'user', 'passwd', True)
access_api.session = session()
access_api.send_request(tatlin_api.ALL_RESOURCES, {}, 'GET')
access_api.session.request.assert_called_once_with(
'GET',
'https://127.0.0.1:443/' + tatlin_api.ALL_RESOURCES,
json={},
verify=True
)
access_api.send_request(tatlin_api.ALL_RESOURCES, {}, 'GET')
self.assertEqual(auth.call_count, 2)
access_api.session.request.assert_called_with(
'GET',
'https://127.0.0.1:443/' + tatlin_api.ALL_RESOURCES,
json={},
verify=True
)
@mock.patch.object(TatlinAccessAPI, '_authenticate_access')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_get_tatlin_version(self, send_request, auth):
send_request.return_value = MockResponse({'build-version': '2.3.0-44'},
codes.ok)
access_api = TatlinAccessAPI('127.0.0.1', 443, 'user', 'passwd', True)
self.assertEqual(access_api.get_tatlin_version(), (2, 3))
send_request.assert_called_once()
self.assertEqual(access_api.get_tatlin_version(), (2, 3))
send_request.assert_called_once()
@mock.patch.object(TatlinClientCommon, '_is_vol_on_host')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_add_volume_to_host(self,
send_request,
is_on_host):
vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011')
# Success volume already on host
is_on_host.side_effect = [True]
self.client.add_vol_to_host(vol.name_id, 10)
send_request.assert_not_called()
# Success volume added
is_on_host.side_effect = [False, True]
send_request.side_effect = [(MockResponse({}, codes.ok)), ]
self.client.add_vol_to_host(vol.name_id, 10)
# Error adding volume to host
is_on_host.side_effect = [False]
send_request.side_effect = [
TatlinAPIException(codes.internal_server_error, ''),
]
with self.assertRaises(TatlinAPIException):
self.client.add_vol_to_host(vol.name_id, 10)
# Added successfully but not on host
is_on_host.side_effect = [False, False]
send_request.side_effect = [(MockResponse({}, codes.ok)), ]
with self.assertRaises(VolumeBackendAPIException):
self.client.add_vol_to_host(vol.name_id, 10)
@mock.patch.object(TatlinClientCommon, '_is_vol_on_host')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_remove_volume_from_host(self,
send_request,
is_on_host):
vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011')
# Success volume not on host
is_on_host.side_effect = [False]
self.client.remove_vol_from_host(vol.name_id, 10)
send_request.assert_not_called()
# Success volume removed
is_on_host.side_effect = [True, False]
send_request.side_effect = [(MockResponse({}, codes.ok)), ]
self.client.remove_vol_from_host(vol.name_id, 10)
# Removing from host raises an error
is_on_host.side_effect = [True, False]
send_request.side_effect = [
TatlinAPIException(codes.internal_server_error, ''),
]
with self.assertRaises(TatlinAPIException):
self.client.remove_vol_from_host(vol.name_id, 10)
# Removed successfully but still on host
is_on_host.side_effect = [True, True]
send_request.side_effect = [(MockResponse({}, codes.ok)), ]
with self.assertRaises(VolumeBackendAPIException):
self.client.remove_vol_from_host(vol.name_id, 10)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_is_volume_exist_success(self, send_request):
send_request.side_effect = [
(MockResponse(RESOURCE_INFORMATION, codes.ok)),
]
vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011')
result = self.client.is_volume_exists(vol.name_id)
self.assertTrue(result)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_is_volume_exist_not_found(self, send_request):
send_request.return_value = MockResponse(
RESOURCE_INFORMATION, codes.not_found)
vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011')
result = self.client.is_volume_exists(vol.name_id)
self.assertFalse(result)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_is_volume_exist_unknown_error(self, send_request):
send_request.return_value = MockResponse(
{}, codes.internal_server_error)
vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011')
with self.assertRaises(VolumeBackendAPIException):
self.client.is_volume_exists(vol.name_id)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_delete_volume(self, send_request):
vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011')
# Success delete
send_request.side_effect = [(MockResponse({}, codes.ok)), ]
self.client.delete_volume(vol.name_id)
# Volume doesn't exist
send_request.side_effect = [(MockResponse({}, 404)), ]
self.client.delete_volume(vol.name_id)
# Volume delete error
send_request.side_effect = [
(MockResponse({}, codes.internal_server_error)),
]
with self.assertRaises(TatlinAPIException):
self.client.delete_volume(vol.name_id)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_extend_volume(self, send_request):
vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011')
# Success extend
send_request.side_effect = [(MockResponse({}, codes.ok)), ]
self.client.extend_volume(vol.name_id, 20000)
# Error
send_request.side_effect = [
(MockResponse({}, codes.internal_server_error)),
]
with self.assertRaises(VolumeBackendAPIException):
self.client.extend_volume(vol.name_id, 20000)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_is_volume_ready(self, send_request):
send_request.side_effect = [(MockResponse(READY_VOLUME, codes.ok)), ]
self.assertTrue(self.client.is_volume_ready(VOLUME_NAME))
send_request.side_effect = [
(MockResponse(ERROR_VOLUME, codes.ok))
]
self.assertFalse(self.client.is_volume_ready(VOLUME_NAME))
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_get_host_group_id_success(self, send_request):
send_request.return_value = MockResponse(
ALL_HOST_GROUP_RESP, codes.ok)
self.assertEqual(self.client.get_host_group_id('cinder-group'),
'314b5546-45da-4c8f-a24c-b615265fbc32')
@mock.patch.object(TatlinClientCommon,
'is_volume_exists',
return_value=True)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_get_resource_ports_array(self, send_request, *args):
send_request.return_value = MockResponse(RES_PORTS_RESP, codes.ok)
self.assertListEqual(self.client.get_resource_ports_array(VOL_ID),
["fc20"])
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_get_resource_mapping_negative(self, send_request):
send_request.return_value = MockResponse(
{}, codes.internal_server_error)
self.assertRaises(VolumeBackendAPIException,
self.client.get_resource_mapping)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_get_pool_id_by_name(self, send_request, *args):
send_request.return_value = MockResponse(POOL_LIST_RESPONCE, codes.ok)
self.assertEqual(self.client.get_pool_id_by_name('cinder-pool'),
'7e259486-deb8-4d11-8cb0-e2c5874aaa5e')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_get_all_hosts(self, send_request):
send_request.return_value = MockResponse({}, codes.ok)
self.client.get_all_hosts()
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_get_all_hosts_negative(self, send_request):
send_request.return_value = MockResponse(
{}, codes.internal_server_error)
self.assertRaises(VolumeBackendAPIException,
self.client.get_all_hosts)
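
The version dispatch exercised by test_different_client_versions above matches InitTatlinClient later in this change: Tatlin versions up to and including 2.3 yield TatlinClientV23, newer versions yield TatlinClientV25. A short usage sketch (illustrative only; the connection parameters are placeholders):

# Illustrative sketch, not part of this change; parameters are placeholders.
from cinder.volume.drivers.yadro.tatlin_client import InitTatlinClient

client = InitTatlinClient('192.0.2.10', 443, 'username', 'password',
                          True,  # verify TLS certificates
                          1,     # api_retry_count
                          1,     # wait_interval
                          1)     # wait_retry_count
# client is a TatlinClientV23 for Tatlin <= 2.3, a TatlinClientV25 otherwise.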

@@ -0,0 +1,519 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest import TestCase
from cinder.cmd import manage as cinder_manage
from cinder.exception import ManageExistingInvalidReference
from cinder.exception import VolumeBackendAPIException
from cinder.tests.unit.fake_constants import VOLUME_NAME
from cinder.volume import configuration
from cinder.volume.drivers.yadro.tatlin_client import TatlinAccessAPI
from cinder.volume.drivers.yadro.tatlin_client import TatlinClientCommon
from cinder.volume.drivers.yadro.tatlin_common import tatlin_opts
from cinder.volume.drivers.yadro.tatlin_common import TatlinCommonVolumeDriver
from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException
from cinder.volume.drivers.yadro.tatlin_utils import TatlinVolumeConnections
OSMGR_ISCSI_PORTS = [
{
"id": "ip-sp-1-98039b04091a",
"meta": {
"tatlin-node": "sp-1",
"type": "ip",
"port-type": "active"
},
"params": {
"dhcp": False,
"ifname": "p30",
"physical-port": "p30",
"ipaddress": "172.20.101.65",
"netmask": "24",
"mtu": "1500",
"gateway": "172.20.101.1",
"roles": "",
"iflabel": "",
"wwpn": ""
}
},
{
"id": "ip-sp-0-b8599f1caf1b",
"meta": {
"tatlin-node": "sp-0",
"type": "ip",
"port-type": "active"
},
"params": {
"dhcp": False,
"ifname": "p31",
"physical-port": "p31",
"ipaddress": "172.20.101.66",
"netmask": "24",
"mtu": "1500",
"gateway": "172.20.101.1",
"roles": "",
"iflabel": "",
"wwpn": ""
}
},
{
"id": "ip-sp-1-98039b04091b",
"meta": {
"tatlin-node": "sp-1",
"type": "ip",
"port-type": "active"
},
"params": {
"dhcp": False,
"ifname": "p31",
"physical-port": "p31",
"ipaddress": "172.20.101.67",
"netmask": "24",
"mtu": "1500",
"gateway": "172.20.101.1",
"roles": "",
"iflabel": "",
"wwpn": ""
}
},
{
"id": "ip-sp-0-b8599f1caf1a",
"meta": {
"tatlin-node": "sp-0",
"type": "ip",
"port-type": "active"
},
"params": {
"dhcp": False,
"ifname": "p30",
"physical-port": "p30",
"ipaddress": "172.20.101.64",
"netmask": "24",
"mtu": "1500",
"gateway": "172.20.101.1",
"roles": "",
"iflabel": "",
"wwpn": ""
}
},
]
ISCSI_PORT_PORTALS = {
'p30': ['172.20.101.65:3260', '172.20.101.64:3260'],
'p31': ['172.20.101.66:3260', '172.20.101.67:3260']
}
RES_MAPPING_RESP = [
{
"resource_id": "62bbb941-ba4a-4101-927d-e527ce5ee011",
"host_id": "5e37d335-8fff-4aee-840a-34749301a16a",
"mapped_lun_id": 1
}
]
POOL_LIST_RESPONCE = [
{
"id": "7e259486-deb8-4d11-8cb0-e2c5874aaa5e",
"name": "cinder-pool",
"status": "ready"
}
]
OK_POOL_ID = '7e259486-deb8-4d11-8cb0-e2c5874aaa5e'
WRONG_POOL_ID = 'wrong-id'
ERROR_VOLUME = [
{
"ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"type": "block",
"poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d",
"size": 16106127360,
"maxModifySize": 95330557231104,
"status": "error",
}
]
READY_VOLUME = [
{
"ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"type": "block",
"poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d",
"size": 16106127360,
"maxModifySize": 95330557231104,
"status": "ready",
}
]
ONLINE_VOLUME = [
{
"ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a",
"type": "block",
"poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d",
"size": 16106127360,
"maxModifySize": 95330557231104,
"status": "online",
}
]
RESOURCE_INFORMATION = {
"ptyId": "62bbb941-ba4a-4101-927d-e527ce5ee011",
"id": "62bbb941-ba4a-4101-927d-e527ce5ee011",
"name": "res1",
"type": "block",
"poolId": "c46584c5-3113-4cc7-8a72-f9262f32c508",
"size": 1073741824,
"maxModifySize": 5761094647808,
"status": "ready",
"stat": {
"used_capacity": 1073741824,
"mapped_blocks": 0,
"dedup_count": 0,
"reduction_ratio": 0
},
"lbaFormat": "4kn",
"volume_id": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011",
"wwid": "naa.614529011650000c4000800000000004",
"lun_id": "4",
"cached": "true",
"rCacheMode": "enabled",
"wCacheMode": "enabled",
"ports": [
{
"port": "fc21",
"port_status": "healthy",
"port_status_desc":
"resource is available on all storage controllers",
"running": [
"sp-1",
"sp-0"
],
"wwn": [
"10:00:14:52:90:00:03:91",
"10:00:14:52:90:00:03:11"
],
"lun": "scsi-lun-fc21-4",
"volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011",
"lun_index": "4"
},
{
"port": "fc20",
"port_status": "healthy",
"port_status_desc":
"resource is available on all storage controllers",
"running": [
"sp-1",
"sp-0"
],
"wwn": [
"10:00:14:52:90:00:03:10",
"10:00:14:52:90:00:03:90"
],
"lun": "scsi-lun-fc20-4",
"volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011",
"lun_index": "4"
}
],
"volume_path": "/dev/mapper/dmc-89382c6c-7cf9-4ff8-bdbb-f438d20c960a",
"blockSize": "4kn",
"replication": {
"is_enabled": False
}
}
POOL_NAME = 'cinder-pool'
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
class DummyVolume(object):
def __init__(self, volid, volsize=1):
self.id = volid
self._name_id = None
self.size = volsize
self.status = None
self.__volume_type_id = 1
self.attach_status = None
self.volume_attachment = None
self.provider_location = None
self.name = None
self.metadata = {}
@property
def name_id(self):
return self.id if not self._name_id else self._name_id
@property
def name(self):
return self.name_id
@property
def volume_type_id(self):
return self.__volume_type_id
@name_id.setter
def name_id(self, value):
self._name_id = value
@name.setter
def name(self, value):
self._name_id = value
@volume_type_id.setter
def volume_type_id(self, value):
self.__volume_type_id = value
def get_fake_tatlin_config():
config = configuration.Configuration(
tatlin_opts,
configuration.SHARED_CONF_GROUP)
config.san_ip = '127.0.0.1'
config.san_password = 'pwd'
config.san_login = 'admin'
config.pool_name = POOL_NAME
config.host_group = 'cinder-group'
config.tat_api_retry_count = 1
config.wait_interval = 1
config.wait_retry_count = 3
config.chap_username = 'chap_user'
config.chap_password = 'chap_passwd'
config.state_path = '/tmp'
return config
class TatlinCommonVolumeDriverTest(TestCase):
@mock.patch.object(TatlinVolumeConnections, 'create_store')
@mock.patch.object(TatlinAccessAPI, '_authenticate_access')
def setUp(self, auth_access, create_store):
access_api = TatlinAccessAPI('127.0.0.1', '443',
'user', 'passwd', False)
access_api._authenticate_access = MagicMock()
self.client = TatlinClientCommon(access_api,
api_retry_count=1,
wait_interval=1,
wait_retry_count=3)
self.driver = TatlinCommonVolumeDriver(
configuration=get_fake_tatlin_config())
self.driver._get_tatlin_client = MagicMock()
self.driver._get_tatlin_client.return_value = self.client
self.driver.do_setup(None)
@mock.patch.object(TatlinClientCommon, 'delete_volume')
@mock.patch.object(TatlinClientCommon, 'is_volume_exists')
def test_delete_volume_ok(self, is_volume_exist, delete_volume):
cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks',
group='oslo_concurrency')
is_volume_exist.side_effect = [True, False, False]
self.driver.delete_volume(DummyVolume(VOLUME_NAME))
@mock.patch.object(TatlinClientCommon, 'delete_volume')
@mock.patch.object(TatlinClientCommon, 'is_volume_exists')
def test_delete_volume_ok_404(self, is_volume_exist, delete_volume):
cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks',
group='oslo_concurrency')
is_volume_exist.side_effect = [False]
self.driver.delete_volume(DummyVolume(VOLUME_NAME))
@mock.patch.object(TatlinClientCommon, 'delete_volume')
@mock.patch.object(TatlinClientCommon, 'is_volume_exists')
def test_delete_volume_error_500(self, is_volume_exist, delete_volume):
cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks',
group='oslo_concurrency')
is_volume_exist.return_value = True
delete_volume.side_effect = TatlinAPIException(500, 'ERROR')
with self.assertRaises(VolumeBackendAPIException):
self.driver.delete_volume(DummyVolume(VOLUME_NAME))
@mock.patch.object(TatlinCommonVolumeDriver, '_update_qos')
@mock.patch.object(TatlinClientCommon, 'is_volume_ready')
@mock.patch.object(TatlinClientCommon, 'extend_volume')
@mock.patch.object(TatlinClientCommon, 'is_volume_exists')
def test_extend_volume_ok(self,
is_volume_exist,
extend_volume,
is_volume_ready,
update_qos):
cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks',
group='oslo_concurrency')
is_volume_ready.return_value = True
is_volume_exist.return_value = True
self.driver.extend_volume(DummyVolume(VOLUME_NAME), 10)
@mock.patch('time.sleep')
@mock.patch.object(TatlinCommonVolumeDriver, '_update_qos')
@mock.patch.object(TatlinClientCommon, 'is_volume_ready')
@mock.patch.object(TatlinClientCommon, 'extend_volume')
@mock.patch.object(TatlinClientCommon, 'is_volume_exists')
def test_extend_volume_error_not_ready(self,
is_volume_exist,
extend_volume,
is_volume_ready,
update_qos,
sleeper):
cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks',
group='oslo_concurrency')
is_volume_ready.return_value = False
is_volume_exist.return_value = True
with self.assertRaises(VolumeBackendAPIException):
self.driver.extend_volume(DummyVolume(VOLUME_NAME), 10)
@mock.patch.object(TatlinClientCommon,
'is_volume_ready',
return_value=True)
def test_wait_volume_ready_success(self, is_ready):
self.driver.wait_volume_ready(DummyVolume('cinder_volume'))
@mock.patch.object(TatlinCommonVolumeDriver, '_update_qos')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_success_manage_existing(self, sendMock, qosMock):
sendMock.side_effect = [
(MockResponse([{'id': '1', 'poolId': OK_POOL_ID}], 200)),
(MockResponse(POOL_LIST_RESPONCE, 200))
]
self.driver.manage_existing(DummyVolume(VOLUME_NAME), {
'source-name': 'existing-resource'
})
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_fail_manage_existing_volume_not_found(self, sendMock):
self.driver.tatlin_api._send_request = Mock()
sendMock.side_effect = [
(MockResponse([{}], 404)),
]
with self.assertRaises(ManageExistingInvalidReference):
self.driver.manage_existing(DummyVolume('new-vol-id'), {
'source-name': 'existing-resource'
})
self.driver.tatlin_api.get_volume_info.assert_called_once()
self.driver.tatlin_api.get_pool_id_by_name.assert_not_called()
@mock.patch.object(TatlinCommonVolumeDriver, '_update_qos')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_fail_manage_existing_wrong_pool(self, sendMock, qosMock):
sendMock.side_effect = [
(MockResponse([{'id': '1', 'poolId': WRONG_POOL_ID}], 200)),
(MockResponse(POOL_LIST_RESPONCE, 200))
]
with self.assertRaises(ManageExistingInvalidReference):
self.driver.manage_existing(DummyVolume('new-vol-id'), {
'source-name': 'existing-resource'
})
self.driver.tatlin_api.get_volume_info.assert_called_once()
self.driver.tatlin_api.get_pool_id_by_name.assert_called_once()
@mock.patch.object(TatlinClientCommon, 'get_resource_count')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_success_create_volume(self, send_request, object_count):
self.driver._stats['overall_resource_count'] = 1
object_count.side_effect = [(1, 1)]
send_request.side_effect = [
(MockResponse(POOL_LIST_RESPONCE, 200)), # Get pool id
(MockResponse({}, 200)), # Create volume
(MockResponse(READY_VOLUME, 200)), # Is volume ready
(MockResponse(READY_VOLUME, 200)) # Is volume ready
]
self.driver._update_qos = Mock()
self.driver.create_volume(DummyVolume(VOLUME_NAME))
@mock.patch.object(TatlinClientCommon, 'get_resource_count')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_fail_create_volume_400(self, send_request, object_count):
self.driver._stats['overall_resource_count'] = 1
object_count.side_effect = [(1, 1)]
send_request.side_effect = [
(MockResponse(POOL_LIST_RESPONCE, 200)),
(MockResponse({}, 500)),
(MockResponse({}, 400))
]
with self.assertRaises(VolumeBackendAPIException):
self.driver.create_volume(DummyVolume(VOLUME_NAME))
self.driver.tatlin_api.create_volume.assert_called_once()
@mock.patch('time.sleep')
@mock.patch.object(TatlinClientCommon, 'get_resource_count')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_fail_volume_not_ready_create_volume(self, sendMock,
volume_count, sleeper):
self.driver._stats['overall_resource_count'] = 1
volume_count.side_effect = [(1, 1)]
sendMock.side_effect = [
(MockResponse(POOL_LIST_RESPONCE, 200)),
(MockResponse({}, 200)),
(MockResponse(ERROR_VOLUME, 200)),
(MockResponse(ERROR_VOLUME, 200)),
(MockResponse(ERROR_VOLUME, 200)),
]
with self.assertRaises(VolumeBackendAPIException):
self.driver.create_volume(DummyVolume(VOLUME_NAME))
@mock.patch.object(TatlinCommonVolumeDriver, '_get_ports_portals')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_fail_create_export(self, sendMock, portsMock):
sendMock.side_effect = [
(MockResponse(OSMGR_ISCSI_PORTS, 200)),
]
portsMock.side_effect = [
ISCSI_PORT_PORTALS
]
self.driver._is_all_ports_assigned = Mock(return_value=True)
with self.assertRaises(NotImplementedError):
self.driver.create_export(None, DummyVolume(VOLUME_NAME), None)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_find_mapped_lun(self, sendMock):
sendMock.side_effect = [
(MockResponse(RES_MAPPING_RESP, 200)),
]
self.driver.find_current_host = Mock(
return_value='5e37d335-8fff-4aee-840a-34749301a16a')
self.driver._find_mapped_lun(
'62bbb941-ba4a-4101-927d-e527ce5ee011', '')
@mock.patch.object(TatlinCommonVolumeDriver, '_update_qos')
@mock.patch.object(TatlinCommonVolumeDriver, 'wait_volume_online')
@mock.patch.object(TatlinClientCommon, 'add_vol_to_host')
@mock.patch.object(TatlinClientCommon,
'is_volume_exists',
return_value=True)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_add_volume_to_host(self,
*args):
vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011')
self.driver.add_volume_to_host(
vol, '5e37d335-8fff-4aee-840a-34749301a16a'
)

@@ -0,0 +1,338 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest import TestCase
from cinder.tests.unit.volume.drivers.yadro.test_tatlin_common import \
MockResponse
from cinder.volume import configuration
from cinder.volume.drivers.yadro.tatlin_client import TatlinAccessAPI
from cinder.volume.drivers.yadro.tatlin_client import TatlinClientCommon
from cinder.volume.drivers.yadro.tatlin_common import tatlin_opts
from cinder.volume.drivers.yadro.tatlin_common import TatlinCommonVolumeDriver
from cinder.volume.drivers.yadro.tatlin_iscsi import TatlinISCSIVolumeDriver
from cinder.volume.drivers.yadro.tatlin_utils import TatlinVolumeConnections
OSMGR_ISCSI_PORTS = [
{
"id": "ip-sp-1-98039b04091a",
"meta": {
"tatlin-node": "sp-1",
"type": "ip",
"port-type": "active"
},
"params": {
"dhcp": False,
"ifname": "p30",
"physical-port": "p30",
"ipaddress": "172.20.101.65",
"netmask": "24",
"mtu": "1500",
"gateway": "172.20.101.1",
"roles": "",
"iflabel": "",
"wwpn": ""
}
},
{
"id": "ip-sp-0-b8599f1caf1b",
"meta": {
"tatlin-node": "sp-0",
"type": "ip",
"port-type": "active"
},
"params": {
"dhcp": False,
"ifname": "p31",
"physical-port": "p31",
"ipaddress": "172.20.101.66",
"netmask": "24",
"mtu": "1500",
"gateway": "172.20.101.1",
"roles": "",
"iflabel": "",
"wwpn": ""
}
},
{
"id": "ip-sp-1-98039b04091b",
"meta": {
"tatlin-node": "sp-1",
"type": "ip",
"port-type": "active"
},
"params": {
"dhcp": False,
"ifname": "p31",
"physical-port": "p31",
"ipaddress": "172.20.101.67",
"netmask": "24",
"mtu": "1500",
"gateway": "172.20.101.1",
"roles": "",
"iflabel": "",
"wwpn": ""
}
},
{
"id": "ip-sp-0-b8599f1caf1a",
"meta": {
"tatlin-node": "sp-0",
"type": "ip",
"port-type": "active"
},
"params": {
"dhcp": False,
"ifname": "p30",
"physical-port": "p30",
"ipaddress": "172.20.101.64",
"netmask": "24",
"mtu": "1500",
"gateway": "172.20.101.1",
"roles": "",
"iflabel": "",
"wwpn": ""
}
},
]
ISCSI_PORT_PORTALS = {
'p30': ['172.20.101.65:3260', '172.20.101.64:3260'],
'p31': ['172.20.101.66:3260', '172.20.101.67:3260']
}
RES_PORTS_RESP = [
{
"port": "fc20",
"port_status": "healthy",
"port_status_desc": "resource is available",
"running": [
"sp-0",
"sp-1"
],
"wwn": [
"10:00:14:52:90:00:03:10",
"10:00:14:52:90:00:03:90"
],
"lun": "scsi-lun-fc20-5",
"volume": "pty-vol-0d9627cb-c52e-49f1-878c-57c9bc3010c9",
"lun_index": "5"
}
]
ALL_HOSTS_RESP = [
{
"version": "d6a2d310d9adb16f0d24d5352b5c4837",
"id": "5e37d335-8fff-4aee-840a-34749301a16a",
"name": "victoria-fc",
"port_type": "fc",
"initiators": [
"21:00:34:80:0d:6b:aa:e3",
"21:00:34:80:0d:6b:aa:e2"
],
"tags": [],
"comment": "",
"auth": {}
}
]
RES_MAPPING_RESP = [
{
"resource_id": "62bbb941-ba4a-4101-927d-e527ce5ee011",
"host_id": "5e37d335-8fff-4aee-840a-34749301a16a",
"mapped_lun_id": 1
}
]
RESOURCE_INFORMATION = {
"ptyId": "62bbb941-ba4a-4101-927d-e527ce5ee011",
"id": "62bbb941-ba4a-4101-927d-e527ce5ee011",
"name": "res1",
"type": "block",
"poolId": "c46584c5-3113-4cc7-8a72-f9262f32c508",
"size": 1073741824,
"maxModifySize": 5761094647808,
"status": "ready",
"stat": {
"used_capacity": 1073741824,
"mapped_blocks": 0,
"dedup_count": 0,
"reduction_ratio": 0
},
"lbaFormat": "4kn",
"volume_id": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011",
"wwid": "naa.614529011650000c4000800000000004",
"lun_id": "4",
"cached": "true",
"rCacheMode": "enabled",
"wCacheMode": "enabled",
"ports": [
{
"port": "fc21",
"port_status": "healthy",
"port_status_desc":
"resource is available on all storage controllers",
"running": [
"sp-1",
"sp-0"
],
"wwn": [
"10:00:14:52:90:00:03:91",
"10:00:14:52:90:00:03:11"
],
"lun": "scsi-lun-fc21-4",
"volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011",
"lun_index": "4"
},
{
"port": "fc20",
"port_status": "healthy",
"port_status_desc":
"resource is available on all storage controllers",
"running": [
"sp-1",
"sp-0"
],
"wwn": [
"10:00:14:52:90:00:03:10",
"10:00:14:52:90:00:03:90"
],
"lun": "scsi-lun-fc20-4",
"volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011",
"lun_index": "4"
}
],
"volume_path": "/dev/mapper/dmc-89382c6c-7cf9-4ff8-bdbb-f438d20c960a",
"blockSize": "4kn",
"replication": {
"is_enabled": False
}
}
ALL_HOST_GROUP_RESP = [
{
"version": "20c28d21549fb7ec5777637f72f50043",
"id": "314b5546-45da-4c8f-a24c-b615265fbc32",
"name": "cinder-group",
"host_ids": [
"5e37d335-8fff-4aee-840a-34749301a16a"
],
"tags": None,
"comment": ""
}
]
HOST_GROUP_RESP = {
"version": "20c28d21549fb7ec5777637f72f50043",
"id": "314b5546-45da-4c8f-a24c-b615265fbc32",
"name": "cinder-group",
"host_ids": [
"5e37d335-8fff-4aee-840a-34749301a16a"
],
"tags": None,
"comment": ""
}
ISCSI_HOST_INFO = {
"version": "8c516c292055283e8ec3b7676d42f149",
"id": "5e37d335-8fff-4aee-840a-34749301a16a",
"name": "iscsi-host",
"port_type": "iscsi",
"initiators": [
"iqn.1994-05.com.redhat:4e5d7ab85a4c",
],
"tags": None,
"comment": "",
"auth": {
"auth_type": "none"
}
}
POOL_NAME = 'cinder-pool'
def get_fake_tatlin_config():
config = configuration.Configuration(
tatlin_opts,
configuration.SHARED_CONF_GROUP)
config.san_ip = '127.0.0.1'
config.san_password = 'pwd'
config.san_login = 'admin'
config.pool_name = POOL_NAME
config.host_group = 'cinder-group'
config.tat_api_retry_count = 1
config.wait_interval = 1
config.wait_retry_count = 3
config.chap_username = 'chap_user'
config.chap_password = 'chap_passwd'
config.state_path = '/tmp'
return config
class TatlinISCSIVolumeDriverTest(TestCase):
@mock.patch.object(TatlinVolumeConnections, 'create_store')
@mock.patch.object(TatlinAccessAPI, '_authenticate_access')
def setUp(self, auth_access, create_store):
access_api = TatlinAccessAPI('127.0.0.1', '443',
'user', 'passwd', False)
access_api._authenticate_access = MagicMock()
self.client = TatlinClientCommon(access_api,
api_retry_count=1,
wait_interval=1,
wait_retry_count=1)
mock.patch.object(TatlinAccessAPI, '_authenticate_access')
self.driver = TatlinISCSIVolumeDriver(
configuration=get_fake_tatlin_config())
self.driver._get_tatlin_client = MagicMock()
self.driver._get_tatlin_client.return_value = self.client
self.driver.do_setup(None)
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_success_find_current_host(self, sr_mock):
sr_mock.side_effect = [
(MockResponse(ALL_HOST_GROUP_RESP, 200)),
(MockResponse(HOST_GROUP_RESP, 200)),
(MockResponse(ISCSI_HOST_INFO, 200)),
]
self.assertEqual(self.driver.find_current_host(
'iqn.1994-05.com.redhat:4e5d7ab85a4c'),
'5e37d335-8fff-4aee-840a-34749301a16a')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_success_get_ports_portals(self, sr_mock):
sr_mock.side_effect = [
(MockResponse(OSMGR_ISCSI_PORTS, 200)),
]
portals = self.driver._get_ports_portals()
self.assertEqual(portals, ISCSI_PORT_PORTALS)
@mock.patch.object(TatlinCommonVolumeDriver, '_update_qos')
@mock.patch.object(TatlinAccessAPI, 'send_request')
def test_success_initialize_connection(self, sr_mock, qos_mock):
self.driver._get_ports_portals = Mock(return_value=OSMGR_ISCSI_PORTS)
self.driver.find_current_host = Mock(
return_value='5e37d335-8fff-4aee-840a-34749301a16a')
self.driver.add_volume_to_host = Mock()
sr_mock.side_effect = [
(MockResponse(RESOURCE_INFORMATION, 200)), # Get volume
(MockResponse(RES_MAPPING_RESP, 200)), # Is vol on host
(MockResponse(RES_PORTS_RESP, 200)), # Get ports
(MockResponse(ALL_HOSTS_RESP, 200)), # Find mapped LUN
]

@@ -0,0 +1,83 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from unittest.mock import mock_open
from unittest.mock import patch
from unittest import TestCase
from cinder.volume.drivers.yadro.tatlin_utils import TatlinVolumeConnections
VOL_ID = 'cinder-volume-id'
class TatlinVolumeConnectionsTest(TestCase):
@patch('oslo_concurrency.lockutils.lock', autospec=True)
@patch('os.mkdir')
@patch('os.path.isdir')
def setUp(self, isdir, mkdir, lock):
self.path = 'fake_path'
isdir.return_value = False
self.connections = TatlinVolumeConnections(self.path)
isdir.assert_called_once_with(self.path)
mkdir.assert_called_once_with(self.path)
isdir.reset_mock()
mkdir.reset_mock()
isdir.return_value = True
self.connections = TatlinVolumeConnections(self.path)
isdir.assert_called_once_with(self.path)
mkdir.assert_not_called()
@patch('oslo_concurrency.lockutils.lock', autospec=True)
@patch('builtins.open', mock_open(read_data='1'))
@patch('os.path.exists')
def test_get(self, exists, lock):
exists.side_effect = [False, True]
self.assertEqual(self.connections.get(VOL_ID), 0)
self.assertEqual(self.connections.get(VOL_ID), 1)
@patch('oslo_concurrency.lockutils.lock', autospec=True)
@patch('builtins.open', callable=mock_open(read_data='1'))
@patch('os.path.exists')
def test_increment(self, exists, open, lock):
exists.side_effect = [False, True]
self.assertEqual(self.connections.increment(VOL_ID), 1)
open.assert_called_once_with(os.path.join(self.path, VOL_ID), 'w')
with open() as f:
f.write.assert_called_once_with('1')
self.assertEqual(self.connections.increment(VOL_ID), 2)
open.assert_called_with(os.path.join(self.path, VOL_ID), 'w')
with open() as f:
f.write.assert_called_with('2')
@patch('oslo_concurrency.lockutils.lock', autospec=True)
@patch('builtins.open', callable=mock_open())
@patch('os.remove')
@patch('os.path.exists')
def test_decrement(self, exists, remove, open, lock):
exists.side_effect = [False, True, True]
with open() as f:
f.read.side_effect = [2, 1]
self.assertEqual(self.connections.decrement(VOL_ID), 0)
remove.assert_not_called()
self.assertEqual(self.connections.decrement(VOL_ID), 1)
open.assert_called_with(os.path.join(self.path, VOL_ID), 'w')
f.write.assert_called_with('1')
self.assertEqual(self.connections.decrement(VOL_ID), 0)
remove.assert_called_with(os.path.join(self.path, VOL_ID))
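
The tests above pin down the expected behaviour of TatlinVolumeConnections: a per-volume counter kept as a small file under the driver's state path, created on first increment, rewritten on every change, and removed once the count drops back to zero. A behavioural sketch under those assumptions (illustrative only; the real implementation lives in tatlin_utils and additionally takes an oslo.concurrency lock):

# Illustrative sketch of the counter behaviour the tests expect;
# not the driver's actual implementation.
import os


class FileBackedCounter:
    def __init__(self, path):
        self.path = path
        if not os.path.isdir(path):
            os.mkdir(path)

    def _file(self, vol_id):
        return os.path.join(self.path, vol_id)

    def get(self, vol_id):
        # A missing file means the volume has no recorded connections.
        if not os.path.exists(self._file(vol_id)):
            return 0
        with open(self._file(vol_id)) as f:
            return int(f.read())

    def increment(self, vol_id):
        count = self.get(vol_id) + 1
        with open(self._file(vol_id), 'w') as f:
            f.write(str(count))
        return count

    def decrement(self, vol_id):
        count = self.get(vol_id)
        if count <= 1:
            # Last connection gone: drop the counter file entirely.
            if count == 1:
                os.remove(self._file(vol_id))
            return 0
        with open(self._file(vol_id), 'w') as f:
            f.write(str(count - 1))
        return count - 1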

@@ -0,0 +1,28 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
HOST_GROUPS = 'personalities/v1/config/groups'
HOSTS = 'personalities/v1/config/hosts'
RESOURCE = 'personalities/v1/personalities/block/%s'
RESOURCE_DETAIL = 'personalities/v1/personalities?id=%s'
RESOURCE_HEALTH = 'health/v1/personalities?id=%s'
RESOURCE_MAPPING = 'personalities/v1/config/resource_mapping'
VOLUME_TO_HOST = 'personalities/v1/personalities/block/%s/hosts/%s'
ALL_RESOURCES = 'personalities/v1/personalities'
POOLS = 'health/v1/pools'
STATISTICS = 'health/v1/statistics/current'
IP_PORTS = 'osmgr/v1/ports/%s'
RESOURCE_COUNT = 'personalities/v1/personalities/block/countPerPool'
TATLIN_VERSION = 'upmgr/v1/version'
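
These constants are the REST endpoint templates that the client code below fills in with %-formatting before handing them to TatlinAccessAPI.send_request. For example (illustrative only; the UUIDs are placeholders):

# Illustrative only; the IDs below are placeholders.
from cinder.volume.drivers.yadro import tatlin_api

vol_id = '62bbb941-ba4a-4101-927d-e527ce5ee011'
host_id = '5e37d335-8fff-4aee-840a-34749301a16a'

resource_path = tatlin_api.RESOURCE % vol_id
# 'personalities/v1/personalities/block/62bbb941-...'
mapping_path = tatlin_api.VOLUME_TO_HOST % (vol_id, host_id)
# 'personalities/v1/personalities/block/62bbb941-.../hosts/5e37d335-...'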

@@ -0,0 +1,673 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
import requests
from cinder import exception
from cinder.i18n import _
from cinder.utils import retry
from cinder.volume.drivers.yadro import tatlin_api
from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException
LOG = logging.getLogger(__name__)
retry_exc = (Exception,)
def InitTatlinClient(ip, port, username, password,
verify, api_retry_count,
wait_interval, wait_retry_count):
access_api = TatlinAccessAPI(ip, port, username, password, verify)
tatlin_version = access_api.get_tatlin_version()
if tatlin_version <= (2, 3):
return TatlinClientV23(access_api,
api_retry_count=api_retry_count,
wait_interval=wait_interval,
wait_retry_count=wait_retry_count)
else:
return TatlinClientV25(access_api,
api_retry_count=api_retry_count,
wait_interval=wait_interval,
wait_retry_count=wait_retry_count)
class TatlinAccessAPI:
session = None
ip = None
port = None
username = None
password = None
verify = False
_api_version = None
def __init__(self, ip, port, user, passwd, verify):
self.ip = ip
self.port = port
self.username = user
self.password = passwd
self.verify = verify
self._authenticate_access()
def _authenticate_access(self):
LOG.debug('Generating new Tatlin API session')
self.session = requests.session()
self.session.verify = self.verify
LOG.debug('SSL verification %s', self.session.verify)
if not self.verify:
requests.packages.urllib3.disable_warnings()
# The management address here is expected to be IPv4 only.
response = self.session.post('https://%s:%d/auth/login'
% (self.ip, self.port),
data={'user': self.username,
'secret': self.password},
verify=self.verify)
if response.status_code != requests.codes.ok:
LOG.error('Failed to authenticate to remote cluster at %s for %s.',
self.ip, self.username)
raise exception.NotAuthorized(_('Authentication failure.'))
result = response.json()
self.session.headers.update({'X-Auth-Token': result['token']})
self.session.headers.update({'Content-Type': 'application/json'})
def send_request(self, path, input_data, method):
full_url = self._get_api(path)
resp = self.session.request(
method, full_url, verify=self.verify, json=input_data)
LOG.debug('Tatlin response for method %s URL %s %s',
method, full_url, resp)
if resp.status_code == requests.codes.unauthorized:
LOG.info('Not authenticated. Logging in.')
self._authenticate_access()
resp = self.session.request(
method, full_url, verify=self.verify, json=input_data)
return resp
def get_tatlin_version(self):
if not self._api_version:
response = self.send_request(tatlin_api.TATLIN_VERSION,
{}, 'GET')
ver = response.json()['build-version'].split('.')
self._api_version = (int(ver[0]), int(ver[1]))
LOG.debug('Tatlin version: %s', str(self._api_version))
return self._api_version
def _get_api(self, tail):
return ('https://%s:%d/' % (self.ip, self.port)) + tail
class TatlinClientCommon:
session = None
_api = None
access_api_retry_count = 1
def __init__(self, tatlin_rest_api, api_retry_count,
wait_interval, wait_retry_count):
self.session = None
self._api = tatlin_rest_api
self.access_api_retry_count = api_retry_count
self.wait_interval = wait_interval
self.wait_retry_count = wait_retry_count
def add_vol_to_host(self, vol_id, host_id):
LOG.debug('Adding volume %s to host %s', vol_id, host_id)
if self._is_vol_on_host(vol_id, host_id):
return
path = tatlin_api.VOLUME_TO_HOST % (vol_id, host_id)
try:
self._access_api(path, {}, 'PUT',
pass_codes=[requests.codes.bad_request])
except TatlinAPIException as exp:
message = _('Unable to add volume %s to host %s error %s' %
(vol_id, host_id, exp.message))
LOG.error(message)
raise TatlinAPIException(500, message)
if not self._is_vol_on_host(vol_id, host_id):
raise exception.VolumeBackendAPIException(
'Unable to add volume %s to host %s' % (vol_id, host_id))
return
def remove_vol_from_host(self, vol_id, host_id):
if not self._is_vol_on_host(vol_id, host_id):
return
path = tatlin_api.VOLUME_TO_HOST % (vol_id, host_id)
try:
LOG.debug('Removing volume %s from host %s', vol_id, host_id)
self._access_api(path, {}, 'DELETE',
pass_codes=[requests.codes.not_found,
requests.codes.bad_request])
except TatlinAPIException as exp:
message = _('Unable to remove volume %s from host %s error %s' %
(vol_id, host_id, exp.message))
LOG.error(message)
raise TatlinAPIException(500, message)
if self._is_vol_on_host(vol_id, host_id):
raise exception.VolumeBackendAPIException(
'Volume %s still on host %s' % (vol_id, host_id))
return
def create_volume(self,
vol_id, name,
size_in_byte,
pool_id,
lbaFormat='512e'):
data = {"name": name,
"size": size_in_byte,
"poolId": pool_id,
"deduplication": False,
"compression": False,
"alert_threshold": 0,
"lbaFormat": lbaFormat
}
path = tatlin_api.RESOURCE % vol_id
LOG.debug('Create volume: volume=%(v3)s path=%(v1)s body=%(v2)s',
{'v1': path, 'v2': data, 'v3': vol_id},)
try:
self._access_api(path, data, 'PUT')
except TatlinAPIException as exp:
message = _('Create volume %s failed due to %s' %
(vol_id, exp.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def delete_volume(self, vol_id):
LOG.debug('Delete volume %s', vol_id)
path = tatlin_api.RESOURCE % vol_id
try:
self._access_api(path, {}, 'DELETE',
pass_codes=[requests.codes.not_found,
requests.codes.bad_request])
except TatlinAPIException as exp:
message = _('Delete volume %s failed due to %s' %
(vol_id, exp.message))
LOG.error(message)
raise
def extend_volume(self, vol_id, new_size_in_byte):
path = tatlin_api.RESOURCE % vol_id
data = {"new_size": new_size_in_byte}
LOG.debug('Extending volume to %s ', new_size_in_byte)
try:
self._access_api(path, data, 'POST')
except TatlinAPIException as exp:
message = _('Unable to extend volume %s due to %s' %
(vol_id, exp.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def get_resource_mapping(self):
try:
result, status = self._access_api(tatlin_api.RESOURCE_MAPPING)
return result
except TatlinAPIException as exp:
message = _(
'TATLIN: Error getting resource mapping information %s' %
exp.message)
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def get_all_hosts(self):
try:
result, status = self._access_api(tatlin_api.HOSTS)
return result
except TatlinAPIException:
message = _('Unable to get hosts configuration')
raise exception.VolumeBackendAPIException(message=message)
def get_host_info(self, host_id):
try:
result, stat = self._access_api(tatlin_api.HOSTS + '/' + host_id)
LOG.debug('Host info for %s is %s', host_id, result)
return result
except TatlinAPIException as exp:
message = _('Unable to get host info %s error %s' %
(host_id, exp.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def get_host_id(self, name):
return self.get_host_id_by_name(name)
def get_iscsi_cred(self):
auth_path = tatlin_api.RESOURCE % 'auth'
try:
cred, status = self._access_api(auth_path)
except TatlinAPIException as exp:
message = _('Unable to get iscsi user cred due to %s' %
exp.message)
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
return cred
def get_host_group_info(self, group_id):
try:
result, status = self._access_api(tatlin_api.HOST_GROUPS + '/' +
group_id)
return result
except TatlinAPIException as exp:
message = _('Unable to get host group info %s error %s' %
(group_id, exp.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def get_host_group_id(self, name):
try:
result, status = self._access_api(tatlin_api.HOST_GROUPS)
for h in result:
LOG.debug('Host name: %s Host ID %s', h['name'], h['id'])
if h['name'] == name:
return h['id']
except TatlinAPIException as exp:
message = (_('Unable to get id for host group %s error %s') %
(name, exp.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(
message='Unable to find host group id for %s' % name)
def get_volume_ports(self, vol_id):
if not self.is_volume_exists(vol_id):
message = _('Unable to get volume info %s' % vol_id)
LOG.error(message)
return {}
path = tatlin_api.RESOURCE % vol_id + '/ports'
try:
response, stat = self._access_api(path)
except TatlinAPIException as exp:
message = _('Unable to get ports for target %s '
'with %s error code: %s' %
(vol_id, exp.message, exp.code))
LOG.error(message)
return {}
return response
def get_resource_ports_array(self, volume_id):
ports = self.get_volume_ports(volume_id)
if ports == {}:
return []
res = []
for p in ports:
res.append(p['port'])
LOG.debug('Volume %s port list %s', volume_id, res)
return res
def get_port_portal(self, portal_type):
path = tatlin_api.IP_PORTS % portal_type
try:
result, stat = self._access_api(path)
except TatlinAPIException as exp:
message = _('Failed to get ports info due to %s' % exp.message)
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
return result
def is_volume_exists(self, vol_id):
volume_path = tatlin_api.RESOURCE % vol_id
LOG.debug('get personality statistic: volume_path=%(v1)s ',
{'v1': volume_path})
try:
volume_result, status = self._access_api(
volume_path, {}, 'GET',
pass_codes=[requests.codes.not_found])
if status == requests.codes.not_found:
message = _('Volume %s does not exist' % vol_id)
LOG.debug(message)
return False
except TatlinAPIException as exp:
message = _('Exception Unable to get volume info %s '
'due to %s stat: %s' %
(vol_id, exp.message, exp.code))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
LOG.debug('Volume %s exists', vol_id)
return True
def get_volume(self, vol_id):
volume_path = tatlin_api.RESOURCE % vol_id
LOG.debug('get personality statistic: volume_path=%(v1)s',
{'v1': volume_path})
try:
volume_result, stat = self._access_api(
volume_path, {}, 'GET',
pass_codes=[requests.codes.not_found])
if stat == requests.codes.not_found:
message = _('Unable to get volume info %s due to %s stat: %s' %
(vol_id, 'Volume not found', '404'))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
except TatlinAPIException as exp:
message = _('Unable to get volume info %s due to %s stat: %s' %
(vol_id, exp.message, exp.code))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
return volume_result
def get_pool_id_by_name(self, pool_name):
try:
result, status = self._access_api(tatlin_api.POOLS)
except TatlinAPIException as exp:
message = _('Unable to get pool id for %s due to %s' %
(pool_name, exp.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
for p in result:
if p['name'] == pool_name:
return p['id']
message = _('Pool "%s" not found' % pool_name)
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def get_pool_detail(self, pool_id):
if not pool_id:
return {}
path = tatlin_api.POOLS + "/" + pool_id
try:
result, status = self._access_api(path)
except TatlinAPIException as exp:
message = _('Unable to get pool information for %s due to %s' %
(pool_id, exp.message))
LOG.error(message)
return {}
return result
def get_sys_statistic(self):
try:
sys_stat, status = self._access_api(tatlin_api.STATISTICS)
except TatlinAPIException as exp:
message = _('Unable to get system statistic due to %s' %
exp.message)
LOG.error(message)
raise
return sys_stat
def get_volume_info(self, vol_name):
path = tatlin_api.RESOURCE_DETAIL % vol_name
try:
result, status = self._access_api(path)
except TatlinAPIException as exp:
message = _('Unable to get volume %s error %s' %
(vol_name, exp.message))
LOG.error(message)
raise exception.ManageExistingInvalidReference(message)
return result
def get_tatlin_version(self):
return self._api.get_tatlin_version()
def get_resource_count(self, p_id):
raise NotImplementedError()
def is_volume_ready(self, id):
path = tatlin_api.RESOURCE_DETAIL % id
try:
result, status = self._access_api(path)
except TatlinAPIException:
return False
for p in result:
LOG.debug('Volume %s status: %s', id, p['status'])
if p['status'] != 'ready':
return False
return True
def get_volume_status(self, id):
path = tatlin_api.RESOURCE_HEALTH % id
try:
result, status = self._access_api(path)
except TatlinAPIException:
return False
for p in result:
LOG.debug('Volume status: %s', p['status'])
return p['status']
return ''
def set_port(self, vol_id, port):
path = tatlin_api.RESOURCE % vol_id + "/ports/" + port
try:
self._access_api(path, {}, 'PUT',
pass_codes=[requests.codes.conflict])
except TatlinAPIException as e:
message = _('Unable to link port %s for volume %s error %s' %
(port, vol_id, e.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def export_volume(self, vol_id, eth_ports):
raise NotImplementedError()
def export_vol_to_port_list(self, vol_id, port_list):
path = tatlin_api.RESOURCE % vol_id + "/ports/list"
try:
self._access_api(path,
port_list, 'PUT',
pass_codes=[
requests.codes.conflict,
requests.codes.bad_request])
except TatlinAPIException as e:
message = _('Unable to link ports %s for volume %s error %s' %
(port_list, vol_id, e.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def _access_api(self, path, input_data=None, method=None,
pass_codes=None):
@retry(retry_exc, interval=1,
retries=self.access_api_retry_count)
def do_access_api(path, input_data, method,
pass_codes):
if input_data is None:
input_data = {}
if method is None:
method = 'GET'
if pass_codes is None:
pass_codes = []
pass_codes = [requests.codes.ok] + pass_codes
startTime = time.time()
response = self._api.send_request(path, input_data, method)
finishTime = time.time()
duration = str((finishTime - startTime) * 1000) + ' ms'
postfix = '[FAST]' if finishTime - startTime < 15 else '[SLOW]'
try:
result = response.json()
except ValueError:
result = {}
if response.status_code not in pass_codes:
message = _('Request: method: %s path: %s '
'failed with status: %s message: %s in %s %s' %
(method, path, str(response.status_code),
result, duration, postfix))
LOG.debug(message)
raise TatlinAPIException(response.status_code,
message, path=path)
LOG.debug(
'Request %s %s successfully finished with %s code in %s %s',
method, path, str(response.status_code), duration, postfix)
return result, response.status_code
return do_access_api(path, input_data, method,
pass_codes)
def _is_vol_on_host(self, vol_id, host_id):
LOG.debug('Check resource %s in host %s', vol_id, host_id)
try:
result, status = self._access_api(tatlin_api.RESOURCE_MAPPING)
except TatlinAPIException as exp:
raise exception.VolumeBackendAPIException(
message=_('Tatlin API exception %s '
'while getting resource mapping' % exp.message))
for entry in result:
if 'host_id' in entry:
if entry['resource_id'] == vol_id and \
entry['host_id'] == host_id:
LOG.debug('Volume %s already on host %s',
vol_id, host_id)
return True
LOG.debug('Volume %s not on host %s', vol_id, host_id)
return False
def get_unassigned_ports(self, volume_id, eth_ports):
cur_ports = self.get_resource_ports_array(volume_id)
LOG.debug('VOLUME %s: Port needed %s actual %s',
volume_id, list(eth_ports.keys()), cur_ports)
return list(set(eth_ports.keys()) - set(cur_ports))
def is_port_assigned(self, volume_id, port):
LOG.debug('VOLUME %s: Checking port %s ', volume_id, port)
cur_ports = self._get_ports(volume_id)
res = port in cur_ports
LOG.debug('VOLUME %s: port %s assigned %s',
volume_id, port, str(res))
return res
def _check_group_mapping(self, vol_id, group_id):
LOG.debug('Check resource %s in group %s', vol_id, group_id)
try:
result, status = self._access_api(tatlin_api.RESOURCE_MAPPING)
except TatlinAPIException as exp:
raise exception.VolumeBackendAPIException(
message=_('Tatlin API exception %s '
'while getting resource mapping' % exp.message))
for entry in result:
if entry['resource_id'] == vol_id and \
entry['host_group_id'] == group_id:
return True
return False
    def update_qos(self, vol_id, iops, bandwidth):
pass
def get_host_id_by_name(self, host_name):
try:
result, status = self._access_api(tatlin_api.HOSTS)
for h in result:
LOG.debug('For host %s Host name: %s Host ID %s',
host_name, h['name'], h['id'])
if h['name'] == host_name:
return h['id']
except TatlinAPIException as exp:
message = _('Unable to get host information %s' % exp.message)
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
raise exception.VolumeBackendAPIException(
message='Unable to get host_id for host %s' % host_name)
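# Client for the newer (2.5) Tatlin API: supports per-volume QoS limits,
# exporting a volume to a list of ports in a single call and per-pool
# resource counters.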
class TatlinClientV25(TatlinClientCommon):
    def update_qos(self, vol_id, iops, bandwidth):
path = tatlin_api.RESOURCE % vol_id
data = {"limit_iops": int(iops),
"limit_bw": int(bandwith),
"tags": []}
try:
result, status = self._access_api(path, data, 'POST')
            LOG.debug('Response %s stat %s', result, status)
except TatlinAPIException as exp:
message = (_('Unable to update QoS for volume %s due to %s') %
(vol_id, exp.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def export_volume(self, vol_id, eth_ports):
LOG.debug('VOLUME %s: Export to ports %s started',
vol_id, eth_ports)
to_export = self.get_unassigned_ports(vol_id, eth_ports)
if not to_export:
LOG.debug('VOLUME %s: all ports already assigned', vol_id)
return
self.export_vol_to_port_list(vol_id, to_export)
for i in range(self.wait_retry_count):
if not self.get_unassigned_ports(vol_id, eth_ports):
LOG.debug('VOLUME %s: Export ports %s finished',
vol_id, eth_ports)
return
time.sleep(self.wait_interval)
message = (_('VOLUME %s: Unable to export volume to %s') %
(vol_id, eth_ports))
raise exception.VolumeBackendAPIException(message=message)
def get_resource_count(self, p_id):
try:
result, status = self._access_api(tatlin_api.RESOURCE_COUNT)
except TatlinAPIException:
message = _('Unable to get resource count')
LOG.error(message)
raise exception.ManageExistingInvalidReference(message)
        pool_resource = 0
        cluster_resources = 0
        for key in result:
            if key == p_id:
                pool_resource = result[key]
            cluster_resources = cluster_resources + result[key]
        return pool_resource, cluster_resources
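# Client for the older (2.3) Tatlin API: ports are linked to a volume one at
# a time and only the overall number of resources can be reported; there are
# no per-pool counters.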
class TatlinClientV23(TatlinClientCommon):
def export_volume(self, vol_id, eth_ports):
LOG.debug('Export ports %s for volume %s started',
eth_ports, vol_id)
for port in eth_ports:
LOG.debug('Check port %s for volume %s', port, vol_id)
if not self.is_port_assigned(vol_id, port):
try:
self.set_port(vol_id, port)
except TatlinAPIException as e:
raise exception.VolumeBackendAPIException(
message=e.message)
LOG.debug('Export ports %s for volume %s finished',
eth_ports, vol_id)
for i in range(self.wait_retry_count):
if not self.get_unassigned_ports(vol_id, eth_ports):
LOG.debug('VOLUME %s: Export ports %s finished',
vol_id, eth_ports)
return
time.sleep(self.wait_interval)
message = (_('VOLUME %s: Unable to export volume to %s') %
(vol_id, eth_ports))
raise exception.VolumeBackendAPIException(message=message)
def get_resource_count(self, p_id):
try:
response, status = self._access_api(tatlin_api.ALL_RESOURCES)
if response is not None:
return 0, len(response)
except TatlinAPIException:
message = (_('Unable to get resource list'))
LOG.error(message)
return 0, 0
return 0, 0

View File

@ -0,0 +1,778 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from cinder import context as cinder_context
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume.drivers.yadro.tatlin_client import InitTatlinClient
from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException
from cinder.volume.drivers.yadro.tatlin_utils import TatlinVolumeConnections
from cinder.volume import qos_specs
from cinder.volume import volume_types
from cinder.volume import volume_utils
from cinder.volume.volume_utils import brick_get_connector_properties
LOG = logging.getLogger(__name__)
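# TATLIN.UNIFIED specific configuration options. They are registered in the
# backend section of cinder.conf and are used together with the generic SAN
# options (san_ip, san_login, san_password) appended in the driver below.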
tatlin_opts = [
cfg.StrOpt('pool_name',
default='',
help='storage pool name'),
cfg.PortOpt('api_port',
default=443,
help='Port to use to access the Tatlin API'),
cfg.StrOpt('export_ports',
default='',
help='Ports to export Tatlin resource through'),
cfg.StrOpt('host_group',
default='',
help='Tatlin host group name'),
cfg.IntOpt('max_resource_count',
default=500,
help='Max resource count allowed for Tatlin'),
cfg.IntOpt('pool_max_resource_count',
default=250,
help='Max resource count allowed for single pool'),
cfg.IntOpt('tat_api_retry_count',
default=10,
help='Number of retry on Tatlin API'),
cfg.StrOpt('auth_method',
default='CHAP',
help='Authentication method for iSCSI (CHAP)'),
cfg.StrOpt('lba_format',
default='512e',
help='LBA Format for new volume'),
cfg.IntOpt('wait_retry_count',
default=15,
help='Number of checks for a lengthy operation to finish'),
cfg.IntOpt('wait_interval',
default=30,
               help='Number of seconds to wait before re-checking'),
]
CONF = cfg.CONF
CONF.register_opts(tatlin_opts, group=configuration.SHARED_CONF_GROUP)
class TatlinCommonVolumeDriver(driver.VolumeDriver, object):
def __init__(self, *args, **kwargs):
super(TatlinCommonVolumeDriver, self).__init__(*args, **kwargs)
self._ip = None
self._port = 443
self._user = None
self._password = None
self._pool_name = None
self._pool_id = None
self.configuration.append_config_values(san.san_opts)
self.configuration.append_config_values(tatlin_opts)
self._auth_method = 'CHAP'
self._chap_username = ''
self._chap_password = ''
self.backend_name = None
self.DRIVER_VOLUME_TYPE = None
self._export_ports = None
self._host_group = None
self.verify = None
self.DEFAULT_FILTER_FUNCTION = None
self.DEFAULT_GOODNESS_FUNCTION = None
self._use_multipath = True
self._enforce_multipath = False
self._lba_format = '512e'
self._ssl_cert_path = None
self._max_pool_resource_count = 250
def do_setup(self, context):
"""Initial driver setup"""
required_config = ['san_ip',
'san_login',
'san_password',
'pool_name',
'host_group']
for attr in required_config:
if not getattr(self.configuration, attr, None):
message = (_('config option %s is not set.') % attr)
raise exception.InvalidInput(message=message)
self._ip = self.configuration.san_ip
self._user = self.configuration.san_login
self._password = self.configuration.san_password
self._port = self.configuration.api_port
self._pool_name = self.configuration.pool_name
self._export_ports = self.configuration.export_ports
self._host_group = self.configuration.host_group
self._auth_method = self.configuration.auth_method
self._chap_username = self.configuration.chap_username
self._chap_password = self.configuration.chap_password
self._wait_interval = self.configuration.wait_interval
self._wait_retry_count = self.configuration.wait_retry_count
self._ssl_cert_path = (self.configuration.
safe_get('driver_ssl_cert_path') or None)
self.verify = (self.configuration.
safe_get('driver_ssl_cert_verify') or False)
if self.verify and self._ssl_cert_path:
self.verify = self._ssl_cert_path
LOG.info('Tatlin driver version: %s', self.VERSION)
self.tatlin_api = self._get_tatlin_client()
self.ctx = context
self.MAX_ALLOWED_RESOURCES = self.configuration.max_resource_count
self._max_pool_resource_count = \
self.configuration.pool_max_resource_count
self.DEFAULT_FILTER_FUNCTION = \
'capabilities.pool_resource_count < ' +\
str(self._max_pool_resource_count) +\
' and capabilities.overall_resource_count < ' +\
str(self.MAX_ALLOWED_RESOURCES)
self.DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
self._use_multipath = \
(self.configuration.safe_get(
'use_multipath_for_image_xfer') or False)
self._enforce_multipath = \
(self.configuration.safe_get(
'enforce_multipath_for_image_xfer') or False)
self._lba_format = self.configuration.lba_format
self._wait_interval = self.configuration.wait_interval
self._wait_retry_count = self.configuration.wait_retry_count
self._connections = TatlinVolumeConnections(
os.path.join(CONF.state_path,
'tatlin-volume-connections'))
def check_for_setup_error(self):
pass
@volume_utils.trace
def create_volume(self, volume):
"""Entry point for create new volume"""
if not self.pool_id:
raise exception.VolumeBackendAPIException(
message='Wrong Tatlin pool configuration')
pool_res_count, cluster_res_count = \
self.tatlin_api.get_resource_count(self.pool_id)
LOG.debug('Current pool %(pool)s has %(pool_res)s res.'
'Whole cluster has %(cluster_res)s',
{'pool': self.pool_id,
'pool_res': pool_res_count,
'cluster_res': cluster_res_count})
self._stats['pool_resource_count'] = pool_res_count
self._stats['overall_resource_count'] = cluster_res_count
if pool_res_count > 255:
message = _('TatlinVolumeDriver create volume failed. '
'Too many resources per pool created')
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
if cluster_res_count + 1 > self.MAX_ALLOWED_RESOURCES:
message = _('TatlinVolumeDriver create volume failed. '
'Too many resources per cluster created')
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
LOG.debug('Create volume %(vol_id)s started',
{'vol_id': volume.name_id})
self._create_volume_storage(volume)
LOG.debug('Create volume %s finished', volume.name_id)
def _create_volume_storage(self, volume):
"""Create a volume with a specific name in Tatlin"""
size = volume.size * units.Gi
vol_type = 'snapshot' if 'snapshot_volume' in volume.metadata \
else 'volume'
name = 'cinder-%s-%s' % (vol_type, volume.name_id)
LOG.debug('Creating Tatlin resource %(name)s '
'with %(size)s size in pool %(pool)s',
{'name': name, 'size': size, 'pool': self.pool_id})
self.tatlin_api.create_volume(volume.name_id,
name,
size,
self.pool_id,
lbaFormat=self._lba_format)
self.wait_volume_ready(volume)
self._update_qos(volume)
def wait_volume_ready(self, volume):
for counter in range(self._wait_retry_count):
if self.tatlin_api.is_volume_ready(volume.name_id):
return
LOG.warning('Volume %s is not ready', volume.name_id)
time.sleep(self._wait_interval)
message = _('Volume %s still not ready') % volume.name_id
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
def wait_volume_online(self, volume):
for counter in range(self._wait_retry_count):
if self.tatlin_api.get_volume_status(volume.name_id) == 'online':
return
LOG.warning('Volume %s still not online', volume.name_id)
time.sleep(self._wait_interval)
message = _('Volume %s unable to become online' % volume.name_id)
raise exception.VolumeBackendAPIException(message=message)
@volume_utils.trace
def delete_volume(self, volume):
"""Entry point for delete volume"""
LOG.debug('Delete volume started for %s', volume.name_id)
if not self.tatlin_api.is_volume_exists(volume.name_id):
LOG.debug('Volume %s does not exist', volume.name_id)
return
try:
self.tatlin_api.delete_volume(volume.name_id)
except TatlinAPIException as e:
message = _('Unable to delete volume %s due to %s' %
(volume.name_id, e))
raise exception.VolumeBackendAPIException(message=message)
for counter in range(self._wait_retry_count):
if not self.tatlin_api.is_volume_exists(volume.name_id):
LOG.debug('Delete volume finished for %s', volume.name_id)
return
LOG.debug('Volume %s still exists, waiting for delete...',
volume.name_id)
time.sleep(self._wait_interval)
if self.tatlin_api.is_volume_exists(volume.name_id):
message = _('Unable to delete volume %s' % volume.name_id)
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
@volume_utils.trace
def extend_volume(self, volume, new_size):
size = new_size * units.Gi
LOG.debug('Extending volume %s to %s', volume.name_id, size)
self.tatlin_api.extend_volume(volume.name_id, size)
self.wait_volume_ready(volume)
self._update_qos(volume)
@volume_utils.trace
def create_cloned_volume(self, volume, src_vol):
"""Entry point for clone existing volume"""
LOG.debug('Create cloned volume %(target)s from %(source)s started',
{'target': volume.name_id, 'source': src_vol.name_id})
self.create_volume(volume)
self._clone_volume_data(volume, src_vol)
LOG.debug('Create cloned volume %(target)s from %(source)s finished',
{'target': volume.name_id, 'source': src_vol.name_id})
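    # TATLIN.UNIFIED currently has no native clone or snapshot primitive, so
    # cloning is done host-side: both volumes are attached to the Cinder host
    # through os-brick and copied block by block with copy_volume().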
def _clone_volume_data(self, volume, src_vol):
props = brick_get_connector_properties(
self._use_multipath,
self._enforce_multipath)
LOG.debug('Volume %s Connection properties %s',
volume.name_id, props)
dest_attach_info = None
src_attach_info = None
size_in_mb = int(src_vol['size']) * units.Ki
try:
src_attach_info, volume_src = self._attach_volume(
self.ctx, src_vol, props)
LOG.debug('Source attach info: %s volume: %s',
src_attach_info, volume_src)
except Exception as e:
LOG.error('Unable to attach src volume due to %s', e)
raise
try:
dest_attach_info, volume_dest = self._attach_volume(
self.ctx, volume, props)
LOG.debug('Dst attach info: %s volume: %s',
dest_attach_info, volume_dest)
except Exception as e:
LOG.error('Unable to attach dst volume due to %s', e)
self._detach_volume(self.ctx, src_attach_info, src_vol, props)
raise
try:
LOG.debug('Begin copy to %s from %s',
volume.name_id, src_vol.name_id)
volume_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=False)
LOG.debug('End copy to %s from %s',
volume.name_id, src_vol.name_id)
except Exception as e:
LOG.error('Unable to clone volume source: %s dst: %s due to %s',
src_vol.name_id, volume.name_id, e)
raise
finally:
try:
self._detach_volume(self.ctx, src_attach_info, src_vol, props)
finally:
self._detach_volume(self.ctx, dest_attach_info, volume, props)
@volume_utils.trace
def _attach_volume(self, context, volume, properties, remote=False):
@utils.synchronized('tatlin-volume-attachments-%s' % volume.name_id)
def _do_attach_volume():
LOG.debug('Start Tatlin attach volume %s properties %s',
volume.name_id, properties)
return super(driver.VolumeDriver, self)._attach_volume(
context, volume, properties, remote=remote)
return _do_attach_volume()
@volume_utils.trace
def _detach_volume(self, context, attach_info, volume, properties,
force=False, remote=False, ignore_errors=False):
@utils.synchronized('tatlin-volume-attachments-%s' % volume.name_id)
def _do_detach_volume():
LOG.debug('Start Tatlin detach for %s', volume.name_id)
connection_count = self._connections.get(volume.name_id)
if connection_count > 1:
LOG.debug('There are still other connections to volume %s,'
' not detaching', volume.name_id)
self._connections.decrement(volume.name_id)
return
# decrement of connections will happen in terminate_connection()
super(driver.VolumeDriver, self).\
_detach_volume(context, attach_info, volume, properties,
force=force,
remote=remote,
ignore_errors=ignore_errors)
_do_detach_volume()
@volume_utils.trace
def terminate_connection(self, volume, connector, **kwargs):
@utils.synchronized("tatlin-volume-connections-%s" % volume.name_id)
def _terminate_connection():
LOG.debug('Terminate connection for %s with connector %s',
volume.name_id, connector)
if not connector:
return
if self._is_cinder_host_connection(connector):
connections = self._connections.decrement(volume.name_id)
if connections > 0:
LOG.debug('Not terminating connection: '
'volume %s, existing connections: %s',
volume.name_id, connections)
return
hostname = connector['host']
if self._is_nova_multiattached(volume, hostname):
LOG.debug('Volume %s is attached on host %s to multiple VMs.'
' Not terminating connection', volume.name_id,
hostname)
return
host = self.find_current_host(connector['initiator'])
LOG.debug('Terminate connection volume %s removing from host %s',
volume.name_id, host)
self.remove_volume_from_host(volume, host)
_terminate_connection()
def _is_cinder_host_connection(self, connector):
# Check if attachment happens on this Cinder host
properties = brick_get_connector_properties()
return properties['initiator'] == connector['initiator']
def _is_nova_multiattached(self, volume, hostname):
# Check if connection to the volume happens to multiple VMs
# on the same Nova Compute host
if not volume.volume_attachment:
return False
attachments = [a for a in volume.volume_attachment
if a.attach_status ==
objects.fields.VolumeAttachStatus.ATTACHED and
a.attached_host == hostname]
return len(attachments) > 1
def _create_temp_volume_for_snapshot(self, snapshot):
return self._create_temp_volume(
self.ctx,
snapshot.volume,
{
'name_id': snapshot.id,
'display_name': 'snap-vol-%s' % snapshot.id,
'metadata': {'snapshot_volume': 'yes'},
})
@volume_utils.trace
def create_snapshot(self, snapshot):
LOG.debug('Create snapshot for volume %s, snap id %s',
snapshot.volume.name_id,
snapshot.id)
temp_volume = self._create_temp_volume_for_snapshot(snapshot)
try:
self.create_cloned_volume(temp_volume, snapshot.volume)
finally:
temp_volume.destroy()
@volume_utils.trace
def create_volume_from_snapshot(self, volume, snapshot):
LOG.debug('Create volume from snapshot %s', snapshot.id)
temp_volume = self._create_temp_volume_for_snapshot(snapshot)
try:
self.create_volume(volume)
self._clone_volume_data(volume, temp_volume)
finally:
temp_volume.destroy()
@volume_utils.trace
def delete_snapshot(self, snapshot):
LOG.debug('Delete snapshot %s', snapshot.id)
temp_volume = self._create_temp_volume_for_snapshot(snapshot)
try:
self.delete_volume(temp_volume)
finally:
temp_volume.destroy()
@volume_utils.trace
def get_volume_stats(self, refresh=False):
if not self._stats or refresh:
self._update_volume_stats()
return self._stats
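    # QoS limits are derived from the volume type: values come from the
    # associated QoS spec unless it is consumed by the front end, with the
    # YADRO:total_iops_sec_max / YADRO:total_bytes_sec_max extra specs as a
    # fallback; a value of 0 means unlimited.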
def _update_qos(self, volume):
type_id = volume.volume_type_id
LOG.debug('VOL_TYPE %s', type_id)
if type_id:
ctx = cinder_context.get_admin_context()
volume_type = volume_types.get_volume_type(ctx, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
LOG.debug('VOL_TYPE %s QOS_SPEC %s', volume_type, qos_specs_id)
specs = {}
if qos_specs_id is not None:
sp = qos_specs.get_qos_specs(ctx, qos_specs_id)
if sp.get('consumer') != 'front-end':
specs = qos_specs.get_qos_specs(ctx, qos_specs_id)['specs']
LOG.debug('QoS spec: %s', specs)
param_specs = volume_type.get('extra_specs')
LOG.debug('Param spec is: %s', param_specs)
iops = specs["total_iops_sec_max"] \
if 'total_iops_sec_max' in specs \
else param_specs["YADRO:total_iops_sec_max"] \
if 'YADRO:total_iops_sec_max' in param_specs else '0'
bandwidth = specs["total_bytes_sec_max"] \
if 'total_bytes_sec_max' in specs \
else param_specs["YADRO:total_bytes_sec_max"] \
if 'YADRO:total_bytes_sec_max' in param_specs else '0'
LOG.debug('QOS spec IOPS: %s BANDWIDTH %s', iops, bandwidth)
self.tatlin_api.update_qos(
volume.name_id, int(iops), int(bandwidth))
@volume_utils.trace
def _update_volume_stats(self):
"""Retrieve pool info"""
LOG.debug('Update volume stats for pool: %s', self.pool_name)
if not self.pool_id:
LOG.error('Could not retrieve pool id for %s', self.pool_name)
return
try:
pool_stat = self.tatlin_api.get_pool_detail(self.pool_id)
except TatlinAPIException as exp:
message = (_('TatlinVolumeDriver get volume stats '
'failed %s due to %s') %
(self.pool_name, exp.message))
LOG.error(message)
return
try:
sys_stat = self.tatlin_api.get_sys_statistic()
except TatlinAPIException as exp:
message = (_('TatlinVolumeDriver get system stats detail '
'failed %s due to %s') %
(self.pool_name, exp.message))
LOG.error(message)
return
if sys_stat['iops_bandwidth'] is not None and \
len(sys_stat['iops_bandwidth']) > 0:
self._stats['read_iops'] = \
sys_stat['iops_bandwidth'][0]['value']['read_iops']
self._stats['write_iops'] = \
sys_stat['iops_bandwidth'][0]['value']['write_iops']
self._stats['total_iops'] = \
sys_stat['iops_bandwidth'][0]['value']['total_iops']
self._stats['read_bytes_ps'] = \
sys_stat['iops_bandwidth'][0]['value']['read_bytes_ps']
self._stats['write_bytes_ps'] = \
sys_stat['iops_bandwidth'][0]['value']['write_bytes_ps']
self._stats['total_bytes_ps'] = \
sys_stat['iops_bandwidth'][0]['value']['total_bytes_ps']
self._stats["volume_backend_name"] = self.backend_name
self._stats["vendor_name"] = 'YADRO'
self._stats["driver_version"] = self.VERSION
self._stats["storage_protocol"] = self.DRIVER_VOLUME_TYPE
self._stats["thin_provisioning_support"] = pool_stat['thinProvision']
self._stats["consistencygroup_support"] = False
self._stats["consistent_group_snapshot_enabled"] = False
self._stats["QoS_support"] = True
self._stats["multiattach"] = True
self._stats['total_capacity_gb'] = \
(int(pool_stat['capacity']) - int(pool_stat['failed'])) / units.Gi
self._stats['tatlin_pool'] = self.pool_name
self._stats['tatlin_ip'] = self._ip
pool_res_count, cluster_res_count = \
self.tatlin_api.get_resource_count(self.pool_id)
self._stats['overall_resource_count'] = cluster_res_count
self._stats['pool_resource_count'] = pool_res_count
if pool_stat['thinProvision']:
self._stats['provisioned_capacity_gb'] = \
(int(pool_stat['capacity']) -
int(pool_stat['failed'])) / units.Gi
self._stats['free_capacity_gb'] = \
self._stats['provisioned_capacity_gb']
else:
self._stats['provisioned_capacity_gb'] = \
(int(pool_stat['available']) -
int(pool_stat['failed'])) / units.Gi
self._stats['free_capacity_gb'] = \
self._stats['provisioned_capacity_gb']
self._stats['utilization'] = \
(float(self._stats['total_capacity_gb']) -
float(self._stats['free_capacity_gb'])) / \
float(self._stats['total_capacity_gb']) * 100
LOG.debug(
'Total capacity: %s Free capacity: %s '
'Provisioned capacity: %s '
'Thin provisioning: %s '
'Resource count: %s '
'Pool resource count %s '
'Utilization %s',
self._stats['total_capacity_gb'],
self._stats['free_capacity_gb'],
self._stats['provisioned_capacity_gb'],
pool_stat['thinProvision'], self._stats['overall_resource_count'],
self._stats['pool_resource_count'],
self._stats['utilization'])
def _init_vendor_properties(self):
LOG.debug('Initializing YADRO vendor properties')
properties = {}
self._set_property(
properties,
"YADRO:total_bytes_sec_max",
"YADRO QoS Max bytes Write",
_("Max write iops setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"YADRO:total_iops_sec_max",
"YADRO QoS Max IOPS Write",
_("Max write iops setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
LOG.debug('YADRO vendor properties: %s', properties)
return properties, 'YADRO'
def migrate_volume(self, context, volume, host):
"""Migrate volume
Method checks if target volume will be on the same Tatlin/Pool
If not, re-type should be executed.
"""
if 'tatlin_pool' not in host['capabilities']:
return False, None
self._update_qos(volume)
LOG.debug('Migrating volume from pool %s ip %s to pool %s ip %s',
self.pool_name, self._ip,
host['capabilities']['tatlin_pool'],
host['capabilities']['tatlin_ip'])
if host['capabilities']['tatlin_ip'] == self._ip and \
host['capabilities']['tatlin_pool'] == self.pool_name:
return True, None
return False, None
def manage_existing(self, volume, external_ref):
"""Entry point to manage existing resource"""
source_name = external_ref.get('source-name', None)
if source_name is None:
raise exception.ManageExistingInvalidReference(
                _('source-name should be provided'))
try:
result = self.tatlin_api.get_volume_info(source_name)
except Exception:
raise exception.ManageExistingInvalidReference(
_('Unable to get resource with %s name' % source_name))
existing_vol = result[0]
existing_vol['name'] = volume.name_id
volume.name_id = existing_vol['id']
pool_id = existing_vol['poolId']
if pool_id != self.pool_id:
raise exception.ManageExistingInvalidReference(
_('Existing volume should be in %s pool' % self.pool_name))
self._update_qos(volume)
def manage_existing_get_size(self, volume, external_ref):
source_name = external_ref.get('source-name', None)
if source_name is None:
raise exception.ManageExistingInvalidReference(
                _('source-name should be provided'))
try:
result = self.tatlin_api.get_volume_info(source_name)
except TatlinAPIException:
raise exception.ManageExistingInvalidReference(
_('Unable to get resource with %s name' % source_name))
        size = int(result[0]['size']) / units.Gi
return size
def add_volume_to_host(self, volume, host_id):
self.tatlin_api.add_vol_to_host(volume.name_id, host_id)
self._update_qos(volume)
def remove_volume_from_host(self, volume, host_id):
self.tatlin_api.remove_vol_from_host(volume.name_id, host_id)
def _is_port_assigned(self, volume_id, port):
LOG.debug('VOLUME %s: Checking port %s ', volume_id, port)
cur_ports = self.tatlin_api.get_resource_ports_array(volume_id)
res = port in cur_ports
LOG.debug('VOLUME %s: port %s assigned %s',
volume_id, port, str(res))
return res
def _get_ports_portals(self):
return {}
def _find_mapped_lun(self, volume_id, iqn):
host_id = self.find_current_host(iqn)
result = self.tatlin_api.get_resource_mapping()
for r in result:
if 'host_id' in r:
if r['resource_id'] == volume_id and r['host_id'] == host_id:
LOG.debug('Current mapped lun record %s volume_id: %s '
'host_id: is %s', r, volume_id, host_id)
return r['mapped_lun_id']
mess = (_('Unable to get mapped lun for volume %s on host %s') %
(volume_id, host_id))
LOG.error(mess)
raise exception.VolumeBackendAPIException(message=mess)
@staticmethod
def get_driver_options():
return tatlin_opts
@volume_utils.trace
def ensure_export(self, context, volume):
LOG.debug('Tatlin ensure export')
ports = self._get_ports_portals()
self.tatlin_api.export_volume(volume.name_id, ports)
@volume_utils.trace
def create_export(self, context, volume, connector):
LOG.debug('Create export for %s started', volume.name_id)
self.ensure_export(context, volume)
LOG.debug('Create export for %s finished', volume.name_id)
def remove_export(self, context, volume):
return
def _get_tatlin_client(self):
return InitTatlinClient(
self._ip, self._port, self._user,
self._password, verify=self.verify,
api_retry_count=self.configuration.tat_api_retry_count,
wait_interval=self._wait_interval,
wait_retry_count=self._wait_retry_count)
def find_current_host(self, wwn):
return ''
@property
def pool_id(self):
if not self._pool_id:
try:
self._pool_id = self.tatlin_api.get_pool_id_by_name(
self.pool_name)
except exception.VolumeBackendAPIException:
LOG.error('Unable to get current Tatlin pool')
return self._pool_id
@pool_id.setter
def pool_id(self, value):
self._pool_id = value
@property
def pool_name(self):
return self._pool_name
@pool_name.setter
def pool_name(self, value):
self._pool_name = value
def get_default_filter_function(self):
return self.DEFAULT_FILTER_FUNCTION
def get_default_goodness_function(self):
return self.DEFAULT_GOODNESS_FUNCTION
@volume_utils.trace
def get_backup_device(self, context, backup):
"""Get a backup device from an existing volume.
        We currently return the original volume where possible because
        the backend provides no instant clones or snapshots.
        """
if backup.snapshot_id:
return super().get_backup_device(context, backup)
volume = objects.Volume.get_by_id(context, backup.volume_id)
return (volume, False)
def backup_use_temp_snapshot(self):
return False
def snapshot_revert_use_temp_snapshot(self):
return False

View File

@ -0,0 +1,27 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.exception import VolumeBackendAPIException
class TatlinAPIException(VolumeBackendAPIException):
path = ''
code = 500
message = ''
def __init__(self, code, message, path=''):
self.code = code
self.message = message
self.path = path

View File

@ -0,0 +1,174 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.yadro.tatlin_common import TatlinCommonVolumeDriver
from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@interface.volumedriver
class TatlinISCSIVolumeDriver(TatlinCommonVolumeDriver, driver.ISCSIDriver):
"""ACCESS Tatlin ISCSI Driver.
Executes commands relating to ISCSI.
Supports creation of volumes.
.. code-block:: none
API version history:
1.0 - Initial version.
"""
VERSION = "1.0"
SUPPORTS_ACTIVE_ACTIVE = True
# ThirdPartySystems wiki
CI_WIKI_NAME = "Yadro_Tatlin_Unified_CI"
def __init__(self, vg_obj=None, *args, **kwargs):
# Parent sets db, host, _execute and base config
super(TatlinISCSIVolumeDriver, self).__init__(*args, **kwargs)
if self.configuration:
self.backend_name = (self.configuration.safe_get(
'volume_backend_name') or 'TatlinISCSI')
self.DRIVER_VOLUME_TYPE = 'iSCSI'
@volume_utils.trace
def initialize_connection(self, volume, connector):
@utils.synchronized("tatlin-volume-connections-%s" % volume.name_id)
def _initialize_connection():
LOG.debug('Init %s with connector %s', volume.name_id, connector)
eth_ports = self._get_ports_portals()
current_host = self.find_current_host(connector['initiator'])
self.add_volume_to_host(volume, current_host)
mapped_lun = self._find_mapped_lun(
volume.name_id, connector['initiator'])
port_result = self.tatlin_api.get_volume_ports(volume.name_id)
result = {
'driver_volume_type': 'iscsi',
'data': self._create_volume_data(port_result, eth_ports,
mapped_lun)
}
if self._is_cinder_host_connection(connector):
self._connections.increment(volume.name_id)
LOG.debug('Current connection info %s', result)
return result
return _initialize_connection()
def _get_ports_portals(self):
try:
result = self.tatlin_api.get_port_portal("ip")
except TatlinAPIException as exp:
message = (_('Failed to get ports info due to %s') % exp.message)
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
ports = {}
for p in result:
ipaddr = p['params']['ipaddress']
if not ipaddr:
continue
iface = p['params']['ifname']
if iface.startswith('p'):
if self._export_ports and iface not in self._export_ports:
continue
if iface not in ports:
ports[iface] = []
ports[iface].append(ipaddr + ':3260')
return ports
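    # Builds the iSCSI connection dictionary (target IQNs, portals and LUNs)
    # for the requested volume. On Tatlin API versions above 2.3 the CHAP
    # credentials come from the driver configuration, otherwise from the
    # credentials stored on the array.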
def _create_volume_data(self, port_inf, eth_ports, lun_id):
res = {'target_discovered': True, 'target_lun': lun_id}
tatlin_version = self.tatlin_api.get_tatlin_version()
if tatlin_version > (2, 3):
if self._auth_method == 'CHAP':
res['auth_method'] = 'CHAP'
res['auth_username'] = self._chap_username
res['auth_password'] = self._chap_password
else:
cred = self.tatlin_api.get_iscsi_cred()
res['auth_method'] = 'CHAP'
res['auth_username'] = cred['userid']
res['auth_password'] = cred['password']
target_luns = []
target_iqns = []
target_portals = []
LOG.debug('Port data: %s', port_inf)
for port in port_inf:
if port['port'] not in eth_ports.keys():
continue
ips = eth_ports[port['port']]
target_portals += ips
luns = [lun_id for _ in ips]
target_luns += luns
if 'running' in port:
target_iqns += port['wwn'] * len(port['running'])
else:
target_iqns += port['wwn']
if not target_portals or not target_iqns or not target_luns:
            message = (_('Not enough connection data, '
                         'luns: %s, portals: %s, iqns: %s') %
                       (target_luns, target_portals, target_iqns))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
res['target_lun'] = target_luns[0]
res['target_luns'] = target_luns
res['target_iqn'] = target_iqns[0]
res['target_iqns'] = target_iqns
res['target_portal'] = target_portals[0]
res['target_portals'] = target_portals
LOG.debug("Volume data = %s", res)
return res
def find_current_host(self, wwn):
LOG.debug('Try to find host id for %s', wwn)
gr_id = self.tatlin_api.get_host_group_id(self._host_group)
group_info = self.tatlin_api.get_host_group_info(gr_id)
LOG.debug('Group info for %s is %s', self._host_group, group_info)
for host_id in group_info['host_ids']:
if wwn in self.tatlin_api.get_host_info(host_id)['initiators']:
LOG.debug('Found host %s for initiator %s', host_id, wwn)
return host_id
mess = _('Unable to find host for initiator %s' % wwn)
LOG.error(mess)
raise exception.VolumeBackendAPIException(message=mess)

View File

@ -0,0 +1,88 @@
# Copyright (C) 2021-2022 YADRO.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
from cinder.utils import synchronized
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
class TatlinVolumeConnections:
"""Auxiliary class to keep current host volume connections counted
This class keeps connections of volumes to local host where this
Cinder instance runs. It prevents disconnection of devices and
termination of storage links in cases where two Cinder greenthreads
use the same volume (e.g. creation of new volumes from image cache)
or connection termination of Nova volume if Nova is collocated on
the same host (e.g. with snapshots while volumes are attached).
Once Tatlin implements clones and snaps this class should disappear.
"""
def __init__(self, path):
LOG.debug('Initialize counters for volume connections')
self.counters = path
self.create_store()
@synchronized('tatlin-connections-store', external=True)
def create_store(self):
if not os.path.isdir(self.counters):
os.mkdir(self.counters)
# We won't intersect with other backend processes
# because a volume belongs to one backend. Hence
    # no external flag is needed.
@synchronized('tatlin-connections-store')
def increment(self, id):
counter = os.path.join(self.counters, id)
connections = 0
if os.path.exists(counter):
with open(counter, 'r') as c:
connections = int(c.read())
connections += 1
with open(counter, 'w') as c:
c.write(str(connections))
return connections
@volume_utils.trace
@synchronized('tatlin-connections-store')
def decrement(self, id):
counter = os.path.join(self.counters, id)
if not os.path.exists(counter):
return 0
with open(counter, 'r') as c:
connections = int(c.read())
if connections == 1:
os.remove(counter)
return 0
connections -= 1
with open(counter, 'w') as c:
c.write(str(connections))
return connections
@volume_utils.trace
@synchronized('tatlin-connections-store')
def get(self, id):
counter = os.path.join(self.counters, id)
if not os.path.exists(counter):
return 0
with open(counter, 'r') as c:
connections = int(c.read())
return connections

View File

@ -0,0 +1,125 @@
============================
YADRO Cinder Driver
============================
The YADRO Cinder driver provides iSCSI support for
TATLIN.UNIFIED storage systems.
Supported Functions
~~~~~~~~~~~~~~~~~~~~
Basic Functions
---------------
* Create Volume
* Delete Volume
* Attach Volume
* Detach Volume
* Extend Volume
* Create Volume from Volume (clone)
* Create Image from Volume
* Volume Migration (host assisted)
Additional Functions
--------------------
* Extend an Attached Volume
* Thin Provisioning
* Manage/Unmanage Volume
* Image Cache
* Multiattach
* High Availability
Configuration
~~~~~~~~~~~~~
Set up TATLIN.UNIFIED storage
-----------------------------
Configure the storage system as described below. For details about
each setting, see the storage system user's guide.
#. User account
Create a storage account belonging to the admin user group.
#. Pool
Create a storage pool that is used by the driver.
#. Ports
   Set up the data Ethernet ports through which you want to export volumes.
#. Hosts
   Create storage hosts and assign the initiator ports to them. One host
   must correspond to exactly one initiator.
#. Host Group
   Create a storage host group and add the hosts created in the previous
   step to the host group.
#. CHAP Authentication
Set up CHAP credentials for iSCSI storage hosts (if CHAP is used).
Set up YADRO Cinder Driver
------------------------------------
Add the following configuration to ``/etc/cinder/cinder.conf``:
.. code-block:: ini
[iscsi-1]
volume_driver=cinder.volume.drivers.yadro.tatlin_iscsi.TatlinISCSIVolumeDriver
san_ip=<management_ip>
san_login=<login>
san_password=<password>
tat_api_retry_count=<count>
api_port=<management_port>
pool_name=<cinder_volumes_pool>
export_ports=<port1>,<port2>
host_group=<name>
max_resource_count=<count>
auth_method=<CHAP|NONE>
chap_username=<chap_username>
chap_password=<chap_password>
``volume_driver``
Volume driver name.
``san_ip``
TATLIN.UNIFIED management IP address or FQDN.
``san_login``
TATLIN.UNIFIED user name.
``san_password``
TATLIN.UNIFIED user password.
``tat_api_retry_count``
  Number of retries for failed TATLIN.UNIFIED API requests.
``api_port``
TATLIN.UNIFIED management port. Default: 443.
``pool_name``
  Name of the TATLIN.UNIFIED pool used for Cinder volumes.
``export_ports``
  Comma-separated list of data ports through which volumes are exported.
``host_group``
TATLIN.UNIFIED host group name.
``max_resource_count``
  Limit on the number of resources for TATLIN.UNIFIED. Default: 500.
``auth_method`` (only iSCSI)
Authentication method:
* ``CHAP`` — use CHAP authentication (default)
``chap_username``, ``chap_password`` (if ``auth_method=CHAP``)
CHAP credentials to validate the initiator.
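A backend section is only used once it is listed in ``enabled_backends``.
The following is a minimal illustrative sketch; the section name ``iscsi-1``
and the backend name ``tatlin-iscsi`` are placeholders rather than values
required by the driver:

.. code-block:: ini

   [DEFAULT]
   enabled_backends = iscsi-1

   [iscsi-1]
   volume_backend_name = tatlin-iscsi
   volume_driver = cinder.volume.drivers.yadro.tatlin_iscsi.TatlinISCSIVolumeDriver

After restarting the ``cinder-volume`` service, volumes can be directed to
this backend with a volume type whose ``volume_backend_name`` extra spec
matches the name above.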

View File

@ -213,6 +213,9 @@ title=Windows iSCSI Driver
[driver.win_smb]
title=Windows SMB Driver
[driver.yadro]
title=Yadro Tatlin Unified Driver (iSCSI)
[driver.zadara]
title=Zadara Storage Driver (iSCSI, NFS)
@ -292,6 +295,7 @@ driver.vzstorage=missing
driver.vmware=complete
driver.win_iscsi=complete
driver.win_smb=complete
driver.yadro=complete
driver.zadara=complete
[operation.online_extend_support]
@ -365,6 +369,7 @@ driver.vzstorage=complete
driver.vmware=complete
driver.win_iscsi=complete
driver.win_smb=complete
driver.yadro=complete
driver.zadara=complete
[operation.qos]
@ -441,6 +446,7 @@ driver.vzstorage=missing
driver.vmware=missing
driver.win_iscsi=missing
driver.win_smb=missing
driver.yadro=complete
driver.zadara=missing
[operation.volume_replication]
@ -516,6 +522,7 @@ driver.vzstorage=missing
driver.vmware=missing
driver.win_iscsi=missing
driver.win_smb=missing
driver.yadro=missing
driver.zadara=missing
[operation.consistency_groups]
@ -592,6 +599,7 @@ driver.vzstorage=missing
driver.vmware=missing
driver.win_iscsi=missing
driver.win_smb=missing
driver.yadro=missing
driver.zadara=missing
[operation.thin_provisioning]
@ -667,6 +675,7 @@ driver.vzstorage=missing
driver.vmware=missing
driver.win_iscsi=missing
driver.win_smb=complete
driver.yadro=complete
driver.zadara=missing
[operation.volume_migration_storage_assisted]
@ -743,6 +752,7 @@ driver.vzstorage=missing
driver.vmware=missing
driver.win_iscsi=missing
driver.win_smb=missing
driver.yadro=missing
driver.zadara=missing
[operation.multi-attach]
@ -819,6 +829,7 @@ driver.vzstorage=missing
driver.vmware=missing
driver.win_iscsi=missing
driver.win_smb=missing
driver.yadro=complete
driver.zadara=complete
[operation.revert_to_snapshot_assisted]
@ -892,6 +903,7 @@ driver.vzstorage=missing
driver.vmware=complete
driver.win_iscsi=missing
driver.win_smb=missing
driver.yadro=complete
driver.zadara=missing
[operation.active_active_ha]
@ -969,4 +981,5 @@ driver.vzstorage=missing
driver.vmware=missing
driver.win_iscsi=missing
driver.win_smb=missing
driver.yadro=complete
driver.zadara=missing

View File

@ -0,0 +1,4 @@
---
features:
- |
Yadro Tatlin Unified: Added initial version of the iSCSI driver.