Hitachi and OEM: Support multi pool
This patch support multi pool feature for Hitachi VSP driver and NEC V driver. Implements: blueprint hitachi-vsp-add-multi-pool Change-Id: I49ac061011293900b04a7a5b90ff5b840521993d
This commit is contained in:
parent
ad184ca698
commit
e9482b7f64
@ -1,4 +1,4 @@
|
||||
# Copyright (C) 2020, 2021, Hitachi, Ltd.
|
||||
# Copyright (C) 2020, 2022, Hitachi, Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -275,6 +275,10 @@ GET_POOLS_RESULT = {
|
||||
{
|
||||
"poolId": 30,
|
||||
"poolName": CONFIG_MAP['pool_name'],
|
||||
"availableVolumeCapacity": 480144,
|
||||
"totalPoolCapacity": 507780,
|
||||
"totalLocatedCapacity": 71453172,
|
||||
"virtualVolumeCapacityRate": -1,
|
||||
},
|
||||
],
|
||||
}
|
||||
@ -453,7 +457,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
self.configuration.driver_ssl_cert_verify = False
|
||||
|
||||
self.configuration.hitachi_storage_id = CONFIG_MAP['serial']
|
||||
self.configuration.hitachi_pool = "30"
|
||||
self.configuration.hitachi_pool = ["30"]
|
||||
self.configuration.hitachi_snap_pool = None
|
||||
self.configuration.hitachi_ldev_range = "0-1"
|
||||
self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']]
|
||||
@ -654,7 +658,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
configuration=self.configuration)
|
||||
self._setup_config()
|
||||
tmp_pool = self.configuration.hitachi_pool
|
||||
self.configuration.hitachi_pool = CONFIG_MAP['pool_name']
|
||||
self.configuration.hitachi_pool = [CONFIG_MAP['pool_name']]
|
||||
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
|
||||
FakeResponse(200, GET_POOLS_RESULT),
|
||||
FakeResponse(200, GET_PORTS_RESULT),
|
||||
@ -673,6 +677,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_create_volume(self, request):
|
||||
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt))
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(2, request.call_count)
|
||||
@ -686,6 +693,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
500, ERROR_RESULT,
|
||||
headers={'Content-Type': 'json'})
|
||||
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
self.assertRaises(exception.VolumeDriverException,
|
||||
self.driver.create_volume,
|
||||
fake_volume.fake_volume_obj(self.ctxt))
|
||||
@ -740,7 +750,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_get_volume_stats(
|
||||
self, request, get_filter_function, get_goodness_function):
|
||||
request.return_value = FakeResponse(200, GET_POOL_RESULT)
|
||||
request.return_value = FakeResponse(200, GET_POOLS_RESULT)
|
||||
get_filter_function.return_value = None
|
||||
get_goodness_function.return_value = None
|
||||
stats = self.driver.get_volume_stats(True)
|
||||
@ -757,6 +767,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(4, request.call_count)
|
||||
@ -793,6 +806,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
|
||||
self.assertEqual('1', vol['provider_location'])
|
||||
self.assertEqual(5, request.call_count)
|
||||
@ -804,6 +820,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
vol = self.driver.create_volume_from_snapshot(
|
||||
TEST_VOLUME[0], TEST_SNAPSHOT[0])
|
||||
self.assertEqual('1', vol['provider_location'])
|
||||
@ -1079,6 +1098,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_from_src(
|
||||
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
|
||||
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
|
||||
@ -1095,6 +1117,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_from_src(
|
||||
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
|
||||
group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
|
||||
@ -1137,6 +1162,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_snapshot(
|
||||
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
|
||||
)
|
||||
@ -1160,6 +1188,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_snapshot(
|
||||
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
|
||||
)
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Copyright (C) 2020, 2021, Hitachi, Ltd.
|
||||
# Copyright (C) 2020, 2022, Hitachi, Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -51,6 +51,7 @@ CONFIG_MAP = {
|
||||
'target_iscsi_name': 'iqn.hbsd-test-target',
|
||||
'user_id': 'user',
|
||||
'user_pass': 'password',
|
||||
'pool_name': 'test_pool',
|
||||
'ipv4Address': '111.22.333.44',
|
||||
'tcpPort': '5555',
|
||||
'auth_user': 'auth_user',
|
||||
@ -213,10 +214,17 @@ GET_LDEV_RESULT_PAIR = {
|
||||
"status": "NML",
|
||||
}
|
||||
|
||||
GET_POOL_RESULT = {
|
||||
"availableVolumeCapacity": 480144,
|
||||
"totalPoolCapacity": 507780,
|
||||
"totalLocatedCapacity": 71453172,
|
||||
GET_POOLS_RESULT = {
|
||||
"data": [
|
||||
{
|
||||
"poolId": 30,
|
||||
"poolName": CONFIG_MAP['pool_name'],
|
||||
"availableVolumeCapacity": 480144,
|
||||
"totalPoolCapacity": 507780,
|
||||
"totalLocatedCapacity": 71453172,
|
||||
"virtualVolumeCapacityRate": -1,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
GET_SNAPSHOTS_RESULT = {
|
||||
@ -322,7 +330,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
self.configuration.driver_ssl_cert_verify = False
|
||||
|
||||
self.configuration.hitachi_storage_id = CONFIG_MAP['serial']
|
||||
self.configuration.hitachi_pool = "30"
|
||||
self.configuration.hitachi_pool = ['30']
|
||||
self.configuration.hitachi_snap_pool = None
|
||||
self.configuration.hitachi_ldev_range = "0-1"
|
||||
self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']]
|
||||
@ -502,7 +510,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test__update_volume_stats(
|
||||
self, request, get_filter_function, get_goodness_function):
|
||||
request.return_value = FakeResponse(200, GET_POOL_RESULT)
|
||||
request.return_value = FakeResponse(200, GET_POOLS_RESULT)
|
||||
get_filter_function.return_value = None
|
||||
get_goodness_function.return_value = None
|
||||
self.driver._update_volume_stats()
|
||||
@ -515,6 +523,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_create_volume(self, request):
|
||||
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt))
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(2, request.call_count)
|
||||
@ -535,6 +546,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(4, request.call_count)
|
||||
@ -555,6 +569,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
|
||||
self.assertEqual('1', vol['provider_location'])
|
||||
self.assertEqual(5, request.call_count)
|
||||
@ -566,6 +583,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
vol = self.driver.create_volume_from_snapshot(
|
||||
TEST_VOLUME[0], TEST_SNAPSHOT[0])
|
||||
self.assertEqual('1', vol['provider_location'])
|
||||
@ -812,6 +832,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_from_src(
|
||||
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
|
||||
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
|
||||
@ -828,6 +851,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_from_src(
|
||||
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
|
||||
group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
|
||||
@ -870,6 +896,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_snapshot(
|
||||
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
|
||||
)
|
||||
@ -893,6 +922,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_snapshot(
|
||||
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
|
||||
)
|
||||
|
1135
cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py
Normal file
1135
cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,987 @@
|
||||
# Copyright (C) 2021 NEC corporation
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
"""Unit tests for NEC Driver."""
|
||||
|
||||
from unittest import mock
|
||||
|
||||
from oslo_config import cfg
|
||||
import requests
|
||||
|
||||
from cinder import context as cinder_context
|
||||
from cinder import db
|
||||
from cinder.db.sqlalchemy import api as sqlalchemy_api
|
||||
from cinder import exception
|
||||
from cinder.objects import group_snapshot as obj_group_snap
|
||||
from cinder.objects import snapshot as obj_snap
|
||||
from cinder.tests.unit import fake_group
|
||||
from cinder.tests.unit import fake_group_snapshot
|
||||
from cinder.tests.unit import fake_snapshot
|
||||
from cinder.tests.unit import fake_volume
|
||||
from cinder.tests.unit import test
|
||||
from cinder.volume import configuration as conf
|
||||
from cinder.volume import driver
|
||||
from cinder.volume.drivers.hitachi import hbsd_common
|
||||
from cinder.volume.drivers.hitachi import hbsd_rest
|
||||
from cinder.volume.drivers.hitachi import hbsd_rest_api
|
||||
from cinder.volume.drivers.nec.v import nec_v_iscsi
|
||||
from cinder.volume import volume_types
|
||||
from cinder.volume import volume_utils
|
||||
|
||||
# Configuration parameter values
|
||||
CONFIG_MAP = {
|
||||
'serial': '886000123456',
|
||||
'my_ip': '127.0.0.1',
|
||||
'rest_server_ip_addr': '172.16.18.108',
|
||||
'rest_server_ip_port': '23451',
|
||||
'port_id': 'CL1-A',
|
||||
'host_grp_name': 'NEC-127.0.0.1',
|
||||
'host_mode': 'LINUX/IRIX',
|
||||
'host_iscsi_name': 'iqn.nec-test-host',
|
||||
'target_iscsi_name': 'iqn.nec-test-target',
|
||||
'user_id': 'user',
|
||||
'user_pass': 'password',
|
||||
'ipv4Address': '111.22.333.44',
|
||||
'tcpPort': '5555',
|
||||
'pool_name': 'test_pool',
|
||||
'auth_user': 'auth_user',
|
||||
'auth_password': 'auth_password',
|
||||
}
|
||||
|
||||
DEFAULT_CONNECTOR = {
|
||||
'host': 'host',
|
||||
'ip': CONFIG_MAP['my_ip'],
|
||||
'initiator': CONFIG_MAP['host_iscsi_name'],
|
||||
'multipath': False,
|
||||
}
|
||||
|
||||
CTXT = cinder_context.get_admin_context()
|
||||
|
||||
TEST_VOLUME = []
|
||||
for i in range(4):
|
||||
volume = {}
|
||||
volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i)
|
||||
volume['name'] = 'test-volume{0:d}'.format(i)
|
||||
if i == 3:
|
||||
volume['provider_location'] = None
|
||||
else:
|
||||
volume['provider_location'] = '{0:d}'.format(i)
|
||||
volume['size'] = 128
|
||||
if i == 2:
|
||||
volume['status'] = 'in-use'
|
||||
else:
|
||||
volume['status'] = 'available'
|
||||
volume = fake_volume.fake_volume_obj(CTXT, **volume)
|
||||
TEST_VOLUME.append(volume)
|
||||
|
||||
|
||||
def _volume_get(context, volume_id):
|
||||
"""Return predefined volume info."""
|
||||
return TEST_VOLUME[int(volume_id.replace("-", ""))]
|
||||
|
||||
|
||||
TEST_SNAPSHOT = []
|
||||
snapshot = {}
|
||||
snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0)
|
||||
snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0)
|
||||
snapshot['provider_location'] = '{0:d}'.format(1)
|
||||
snapshot['status'] = 'available'
|
||||
snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0)
|
||||
snapshot['volume'] = _volume_get(None, snapshot['volume_id'])
|
||||
snapshot['volume_name'] = 'test-volume{0:d}'.format(0)
|
||||
snapshot['volume_size'] = 128
|
||||
snapshot = obj_snap.Snapshot._from_db_object(
|
||||
CTXT, obj_snap.Snapshot(),
|
||||
fake_snapshot.fake_db_snapshot(**snapshot))
|
||||
TEST_SNAPSHOT.append(snapshot)
|
||||
|
||||
TEST_GROUP = []
|
||||
for i in range(2):
|
||||
group = {}
|
||||
group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i)
|
||||
group['status'] = 'available'
|
||||
group = fake_group.fake_group_obj(CTXT, **group)
|
||||
TEST_GROUP.append(group)
|
||||
|
||||
TEST_GROUP_SNAP = []
|
||||
group_snapshot = {}
|
||||
group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0)
|
||||
group_snapshot['status'] = 'available'
|
||||
group_snapshot = obj_group_snap.GroupSnapshot._from_db_object(
|
||||
CTXT, obj_group_snap.GroupSnapshot(),
|
||||
fake_group_snapshot.fake_db_group_snapshot(**group_snapshot))
|
||||
TEST_GROUP_SNAP.append(group_snapshot)
|
||||
|
||||
# Dummy response for REST API
|
||||
POST_SESSIONS_RESULT = {
|
||||
"token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3",
|
||||
"sessionId": 0,
|
||||
}
|
||||
|
||||
GET_PORTS_RESULT = {
|
||||
"data": [
|
||||
{
|
||||
"portId": CONFIG_MAP['port_id'],
|
||||
"portType": "ISCSI",
|
||||
"portAttributes": [
|
||||
"TAR",
|
||||
"MCU",
|
||||
"RCU",
|
||||
"ELUN"
|
||||
],
|
||||
"portSpeed": "AUT",
|
||||
"loopId": "00",
|
||||
"fabricMode": False,
|
||||
"lunSecuritySetting": True,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
GET_PORT_RESULT = {
|
||||
"ipv4Address": CONFIG_MAP['ipv4Address'],
|
||||
"tcpPort": CONFIG_MAP['tcpPort'],
|
||||
}
|
||||
|
||||
GET_HOST_ISCSIS_RESULT = {
|
||||
"data": [
|
||||
{
|
||||
"hostGroupNumber": 0,
|
||||
"iscsiName": CONFIG_MAP['host_iscsi_name'],
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
GET_HOST_GROUP_RESULT = {
|
||||
"hostGroupName": CONFIG_MAP['host_grp_name'],
|
||||
"iscsiName": CONFIG_MAP['target_iscsi_name'],
|
||||
}
|
||||
|
||||
GET_HOST_GROUPS_RESULT = {
|
||||
"data": [
|
||||
{
|
||||
"hostGroupNumber": 0,
|
||||
"portId": CONFIG_MAP['port_id'],
|
||||
"hostGroupName": "NEC-test",
|
||||
"iscsiName": CONFIG_MAP['target_iscsi_name'],
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
COMPLETED_SUCCEEDED_RESULT = {
|
||||
"status": "Completed",
|
||||
"state": "Succeeded",
|
||||
"affectedResources": ('a/b/c/1',),
|
||||
}
|
||||
|
||||
GET_LDEV_RESULT = {
|
||||
"emulationType": "OPEN-V-CVS",
|
||||
"blockCapacity": 2097152,
|
||||
"attributes": ["CVS", "DP"],
|
||||
"status": "NML",
|
||||
}
|
||||
|
||||
GET_LDEV_RESULT_MAPPED = {
|
||||
"emulationType": "OPEN-V-CVS",
|
||||
"blockCapacity": 2097152,
|
||||
"attributes": ["CVS", "DP"],
|
||||
"status": "NML",
|
||||
"ports": [
|
||||
{
|
||||
"portId": CONFIG_MAP['port_id'],
|
||||
"hostGroupNumber": 0,
|
||||
"hostGroupName": CONFIG_MAP['host_grp_name'],
|
||||
"lun": 1
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
GET_LDEV_RESULT_PAIR = {
|
||||
"emulationType": "OPEN-V-CVS",
|
||||
"blockCapacity": 2097152,
|
||||
"attributes": ["CVS", "DP", "SS"],
|
||||
"status": "NML",
|
||||
}
|
||||
|
||||
GET_POOLS_RESULT = {
|
||||
"data": [
|
||||
{
|
||||
"poolId": 30,
|
||||
"poolName": CONFIG_MAP['pool_name'],
|
||||
"availableVolumeCapacity": 480144,
|
||||
"totalPoolCapacity": 507780,
|
||||
"totalLocatedCapacity": 71453172,
|
||||
"virtualVolumeCapacityRate": -1,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
GET_SNAPSHOTS_RESULT = {
|
||||
"data": [
|
||||
{
|
||||
"primaryOrSecondary": "S-VOL",
|
||||
"status": "PSUS",
|
||||
"pvolLdevId": 0,
|
||||
"muNumber": 1,
|
||||
"svolLdevId": 1,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
GET_SNAPSHOTS_RESULT_PAIR = {
|
||||
"data": [
|
||||
{
|
||||
"primaryOrSecondary": "S-VOL",
|
||||
"status": "PAIR",
|
||||
"pvolLdevId": 0,
|
||||
"muNumber": 1,
|
||||
"svolLdevId": 1,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
GET_LDEVS_RESULT = {
|
||||
"data": [
|
||||
{
|
||||
"ldevId": 0,
|
||||
"label": "15960cc738c94c5bb4f1365be5eeed44",
|
||||
},
|
||||
{
|
||||
"ldevId": 1,
|
||||
"label": "15960cc738c94c5bb4f1365be5eeed45",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
NOTFOUND_RESULT = {
|
||||
"data": [],
|
||||
}
|
||||
|
||||
|
||||
def _brick_get_connector_properties(multipath=False, enforce_multipath=False):
|
||||
"""Return a predefined connector object."""
|
||||
return DEFAULT_CONNECTOR
|
||||
|
||||
|
||||
class FakeResponse():
|
||||
|
||||
def __init__(self, status_code, data=None, headers=None):
|
||||
self.status_code = status_code
|
||||
self.data = data
|
||||
self.text = data
|
||||
self.content = data
|
||||
self.headers = {'Content-Type': 'json'} if headers is None else headers
|
||||
|
||||
def json(self):
|
||||
return self.data
|
||||
|
||||
|
||||
class VStorageRESTISCSIDriverTest(test.TestCase):
|
||||
"""Unit test class for NEC REST interface iSCSI module."""
|
||||
|
||||
test_existing_ref = {'source-id': '1'}
|
||||
test_existing_ref_name = {
|
||||
'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'}
|
||||
|
||||
def setUp(self):
|
||||
"""Set up the test environment."""
|
||||
def _set_required(opts, required):
|
||||
for opt in opts:
|
||||
opt.required = required
|
||||
|
||||
# Initialize Cinder and avoid checking driver options.
|
||||
rest_required_opts = [
|
||||
opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required]
|
||||
common_required_opts = [
|
||||
opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required]
|
||||
_set_required(rest_required_opts, False)
|
||||
_set_required(common_required_opts, False)
|
||||
super(VStorageRESTISCSIDriverTest, self).setUp()
|
||||
_set_required(rest_required_opts, True)
|
||||
_set_required(common_required_opts, True)
|
||||
|
||||
self.configuration = mock.Mock(conf.Configuration)
|
||||
self.ctxt = cinder_context.get_admin_context()
|
||||
self._setup_config()
|
||||
self._setup_driver()
|
||||
|
||||
def _setup_config(self):
|
||||
"""Set configuration parameter values."""
|
||||
self.configuration.config_group = "REST"
|
||||
|
||||
self.configuration.volume_backend_name = "RESTISCSI"
|
||||
self.configuration.volume_driver = (
|
||||
"cinder.volume.drivers.nec.v.nec_v_iscsi.VStorageISCSIDriver")
|
||||
self.configuration.reserved_percentage = "0"
|
||||
self.configuration.use_multipath_for_image_xfer = False
|
||||
self.configuration.enforce_multipath_for_image_xfer = False
|
||||
self.configuration.max_over_subscription_ratio = 500.0
|
||||
self.configuration.driver_ssl_cert_verify = False
|
||||
|
||||
self.configuration.nec_v_storage_id = CONFIG_MAP['serial']
|
||||
self.configuration.nec_v_pool = ["30"]
|
||||
self.configuration.nec_v_snap_pool = None
|
||||
self.configuration.nec_v_ldev_range = "0-1"
|
||||
self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']]
|
||||
self.configuration.nec_v_compute_target_ports = [
|
||||
CONFIG_MAP['port_id']]
|
||||
self.configuration.nec_v_group_create = True
|
||||
self.configuration.nec_v_group_delete = True
|
||||
self.configuration.nec_v_copy_speed = 3
|
||||
self.configuration.nec_v_copy_check_interval = 3
|
||||
self.configuration.nec_v_async_copy_check_interval = 10
|
||||
|
||||
self.configuration.san_login = CONFIG_MAP['user_id']
|
||||
self.configuration.san_password = CONFIG_MAP['user_pass']
|
||||
self.configuration.san_ip = CONFIG_MAP[
|
||||
'rest_server_ip_addr']
|
||||
self.configuration.san_api_port = CONFIG_MAP[
|
||||
'rest_server_ip_port']
|
||||
self.configuration.nec_v_rest_disable_io_wait = True
|
||||
self.configuration.nec_v_rest_tcp_keepalive = True
|
||||
self.configuration.nec_v_discard_zero_page = True
|
||||
self.configuration.nec_v_rest_number = "0"
|
||||
self.configuration.nec_v_lun_timeout = hbsd_rest._LUN_TIMEOUT
|
||||
self.configuration.nec_v_lun_retry_interval = (
|
||||
hbsd_rest._LUN_RETRY_INTERVAL)
|
||||
self.configuration.nec_v_restore_timeout = hbsd_rest._RESTORE_TIMEOUT
|
||||
self.configuration.nec_v_state_transition_timeout = (
|
||||
hbsd_rest._STATE_TRANSITION_TIMEOUT)
|
||||
self.configuration.nec_v_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT
|
||||
self.configuration.nec_v_rest_timeout = hbsd_rest_api._REST_TIMEOUT
|
||||
self.configuration.nec_v_extend_timeout = (
|
||||
hbsd_rest_api._EXTEND_TIMEOUT)
|
||||
self.configuration.nec_v_exec_retry_interval = (
|
||||
hbsd_rest_api._EXEC_RETRY_INTERVAL)
|
||||
self.configuration.nec_v_rest_connect_timeout = (
|
||||
hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT)
|
||||
self.configuration.nec_v_rest_job_api_response_timeout = (
|
||||
hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT)
|
||||
self.configuration.nec_v_rest_get_api_response_timeout = (
|
||||
hbsd_rest_api._GET_API_RESPONSE_TIMEOUT)
|
||||
self.configuration.nec_v_rest_server_busy_timeout = (
|
||||
hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT)
|
||||
self.configuration.nec_v_rest_keep_session_loop_interval = (
|
||||
hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL)
|
||||
self.configuration.nec_v_rest_another_ldev_mapped_retry_timeout = (
|
||||
hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT)
|
||||
self.configuration.nec_v_rest_tcp_keepidle = (
|
||||
hbsd_rest_api._TCP_KEEPIDLE)
|
||||
self.configuration.nec_v_rest_tcp_keepintvl = (
|
||||
hbsd_rest_api._TCP_KEEPINTVL)
|
||||
self.configuration.nec_v_rest_tcp_keepcnt = (
|
||||
hbsd_rest_api._TCP_KEEPCNT)
|
||||
self.configuration.nec_v_host_mode_options = []
|
||||
|
||||
self.configuration.use_chap_auth = True
|
||||
self.configuration.chap_username = CONFIG_MAP['auth_user']
|
||||
self.configuration.chap_password = CONFIG_MAP['auth_password']
|
||||
|
||||
self.configuration.san_thin_provision = True
|
||||
self.configuration.san_private_key = ''
|
||||
self.configuration.san_clustername = ''
|
||||
self.configuration.san_ssh_port = '22'
|
||||
self.configuration.san_is_local = False
|
||||
self.configuration.ssh_conn_timeout = '30'
|
||||
self.configuration.ssh_min_pool_conn = '1'
|
||||
self.configuration.ssh_max_pool_conn = '5'
|
||||
|
||||
self.configuration.safe_get = self._fake_safe_get
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.my_ip = CONFIG_MAP['my_ip']
|
||||
|
||||
def _fake_safe_get(self, value):
|
||||
"""Retrieve a configuration value avoiding throwing an exception."""
|
||||
try:
|
||||
val = getattr(self.configuration, value)
|
||||
except AttributeError:
|
||||
val = None
|
||||
return val
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
@mock.patch.object(
|
||||
volume_utils, 'brick_get_connector_properties',
|
||||
side_effect=_brick_get_connector_properties)
|
||||
def _setup_driver(
|
||||
self, brick_get_connector_properties=None, request=None):
|
||||
"""Set up the driver environment."""
|
||||
self.driver = nec_v_iscsi.VStorageISCSIDriver(
|
||||
configuration=self.configuration, db=db)
|
||||
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
|
||||
FakeResponse(200, GET_PORTS_RESULT),
|
||||
FakeResponse(200, GET_PORT_RESULT),
|
||||
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
|
||||
FakeResponse(200, GET_HOST_GROUP_RESULT)]
|
||||
self.driver.do_setup(None)
|
||||
self.driver.check_for_setup_error()
|
||||
self.driver.local_path(None)
|
||||
self.driver.create_export(None, None, None)
|
||||
self.driver.ensure_export(None, None)
|
||||
self.driver.remove_export(None, None)
|
||||
self.driver.create_export_snapshot(None, None, None)
|
||||
self.driver.remove_export_snapshot(None, None)
|
||||
# stop the Loopingcall within the do_setup treatment
|
||||
self.driver.common.client.keep_session_loop.stop()
|
||||
|
||||
def tearDown(self):
|
||||
self.client = None
|
||||
super(VStorageRESTISCSIDriverTest, self).tearDown()
|
||||
|
||||
# API test cases
|
||||
def test_driverinfo(self):
|
||||
drv = nec_v_iscsi.VStorageISCSIDriver(
|
||||
configuration=self.configuration, db=db)
|
||||
self.assertEqual(drv.common.driver_info['version'],
|
||||
"1.0.0")
|
||||
self.assertEqual(drv.common.driver_info['proto'],
|
||||
"iSCSI")
|
||||
self.assertEqual(drv.common.driver_info['hba_id'],
|
||||
"initiator")
|
||||
self.assertEqual(drv.common.driver_info['hba_id_type'],
|
||||
"iSCSI initiator IQN")
|
||||
self.assertEqual(drv.common.driver_info['msg_id']['target'].msg_id,
|
||||
309)
|
||||
self.assertEqual(drv.common.driver_info['volume_backend_name'],
|
||||
"NECiSCSI")
|
||||
self.assertEqual(drv.common.driver_info['volume_type'],
|
||||
"iscsi")
|
||||
self.assertEqual(drv.common.driver_info['param_prefix'],
|
||||
"nec_v")
|
||||
self.assertEqual(drv.common.driver_info['vendor_name'],
|
||||
"NEC")
|
||||
self.assertEqual(drv.common.driver_info['driver_prefix'],
|
||||
"NEC")
|
||||
self.assertEqual(drv.common.driver_info['driver_file_prefix'],
|
||||
"nec")
|
||||
self.assertEqual(drv.common.driver_info['target_prefix'],
|
||||
"NEC-")
|
||||
self.assertEqual(drv.common.driver_info['hdp_vol_attr'],
|
||||
"DP")
|
||||
self.assertEqual(drv.common.driver_info['hdt_vol_attr'],
|
||||
"DT")
|
||||
self.assertEqual(drv.common.driver_info['nvol_ldev_type'],
|
||||
"DP-VOL")
|
||||
self.assertEqual(drv.common.driver_info['target_iqn_suffix'],
|
||||
".nec-target")
|
||||
self.assertEqual(drv.common.driver_info['pair_attr'],
|
||||
"SS")
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
@mock.patch.object(
|
||||
volume_utils, 'brick_get_connector_properties',
|
||||
side_effect=_brick_get_connector_properties)
|
||||
def test_do_setup(self, brick_get_connector_properties, request):
|
||||
drv = nec_v_iscsi.VStorageISCSIDriver(
|
||||
configuration=self.configuration, db=db)
|
||||
self._setup_config()
|
||||
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
|
||||
FakeResponse(200, GET_PORTS_RESULT),
|
||||
FakeResponse(200, GET_PORT_RESULT),
|
||||
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
|
||||
FakeResponse(200, GET_HOST_GROUP_RESULT)]
|
||||
drv.do_setup(None)
|
||||
self.assertEqual(
|
||||
{CONFIG_MAP['port_id']:
|
||||
'%(ip)s:%(port)s' % {
|
||||
'ip': CONFIG_MAP['ipv4Address'],
|
||||
'port': CONFIG_MAP['tcpPort']}},
|
||||
drv.common.storage_info['portals'])
|
||||
self.assertEqual(1, brick_get_connector_properties.call_count)
|
||||
self.assertEqual(5, request.call_count)
|
||||
# stop the Loopingcall within the do_setup treatment
|
||||
self.driver.common.client.keep_session_loop.stop()
|
||||
self.driver.common.client.keep_session_loop.wait()
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
@mock.patch.object(
|
||||
volume_utils, 'brick_get_connector_properties',
|
||||
side_effect=_brick_get_connector_properties)
|
||||
def test_do_setup_create_hg(self, brick_get_connector_properties, request):
|
||||
"""Normal case: The host group not exists."""
|
||||
drv = nec_v_iscsi.VStorageISCSIDriver(
|
||||
configuration=self.configuration, db=db)
|
||||
self._setup_config()
|
||||
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
|
||||
FakeResponse(200, GET_PORTS_RESULT),
|
||||
FakeResponse(200, GET_PORT_RESULT),
|
||||
FakeResponse(200, NOTFOUND_RESULT),
|
||||
FakeResponse(200, NOTFOUND_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
drv.do_setup(None)
|
||||
self.assertEqual(
|
||||
{CONFIG_MAP['port_id']:
|
||||
'%(ip)s:%(port)s' % {
|
||||
'ip': CONFIG_MAP['ipv4Address'],
|
||||
'port': CONFIG_MAP['tcpPort']}},
|
||||
drv.common.storage_info['portals'])
|
||||
self.assertEqual(1, brick_get_connector_properties.call_count)
|
||||
self.assertEqual(8, request.call_count)
|
||||
# stop the Loopingcall within the do_setup treatment
|
||||
self.driver.common.client.keep_session_loop.stop()
|
||||
self.driver.common.client.keep_session_loop.wait()
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_extend_volume(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.extend_volume(TEST_VOLUME[0], 256)
|
||||
self.assertEqual(3, request.call_count)
|
||||
|
||||
@mock.patch.object(driver.ISCSIDriver, "get_goodness_function")
|
||||
@mock.patch.object(driver.ISCSIDriver, "get_filter_function")
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test__update_volume_stats(
|
||||
self, request, get_filter_function, get_goodness_function):
|
||||
request.return_value = FakeResponse(200, GET_POOLS_RESULT)
|
||||
get_filter_function.return_value = None
|
||||
get_goodness_function.return_value = None
|
||||
self.driver._update_volume_stats()
|
||||
self.assertEqual(
|
||||
'NEC', self.driver._stats['vendor_name'])
|
||||
self.assertTrue(self.driver._stats["pools"][0]['multiattach'])
|
||||
self.assertEqual(1, request.call_count)
|
||||
self.assertEqual(1, get_filter_function.call_count)
|
||||
self.assertEqual(1, get_goodness_function.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_create_volume(self, request):
|
||||
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt))
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(2, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_delete_volume(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.delete_volume(TEST_VOLUME[0])
|
||||
self.assertEqual(4, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
|
||||
def test_create_snapshot(self, volume_get, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(4, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_delete_snapshot(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.delete_snapshot(TEST_SNAPSHOT[0])
|
||||
self.assertEqual(4, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_create_cloned_volume(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
|
||||
self.assertEqual('1', vol['provider_location'])
|
||||
self.assertEqual(5, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_create_volume_from_snapshot(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
vol = self.driver.create_volume_from_snapshot(
|
||||
TEST_VOLUME[0], TEST_SNAPSHOT[0])
|
||||
self.assertEqual('1', vol['provider_location'])
|
||||
self.assertEqual(5, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_initialize_connection(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT),
|
||||
FakeResponse(200, GET_HOST_GROUP_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
ret = self.driver.initialize_connection(
|
||||
TEST_VOLUME[0], DEFAULT_CONNECTOR)
|
||||
self.assertEqual('iscsi', ret['driver_volume_type'])
|
||||
self.assertEqual(
|
||||
'%(ip)s:%(port)s' % {
|
||||
'ip': CONFIG_MAP['ipv4Address'],
|
||||
'port': CONFIG_MAP['tcpPort'],
|
||||
},
|
||||
ret['data']['target_portal'])
|
||||
self.assertEqual(CONFIG_MAP['target_iscsi_name'],
|
||||
ret['data']['target_iqn'])
|
||||
self.assertEqual('CHAP', ret['data']['auth_method'])
|
||||
self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username'])
|
||||
self.assertEqual(
|
||||
CONFIG_MAP['auth_password'], ret['data']['auth_password'])
|
||||
self.assertEqual(1, ret['data']['target_lun'])
|
||||
self.assertEqual(3, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_initialize_connection_shared_target(self, request):
|
||||
"""Normal case: A target shared with other systems."""
|
||||
request.side_effect = [FakeResponse(200, NOTFOUND_RESULT),
|
||||
FakeResponse(200, GET_HOST_GROUPS_RESULT),
|
||||
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
ret = self.driver.initialize_connection(
|
||||
TEST_VOLUME[0], DEFAULT_CONNECTOR)
|
||||
self.assertEqual('iscsi', ret['driver_volume_type'])
|
||||
self.assertEqual(
|
||||
'%(ip)s:%(port)s' % {
|
||||
'ip': CONFIG_MAP['ipv4Address'],
|
||||
'port': CONFIG_MAP['tcpPort'],
|
||||
},
|
||||
ret['data']['target_portal'])
|
||||
self.assertEqual(CONFIG_MAP['target_iscsi_name'],
|
||||
ret['data']['target_iqn'])
|
||||
self.assertEqual('CHAP', ret['data']['auth_method'])
|
||||
self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username'])
|
||||
self.assertEqual(
|
||||
CONFIG_MAP['auth_password'], ret['data']['auth_password'])
|
||||
self.assertEqual(1, ret['data']['target_lun'])
|
||||
self.assertEqual(4, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_terminate_connection(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT),
|
||||
FakeResponse(200, GET_HOST_GROUP_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT_MAPPED),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, NOTFOUND_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR)
|
||||
self.assertEqual(6, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_terminate_connection_not_connector(self, request):
|
||||
"""Normal case: Connector is None."""
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED),
|
||||
FakeResponse(200, GET_HOST_GROUP_RESULT),
|
||||
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
|
||||
FakeResponse(200, GET_HOST_GROUPS_RESULT),
|
||||
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT_MAPPED),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, NOTFOUND_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.terminate_connection(TEST_VOLUME[2], None)
|
||||
self.assertEqual(9, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_initialize_connection_snapshot(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT),
|
||||
FakeResponse(200, GET_HOST_GROUP_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
ret = self.driver.initialize_connection_snapshot(
|
||||
TEST_SNAPSHOT[0], DEFAULT_CONNECTOR)
|
||||
self.assertEqual('iscsi', ret['driver_volume_type'])
|
||||
self.assertEqual(
|
||||
'%(ip)s:%(port)s' % {
|
||||
'ip': CONFIG_MAP['ipv4Address'],
|
||||
'port': CONFIG_MAP['tcpPort'],
|
||||
},
|
||||
ret['data']['target_portal'])
|
||||
self.assertEqual(CONFIG_MAP['target_iscsi_name'],
|
||||
ret['data']['target_iqn'])
|
||||
self.assertEqual('CHAP', ret['data']['auth_method'])
|
||||
self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username'])
|
||||
self.assertEqual(
|
||||
CONFIG_MAP['auth_password'], ret['data']['auth_password'])
|
||||
self.assertEqual(1, ret['data']['target_lun'])
|
||||
self.assertEqual(3, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_terminate_connection_snapshot(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT),
|
||||
FakeResponse(200, GET_HOST_GROUP_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT_MAPPED),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, NOTFOUND_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.terminate_connection_snapshot(
|
||||
TEST_SNAPSHOT[0], DEFAULT_CONNECTOR)
|
||||
self.assertEqual(6, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_manage_existing(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
ret = self.driver.manage_existing(
|
||||
TEST_VOLUME[0], self.test_existing_ref)
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(2, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_manage_existing_name(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
ret = self.driver.manage_existing(
|
||||
TEST_VOLUME[0], self.test_existing_ref_name)
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(3, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_manage_existing_get_size(self, request):
|
||||
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
|
||||
self.driver.manage_existing_get_size(
|
||||
TEST_VOLUME[0], self.test_existing_ref)
|
||||
self.assertEqual(1, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_manage_existing_get_size_name(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT)]
|
||||
self.driver.manage_existing_get_size(
|
||||
TEST_VOLUME[0], self.test_existing_ref_name)
|
||||
self.assertEqual(2, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_unmanage(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT)]
|
||||
self.driver.unmanage(TEST_VOLUME[0])
|
||||
self.assertEqual(2, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_copy_image_to_volume(self, request):
|
||||
image_service = 'fake_image_service'
|
||||
image_id = 'fake_image_id'
|
||||
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
|
||||
with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \
|
||||
as mock_copy_image:
|
||||
self.driver.copy_image_to_volume(
|
||||
self.ctxt, TEST_VOLUME[0], image_service, image_id)
|
||||
mock_copy_image.assert_called_with(
|
||||
self.ctxt, TEST_VOLUME[0], image_service, image_id)
|
||||
self.assertEqual(1, request.call_count)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_update_migrated_volume(self, request):
|
||||
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
|
||||
self.assertRaises(
|
||||
NotImplementedError,
|
||||
self.driver.update_migrated_volume,
|
||||
self.ctxt,
|
||||
TEST_VOLUME[0],
|
||||
TEST_VOLUME[1],
|
||||
"available")
|
||||
self.assertEqual(1, request.call_count)
|
||||
|
||||
def test_unmanage_snapshot(self):
|
||||
"""The driver don't support unmange_snapshot."""
|
||||
self.assertRaises(
|
||||
NotImplementedError,
|
||||
self.driver.unmanage_snapshot,
|
||||
TEST_SNAPSHOT[0])
|
||||
|
||||
def test_retype(self):
|
||||
new_specs = {'nec:test': 'test'}
|
||||
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
|
||||
diff = {}
|
||||
host = {}
|
||||
ret = self.driver.retype(
|
||||
self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
|
||||
self.assertFalse(ret)
|
||||
|
||||
def test_backup_use_temp_snapshot(self):
|
||||
self.assertTrue(self.driver.backup_use_temp_snapshot())
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_revert_to_snapshot(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.revert_to_snapshot(
|
||||
self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0])
|
||||
self.assertEqual(5, request.call_count)
|
||||
|
||||
def test_create_group(self):
|
||||
ret = self.driver.create_group(self.ctxt, TEST_GROUP[0])
|
||||
self.assertIsNone(ret)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_delete_group(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
ret = self.driver.delete_group(
|
||||
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]])
|
||||
self.assertEqual(4, request.call_count)
|
||||
actual = (
|
||||
{'status': TEST_GROUP[0]['status']},
|
||||
[{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}]
|
||||
)
|
||||
self.assertTupleEqual(actual, ret)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_create_group_from_src_volume(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_from_src(
|
||||
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
|
||||
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
|
||||
)
|
||||
self.assertEqual(5, request.call_count)
|
||||
actual = (
|
||||
None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}])
|
||||
self.assertTupleEqual(actual, ret)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_create_group_from_src_snapshot(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_from_src(
|
||||
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
|
||||
group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
|
||||
)
|
||||
self.assertEqual(5, request.call_count)
|
||||
actual = (
|
||||
None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}])
|
||||
self.assertTupleEqual(actual, ret)
|
||||
|
||||
def test_create_group_from_src_volume_error(self):
|
||||
self.assertRaises(
|
||||
exception.VolumeDriverException, self.driver.create_group_from_src,
|
||||
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
|
||||
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]]
|
||||
)
|
||||
|
||||
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
|
||||
def test_update_group(self, is_group_a_cg_snapshot_type):
|
||||
is_group_a_cg_snapshot_type.return_value = False
|
||||
ret = self.driver.update_group(
|
||||
self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]])
|
||||
self.assertTupleEqual((None, None, None), ret)
|
||||
|
||||
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
|
||||
def test_update_group_error(self, is_group_a_cg_snapshot_type):
|
||||
is_group_a_cg_snapshot_type.return_value = True
|
||||
self.assertRaises(
|
||||
exception.VolumeDriverException, self.driver.update_group,
|
||||
self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]],
|
||||
remove_volumes=[TEST_VOLUME[0]]
|
||||
)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
|
||||
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
|
||||
def test_create_group_snapshot_non_cg(
|
||||
self, is_group_a_cg_snapshot_type, volume_get, request):
|
||||
is_group_a_cg_snapshot_type.return_value = False
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_snapshot(
|
||||
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
|
||||
)
|
||||
self.assertEqual(4, request.call_count)
|
||||
actual = (
|
||||
{'status': 'available'},
|
||||
[{'id': TEST_SNAPSHOT[0]['id'],
|
||||
'provider_location': '1',
|
||||
'status': 'available'}]
|
||||
)
|
||||
self.assertTupleEqual(actual, ret)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
|
||||
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
|
||||
def test_create_group_snapshot_cg(
|
||||
self, is_group_a_cg_snapshot_type, volume_get, request):
|
||||
is_group_a_cg_snapshot_type.return_value = True
|
||||
request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_group_snapshot(
|
||||
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
|
||||
)
|
||||
self.assertEqual(5, request.call_count)
|
||||
actual = (
|
||||
None,
|
||||
[{'id': TEST_SNAPSHOT[0]['id'],
|
||||
'provider_location': '1',
|
||||
'status': 'available'}]
|
||||
)
|
||||
self.assertTupleEqual(actual, ret)
|
||||
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_delete_group_snapshot(self, request):
|
||||
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
|
||||
FakeResponse(200, NOTFOUND_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(200, GET_SNAPSHOTS_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(200, GET_LDEV_RESULT),
|
||||
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
|
||||
ret = self.driver.delete_group_snapshot(
|
||||
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]])
|
||||
self.assertEqual(10, request.call_count)
|
||||
actual = (
|
||||
{'status': TEST_GROUP_SNAP[0]['status']},
|
||||
[{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}]
|
||||
)
|
||||
self.assertTupleEqual(actual, ret)
|
@ -158,7 +158,7 @@ class VStorageRESTFCDriverTest(test.TestCase):
|
||||
self.configuration.driver_ssl_cert_verify = False
|
||||
|
||||
self.configuration.nec_v_storage_id = CONFIG_MAP['serial']
|
||||
self.configuration.nec_v_pool = "30"
|
||||
self.configuration.nec_v_pool = ["30"]
|
||||
self.configuration.nec_v_snap_pool = None
|
||||
self.configuration.nec_v_ldev_range = "0-1"
|
||||
self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']]
|
||||
@ -378,6 +378,9 @@ class VStorageRESTFCDriverTest(test.TestCase):
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_create_volume(self, request):
|
||||
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt))
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(2, request.call_count)
|
||||
|
@ -180,7 +180,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
|
||||
self.configuration.driver_ssl_cert_verify = False
|
||||
|
||||
self.configuration.nec_v_storage_id = CONFIG_MAP['serial']
|
||||
self.configuration.nec_v_pool = "30"
|
||||
self.configuration.nec_v_pool = ["30"]
|
||||
self.configuration.nec_v_snap_pool = None
|
||||
self.configuration.nec_v_ldev_range = "0-1"
|
||||
self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']]
|
||||
@ -398,6 +398,9 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
|
||||
@mock.patch.object(requests.Session, "request")
|
||||
def test_create_volume(self, request):
|
||||
request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
|
||||
self.driver.common._stats = {}
|
||||
self.driver.common._stats['pools'] = [
|
||||
{'location_info': {'pool_id': 30}}]
|
||||
ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt))
|
||||
self.assertEqual('1', ret['provider_location'])
|
||||
self.assertEqual(2, request.call_count)
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Copyright (C) 2020, 2021, Hitachi, Ltd.
|
||||
# Copyright (C) 2020, 2022, Hitachi, Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -48,10 +48,10 @@ COMMON_VOLUME_OPTS = [
|
||||
'hitachi_storage_id',
|
||||
default=None,
|
||||
help='Product number of the storage system.'),
|
||||
cfg.StrOpt(
|
||||
cfg.ListOpt(
|
||||
'hitachi_pool',
|
||||
default=None,
|
||||
help='Pool number or pool name of the DP pool.'),
|
||||
default=[],
|
||||
help='Pool number[s] or pool name[s] of the DP pool.'),
|
||||
cfg.StrOpt(
|
||||
'hitachi_snap_pool',
|
||||
default=None,
|
||||
@ -165,7 +165,17 @@ class HBSDCommon():
]
self.port_index = {}

def create_ldev(self, size):
def get_pool_id_of_volume(self, volume):
pools = self._stats['pools']
if len(pools) == 1:
return pools[0]['location_info']['pool_id']
pool_name = volume_utils.extract_host(volume['host'], 'pool')
for pool in pools:
if pool['pool_name'] == pool_name:
return pool['location_info']['pool_id']
return None

def create_ldev(self, size, pool_id):
"""Create an LDEV and return its LDEV number."""
raise NotImplementedError()
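
The new get_pool_id_of_volume() is the piece that ties scheduling to the multi-pool stats: with a single pool it short-circuits, otherwise it matches the pool segment of the volume's host string (host@backend#pool) against the pool names reported in _stats['pools']. A simplified, self-contained sketch of that lookup (the partition() call stands in for volume_utils.extract_host(host, 'pool'); the stats and host values are made up):

def get_pool_id_of_volume(stats, volume_host):
    pools = stats['pools']
    if len(pools) == 1:
        # Single-pool backend: no lookup needed, keep the old behaviour.
        return pools[0]['location_info']['pool_id']
    pool_name = volume_host.partition('#')[2]
    for pool in pools:
        if pool['pool_name'] == pool_name:
            return pool['location_info']['pool_id']
    return None


stats = {'pools': [
    {'pool_name': 'pool0', 'location_info': {'pool_id': 30}},
    {'pool_name': 'pool1', 'location_info': {'pool_id': 31}},
]}
print(get_pool_id_of_volume(stats, 'node1@hbsd_fc#pool1'))  # 31
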
@ -175,8 +185,9 @@ class HBSDCommon():

def create_volume(self, volume):
"""Create a volume and return its properties."""
pool_id = self.get_pool_id_of_volume(volume)
try:
ldev = self.create_ldev(volume['size'])
ldev = self.create_ldev(volume['size'], pool_id)
except Exception:
with excutils.save_and_reraise_exception():
utils.output_log(MSG.CREATE_LDEV_FAILED)
@ -193,15 +204,16 @@ class HBSDCommon():
"""Create a copy pair on the storage."""
raise NotImplementedError()

def _copy_on_storage(self, pvol, size, is_snapshot=False):
def _copy_on_storage(
self, pvol, size, pool_id, is_snapshot=False):
"""Create a copy of the specified LDEV on the storage."""
ldev_info = self.get_ldev_info(['status', 'attributes'], pvol)
if ldev_info['status'] != 'NML':
msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
self.raise_error(msg)
svol = self.create_ldev(size)
svol = self.create_ldev(size, pool_id)
try:
self.create_pair_on_storage(pvol, svol, is_snapshot)
self.create_pair_on_storage(pvol, svol, is_snapshot=is_snapshot)
except Exception:
with excutils.save_and_reraise_exception():
try:
@ -221,7 +233,8 @@ class HBSDCommon():
self.raise_error(msg)

size = volume['size']
new_ldev = self._copy_on_storage(ldev, size)
pool_id = self.get_pool_id_of_volume(volume)
new_ldev = self._copy_on_storage(ldev, size, pool_id)
self.modify_ldev_name(new_ldev, volume['id'].replace("-", ""))

return {
@ -309,7 +322,9 @@ class HBSDCommon():
type='volume', id=src_vref['id'])
self.raise_error(msg)
size = snapshot['volume_size']
new_ldev = self._copy_on_storage(ldev, size, True)
pool_id = self.get_pool_id_of_volume(snapshot['volume'])
new_ldev = self._copy_on_storage(
ldev, size, pool_id, is_snapshot=True)
return {
'provider_location': str(new_ldev),
}
@ -330,10 +345,51 @@ class HBSDCommon():
else:
raise ex

def get_pool_info(self):
def get_pool_info(self, pool_id, result=None):
"""Return the total and free capacity of the storage pool."""
raise NotImplementedError()

def get_pool_infos(self, pool_ids):
"""Return the total and free capacity of the storage pools."""
raise NotImplementedError()

def _create_single_pool_data(self, pool_id, pool_name, cap_data):
location_info = {
'storage_id': self.conf.hitachi_storage_id,
'pool_id': pool_id,
'snap_pool_id': self.storage_info['snap_pool_id'],
'ldev_range': self.storage_info['ldev_range']}
single_pool = {}
single_pool.update(dict(
pool_name=pool_name,
reserved_percentage=self.conf.safe_get('reserved_percentage'),
QoS_support=False,
thick_provisioning_support=False,
multiattach=True,
consistencygroup_support=True,
consistent_group_snapshot_enabled=True,
location_info=location_info
))
if cap_data is None:
single_pool.update(dict(
provisioned_capacity_gb=0,
backend_state='down'))
utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool=pool_name)
return single_pool
total_capacity, free_capacity, provisioned_capacity = cap_data
single_pool.update(dict(
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
volume_utils.get_max_over_subscription_ratio(
self.conf.safe_get('max_over_subscription_ratio'),
True)),
thin_provisioning_support=True
))
single_pool.update(dict(backend_state='up'))
return single_pool

def update_volume_stats(self):
"""Update properties, capabilities and current states of the driver."""
data = {}
@ -346,42 +402,15 @@ class HBSDCommon():
'storage_protocol': self.storage_info['protocol'],
'pools': [],
}
single_pool = {}
single_pool.update(dict(
pool_name=data['volume_backend_name'],
reserved_percentage=self.conf.safe_get('reserved_percentage'),
QoS_support=False,
thick_provisioning_support=False,
multiattach=True,
consistencygroup_support=True,
consistent_group_snapshot_enabled=True
))
try:
(total_capacity, free_capacity,
provisioned_capacity) = self.get_pool_info()
except exception.VolumeDriverException:
single_pool.update(dict(
provisioned_capacity_gb=0,
backend_state='down'))
data["pools"].append(single_pool)
LOG.debug("Updating volume status. (%s)", data)
utils.output_log(
MSG.POOL_INFO_RETRIEVAL_FAILED,
pool=self.conf.hitachi_pool)
return data
single_pool.update(dict(
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
volume_utils.get_max_over_subscription_ratio(
self.conf.safe_get('max_over_subscription_ratio'),
True)),
thin_provisioning_support=True
))
single_pool.update(dict(backend_state='up'))
data["pools"].append(single_pool)
for pool_id, pool_name, cap_data in zip(
self.storage_info['pool_id'], self.conf.hitachi_pool,
self.get_pool_infos(self.storage_info['pool_id'])):
single_pool = self._create_single_pool_data(
pool_id, pool_name if len(self.conf.hitachi_pool) > 1 else
data['volume_backend_name'], cap_data)
data['pools'].append(single_pool)
LOG.debug("Updating volume status. (%s)", data)
self._stats = data
return data

def discard_zero_page(self, volume):
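
update_volume_stats() now emits one pools entry per configured pool and caches the whole result in self._stats for the create paths above. The only subtle part is the naming rule in the zip() loop: with a single configured pool the entry keeps the backend name, preserving the old single-pool reporting; with several pools each entry uses its configured value. A small sketch of just that rule (names are invented):

def reported_pool_names(backend_name, configured_pools):
    if len(configured_pools) > 1:
        # Multi pool: each entry is reported under its configured id/name.
        return list(configured_pools)
    # Single pool: keep the backend name, as before this change.
    return [backend_name] * len(configured_pools)


print(reported_pool_names('hbsd_backend1', ['30']))           # ['hbsd_backend1']
print(reported_pool_names('hbsd_backend1', ['30', 'pool1']))  # ['30', 'pool1']
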
@ -532,6 +561,12 @@ class HBSDCommon():
if not self.conf.safe_get(opt):
msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
self.raise_error(msg)
for pool in self.conf.hitachi_pool:
if len(pool) == 0:
msg = utils.output_log(
MSG.INVALID_PARAMETER,
param=self.driver_info['param_prefix'] + '_pool')
self.raise_error(msg)
if self.storage_info['protocol'] == 'FC':
self.check_param_fc()
if self.storage_info['protocol'] == 'iSCSI':
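
check_param() also gains a guard so that an empty entry in the pool list fails fast at startup instead of surfacing later as a pool lookup error. A minimal illustration of the check (a plain ValueError stands in for the driver's output_log/raise_error pair):

def check_pool_param(pools):
    for pool in pools:
        if len(pool) == 0:
            raise ValueError('the pool list contains an empty entry')


check_pool_param(['30', 'pool1'])  # passes silently
try:
    check_pool_param(['30', ''])
except ValueError as err:
    print(err)  # the pool list contains an empty entry
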
@ -1,4 +1,4 @@
# Copyright (C) 2020, 2021, Hitachi, Ltd.
# Copyright (C) 2020, 2022, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -71,6 +71,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
2.2.1 - Make the parameters name variable for supporting OEM storages.
2.2.2 - Add Target Port Assignment.
2.2.3 - Add port scheduler.
2.3.0 - Support multi pool.

"""

@ -148,9 +149,10 @@ class HBSDFCDriver(driver.FibreChannelDriver):
"""Return properties, capabilities and current states of the driver."""
data = self.common.update_volume_stats()
if 'pools' in data:
data["pools"][0]["filter_function"] = self.get_filter_function()
data["pools"][0]["goodness_function"] = (
self.get_goodness_function())
for pool in data['pools']:
pool["filter_function"] = self.get_filter_function()
pool["goodness_function"] = (
self.get_goodness_function())
self._stats = data

@volume_utils.trace
@ -1,4 +1,4 @@
# Copyright (C) 2020, 2021, Hitachi, Ltd.
# Copyright (C) 2020, 2022, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -71,6 +71,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
2.2.1 - Make the parameters name variable for supporting OEM storages.
2.2.2 - Add Target Port Assignment.
2.2.3 - Add port scheduler.
2.3.0 - Support multi pool.

"""

@ -144,9 +145,10 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
"""Return properties, capabilities and current states of the driver."""
data = self.common.update_volume_stats()
if 'pools' in data:
data["pools"][0]["filter_function"] = self.get_filter_function()
data["pools"][0]["goodness_function"] = (
self.get_goodness_function())
for pool in data['pools']:
pool["filter_function"] = self.get_filter_function()
pool["goodness_function"] = (
self.get_goodness_function())
self._stats = data

@volume_utils.trace
@ -1,4 +1,4 @@
# Copyright (C) 2020, 2021, Hitachi, Ltd.
# Copyright (C) 2020, 2022, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -274,11 +274,11 @@ class HBSDREST(common.HBSDCommon):
if self.client is not None:
self.client.enter_keep_session()

def _create_ldev_on_storage(self, size):
def _create_ldev_on_storage(self, size, pool_id):
"""Create an LDEV on the storage system."""
body = {
'byteFormatCapacity': '%sG' % size,
'poolId': self.storage_info['pool_id'],
'poolId': pool_id,
'isParallelExecutionEnabled': True,
}
if self.storage_info['ldev_range']:
@ -287,9 +287,9 @@ class HBSDREST(common.HBSDCommon):
body['endLdevId'] = max_ldev
return self.client.add_ldev(body, no_log=True)

def create_ldev(self, size):
def create_ldev(self, size, pool_id):
"""Create an LDEV of the specified size and the specified type."""
ldev = self._create_ldev_on_storage(size)
ldev = self._create_ldev_on_storage(size, pool_id=pool_id)
LOG.debug('Created logical device. (LDEV: %s)', ldev)
return ldev

@ -308,6 +308,12 @@ class HBSDREST(common.HBSDCommon):
ldev,
timeout_message=(MSG.LDEV_DELETION_WAIT_TIMEOUT, {'ldev': ldev}))

def _get_snap_pool_id(self, pvol):
return (
self.storage_info['snap_pool_id']
if self.storage_info['snap_pool_id'] is not None
else self.get_ldev_info(['poolId'], pvol)['poolId'])

def _get_copy_pair_status(self, ldev):
"""Return the status of the volume in a copy pair."""
params_s = {"svolLdevId": ldev}
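
_get_snap_pool_id() introduces the fallback used by every pair-creation body below: when hitachi_snap_pool is not configured, the snapshot pool follows the pool of the source LDEV, so each DP pool effectively keeps its snapshots local. A minimal sketch of that decision (pool ids are invented for illustration):

def get_snap_pool_id(configured_snap_pool_id, pvol_pool_id):
    # Explicit hitachi_snap_pool wins; otherwise follow the source LDEV's pool.
    return (configured_snap_pool_id
            if configured_snap_pool_id is not None
            else pvol_pool_id)


print(get_snap_pool_id(None, 31))  # 31 -- snapshot lands in the source pool
print(get_snap_pool_id(13, 31))    # 13 -- configured snap pool takes priority
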
@ -353,7 +359,7 @@ class HBSDREST(common.HBSDCommon):
}
try:
body = {"snapshotGroupName": snapshot_name,
"snapshotPoolId": self.storage_info['snap_pool_id'],
"snapshotPoolId": self._get_snap_pool_id(pvol),
"pvolLdevId": pvol,
"svolLdevId": svol,
"autoSplit": True,
@ -394,7 +400,7 @@ class HBSDREST(common.HBSDCommon):
else:
pace = 'faster'
body = {"snapshotGroupName": snapshot_name,
"snapshotPoolId": self.storage_info['snap_pool_id'],
"snapshotPoolId": self._get_snap_pool_id(pvol),
"pvolLdevId": pvol,
"svolLdevId": svol,
"isClone": True,
@ -697,23 +703,41 @@ class HBSDREST(common.HBSDCommon):
'%sG' % (new_size - old_size)}}
self.client.extend_ldev(ldev, body)

def get_pool_info(self):
def get_pool_info(self, pool_id, result=None):
"""Return the total and free capacity of the storage pool."""
result = self.client.get_pool(
self.storage_info['pool_id'],
ignore_message_id=[rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST])
if result is None:
result = self.client.get_pool(
pool_id, ignore_message_id=[
rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST])

if 'errorSource' in result:
msg = utils.output_log(MSG.POOL_NOT_FOUND,
pool=self.storage_info['pool_id'])
self.raise_error(msg)

tp_cap = result['totalPoolCapacity'] / units.Ki
ta_cap = result['availableVolumeCapacity'] / units.Ki
tl_cap = result['totalLocatedCapacity'] / units.Ki
if 'errorSource' in result:
msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
self.raise_error(msg)

tp_cap = result['totalPoolCapacity'] // units.Ki
ta_cap = result['availableVolumeCapacity'] // units.Ki
tl_cap = result['totalLocatedCapacity'] // units.Ki
return tp_cap, ta_cap, tl_cap

def get_pool_infos(self, pool_ids):
"""Return the total and free capacity of the storage pools."""
result = []
try:
result = self.client.get_pools()
except exception.VolumeDriverException:
utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool='all')
pool_infos = []
for pool_id in pool_ids:
for pool_data in result:
if pool_data['poolId'] == pool_id:
cap_data = self.get_pool_info(pool_id, pool_data)
break
else:
utils.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
cap_data = None
pool_infos.append(cap_data)
return pool_infos

def discard_zero_page(self, volume):
"""Return the volume's no-data pages to the storage pool."""
if self.conf.hitachi_discard_zero_page:
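
get_pool_infos() fetches capacity for all configured pools with a single GET of the pool list and degrades per pool: an id missing from the response yields None, which _create_single_pool_data() then reports as backend_state='down'. A simplified, self-contained sketch of the pairing logic (sample capacities are invented; units.Ki is replaced by the literal 1024):

def get_pool_infos(pool_ids, pools_response):
    infos = []
    for pool_id in pool_ids:
        for pool_data in pools_response:
            if pool_data['poolId'] == pool_id:
                infos.append((pool_data['totalPoolCapacity'] // 1024,
                              pool_data['availableVolumeCapacity'] // 1024,
                              pool_data['totalLocatedCapacity'] // 1024))
                break
        else:
            # Pool not returned by the storage: reported as 'down' upstream.
            infos.append(None)
    return infos


pools = [{'poolId': 30, 'totalPoolCapacity': 1048576,
          'availableVolumeCapacity': 524288, 'totalLocatedCapacity': 2097152}]
print(get_pool_infos([30, 31], pools))  # [(1024, 512, 2048), None]
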
@ -805,40 +829,34 @@ class HBSDREST(common.HBSDCommon):
_check_ldev_size(ldev_info, ldev, existing_ref)
return ldev_info['blockCapacity'] / utils.GIGABYTE_PER_BLOCK_SIZE

def _get_pool_id(self, name):
def _get_pool_id(self, pool_list, pool_name_or_id):
"""Get the pool id from specified name."""
pool_list = self.client.get_pools()
for pool_data in pool_list:
if pool_data['poolName'] == name:
if pool_name_or_id.isdigit():
return int(pool_name_or_id)
if pool_list['pool_list'] is None:
pool_list['pool_list'] = self.client.get_pools()
for pool_data in pool_list['pool_list']:
if pool_data['poolName'] == pool_name_or_id:
return pool_data['poolId']
return None
msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=pool_name_or_id)
self.raise_error(msg)

def check_pool_id(self):
"""Check the pool id of hitachi_pool and hitachi_snap_pool."""
pool = self.conf.hitachi_pool
if pool is not None:
if pool.isdigit():
self.storage_info['pool_id'] = int(pool)
else:
self.storage_info['pool_id'] = self._get_pool_id(pool)
if self.storage_info['pool_id'] is None:
msg = utils.output_log(
MSG.POOL_NOT_FOUND, pool=self.conf.hitachi_pool)
self.raise_error(msg)
pool_id_list = []
pool_list = {'pool_list': None}

for pool in self.conf.hitachi_pool:
pool_id_list.append(self._get_pool_id(pool_list, pool))

snap_pool = self.conf.hitachi_snap_pool
if snap_pool is not None:
if snap_pool.isdigit():
self.storage_info['snap_pool_id'] = int(snap_pool)
else:
self.storage_info['snap_pool_id'] = (
self._get_pool_id(snap_pool))
if self.storage_info['snap_pool_id'] is None:
msg = utils.output_log(MSG.POOL_NOT_FOUND,
pool=self.conf.hitachi_snap_pool)
self.raise_error(msg)
else:
self.storage_info['snap_pool_id'] = self.storage_info['pool_id']
self.storage_info['snap_pool_id'] = self._get_pool_id(
pool_list, snap_pool)
elif len(pool_id_list) == 1:
self.storage_info['snap_pool_id'] = pool_id_list[0]

self.storage_info['pool_id'] = pool_id_list

def _to_hostgroup(self, port, gid):
"""Get a host group name from host group ID."""
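
_get_pool_id() now accepts either a pool id or a pool name and shares a one-slot cache dict, so client.get_pools() is issued at most once while check_pool_id() resolves every configured pool plus the snap pool. A standalone sketch of the caching pattern (fake_get_pools() and the ValueError are stand-ins for the REST client call and the driver's output_log/raise_error pair):

calls = {'count': 0}


def fake_get_pools():
    calls['count'] += 1
    return [{'poolId': 30, 'poolName': 'pool0'},
            {'poolId': 31, 'poolName': 'pool1'}]


def get_pool_id(pool_list, pool_name_or_id):
    if pool_name_or_id.isdigit():
        return int(pool_name_or_id)
    if pool_list['pool_list'] is None:
        # Fetch the pool list once and reuse it for later lookups.
        pool_list['pool_list'] = fake_get_pools()
    for pool_data in pool_list['pool_list']:
        if pool_data['poolName'] == pool_name_or_id:
            return pool_data['poolId']
    raise ValueError('pool not found: %s' % pool_name_or_id)


pool_list = {'pool_list': None}
print([get_pool_id(pool_list, p) for p in ['30', 'pool1', 'pool0']])  # [30, 31, 30]
print(calls['count'])  # 1 -- the pool list was fetched only once
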
@ -1070,8 +1088,8 @@ class HBSDREST(common.HBSDCommon):
for pair in pairs:
try:
body = {"snapshotGroupName": snapshotgroup_name,
"snapshotPoolId":
self.storage_info['snap_pool_id'],
"snapshotPoolId": self._get_snap_pool_id(
pair['pvol']),
"pvolLdevId": pair['pvol'],
"svolLdevId": pair['svol'],
"isConsistencyGroup": True,
@ -1117,7 +1135,8 @@ class HBSDREST(common.HBSDCommon):
type='volume', id=snapshot.volume_id)
self.raise_error(msg)
size = snapshot.volume_size
pair['svol'] = self.create_ldev(size)
pool_id = self.get_pool_id_of_volume(snapshot.volume)
pair['svol'] = self.create_ldev(size, pool_id)
except Exception as exc:
pair['msg'] = utils.get_exception_msg(exc)
raise loopingcall.LoopingCallDone(pair)
@ -1,4 +1,4 @@
# Copyright (C) 2020, 2021, Hitachi, Ltd.
# Copyright (C) 2020, 2022, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -25,7 +25,7 @@ from oslo_utils import units

from cinder import exception

VERSION = '2.2.3'
VERSION = '2.3.0'
CI_WIKI_NAME = 'Hitachi_VSP_CI'
PARAM_PREFIX = 'hitachi'
VENDOR_NAME = 'Hitachi'
@ -27,10 +27,10 @@ COMMON_VOLUME_OPTS = [
'nec_v_storage_id',
default=None,
help='Product number of the storage system.'),
cfg.StrOpt(
cfg.ListOpt(
'nec_v_pool',
default=None,
help='Pool number or pool name of the DP pool.'),
default=[],
help='Pool number[s] or pool name[s] of the DP pool.'),
cfg.StrOpt(
'nec_v_snap_pool',
default=None,
@ -0,0 +1,5 @@
---
features:
  - |
    Added multi-pool support for the Hitachi VSP driver and the NEC V driver.