Hitachi: Add generic volume groups

This patch adds consistency group support to the Hitachi driver,
exposed through Cinder's generic volume groups.

DocImpact
Implements: blueprint hitachi-vsp-add-consistency-groups

Change-Id: I101d6899c8e7d4911c64cded2c10da68f5bceed2
Author: Kazumasa Nomura
Date: 2021-09-07 10:05:35 +00:00
parent dd00352c35
commit 36e0087577
13 changed files with 805 additions and 16 deletions
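
For context before the per-file diffs: the new driver entry points follow
Cinder's generic volume group interface, and whether a group snapshot has to
be point-in-time consistent is decided by the group type (the group spec
consistent_group_snapshot_enabled="<is> True"). The sketch below is
illustrative only and not part of the patch; `common` stands in for the
driver's backend object, and the two private helpers are the ones added in
hbsd_rest.py further down.

# Illustrative sketch, not part of the patch: the dispatch used by the new
# create_group_snapshot() entry point (see the hbsd_rest.py hunk below).
from cinder.volume import volume_utils


def create_group_snapshot(common, context, group_snapshot, snapshots):
    if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
        # Consistency requested: pair every member volume into one
        # storage-side snapshot group and split all pairs together.
        return common._create_cgsnapshot(context, group_snapshot, snapshots)
    # Plain generic group: snapshot each member volume independently.
    return common._create_non_cgsnapshot(group_snapshot, snapshots)

On the storage side, the consistent path creates every snapshot pair with
"isConsistencyGroup": True under a single snapshot group name and splits the
whole group in one REST call, which is what keeps the copies consistent
across the group.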


@@ -24,11 +24,13 @@ from requests import models
from cinder import context as cinder_context
from cinder import db
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder.objects import group_snapshot as obj_group_snap
from cinder.objects import snapshot as obj_snap
from cinder.tests.unit import fake_group
from cinder.tests.unit import fake_group_snapshot
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
-from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common
@@ -37,6 +39,7 @@ from cinder.volume.drivers.hitachi import hbsd_rest
from cinder.volume.drivers.hitachi import hbsd_rest_api
from cinder.volume.drivers.hitachi import hbsd_utils
from cinder.volume import volume_types
from cinder.volume import volume_utils
from cinder.zonemanager import utils as fczm_utils
# Configuration parameter values
@@ -73,11 +76,14 @@ DEFAULT_CONNECTOR = {
CTXT = cinder_context.get_admin_context()
TEST_VOLUME = []
-for i in range(3):
+for i in range(4):
volume = {}
volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i)
volume['name'] = 'test-volume{0:d}'.format(i)
-volume['provider_location'] = '{0:d}'.format(i)
+if i == 3:
+volume['provider_location'] = None
+else:
+volume['provider_location'] = '{0:d}'.format(i)
volume['size'] = 128
if i == 2:
volume['status'] = 'in-use'
@@ -107,6 +113,23 @@ snapshot = obj_snap.Snapshot._from_db_object(
fake_snapshot.fake_db_snapshot(**snapshot))
TEST_SNAPSHOT.append(snapshot)
TEST_GROUP = []
for i in range(2):
group = {}
group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i)
group['status'] = 'available'
group = fake_group.fake_group_obj(CTXT, **group)
TEST_GROUP.append(group)
TEST_GROUP_SNAP = []
group_snapshot = {}
group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0)
group_snapshot['status'] = 'available'
group_snapshot = obj_group_snap.GroupSnapshot._from_db_object(
CTXT, obj_group_snap.GroupSnapshot(),
fake_group_snapshot.fake_db_group_snapshot(**group_snapshot))
TEST_GROUP_SNAP.append(group_snapshot)
# Dummy response for REST API
POST_SESSIONS_RESULT = {
"token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3",
@@ -205,6 +228,18 @@ GET_SNAPSHOTS_RESULT = {
],
}
GET_SNAPSHOTS_RESULT_PAIR = {
"data": [
{
"primaryOrSecondary": "S-VOL",
"status": "PAIR",
"pvolLdevId": 0,
"muNumber": 1,
"svolLdevId": 1,
},
],
}
GET_SNAPSHOTS_RESULT_BUSY = {
"data": [
{
@@ -403,6 +438,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
'rest_server_ip_port']
self.configuration.hitachi_rest_tcp_keepalive = True
self.configuration.hitachi_discard_zero_page = True
self.configuration.hitachi_rest_number = "0"
self.configuration.hitachi_zoning_request = False
@@ -434,7 +470,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
-utils, 'brick_get_connector_properties',
+volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def _setup_driver(
self, brick_get_connector_properties=None, request=None):
@@ -462,7 +498,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
# API test cases
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
-utils, 'brick_get_connector_properties',
+volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def test_do_setup(self, brick_get_connector_properties, request):
drv = hbsd_fc.HBSDFCDriver(
@@ -483,7 +519,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
-utils, 'brick_get_connector_properties',
+volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def test_do_setup_create_hg(self, brick_get_connector_properties, request):
"""Normal case: The host group not exists."""
@@ -510,7 +546,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
-utils, 'brick_get_connector_properties',
+volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def test_do_setup_pool_name(self, brick_get_connector_properties, request):
"""Normal case: Specify a pool name instead of pool id"""
@@ -896,3 +932,143 @@ class HBSDRESTFCDriverTest(test.TestCase):
req = models.Response()
ret = session.__call__(req)
self.assertEqual('Session token', ret.headers['Authorization'])
def test_create_group(self):
ret = self.driver.create_group(self.ctxt, TEST_GROUP[0])
self.assertIsNone(ret)
@mock.patch.object(requests.Session, "request")
def test_delete_group(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.delete_group(
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]])
self.assertEqual(4, request.call_count)
actual = (
{'status': TEST_GROUP[0]['status']},
[{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}]
)
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
def test_create_group_from_src_volume(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.create_group_from_src(
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
)
self.assertEqual(5, request.call_count)
actual = (
None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}])
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
def test_create_group_from_src_snapshot(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.create_group_from_src(
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
)
self.assertEqual(5, request.call_count)
actual = (
None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}])
self.assertTupleEqual(actual, ret)
def test_create_group_from_src_volume_error(self):
self.assertRaises(
hbsd_utils.HBSDError, self.driver.create_group_from_src,
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]]
)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_update_group(self, is_group_a_cg_snapshot_type):
is_group_a_cg_snapshot_type.return_value = False
ret = self.driver.update_group(
self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]])
self.assertTupleEqual((None, None, None), ret)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_update_group_error(self, is_group_a_cg_snapshot_type):
is_group_a_cg_snapshot_type.return_value = True
self.assertRaises(
hbsd_utils.HBSDError, self.driver.update_group,
self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]],
remove_volumes=[TEST_VOLUME[0]]
)
@mock.patch.object(requests.Session, "request")
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_create_group_snapshot_non_cg(
self, is_group_a_cg_snapshot_type, volume_get, request):
is_group_a_cg_snapshot_type.return_value = False
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
ret = self.driver.create_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
)
self.assertEqual(4, request.call_count)
actual = (
{'status': 'available'},
[{'id': TEST_SNAPSHOT[0]['id'],
'provider_location': '1',
'status': 'available'}]
)
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_create_group_snapshot_cg(
self, is_group_a_cg_snapshot_type, volume_get, request):
is_group_a_cg_snapshot_type.return_value = True
request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
ret = self.driver.create_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
)
self.assertEqual(5, request.call_count)
actual = (
None,
[{'id': TEST_SNAPSHOT[0]['id'],
'provider_location': '1',
'status': 'available'}]
)
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
def test_delete_group_snapshot(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.delete_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]])
self.assertEqual(10, request.call_count)
actual = (
{'status': TEST_GROUP_SNAP[0]['status']},
[{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}]
)
self.assertTupleEqual(actual, ret)


@@ -22,17 +22,21 @@ import requests
from cinder import context as cinder_context
from cinder import db
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder.objects import group_snapshot as obj_group_snap
from cinder.objects import snapshot as obj_snap
from cinder.tests.unit import fake_group
from cinder.tests.unit import fake_group_snapshot
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
-from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_iscsi
from cinder.volume.drivers.hitachi import hbsd_rest
from cinder.volume.drivers.hitachi import hbsd_utils
from cinder.volume import volume_types
from cinder.volume import volume_utils
# Configuration parameter values
CONFIG_MAP = {
@@ -63,11 +67,14 @@ DEFAULT_CONNECTOR = {
CTXT = cinder_context.get_admin_context()
TEST_VOLUME = []
-for i in range(3):
+for i in range(4):
volume = {}
volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i)
volume['name'] = 'test-volume{0:d}'.format(i)
-volume['provider_location'] = '{0:d}'.format(i)
+if i == 3:
+volume['provider_location'] = None
+else:
+volume['provider_location'] = '{0:d}'.format(i)
volume['size'] = 128
if i == 2:
volume['status'] = 'in-use'
@@ -97,6 +104,23 @@ snapshot = obj_snap.Snapshot._from_db_object(
fake_snapshot.fake_db_snapshot(**snapshot))
TEST_SNAPSHOT.append(snapshot)
TEST_GROUP = []
for i in range(2):
group = {}
group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i)
group['status'] = 'available'
group = fake_group.fake_group_obj(CTXT, **group)
TEST_GROUP.append(group)
TEST_GROUP_SNAP = []
group_snapshot = {}
group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0)
group_snapshot['status'] = 'available'
group_snapshot = obj_group_snap.GroupSnapshot._from_db_object(
CTXT, obj_group_snap.GroupSnapshot(),
fake_group_snapshot.fake_db_group_snapshot(**group_snapshot))
TEST_GROUP_SNAP.append(group_snapshot)
# Dummy response for REST API
POST_SESSIONS_RESULT = {
"token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3",
@@ -205,6 +229,18 @@ GET_SNAPSHOTS_RESULT = {
],
}
GET_SNAPSHOTS_RESULT_PAIR = {
"data": [
{
"primaryOrSecondary": "S-VOL",
"status": "PAIR",
"pvolLdevId": 0,
"muNumber": 1,
"svolLdevId": 1,
},
],
}
GET_LDEVS_RESULT = {
"data": [
{
@@ -301,6 +337,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
'rest_server_ip_port']
self.configuration.hitachi_rest_tcp_keepalive = True
self.configuration.hitachi_discard_zero_page = True
self.configuration.hitachi_rest_number = "0"
self.configuration.use_chap_auth = True
self.configuration.chap_username = CONFIG_MAP['auth_user']
@@ -330,7 +367,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
-utils, 'brick_get_connector_properties',
+volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def _setup_driver(
self, brick_get_connector_properties=None, request=None):
@@ -360,7 +397,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
# API test cases
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
-utils, 'brick_get_connector_properties',
+volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def test_do_setup(self, brick_get_connector_properties, request):
drv = hbsd_iscsi.HBSDISCSIDriver(
@@ -386,7 +423,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
-utils, 'brick_get_connector_properties',
+volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties)
def test_do_setup_create_hg(self, brick_get_connector_properties, request):
"""Normal case: The host group not exists."""
@@ -700,3 +737,143 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.driver.revert_to_snapshot(
self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0])
self.assertEqual(5, request.call_count)
def test_create_group(self):
ret = self.driver.create_group(self.ctxt, TEST_GROUP[0])
self.assertIsNone(ret)
@mock.patch.object(requests.Session, "request")
def test_delete_group(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.delete_group(
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]])
self.assertEqual(4, request.call_count)
actual = (
{'status': TEST_GROUP[0]['status']},
[{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}]
)
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
def test_create_group_from_src_volume(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.create_group_from_src(
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
)
self.assertEqual(5, request.call_count)
actual = (
None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}])
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
def test_create_group_from_src_snapshot(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.create_group_from_src(
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
)
self.assertEqual(5, request.call_count)
actual = (
None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}])
self.assertTupleEqual(actual, ret)
def test_create_group_from_src_volume_error(self):
self.assertRaises(
hbsd_utils.HBSDError, self.driver.create_group_from_src,
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]]
)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_update_group(self, is_group_a_cg_snapshot_type):
is_group_a_cg_snapshot_type.return_value = False
ret = self.driver.update_group(
self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]])
self.assertTupleEqual((None, None, None), ret)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_update_group_error(self, is_group_a_cg_snapshot_type):
is_group_a_cg_snapshot_type.return_value = True
self.assertRaises(
hbsd_utils.HBSDError, self.driver.update_group,
self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]],
remove_volumes=[TEST_VOLUME[0]]
)
@mock.patch.object(requests.Session, "request")
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_create_group_snapshot_non_cg(
self, is_group_a_cg_snapshot_type, volume_get, request):
is_group_a_cg_snapshot_type.return_value = False
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
ret = self.driver.create_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
)
self.assertEqual(4, request.call_count)
actual = (
{'status': 'available'},
[{'id': TEST_SNAPSHOT[0]['id'],
'provider_location': '1',
'status': 'available'}]
)
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_create_group_snapshot_cg(
self, is_group_a_cg_snapshot_type, volume_get, request):
is_group_a_cg_snapshot_type.return_value = True
request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
ret = self.driver.create_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
)
self.assertEqual(5, request.call_count)
actual = (
None,
[{'id': TEST_SNAPSHOT[0]['id'],
'provider_location': '1',
'status': 'available'}]
)
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
def test_delete_group_snapshot(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.delete_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]])
self.assertEqual(10, request.call_count)
actual = (
{'status': TEST_GROUP_SNAP[0]['status']},
[{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}]
)
self.assertTupleEqual(actual, ret)


@@ -26,7 +26,7 @@ from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.volume import volume_utils
-VERSION = '2.0.0'
+VERSION = '2.1.0'
_STR_VOLUME = 'volume'
_STR_SNAPSHOT = 'snapshot'
@@ -318,7 +318,9 @@ class HBSDCommon():
reserved_percentage=self.conf.safe_get('reserved_percentage'),
QoS_support=False,
thick_provisioning_support=False,
-multiattach=True
+multiattach=True,
+consistencygroup_support=True,
+consistent_group_snapshot_enabled=True
))
try:
(total_capacity, free_capacity,
@@ -779,3 +781,22 @@ class HBSDCommon():
self.restore_ldev(pvol, svol)
else:
raise NotImplementedError()
def create_group(self):
raise NotImplementedError()
def delete_group(self, group, volumes):
raise NotImplementedError()
def create_group_from_src(
self, context, group, volumes, snapshots=None, source_vols=None):
raise NotImplementedError()
def update_group(self, group, add_volumes=None):
raise NotImplementedError()
def create_group_snapshot(self, context, group_snapshot, snapshots):
raise NotImplementedError()
def delete_group_snapshot(self, group_snapshot, snapshots):
raise NotImplementedError()


@@ -15,6 +15,7 @@
"""Fibre channel module for Hitachi HBSD Driver."""
from oslo_config import cfg
from oslo_utils import excutils
from cinder import interface
from cinder.volume import configuration
@@ -64,6 +65,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods
2.0.0 - Major redesign of the driver. This version requires the REST
API for communication with the storage backend.
2.1.0 - Add Cinder generic volume groups.
"""
@@ -228,3 +230,37 @@ class HBSDFCDriver(driver.FibreChannelDriver):
def revert_to_snapshot(self, context, volume, snapshot):
"""Rollback the specified snapshot"""
return self.common.revert_to_snapshot(volume, snapshot)
@volume_utils.trace
def create_group(self, context, group):
return self.common.create_group()
@volume_utils.trace
def delete_group(self, context, group, volumes):
return self.common.delete_group(group, volumes)
@volume_utils.trace
def create_group_from_src(
self, context, group, volumes, group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
return self.common.create_group_from_src(
context, group, volumes, snapshots, source_vols)
@volume_utils.trace
def update_group(
self, context, group, add_volumes=None, remove_volumes=None):
try:
return self.common.update_group(group, add_volumes)
except Exception:
with excutils.save_and_reraise_exception():
for remove_volume in remove_volumes:
utils.cleanup_cg_in_volume(remove_volume)
@volume_utils.trace
def create_group_snapshot(self, context, group_snapshot, snapshots):
return self.common.create_group_snapshot(
context, group_snapshot, snapshots)
@volume_utils.trace
def delete_group_snapshot(self, context, group_snapshot, snapshots):
return self.common.delete_group_snapshot(group_snapshot, snapshots)


@@ -14,6 +14,8 @@
#
"""iSCSI module for Hitachi HBSD Driver."""
from oslo_utils import excutils
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common as common
@@ -49,6 +51,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods
2.0.0 - Major redesign of the driver. This version requires the REST
API for communication with the storage backend.
2.1.0 - Add Cinder generic volume groups.
"""
@@ -212,3 +215,37 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
def revert_to_snapshot(self, context, volume, snapshot):
"""Rollback the specified snapshot"""
return self.common.revert_to_snapshot(volume, snapshot)
@volume_utils.trace
def create_group(self, context, group):
return self.common.create_group()
@volume_utils.trace
def delete_group(self, context, group, volumes):
return self.common.delete_group(group, volumes)
@volume_utils.trace
def create_group_from_src(
self, context, group, volumes, group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
return self.common.create_group_from_src(
context, group, volumes, snapshots, source_vols)
@volume_utils.trace
def update_group(
self, context, group, add_volumes=None, remove_volumes=None):
try:
return self.common.update_group(group, add_volumes)
except Exception:
with excutils.save_and_reraise_exception():
for remove_volume in remove_volumes:
utils.cleanup_cg_in_volume(remove_volume)
@volume_utils.trace
def create_group_snapshot(self, context, group_snapshot, snapshots):
return self.common.create_group_snapshot(
context, group_snapshot, snapshots)
@volume_utils.trace
def delete_group_snapshot(self, context, group_snapshot, snapshots):
return self.common.delete_group_snapshot(group_snapshot, snapshots)


@@ -24,11 +24,13 @@ from oslo_utils import timeutils
from oslo_utils import units
from cinder import exception
from cinder.objects import fields
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_common as common
from cinder.volume.drivers.hitachi import hbsd_rest_api as rest_api
from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.volume.drivers.san import san
from cinder.volume import volume_utils
_LU_PATH_DEFINED = ('B958', '015A')
NORMAL_STS = 'NML'
@@ -86,6 +88,9 @@ EX_ENLDEV = 'EX_ENLDEV'
EX_INVARG = 'EX_INVARG'
_INVALID_RANGE = [EX_ENLDEV, EX_INVARG]
_MAX_COPY_GROUP_NAME = 29
_MAX_CTG_COUNT_EXCEEDED_ADD_SNAPSHOT = ('2E10', '2302')
_MAX_PAIR_COUNT_IN_CTG_EXCEEDED_ADD_SNAPSHOT = ('2E13', '9900')
REST_VOLUME_OPTS = [
cfg.BoolOpt(
@@ -789,3 +794,277 @@ class HBSDREST(common.HBSDCommon):
return False
return (result[0]['primaryOrSecondary'] == "S-VOL" and
int(result[0]['pvolLdevId']) == pvol)
def create_group(self):
return None
def _delete_group(self, group, objs, is_snapshot):
model_update = {'status': group.status}
objs_model_update = []
events = []
def _delete_group_obj(group, obj, is_snapshot):
obj_update = {'id': obj.id}
try:
if is_snapshot:
self.delete_snapshot(obj)
else:
self.delete_volume(obj)
obj_update['status'] = 'deleted'
except (utils.HBSDError, exception.VolumeIsBusy,
exception.SnapshotIsBusy) as exc:
obj_update['status'] = 'available' if isinstance(
exc, (exception.VolumeIsBusy,
exception.SnapshotIsBusy)) else 'error'
utils.output_log(
MSG.GROUP_OBJECT_DELETE_FAILED,
obj='snapshot' if is_snapshot else 'volume',
group='group snapshot' if is_snapshot else 'group',
group_id=group.id, obj_id=obj.id, ldev=utils.get_ldev(obj),
reason=exc.msg)
raise loopingcall.LoopingCallDone(obj_update)
for obj in objs:
loop = loopingcall.FixedIntervalLoopingCall(
_delete_group_obj, group, obj, is_snapshot)
event = loop.start(interval=0)
events.append(event)
for e in events:
obj_update = e.wait()
if obj_update['status'] != 'deleted':
model_update['status'] = 'error'
objs_model_update.append(obj_update)
return model_update, objs_model_update
def delete_group(self, group, volumes):
return self._delete_group(group, volumes, False)
def delete_group_snapshot(self, group_snapshot, snapshots):
return self._delete_group(group_snapshot, snapshots, True)
def create_group_from_src(
self, context, group, volumes, snapshots=None, source_vols=None):
volumes_model_update = []
new_ldevs = []
events = []
def _create_group_volume_from_src(context, volume, src, from_snapshot):
volume_model_update = {'id': volume.id}
try:
ldev = utils.get_ldev(src)
if ldev is None:
msg = utils.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY,
type='snapshot' if from_snapshot else 'volume',
id=src.id)
raise utils.HBSDError(msg)
volume_model_update.update(
self.create_volume_from_snapshot(volume, src) if
from_snapshot else self.create_cloned_volume(volume,
src))
except Exception as exc:
volume_model_update['msg'] = utils.get_exception_msg(exc)
raise loopingcall.LoopingCallDone(volume_model_update)
try:
from_snapshot = True if snapshots else False
for volume, src in zip(volumes,
snapshots if snapshots else source_vols):
loop = loopingcall.FixedIntervalLoopingCall(
_create_group_volume_from_src, context, volume, src,
from_snapshot)
event = loop.start(interval=0)
events.append(event)
is_success = True
for e in events:
volume_model_update = e.wait()
if 'msg' in volume_model_update:
is_success = False
msg = volume_model_update['msg']
else:
volumes_model_update.append(volume_model_update)
ldev = utils.get_ldev(volume_model_update)
if ldev is not None:
new_ldevs.append(ldev)
if not is_success:
raise utils.HBSDError(msg)
except Exception:
with excutils.save_and_reraise_exception():
for new_ldev in new_ldevs:
try:
self.delete_ldev(new_ldev)
except utils.HBSDError:
utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=new_ldev)
return None, volumes_model_update
def update_group(self, group, add_volumes=None):
if add_volumes and volume_utils.is_group_a_cg_snapshot_type(group):
for volume in add_volumes:
ldev = utils.get_ldev(volume)
if ldev is None:
msg = utils.output_log(MSG.LDEV_NOT_EXIST_FOR_ADD_GROUP,
volume_id=volume.id,
group='consistency group',
group_id=group.id)
raise utils.HBSDError(msg)
return None, None, None
def _create_non_cgsnapshot(self, group_snapshot, snapshots):
model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
snapshots_model_update = []
events = []
def _create_non_cgsnapshot_snapshot(group_snapshot, snapshot):
snapshot_model_update = {'id': snapshot.id}
try:
snapshot_model_update.update(self.create_snapshot(snapshot))
snapshot_model_update['status'] = (
fields.SnapshotStatus.AVAILABLE)
except Exception:
snapshot_model_update['status'] = fields.SnapshotStatus.ERROR
utils.output_log(
MSG.GROUP_SNAPSHOT_CREATE_FAILED,
group=group_snapshot.group_id,
group_snapshot=group_snapshot.id,
group_type=group_snapshot.group_type_id,
volume=snapshot.volume_id, snapshot=snapshot.id)
raise loopingcall.LoopingCallDone(snapshot_model_update)
for snapshot in snapshots:
loop = loopingcall.FixedIntervalLoopingCall(
_create_non_cgsnapshot_snapshot, group_snapshot, snapshot)
event = loop.start(interval=0)
events.append(event)
for e in events:
snapshot_model_update = e.wait()
if (snapshot_model_update['status'] ==
fields.SnapshotStatus.ERROR):
model_update['status'] = fields.GroupSnapshotStatus.ERROR
snapshots_model_update.append(snapshot_model_update)
return model_update, snapshots_model_update
def _create_ctg_snapshot_group_name(self, ldev):
now = timeutils.utcnow()
strnow = now.strftime("%y%m%d%H%M%S%f")
ctg_name = '%(prefix)sC%(ldev)s%(time)s' % {
'prefix': utils.DRIVER_PREFIX,
'ldev': "{0:06X}".format(ldev),
'time': strnow[:len(strnow) - 3],
}
return ctg_name[:_MAX_COPY_GROUP_NAME]
def _delete_pairs_from_storage(self, pairs):
for pair in pairs:
try:
self._delete_pair_from_storage(pair['pvol'], pair['svol'])
except utils.HBSDError:
utils.output_log(MSG.DELETE_PAIR_FAILED, pvol=pair['pvol'],
svol=pair['svol'])
def _create_ctg_snap_pair(self, pairs):
snapshotgroup_name = self._create_ctg_snapshot_group_name(
pairs[0]['pvol'])
try:
for pair in pairs:
try:
body = {"snapshotGroupName": snapshotgroup_name,
"snapshotPoolId":
self.storage_info['snap_pool_id'],
"pvolLdevId": pair['pvol'],
"svolLdevId": pair['svol'],
"isConsistencyGroup": True,
"canCascade": True,
"isDataReductionForceCopy": True}
self.client.add_snapshot(body)
except utils.HBSDError as ex:
if ((utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
_MAX_CTG_COUNT_EXCEEDED_ADD_SNAPSHOT) or
(utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
_MAX_PAIR_COUNT_IN_CTG_EXCEEDED_ADD_SNAPSHOT)):
msg = utils.output_log(MSG.FAILED_CREATE_CTG_SNAPSHOT)
raise utils.HBSDError(msg)
elif (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
rest_api.INVALID_SNAPSHOT_POOL and
not self.conf.hitachi_snap_pool):
msg = utils.output_log(
MSG.INVALID_PARAMETER, param='hitachi_snap_pool')
raise utils.HBSDError(msg)
raise
self._wait_copy_pair_status(pair['svol'], PAIR)
self.client.split_snapshotgroup(snapshotgroup_name)
for pair in pairs:
self._wait_copy_pair_status(pair['svol'], PSUS)
except Exception:
with excutils.save_and_reraise_exception():
self._delete_pairs_from_storage(pairs)
def _create_cgsnapshot(self, context, cgsnapshot, snapshots):
pairs = []
events = []
snapshots_model_update = []
def _create_cgsnapshot_volume(snapshot):
pair = {'snapshot': snapshot}
try:
pair['pvol'] = utils.get_ldev(snapshot.volume)
if pair['pvol'] is None:
msg = utils.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY,
type='volume', id=snapshot.volume_id)
raise utils.HBSDError(msg)
size = snapshot.volume_size
pair['svol'] = self.create_ldev(size)
except Exception as exc:
pair['msg'] = utils.get_exception_msg(exc)
raise loopingcall.LoopingCallDone(pair)
try:
for snapshot in snapshots:
ldev = utils.get_ldev(snapshot.volume)
if ldev is None:
msg = utils.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume',
id=snapshot.volume_id)
raise utils.HBSDError(msg)
for snapshot in snapshots:
loop = loopingcall.FixedIntervalLoopingCall(
_create_cgsnapshot_volume, snapshot)
event = loop.start(interval=0)
events.append(event)
is_success = True
for e in events:
pair = e.wait()
if 'msg' in pair:
is_success = False
msg = pair['msg']
pairs.append(pair)
if not is_success:
raise utils.HBSDError(msg)
self._create_ctg_snap_pair(pairs)
except Exception:
for pair in pairs:
if 'svol' in pair and pair['svol'] is not None:
try:
self.delete_ldev(pair['svol'])
except utils.HBSDError:
utils.output_log(
MSG.DELETE_LDEV_FAILED, ldev=pair['svol'])
model_update = {'status': fields.GroupSnapshotStatus.ERROR}
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id,
'status': fields.SnapshotStatus.ERROR}
snapshots_model_update.append(snapshot_model_update)
return model_update, snapshots_model_update
for pair in pairs:
snapshot_model_update = {
'id': pair['snapshot'].id,
'status': fields.SnapshotStatus.AVAILABLE,
'provider_location': str(pair['svol'])}
snapshots_model_update.append(snapshot_model_update)
return None, snapshots_model_update
def create_group_snapshot(self, context, group_snapshot, snapshots):
if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
return self._create_cgsnapshot(context, group_snapshot, snapshots)
else:
return self._create_non_cgsnapshot(group_snapshot, snapshots)


@@ -750,6 +750,14 @@ class RestApiClient():
}
self._invoke(url, body=body)
def split_snapshotgroup(self, snapshot_group_id):
url = '%(url)s/snapshot-groups/%(id)s/actions/%(action)s/invoke' % {
'url': self.object_url,
'id': snapshot_group_id,
'action': 'split',
}
self._invoke(url)
def discard_zero_page(self, ldev_id):
"""Return the ldev's no-data pages to the storage pool."""
url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % {


@@ -344,6 +344,21 @@ class HBSDMsg(enum.Enum):
'to manage the volume.',
'suffix': ERROR_SUFFIX,
}
FAILED_CREATE_CTG_SNAPSHOT = {
'msg_id': 712,
'loglevel': base_logging.ERROR,
'msg': 'Failed to create a consistency group snapshot. '
'The number of pairs in the consistency group or the number of '
'consistency group snapshots has reached the limit.',
'suffix': ERROR_SUFFIX,
}
LDEV_NOT_EXIST_FOR_ADD_GROUP = {
'msg_id': 716,
'loglevel': base_logging.ERROR,
'msg': 'No logical device exists in the storage system for the volume '
'%(volume_id)s to be added to the %(group)s %(group_id)s.',
'suffix': ERROR_SUFFIX,
}
SNAPSHOT_UNMANAGE_FAILED = {
'msg_id': 722,
'loglevel': base_logging.ERROR,
@@ -395,6 +410,23 @@
'body: %(body)s)',
'suffix': ERROR_SUFFIX,
}
GROUP_OBJECT_DELETE_FAILED = {
'msg_id': 736,
'loglevel': base_logging.ERROR,
'msg': 'Failed to delete a %(obj)s in a %(group)s. (%(group)s: '
'%(group_id)s, %(obj)s: %(obj_id)s, LDEV: %(ldev)s, reason: '
'%(reason)s)',
'suffix': ERROR_SUFFIX,
}
GROUP_SNAPSHOT_CREATE_FAILED = {
'msg_id': 737,
'loglevel': base_logging.ERROR,
'msg': 'Failed to create a volume snapshot in a group snapshot that '
'does not guarantee consistency. (group: %(group)s, '
'group snapshot: %(group_snapshot)s, group type: '
'%(group_type)s, volume: %(volume)s, snapshot: %(snapshot)s)',
'suffix': ERROR_SUFFIX,
}
def __init__(self, error_info):
"""Initialize Enum attributes."""
@@ -526,3 +558,20 @@ def is_shared_connection(volume, connector):
if attachment.attached_host == host:
connection_count += 1
return connection_count > 1
def cleanup_cg_in_volume(volume):
if ('group_id' in volume and volume.group_id and
'consistencygroup_id' in volume and
volume.consistencygroup_id):
volume.consistencygroup_id = None
if 'consistencygroup' in volume:
volume.consistencygroup = None
def get_exception_msg(exc):
if exc.args:
return exc.msg if isinstance(
exc, exception.CinderException) else exc.args[0]
else:
return ""


@@ -63,6 +63,8 @@ Supported operations
* Create, delete, attach, and detach volumes.
* Create, list, and delete volume snapshots.
* Create a volume from a snapshot.
* Create, list, update, and delete consistency groups.
* Create, list, and delete consistency group snapshots.
* Copy a volume to an image.
* Copy an image to a volume.
* Clone a volume.


@@ -587,7 +587,7 @@ driver.dell_emc_vnx=complete
driver.dell_emc_powerflex=complete
driver.dell_emc_xtremio=complete
driver.fujitsu_eternus=missing
-driver.hitachi_vsp=missing
+driver.hitachi_vsp=complete
driver.hpe_3par=complete
driver.hpe_msa=missing
driver.huawei_t_v1=missing


@@ -0,0 +1,4 @@
---
features:
  - |
    Hitachi driver: Add Cinder generic volume groups.