Add support for consistency groups in the Nimble Storage driver

blueprint nimble-consistency-groups-support
Change-Id: Ibc3c0f207bf4d4467f4091fd5e789f525ba6a0ab

This commit is contained in:
parent f602a1d00d
commit 0d06d497c3
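At its core, the change maps a Cinder consistency group onto a Nimble volume collection and gates every group operation on the volume-type extra spec consistent_group_snapshot_enabled="<is> True". The following is a minimal standalone sketch of that gate, mirroring the is_volume_group_snap_type helper and the create_volume association added in the diff below; the dicts are illustrative stand-ins for Cinder volume objects and the print stands in for the REST association call:

# Standalone sketch of the consistency-group gate introduced by this change.
# The dicts below are illustrative stand-ins for Cinder volume objects.
CG_SPEC = 'consistent_group_snapshot_enabled'


def is_volume_group_snap_type(volume_type):
    """Mirror of the driver helper: True when the type opts in to CGs."""
    if not volume_type:
        return False
    extra_specs = volume_type.get('extra_specs') or {}
    return extra_specs.get(CG_SPEC) == '<is> True'


volume = {'name': 'testvolume-cg',
          'group_id': '7d7dfa02-ac6e-48cb-96af-8a0cd3008d47',
          'volume_type': {'extra_specs': {CG_SPEC: '<is> True'}}}

# create_volume only associates the backend volume with the group's Nimble
# volume collection when both the extra spec and a group_id are present.
if is_volume_group_snap_type(volume['volume_type']) and volume['group_id']:
    print('associate %(name)s with volcoll %(group_id)s' % volume)

The same spec value is what create_group validates before it creates the backing volume collection.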
@@ -22,19 +22,24 @@ from six.moves import http_client

from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder.objects import volume as obj_volume
from cinder.objects import volume_type
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_group
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.volume.drivers import nimble
from cinder.volume import volume_types
from cinder.volume import volume_utils

NIMBLE_CLIENT = 'cinder.volume.drivers.nimble.NimbleRestAPIExecutor'
NIMBLE_URLLIB2 = 'cinder.volume.drivers.nimble.requests'
NIMBLE_RANDOM = 'cinder.volume.drivers.nimble.random'
NIMBLE_ISCSI_DRIVER = 'cinder.volume.drivers.nimble.NimbleISCSIDriver'
NIMBLE_FC_DRIVER = 'cinder.volume.drivers.nimble.NimbleFCDriver'
DRIVER_VERSION = '4.0.1'
DRIVER_VERSION = '4.1.0'
nimble.DEFAULT_SLEEP = 0

FAKE_POSITIVE_LOGIN_RESPONSE_1 = '2c20aad78a220ed1dae21dcd6f9446f5'

@@ -193,6 +198,57 @@ FAKE_POSITIVE_GROUP_INFO_RESPONSE = {
    'compressed_snap_usage_bytes': 36189,
    'unused_reserve_bytes': 0}

FAKE_GET_VOL_INFO_RESPONSE = {'name': 'testvolume-cg',
                              'clone': False,
                              'target_name': 'iqn.test',
                              'online': True,
                              'agent_type': 'openstack'}

FAKE_EXTRA_SPECS_CG = {'consistent_group_snapshot_enabled': "<is> False"}

FAKE_VOLUME_TYPE = {'extra_specs': FAKE_EXTRA_SPECS_CG}
SRC_CG_VOLUME_ID = 'bd21d11b-c765-4c68-896c-6b07f63cfcb6'

SRC_CG_VOLUME_NAME = 'volume-' + SRC_CG_VOLUME_ID

volume_src_cg = {'name': SRC_CG_VOLUME_NAME,
                 'id': SRC_CG_VOLUME_ID,
                 'display_name': 'Foo Volume',
                 'size': 2,
                 'host': 'FAKE_CINDER_HOST',
                 'volume_type': None,
                 'volume_type_id': None}

VOLUME_TYPE_ID_CG = 'd03338a9-9115-48a3-8dfc-44444444444'

VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'
admin_context = context.get_admin_context()

VOLUME_NAME = 'volume-' + VOLUME_ID
FAKE_GROUP = fake_group.fake_group_obj(
    admin_context, id=fake.GROUP_ID, status='available')


volume_cg = {'name': VOLUME_NAME,
             'id': VOLUME_ID,
             'display_name': 'Foo Volume',
             'provider_location': 12,
             'size': 2,
             'host': 'FAKE_CINDER_HOST',
             'volume_type': 'cg_type',
             'volume_type_id': VOLUME_TYPE_ID_CG}

FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_CG = {
    'clone': False,
    'name': "testvolume-cg"}

FAKE_GET_VOLID_INFO_RESPONSE = {'vol_id': fake.VOLUME_ID}

FAKE_GET_VOLCOLL_INFO_RESPONSE = {'volcoll_id': fake.VOLUME2_ID}

FAKE_ASSOCIATE_VOLCOLL_INFO_RESPONSE = {'vol_id': fake.VOLUME_ID,
                                        'volcoll_id': fake.VOLUME2_ID}

FAKE_GENERIC_POSITIVE_RESPONSE = ""
FAKE_VOLUME_DELETE_HAS_CLONE_RESPONSE = "Object has a clone"

@@ -204,6 +260,11 @@ NIMBLE_MANAGEMENT_IP = "10.18.108.55"
NIMBLE_SAN_LOGIN = "nimble"
NIMBLE_SAN_PASS = "nimble_pass"

SRC_CONSIS_GROUP_ID = '7d7dfa02-ac6e-48cb-96af-8a0cd3008d47'

FAKE_SRC_GROUP = fake_group.fake_group_obj(
    admin_context, id=SRC_CONSIS_GROUP_ID, status='available')


def create_configuration(username, password, ip_address,
                         pool_name=None, subnet_label=None,

@@ -1075,7 +1136,8 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
                'free_capacity_gb': 7463.567649364471,
                'reserved_percentage': 0,
                'QoS_support': False,
                'multiattach': True}]}
                'multiattach': True,
                'consistent_group_snapshot_enabled': True}]}
        self.assertEqual(
            expected_res,
            self.driver.get_volume_stats(refresh=True))

@@ -1709,3 +1771,246 @@ class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase):
        self.mock_client_service.assert_has_calls(
            self.mock_client_service.method_calls,
            expected_calls)

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_create_group_positive(self, mock_is_cg):
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        self.group = fake_group.fake_group_obj(
            ctx, id=fake.GROUP_ID)
        model_update = self.driver.create_group(ctx, self.group)
        self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'])

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_create_generic_group(self, mock_is_cg):
        mock_is_cg.return_value = False
        ctx = context.get_admin_context()
        self.group = fake_group.fake_group_obj(
            ctx, id=fake.GROUP_ID, status='available')
        self.assertRaises(
            NotImplementedError,
            self.driver.create_group,
            ctx, self.group
        )
        mock_is_cg.assert_called_once_with(self.group)

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_delete_generic_group(self, mock_is_cg):
        mock_is_cg.return_value = False
        ctx = context.get_admin_context()
        group = mock.MagicMock()
        volumes = [fake_volume.fake_volume_obj(None)]
        self.assertRaises(
            NotImplementedError,
            self.driver.delete_group,
            ctx, group, volumes
        )
        mock_is_cg.assert_called_once()

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    def test_delete_group_positive(self, mock_get_specs, mock_is_cg):
        mock_get_specs.return_value = '<is> True'
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        group = mock.MagicMock()
        volumes = [fake_volume.fake_volume_obj(None)]
        self.driver.delete_group(ctx, group, volumes)
        self.mock_client_service.delete_volcoll.assert_called_once()

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_update_group(self, mock_is_cg):
        mock_is_cg.return_value = False
        group = mock.MagicMock()
        ctx = context.get_admin_context()
        self.assertRaises(
            NotImplementedError,
            self.driver.update_group,
            ctx, group
        )
        mock_is_cg.assert_called_once_with(group)

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    @mock.patch(NIMBLE_ISCSI_DRIVER + '.is_volume_group_snap_type')
    def test_update_group_positive(self, vol_gs_enable,
                                   mock_get_specs, mock_is_cg):
        mock_get_specs.return_value = '<is> True'
        mock_is_cg.return_value = True
        self.mock_client_service.get_volume_id_by_name.return_value = (
            FAKE_GET_VOLID_INFO_RESPONSE)
        self.mock_client_service.get_volcoll_id_by_name.return_value = (
            FAKE_GET_VOLCOLL_INFO_RESPONSE)
        self.mock_client_service.associate_volcoll.return_value = (
            FAKE_GET_SNAP_INFO_BACKUP_RESPONSE)

        ctx = context.get_admin_context()
        group = mock.MagicMock()
        volume1 = fake_volume.fake_volume_obj(
            ctx, name='testvolume-cg1',
            host='fakehost@nimble#Openstack',
            provider_location='12 13',
            id=12, consistency_group_snapshot_enabled=True)
        addvollist = [volume1]
        remvollist = [volume1]
        model_update = self.driver.update_group(
            ctx,
            group,
            addvollist,
            remvollist
        )
        self.assertEqual(fields.GroupStatus.AVAILABLE,
                         model_update[0]['status'])

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_create_group_from_src(self, mock_is_cg):
        mock_is_cg.return_value = False
        group = mock.MagicMock()
        ctx = context.get_admin_context()
        volumes = [fake_volume.fake_volume_obj(None)]
        self.assertRaises(
            NotImplementedError,
            self.driver.create_group_from_src,
            ctx, group, volumes
        )
        mock_is_cg.assert_called_once_with(group)

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    @mock.patch(NIMBLE_ISCSI_DRIVER + ".create_cloned_volume")
    def test_create_group_from_src_positive(self, mock_clone,
                                            mock_get_specs,
                                            mock_is_cg):
        source_volume = volume_src_cg
        volume = volume_cg
        volume['source_volid'] = source_volume['id']
        volume['display_name'] = "cg-volume"
        source_volume['display_name'] = "source-volume"

        mock_get_specs.return_value = '<is> True'
        mock_clone.return_value = volume['name']
        mock_is_cg.return_value = True

        self.driver.create_group_from_src(
            context.get_admin_context(), FAKE_GROUP,
            [volume], source_group=FAKE_SRC_GROUP,
            source_vols=[source_volume])
        self.mock_client_service.associate_volcoll.assert_called_once()

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    def test_create_group_snapshot_positive(self, mock_get_specs, mock_is_cg):
        mock_get_specs.return_value = '<is> True'
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        group_snapshot = mock.MagicMock()
        snapshots = [fake_snapshot.fake_snapshot_obj(None)]

        self.driver.create_group_snapshot(
            ctx,
            group_snapshot,
            snapshots
        )
        self.mock_client_service.snapcoll_create.assert_called_once()

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_delete_generic_group_snapshot(self, mock_is_cg):
        mock_is_cg.return_value = False
        group_snapshot = mock.MagicMock()
        snapshots = [fake_snapshot.fake_snapshot_obj(None)]
        ctx = context.get_admin_context()
        self.assertRaises(
            NotImplementedError,
            self.driver.delete_group_snapshot,
            ctx, group_snapshot, snapshots
        )
        mock_is_cg.assert_called_once_with(group_snapshot)

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.group_types.get_group_type_specs')
    def test_delete_group_snapshot_positive(self, mock_get_specs, mock_is_cg):
        mock_get_specs.return_value = '<is> True'
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        group_snapshot = mock.MagicMock()
        snapshots = [mock.Mock()]

        self.driver.delete_group_snapshot(
            ctx,
            group_snapshot,
            snapshots
        )
        self.mock_client_service.snapcoll_delete.assert_called_once()

    @mock.patch(NIMBLE_URLLIB2)
    @mock.patch(NIMBLE_CLIENT)
    @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
        NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
        'default', '*'))
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
    def test_create_group_negative(self, mock_is_cg):
        mock_is_cg.return_value = True
        ctx = context.get_admin_context()
        self.vol_type = volume_type.VolumeType(
            name='volume_type',
            extra_specs={'consistent_group_snapshot_enabled': '<is> False'})
        FAKE_GROUP.volume_types = volume_type.VolumeTypeList(
            objects=[self.vol_type])
        self.assertRaises(exception.InvalidInput,
                          self.driver.create_group, ctx, FAKE_GROUP)

@@ -37,15 +37,18 @@ import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder.objects import volume
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
from cinder.volume import volume_utils
from cinder.zonemanager import utils as fczm_utils

DRIVER_VERSION = "4.0.1"
DRIVER_VERSION = "4.1.0"
AES_256_XTS_CIPHER = 'aes_256_xts'
DEFAULT_CIPHER = 'none'
EXTRA_SPEC_ENCRYPTION = 'nimble:encryption'

@@ -130,6 +133,9 @@ class NimbleBaseVolumeDriver(san.SanDriver):
        4.0.0 - Migrate from SOAP to REST API
                Add support for Group Scoped Target
        4.0.1 - Add QoS and dedupe support
        4.1.0 - Added multiattach support
                Added revert to snapshot support
                Added consistency groups support
    """
    VERSION = DRIVER_VERSION

@@ -168,6 +174,16 @@ class NimbleBaseVolumeDriver(san.SanDriver):
            self.configuration.nimble_pool_name, reserve,
            self._storage_protocol,
            self._group_target_enabled)
        volume_type = volume.get('volume_type')
        consis_group_snap_type = False
        if volume_type is not None:
            consis_group_snap_type = self.is_volume_group_snap_type(
                volume_type)
        cg_id = volume.get('group_id', None)
        if consis_group_snap_type and cg_id:
            volume_id = self.APIExecutor.get_volume_id_by_name(volume['name'])
            cg_volcoll_id = self.APIExecutor.get_volcoll_id_by_name(cg_id)
            self.APIExecutor.associate_volcoll(volume_id, cg_volcoll_id)
        return self._get_model_info(volume['name'])

    def is_volume_backup_clone(self, volume):

@@ -370,7 +386,6 @@ class NimbleBaseVolumeDriver(san.SanDriver):
                      group_info['compressed_snap_usage_bytes'] +
                      group_info['unused_reserve_bytes']) /
                      float(units.Gi))
        free_space = total_capacity - used_space
        LOG.debug('total_capacity=%(capacity)f '
                  'used_space=%(used)f free_space=%(free)f',

@@ -392,7 +407,8 @@ class NimbleBaseVolumeDriver(san.SanDriver):
            free_capacity_gb=free_space,
            reserved_percentage=0,
            QoS_support=False,
            multiattach=True)
            multiattach=True,
            consistent_group_snapshot_enabled=True)
        self.group_stats['pools'] = [single_pool]
        return self.group_stats

@@ -759,6 +775,179 @@ class NimbleBaseVolumeDriver(san.SanDriver):
                       'err': ex.message})
        return self._get_model_info(volume['name'])

    def is_volume_group_snap_type(self, volume_type):
        consis_group_snap_type = False
        if volume_type:
            extra_specs = volume_type.get('extra_specs')
            if 'consistent_group_snapshot_enabled' in extra_specs:
                gsnap_val = extra_specs['consistent_group_snapshot_enabled']
                consis_group_snap_type = (gsnap_val == "<is> True")
        return consis_group_snap_type

    def create_group(self, context, group):
        """Creates a generic group."""
        if not volume_utils.is_group_a_cg_snapshot_type(group):
            raise NotImplementedError()
        cg_type = False
        cg_name = group.id
        description = group.description if group.description else group.name
        LOG.info('Create group: %(name)s, description: %(description)s',
                 {'name': cg_name, 'description': description})
        for volume_type in group.volume_types:
            if volume_type:
                extra_specs = volume_type.get('extra_specs')
                if 'consistent_group_snapshot_enabled' in extra_specs:
                    gsnap_val = extra_specs[
                        'consistent_group_snapshot_enabled']
                    cg_type = (gsnap_val == "<is> True")
                    if not cg_type:
                        msg = _('For a volume type to be part of a '
                                'consistency group, its extra specs must '
                                'have consistent_group_snapshot_enabled'
                                '="<is> True"')
                        LOG.error(msg)
                        raise exception.InvalidInput(reason=msg)
        self.APIExecutor.create_volcoll(cg_name)
        return {'status': fields.GroupStatus.AVAILABLE}

    def delete_group(self, context, group, volumes):
        """Deletes a group."""
        if not volume_utils.is_group_a_cg_snapshot_type(group):
            raise NotImplementedError()
        LOG.info("Delete Consistency Group %s.", group.id)
        model_updates = {"status": fields.GroupStatus.DELETED}
        error_statuses = [
            fields.GroupStatus.ERROR,
            fields.GroupStatus.ERROR_DELETING,
        ]
        volume_model_updates = []
        for tmp_volume in volumes:
            update_item = {"id": tmp_volume.id}
            try:
                self.delete_volume(tmp_volume)
                update_item["status"] = "deleted"
            except exception.VolumeBackendAPIException:
                update_item["status"] = fields.VolumeStatus.ERROR_DELETING
                if model_updates["status"] not in error_statuses:
                    model_updates["status"] = fields.GroupStatus.ERROR_DELETING
                LOG.error("Failed to delete volume %(vol_id)s of "
                          "group %(group_id)s.",
                          {"vol_id": tmp_volume.id, "group_id": group.id})
            volume_model_updates.append(update_item)
        cg_name = group.id
        cg_id = self.APIExecutor.get_volcoll_id_by_name(cg_name)
        self.APIExecutor.delete_volcoll(cg_id)
        return model_updates, volume_model_updates

    def update_group(self, context, group, add_volumes=None,
                     remove_volumes=None):
        if not volume_utils.is_group_a_cg_snapshot_type(group):
            raise NotImplementedError()
        model_update = {'status': fields.GroupStatus.AVAILABLE}

        for tmp_volume in add_volumes:
            volume_id = self.APIExecutor.get_volume_id_by_name(
                tmp_volume['name'])
            vol_snap_enable = self.is_volume_group_snap_type(
                tmp_volume.get('volume_type'))
            cg_id = self.APIExecutor.get_volcoll_id_by_name(group.id)
            try:
                if vol_snap_enable:
                    self.APIExecutor.associate_volcoll(volume_id, cg_id)
                else:
                    msg = (_('Volume with id %s cannot be added to the '
                             'group because its volume type extra specs '
                             'do not have '
                             'consistent_group_snapshot_enabled='
                             '"<is> True"') % tmp_volume['id'])
                    LOG.error(msg)
                    raise exception.InvalidInput(reason=msg)
            except NimbleAPIException:
                msg = ('Volume collection does not exist.')
                LOG.error(msg)
                raise NimbleAPIException(msg)

        for tmp_volume in remove_volumes:
            volume_id = self.APIExecutor.get_volume_id_by_name(
                tmp_volume['name'])
            try:
                self.APIExecutor.dissociate_volcoll(volume_id)
            except NimbleAPIException:
                msg = ('Volume collection does not exist.')
                LOG.error(msg)
                raise NimbleAPIException(msg)

        return model_update, None, None

    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Creates a group snapshot."""
        if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
            raise NotImplementedError()
        group_id = group_snapshot.group_id
        snap_name = group_snapshot.id
        cg_id = self.APIExecutor.get_volcoll_id_by_name(group_id)
        try:
            self.APIExecutor.snapcoll_create(snap_name, cg_id)
        except NimbleAPIException:
            msg = ('Error creating cg snapshot')
            LOG.error(msg)
            raise NimbleAPIException(msg)
        snapshot_model_updates = []
        for snapshot in snapshots:
            snapshot_update = {'id': snapshot['id'],
                               'status': fields.SnapshotStatus.AVAILABLE}
            snapshot_model_updates.append(snapshot_update)
        model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
        return model_update, snapshot_model_updates

    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        """Deletes a group snapshot."""
        if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
            raise NotImplementedError()
        snap_name = group_snapshot.id
        model_update = {'status': fields.ConsistencyGroupStatus.DELETED}
        snapshots_model_update = []
        snapcoll_id = self.APIExecutor.get_snapcoll_id_by_name(snap_name)
        try:
            self.APIExecutor.snapcoll_delete(snapcoll_id)
            for snapshot in snapshots:
                snapshots_model_update.append(
                    {'id': snapshot.id,
                     'status': fields.SnapshotStatus.DELETED})
        except Exception as e:
            LOG.error("Error deleting volume group snapshot. "
                      "Error received: %(e)s", {'e': e})
            model_update = {
                'status': fields.GroupSnapshotStatus.ERROR_DELETING}
        return model_update, snapshots_model_update

    def create_group_from_src(self, context, group, volumes,
                              group_snapshot=None, snapshots=None,
                              source_group=None, source_vols=None):
        """Creates the volume group from source."""
        if not volume_utils.is_group_a_cg_snapshot_type(group):
            raise NotImplementedError()
        self.create_group(context, group)
        cg_id = self.APIExecutor.get_volcoll_id_by_name(group.id)
        try:
            if group_snapshot is not None and snapshots is not None:
                for tmp_volume, snapshot in zip(volumes, snapshots):
                    self.create_volume_from_snapshot(tmp_volume, snapshot)
                    volume_id = self.APIExecutor.get_volume_id_by_name(
                        tmp_volume['name'])
                    self.APIExecutor.associate_volcoll(volume_id, cg_id)
            elif source_group is not None and source_vols is not None:
                for tmp_volume, src_vol in zip(volumes, source_vols):
                    self.create_cloned_volume(tmp_volume, src_vol)
                    volume_id = self.APIExecutor.get_volume_id_by_name(
                        tmp_volume['name'])
                    self.APIExecutor.associate_volcoll(volume_id, cg_id)
        except NimbleAPIException:
            msg = ('Error creating group from source')
            LOG.error(msg)
            raise NimbleAPIException(msg)
        return None, None


@interface.volumedriver
class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver):

@@ -1644,6 +1833,63 @@ class NimbleRestAPIExecutor(object):
            raise NimbleAPIException(_("Snapshot: %s doesn't exist") % snap_id)
        return r.json()['data'][0]

    def get_volcoll_id_by_name(self, volcoll_name):
        api = "volume_collections"
        filter = {"name": volcoll_name}
        r = self.get_query(api, filter)
        if not r.json()['data']:
            raise Exception("Unable to retrieve information for volcoll: {0}"
                            .format(volcoll_name))
        return r.json()['data'][0]['id']

    def get_snapcoll_id_by_name(self, snapcoll_name):
        api = "snapshot_collections"
        filter = {"name": snapcoll_name}
        r = self.get_query(api, filter)
        if not r.json()['data']:
            raise Exception("Unable to retrieve information for snapcoll: {0}"
                            .format(snapcoll_name))
        return r.json()['data'][0]['id']

    def create_volcoll(self, volcoll_name):
        api = "volume_collections"
        data = {"data": {"name": volcoll_name}}
        r = self.post(api, data)
        return r['data']

    def delete_volcoll(self, volcoll_id):
        api = "volume_collections/" + str(volcoll_id)
        self.delete(api)

    def dissociate_volcoll(self, volume_id):
        api = "volumes/" + str(volume_id)
        data = {'data': {"volcoll_id": ''}}
        r = self.put(api, data)
        return r

    def associate_volcoll(self, volume_id, volcoll_id):
        api = "volumes/" + str(volume_id)
        data = {'data': {"volcoll_id": volcoll_id}}
        r = self.put(api, data)
        return r

    def snapcoll_create(self, snapcoll_name, volcoll_id):
        data = {'data': {"name": snapcoll_name,
                         "volcoll_id": volcoll_id}}
        api = 'snapshot_collections'
        r = self.post(api, data)
        return r

    def snapcoll_delete(self, snapcoll_id):
        api = "snapshot_collections/" + str(snapcoll_id)
        self.delete(api)

    @utils.retry(NimbleAPIException, 2, 3)
    def online_vol(self, volume_name, online_flag):
        volume_id = self.get_volume_id_by_name(volume_name)

@@ -39,6 +39,8 @@ Supported operations
* Create a Thinly Provisioned Volume
* Attach a volume to multiple servers simultaneously (multiattach)
* Volume Revert to Snapshot
* Create, list, update, and delete consistency groups
* Create, list, and delete consistency group snapshots
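The two consistency-group items above follow Cinder's generic volume group workflow: a group type and a volume type carrying consistent_group_snapshot_enabled="<is> True", a group created from them, and group snapshots taken against it. As a rough operator-side illustration only, assuming python-cinderclient's v3 group API; the auth URL, credentials, microversion, and exact client call signatures below are assumptions, not something defined by this change:

# Hedged sketch: client call signatures and microversion are assumptions.
from cinderclient import client
from keystoneauth1 import loading, session

loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(auth_url='http://controller:5000/v3',
                                username='admin', password='secret',
                                project_name='admin',
                                user_domain_id='default',
                                project_domain_id='default')
cinder = client.Client('3.38', session=session.Session(auth=auth))

# Volume type and group type both opt in to consistent group snapshots.
vtype = cinder.volume_types.create('nimble-cg-vol')
vtype.set_keys({'consistent_group_snapshot_enabled': '<is> True'})
gtype = cinder.group_types.create('nimble-cg')
gtype.set_keys({'consistent_group_snapshot_enabled': '<is> True'})

# Create the group (backed by a Nimble volume collection), add a volume to
# it, then snapshot the whole group (backed by a Nimble snapshot collection).
group = cinder.groups.create(gtype.id, vtype.id, name='my-cg')
cinder.volumes.create(1, volume_type=vtype.id, group_id=group.id)
cinder.group_snapshots.create(group.id, name='my-cg-snap')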

Nimble Storage driver configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -611,7 +611,7 @@ driver.netapp_ontap=complete
driver.netapp_solidfire=complete
driver.nexenta=missing
driver.nfs=missing
driver.nimble=missing
driver.nimble=complete
driver.prophetstor=complete
driver.pure=complete
driver.qnap=missing

@@ -0,0 +1,4 @@
---
features:
  - |
    Added consistency group support in the Nimble Storage driver.