Support for consistency groups in ScaleIO driver

Add support for the full set of consistency group operations in the
ScaleIO driver: create and delete a group, update group membership,
create a group from a source, and create and delete cgsnapshots.

Also fix a small mistake in a test for deleting a snapshot.

DocImpact
Implements: blueprint scaleio-consistency-groups
Change-Id: Id8b52aeb546f9f5fa68b98a4e59bd3f12e78bbef
Author: Matan Sabag
Date:   2016-01-18 06:35:25 -08:00
parent eac894c09f
commit 494a55973d
5 changed files with 370 additions and 41 deletions
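For orientation, here is a minimal sketch of the (model_update, per-item
updates) contract the new consistency-group methods follow. The fake
function and values below are illustrative stand-ins only, not code from
this change; the real methods are in the driver diff further down.

    # Sketch only: mirrors the return shape of delete_consistencygroup()
    # added below, using plain dicts instead of Cinder objects.
    def fake_delete_consistencygroup(context, group, volumes):
        model_update = {'status': 'deleted'}
        volumes_model_update = [{'id': v['id'], 'status': 'deleted'}
                                for v in volumes]
        return model_update, volumes_model_update

    model_update, vols = fake_delete_consistencygroup(
        None, {'id': 'cgid'}, [{'id': 'volid1'}, {'id': 'volid2'}])
    assert model_update['status'] == 'deleted'
    assert all(v['status'] == 'deleted' for v in vols)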

@@ -49,10 +49,6 @@ class ScaleIODriver(scaleio.ScaleIODriver):
                                             *args,
                                             **kwargs)
 
-    def update_consistencygroup(self, context, group, add_volumes=None,
-                                remove_volumes=None):
-        pass
-
     def local_path(self, volume):
         pass
@@ -62,28 +58,12 @@ class ScaleIODriver(scaleio.ScaleIODriver):
     def promote_replica(self, context, volume):
         pass
 
-    def delete_consistencygroup(self, context, group, volumes):
-        pass
-
-    def create_consistencygroup_from_src(self, context, group, volumes,
-                                         cgsnapshot=None, snapshots=None):
-        pass
-
     def create_replica_test_volume(self, volume, src_vref):
         pass
 
-    def create_consistencygroup(self, context, group):
-        pass
-
     def unmanage(self, volume):
         pass
 
-    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
-        pass
-
-    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
-        pass
-
 
 class MockHTTPSResponse(requests.Response):
     """Mock HTTP Response

@@ -0,0 +1,209 @@
# Copyright (c) 2013 - 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

import mock

from cinder import context
from cinder.tests.unit import fake_consistencygroup
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
from cinder.tests.unit.volume.drivers.emc.scaleio import mocks


class TestConsistencyGroups(scaleio.TestScaleIODriver):
    """Test cases for ``ScaleIODriver consistency groups support``"""

    def setUp(self):
        """Setup a test case environment.

        Creates a fake volume object and sets up the required API responses.
        """
        super(TestConsistencyGroups, self).setUp()
        self.ctx = context.RequestContext('fake', 'fake', auth_token=True)
        self.consistency_group = (
            fake_consistencygroup.fake_consistencyobject_obj(self.ctx,
                                                             **{'id': 'cgid'}))
        fake_volume1 = fake_volume.fake_volume_obj(
            self.ctx,
            **{'id': 'volid1', 'provider_id': 'pid_1'})
        fake_volume2 = fake_volume.fake_volume_obj(
            self.ctx,
            **{'id': 'volid2', 'provider_id': 'pid_2'})
        fake_volume3 = fake_volume.fake_volume_obj(
            self.ctx,
            **{'id': 'volid3', 'provider_id': 'pid_3'})
        fake_volume4 = fake_volume.fake_volume_obj(
            self.ctx,
            **{'id': 'volid4', 'provider_id': 'pid_4'})
        self.volumes = [fake_volume1, fake_volume2]
        self.volumes2 = [fake_volume3, fake_volume4]
        fake_snapshot1 = fake_snapshot.fake_snapshot_obj(
            self.ctx,
            **{'id': 'snapid1', 'volume_id': 'volid1',
               'volume': fake_volume1})
        fake_snapshot2 = fake_snapshot.fake_snapshot_obj(
            self.ctx,
            **{'id': 'snapid2', 'volume_id': 'volid2', 'volume':
                fake_volume2})
        self.snapshots = [fake_snapshot1, fake_snapshot2]
        self.snapshot_reply = json.dumps({
            'volumeIdList': ['sid1', 'sid2'],
            'snapshotGroupId': 'sgid1'})
        self.HTTPS_MOCK_RESPONSES = {
            self.RESPONSE_MODE.Valid: {
                'instances/Volume::{}/action/removeVolume'.format(
                    fake_volume1['provider_id']
                ): fake_volume1['provider_id'],
                'instances/Volume::{}/action/removeVolume'.format(
                    fake_volume2['provider_id']
                ): fake_volume2['provider_id'],
                'instances/Volume::{}/action/removeMappedSdc'.format(
                    fake_volume1['provider_id']
                ): fake_volume1['provider_id'],
                'instances/Volume::{}/action/removeMappedSdc'.format(
                    fake_volume2['provider_id']
                ): fake_volume2['provider_id'],
                'instances/System/action/snapshotVolumes':
                    self.snapshot_reply,
            },
            self.RESPONSE_MODE.BadStatus: {
                'instances/Volume::{}/action/removeVolume'.format(
                    fake_volume1['provider_id']
                ): mocks.MockHTTPSResponse(
                    {
                        'errorCode': 401,
                        'message': 'BadStatus Volume Test',
                    }, 401
                ),
                'instances/Volume::{}/action/removeVolume'.format(
                    fake_volume2['provider_id']
                ): mocks.MockHTTPSResponse(
                    {
                        'errorCode': 401,
                        'message': 'BadStatus Volume Test',
                    }, 401
                ),
                'instances/System/action/snapshotVolumes':
                    self.BAD_STATUS_RESPONSE
            },
        }

    def _fake_cgsnapshot(self):
        cgsnap = {'id': 'cgsid', 'name': 'testsnap',
                  'consistencygroup_id': 'cgid', 'status': 'available'}
        return cgsnap

    def test_create_consistencygroup(self):
        result = self.driver.create_consistencygroup(self.ctx,
                                                     self.consistency_group)
        self.assertEqual('available', result['status'])

    def test_delete_consistencygroup_valid(self):
        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
        self.driver.configuration.set_override(
            'sio_unmap_volume_before_deletion',
            override=True)
        result_model_update, result_volumes_update = (
            self.driver.delete_consistencygroup(self.ctx,
                                                self.consistency_group,
                                                self.volumes))
        self.assertTrue(all(volume['status'] == 'deleted' for volume in
                            result_volumes_update))
        self.assertEqual('deleted', result_model_update['status'])

    def test_delete_consistency_group_fail(self):
        self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
        result_model_update, result_volumes_update = (
            self.driver.delete_consistencygroup(self.ctx,
                                                self.consistency_group,
                                                self.volumes))
        self.assertTrue(any(volume['status'] == 'error_deleting' for volume in
                            result_volumes_update))
        self.assertTrue(result_model_update['status'] in ['error_deleting',
                                                          'error'])

    def test_create_consistencygroup_from_cg(self):
        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
        result_model_update, result_volumes_model_update = (
            self.driver.create_consistencygroup_from_src(
                self.ctx, self.consistency_group, self.volumes2,
                source_cg=self.consistency_group, source_vols=self.volumes))
        self.assertEqual('available', result_model_update['status'])
        get_pid = lambda snapshot: snapshot['provider_id']
        volume_provider_list = list(map(get_pid, result_volumes_model_update))
        self.assertListEqual(volume_provider_list, ['sid1', 'sid2'])

    def test_create_consistencygroup_from_cgs(self):
        self.snapshots[0]['provider_id'] = 'pid_1'
        self.snapshots[1]['provider_id'] = 'pid_2'
        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
        result_model_update, result_volumes_model_update = (
            self.driver.create_consistencygroup_from_src(
                self.ctx, self.consistency_group, self.volumes2,
                cgsnapshot=self._fake_cgsnapshot(),
                snapshots=self.snapshots))
        self.assertEqual('available', result_model_update['status'])
        get_pid = lambda snapshot: snapshot['provider_id']
        volume_provider_list = list(map(get_pid, result_volumes_model_update))
        self.assertListEqual(['sid1', 'sid2'], volume_provider_list)

    @mock.patch('cinder.objects.snapshot')
    @mock.patch('cinder.objects.snapshot')
    def test_create_cgsnapshots(self, snapshot1, snapshot2):
        type(snapshot1).volume = mock.PropertyMock(
            return_value=self.volumes[0])
        type(snapshot2).volume = mock.PropertyMock(
            return_value=self.volumes[1])
        snapshots = [snapshot1, snapshot2]
        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
        result_model_update, result_snapshot_model_update = (
            self.driver.create_cgsnapshot(
                self.ctx,
                self._fake_cgsnapshot(),
                snapshots
            ))
        self.assertEqual('available', result_model_update['status'])
        self.assertTrue(all(snapshot['status'] == 'available' for snapshot in
                            result_snapshot_model_update))
        get_pid = lambda snapshot: snapshot['provider_id']
        snapshot_provider_list = list(map(get_pid,
                                          result_snapshot_model_update))
        self.assertListEqual(['sid1', 'sid2'], snapshot_provider_list)

    @mock.patch('cinder.objects.snapshot')
    @mock.patch('cinder.objects.snapshot')
    def test_delete_cgsnapshots(self, snapshot1, snapshot2):
        type(snapshot1).volume = mock.PropertyMock(
            return_value=self.volumes[0])
        type(snapshot2).volume = mock.PropertyMock(
            return_value=self.volumes[1])
        type(snapshot1).provider_id = mock.PropertyMock(
            return_value='pid_1')
        type(snapshot2).provider_id = mock.PropertyMock(
            return_value='pid_2')
        snapshots = [snapshot1, snapshot2]
        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
        result_model_update, result_snapshot_model_update = (
            self.driver.delete_cgsnapshot(
                self.ctx,
                self._fake_cgsnapshot(),
                snapshots
            ))
        self.assertEqual('deleted', result_model_update['status'])
        self.assertTrue(all(snapshot['status'] == 'deleted' for snapshot in
                            result_snapshot_model_update))

@@ -88,9 +88,8 @@ class TestDeleteSnapShot(scaleio.TestScaleIODriver):
         self.driver.delete_snapshot(self.snapshot)
 
     def test_delete_invalid_snapshot(self):
-        self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.delete_snapshot, self.snapshot)
+        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
+        self.driver.delete_snapshot(self.snapshot)
 
     def test_delete_snapshot(self):
         """Setting the unmap volume before delete flag for tests """

@@ -30,7 +30,7 @@ from six.moves import urllib
 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LI, _LW
+from cinder.i18n import _, _LI, _LW, _LE
 from cinder.image import image_utils
 from cinder import utils
 from cinder.volume import driver
@@ -239,11 +239,11 @@ class ScaleIODriver(driver.VolumeDriver):
         extraspecs_limit = storage_type.get(extraspecs_key)
         if extraspecs_limit is not None:
             if qos_limit is not None:
-                LOG.warning(_LW("QoS specs are overriding extraspecs"))
+                LOG.warning(_LW("QoS specs are overriding extra_specs."))
             else:
-                LOG.info(_LI("Using extraspecs for defining QoS specs "
-                             "will be deprecated in the next "
-                             "version of OpenStack, please use QoS specs"))
+                LOG.info(_LI("Using extra_specs for defining QoS specs "
+                             "will be deprecated in the N release "
+                             "of OpenStack. Please use QoS specs."))
         return qos_limit if qos_limit is not None else extraspecs_limit
 
     def _id_to_base64(self, id):
@@ -447,16 +447,7 @@ class ScaleIODriver(driver.VolumeDriver):
                     'server_port': self.server_port}
         request = ("https://%(server_ip)s:%(server_port)s"
                    "/api/instances/System/action/snapshotVolumes") % req_vars
-        r = requests.post(
-            request,
-            data=json.dumps(params),
-            headers=self._get_headers(),
-            auth=(
-                self.server_username,
-                self.server_token),
-            verify=self._get_verify_cert())
-        r = self._check_response(r, request, False, params)
-        response = r.json()
+        r, response = self._execute_scaleio_post_request(params, request)
         LOG.info(_LI("Snapshot volume response: %s."), response)
         if r.status_code != OK_STATUS_CODE and "errorCode" in response:
             msg = (_("Failed creating snapshot for volume %(volname)s: "
@@ -468,6 +459,19 @@ class ScaleIODriver(driver.VolumeDriver):
 
         return {'provider_id': response['volumeIdList'][0]}
 
+    def _execute_scaleio_post_request(self, params, request):
+        r = requests.post(
+            request,
+            data=json.dumps(params),
+            headers=self._get_headers(),
+            auth=(
+                self.server_username,
+                self.server_token),
+            verify=self._get_verify_cert())
+        r = self._check_response(r, request, False, params)
+        response = r.json()
+        return r, response
+
     def _check_response(self, response, request, is_get_request=True,
                         params=None):
         if response.status_code == 401 or response.status_code == 403:
@@ -713,6 +717,7 @@ class ScaleIODriver(driver.VolumeDriver):
         stats['free_capacity_gb'] = 'unknown'
         stats['reserved_percentage'] = 0
         stats['QoS_support'] = True
+        stats['consistencygroup_support'] = True
 
         pools = []
@@ -829,6 +834,7 @@ class ScaleIODriver(driver.VolumeDriver):
                 'total_capacity_gb': total_capacity_gb,
                 'free_capacity_gb': free_capacity_gb,
                 'QoS_support': True,
+                'consistencygroup_support': True,
                 'reserved_percentage': 0
                 }
@@ -1024,7 +1030,7 @@ class ScaleIODriver(driver.VolumeDriver):
         LOG.info(_LI("Get Volume response: %s"), response)
         self._manage_existing_check_legal_response(r, existing_ref)
         if response['mappedSdcInfo'] is not None:
-            reason = ("manage_existing cannot manage a volume "
+            reason = _("manage_existing cannot manage a volume "
                       "connected to hosts. Please disconnect this volume "
                       "from existing hosts before importing")
             raise exception.ManageExistingInvalidReference(
@@ -1087,6 +1093,138 @@ class ScaleIODriver(driver.VolumeDriver):
                 reason=reason
             )
 
+    def create_consistencygroup(self, context, group):
+        """Creates a consistency group.
+
+        ScaleIO won't create CG until cg-snapshot creation,
+        db will maintain the volumes and CG relationship.
+        """
+        LOG.info(_LI("Creating Consistency Group"))
+        model_update = {'status': 'available'}
+        return model_update
+
+    def delete_consistencygroup(self, context, group, volumes):
+        """Deletes a consistency group.
+
+        ScaleIO will delete the volumes of the CG.
+        """
+        LOG.info(_LI("Deleting Consistency Group"))
+        model_update = {'status': 'deleted'}
+        error_statuses = ['error', 'error_deleting']
+        volumes_model_update = []
+        for volume in volumes:
+            try:
+                self._delete_volume(volume['provider_id'])
+                update_item = {'id': volume['id'],
+                               'status': 'deleted'}
+                volumes_model_update.append(update_item)
+            except exception.VolumeBackendAPIException as err:
+                update_item = {'id': volume['id'],
+                               'status': 'error_deleting'}
+                volumes_model_update.append(update_item)
+                if model_update['status'] not in error_statuses:
+                    model_update['status'] = 'error_deleting'
+                LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
+                              "Exception: %(exception)s."),
+                          {'vol': volume['name'], 'exception': err})
+        return model_update, volumes_model_update
+
+    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
+        """Creates a cgsnapshot."""
+        get_scaleio_snapshot_params = lambda snapshot: {
+            'volumeId': snapshot.volume['provider_id'],
+            'snapshotName': self._id_to_base64(snapshot['id'])}
+        snapshotDefs = list(map(get_scaleio_snapshot_params, snapshots))
+        r, response = self._snapshot_volume_group(snapshotDefs)
+        LOG.info(_LI("Snapshot volume response: %s."), response)
+        if r.status_code != OK_STATUS_CODE and "errorCode" in response:
+            msg = (_("Failed creating snapshot for group: "
+                     "%(response)s.") %
+                   {'response': response['message']})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        snapshot_model_update = []
+        for snapshot, scaleio_id in zip(snapshots, response['volumeIdList']):
+            update_item = {'id': snapshot['id'],
+                           'status': 'available',
+                           'provider_id': scaleio_id}
+            snapshot_model_update.append(update_item)
+        model_update = {'status': 'available'}
+        return model_update, snapshot_model_update
+
+    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
+        """Deletes a cgsnapshot."""
+        error_statuses = ['error', 'error_deleting']
+        model_update = {'status': cgsnapshot['status']}
+        snapshot_model_update = []
+        for snapshot in snapshots:
+            try:
+                self._delete_volume(snapshot.provider_id)
+                update_item = {'id': snapshot['id'],
+                               'status': 'deleted'}
+                snapshot_model_update.append(update_item)
+            except exception.VolumeBackendAPIException as err:
+                update_item = {'id': snapshot['id'],
+                               'status': 'error_deleting'}
+                snapshot_model_update.append(update_item)
+                if model_update['status'] not in error_statuses:
+                    model_update['status'] = 'error_deleting'
+                LOG.error(_LE("Failed to delete the snapshot %(snap)s "
+                              "of cgsnapshot: %(cgsnapshot_id)s. "
+                              "Exception: %(exception)s."),
+                          {'snap': snapshot['name'],
+                           'exception': err,
+                           'cgsnapshot_id': cgsnapshot.id})
+        model_update['status'] = 'deleted'
+        return model_update, snapshot_model_update
+
+    def create_consistencygroup_from_src(self, context, group, volumes,
+                                         cgsnapshot=None, snapshots=None,
+                                         source_cg=None, source_vols=None):
+        """Creates a consistency group from a source."""
+        get_scaleio_snapshot_params = lambda src_volume, trg_volume: {
+            'volumeId': src_volume['provider_id'],
+            'snapshotName': self._id_to_base64(trg_volume['id'])}
+        if cgsnapshot and snapshots:
+            snapshotDefs = map(get_scaleio_snapshot_params, snapshots, volumes)
+        else:
+            snapshotDefs = map(get_scaleio_snapshot_params, source_vols,
+                               volumes)
+        r, response = self._snapshot_volume_group(list(snapshotDefs))
+        LOG.info(_LI("Snapshot volume response: %s."), response)
+        if r.status_code != OK_STATUS_CODE and "errorCode" in response:
+            msg = (_("Failed creating snapshot for group: "
+                     "%(response)s.") %
+                   {'response': response['message']})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        volumes_model_update = []
+        for volume, scaleio_id in zip(volumes, response['volumeIdList']):
+            update_item = {'id': volume['id'],
+                           'status': 'available',
+                           'provider_id': scaleio_id}
+            volumes_model_update.append(update_item)
+        model_update = {'status': 'available'}
+        return model_update, volumes_model_update
+
+    def update_consistencygroup(self, context, group,
+                                add_volumes=None, remove_volumes=None):
+        """Update a consistency group.
+
+        ScaleIO does not handle volume grouping.
+        Cinder maintains volumes and CG relationship.
+        """
+        return None, None, None
+
+    def _snapshot_volume_group(self, snapshotDefs):
+        LOG.info(_LI("ScaleIO snapshot group of volumes"))
+        params = {'snapshotDefs': snapshotDefs}
+        req_vars = {'server_ip': self.server_ip,
+                    'server_port': self.server_port}
+        request = ("https://%(server_ip)s:%(server_port)s"
+                   "/api/instances/System/action/snapshotVolumes") % req_vars
+        return self._execute_scaleio_post_request(params, request)
+
     def ensure_export(self, context, volume):
         """Driver entry point to get the export info for an existing volume."""
         pass

@@ -0,0 +1,3 @@
---
features:
  - Add Consistency Group support in ScaleIO driver.
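
For context, the group methods added above funnel into a single ScaleIO
``snapshotVolumes`` REST call. Below is a sketch of the request body the
driver builds and the reply shape the new tests mock; the values are
illustrative only and mirror the fake data used in the test file.

    # Illustrative payload/response shapes; values are made up.
    params = {
        'snapshotDefs': [
            # one entry per member volume: its provider_id plus the
            # base64-encoded id of the snapshot/volume to create
            {'volumeId': 'pid_1', 'snapshotName': 'c25hcGlkMQ=='},
            {'volumeId': 'pid_2', 'snapshotName': 'c25hcGlkMg=='},
        ]
    }
    # POSTed to .../api/instances/System/action/snapshotVolumes
    response = {
        'volumeIdList': ['sid1', 'sid2'],   # stored as provider_id per item
        'snapshotGroupId': 'sgid1',
    }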