ScaleIO Driver: get manageable volumes

Implement listing of manageable volumes in the ScaleIO driver.

Change-Id: I76c62034bd45d00eb0e10d18f2ed21f08a3e3d10
Eric Young 2017-06-01 13:44:18 -04:00
parent 34eefbe834
commit c129e80cb0
3 changed files with 368 additions and 2 deletions
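The new driver entry point follows Cinder's standard manageable-resources contract: fetch every volume in the configured storage pool, skip snapshots, flag volumes that are mapped to an SDC or already owned by Cinder as unsafe, and hand the references to volume_utils.paginate_entries_list for paging. A minimal sketch of a caller, assuming an already-initialized ScaleIODriver instance named driver (the harness itself is not part of this commit):

refs = driver.get_manageable_volumes(
    cinder_volumes=[],   # volumes Cinder already owns on this backend
    marker=None, limit=10, offset=0,
    sort_keys=['size'], sort_dirs=['desc'])
for ref in refs:
    # Each entry mirrors MANAGEABLE_SCALEIO_VOL_REFS in the new tests:
    # reference, size (GiB), safe_to_manage, reason_not_safe, cinder_id,
    # extra_info.
    print(ref['reference']['source-id'], ref['safe_to_manage'])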


@@ -0,0 +1,191 @@
# Copyright (C) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from copy import deepcopy

import ddt
import mock

from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.dell_emc import scaleio

VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdabcdabcd"
PROVIDER_ID = "0000000000000001"
MANAGEABLE_SCALEIO_VOLS = [
{
"volumeType": "ThinProvisioned",
"storagePoolId": "6c6dc54500000000",
"sizeInKb": 8388608,
"name": "volume1",
"id": PROVIDER_ID,
"mappedSdcInfo": [],
},
{
"volumeType": "ThinProvisioned",
"storagePoolId": "6c6dc54500000000",
"sizeInKb": 8388608,
"name": "volume2",
"id": "0000000000000002",
"mappedSdcInfo": [],
},
{
"volumeType": "ThickProvisioned",
"storagePoolId": "6c6dc54500000000",
"sizeInKb": 8388608,
"name": "volume3",
"id": "0000000000000003",
"mappedSdcInfo": [],
}
]

SCALEIO_SNAPSHOT = {
"volumeType": "Snapshot",
"storagePoolId": "6c6dc54500000000",
"sizeInKb": 8388608,
"name": "snapshot1",
"id": "1000000000000001",
"mappedSdcInfo": [],
}

MANAGEABLE_SCALEIO_VOL_REFS = [
{
'reference': {'source-id': PROVIDER_ID},
'size': 8,
'safe_to_manage': True,
'reason_not_safe': None,
'cinder_id': None,
'extra_info': {
"volumeType": "ThinProvisioned",
"name": "volume1"
}
},
{
'reference': {'source-id': '0000000000000002'},
'size': 8,
'safe_to_manage': True,
'reason_not_safe': None,
'cinder_id': None,
'extra_info': {
"volumeType": "ThinProvisioned",
"name": "volume2"
}
},
{
'reference': {'source-id': '0000000000000003'},
'size': 8,
'safe_to_manage': True,
'reason_not_safe': None,
'cinder_id': None,
'extra_info': {
"volumeType": "ThickProvisioned",
"name": "volume3"
}
}
]


@ddt.ddt
class ScaleIOManageableCase(scaleio.TestScaleIODriver):
    def setUp(self):
        """Set up the test case environment."""
        super(ScaleIOManageableCase, self).setUp()

def _test_get_manageable_things(self,
scaleio_objects=MANAGEABLE_SCALEIO_VOLS,
expected_refs=MANAGEABLE_SCALEIO_VOL_REFS,
cinder_objs=list()):
marker = mock.Mock()
limit = mock.Mock()
offset = mock.Mock()
sort_keys = mock.Mock()
sort_dirs = mock.Mock()
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'instances/StoragePool::test_pool/relationships/Volume':
scaleio_objects
},
}
with mock.patch('cinder.volume.utils.paginate_entries_list') as mpage:
test_func = self.driver.get_manageable_volumes
test_func(cinder_objs, marker, limit, offset, sort_keys, sort_dirs)
mpage.assert_called_once_with(
expected_refs,
marker,
limit,
offset,
sort_keys,
sort_dirs
            )

def test_get_manageable_volumes(self):
"""Default success case.
Given a list of scaleio volumes from the REST API, give back a list
of volume references.
"""
self._test_get_manageable_things()
def test_get_manageable_volumes_connected_vol(self):
"""Make sure volumes connected to hosts are flagged as unsafe."""
mapped_sdc = deepcopy(MANAGEABLE_SCALEIO_VOLS)
mapped_sdc[0]['mappedSdcInfo'] = ["host1"]
mapped_sdc[1]['mappedSdcInfo'] = ["host1", "host2"]
# change up the expected results
expected_refs = deepcopy(MANAGEABLE_SCALEIO_VOL_REFS)
        for x in range(len(mapped_sdc)):
            sdc = mapped_sdc[x]['mappedSdcInfo']
            if sdc:
                expected_refs[x]['safe_to_manage'] = False
                expected_refs[x]['reason_not_safe'] = (
                    'Volume mapped to %d host(s).' % len(sdc))

        self._test_get_manageable_things(expected_refs=expected_refs,
                                         scaleio_objects=mapped_sdc)

def test_get_manageable_volumes_already_managed(self):
"""Make sure volumes already owned by cinder are flagged as unsafe."""
cinder_vol = fake_volume.fake_volume_obj(mock.MagicMock())
cinder_vol.id = VOLUME_ID
cinder_vol.provider_id = PROVIDER_ID
        cinder_vols = [cinder_vol]

        # change up the expected results
        expected_refs = deepcopy(MANAGEABLE_SCALEIO_VOL_REFS)
        expected_refs[0]['reference'] = {'source-id': PROVIDER_ID}
        expected_refs[0]['safe_to_manage'] = False
        expected_refs[0]['reason_not_safe'] = 'Volume already managed.'
        expected_refs[0]['cinder_id'] = VOLUME_ID

        self._test_get_manageable_things(expected_refs=expected_refs,
                                         cinder_objs=cinder_vols)

def test_get_manageable_volumes_no_snapshots(self):
"""Make sure refs returned do not include snapshots."""
volumes = deepcopy(MANAGEABLE_SCALEIO_VOLS)
volumes.append(SCALEIO_SNAPSHOT)
        self._test_get_manageable_things(scaleio_objects=volumes)

def test_get_manageable_volumes_no_scaleio_volumes(self):
"""Expect no refs to be found if no volumes are on ScaleIO."""
self._test_get_manageable_things(scaleio_objects=[],
expected_refs=[])


@@ -37,6 +37,7 @@ from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import objects
from cinder import utils
from cinder.objects import fields
@@ -326,6 +327,10 @@ class ScaleIODriver(driver.VolumeDriver):
def _version_greater_than_or_equal(ver1, ver2):
        return version.LooseVersion(ver1) >= version.LooseVersion(ver2)

@staticmethod
def _convert_kb_to_gib(size):
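        # sizeInKb is KiB; units.Mi (1048576) is the number of KiB per GiB,
        # and math.ceil rounds any partial GiB up, e.g. 8388608 KiB -> 8 GiB.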
        return int(math.ceil(float(size) / units.Mi))

@staticmethod
def _id_to_base64(id):
# Base64 encode the id to get a volume name less than 32 characters due
@@ -1159,6 +1164,165 @@ class ScaleIODriver(driver.VolumeDriver):
self._manage_existing_check_legal_response(r, existing_ref)
        return response

def _get_protection_domain_id(self):
""""Get the id of the configured protection domain"""
if self.protection_domain_id:
return self.protection_domain_id
if not self.protection_domain_name:
msg = _("Must specify protection domain name or"
" protection domain id.")
raise exception.VolumeBackendAPIException(data=msg)
domain_name = self.protection_domain_name
encoded_domain_name = urllib.parse.quote(domain_name, '')
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'encoded_domain_name': encoded_domain_name}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Domain/instances/getByName::"
"%(encoded_domain_name)s") % req_vars
LOG.debug("ScaleIO get domain id by name request: %s.", request)
r, domain_id = self._execute_scaleio_get_request(request)
if not domain_id:
msg = (_("Domain with name %s wasn't found.")
% self.protection_domain_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != http_client.OK and "errorCode" in domain_id:
msg = (_("Error getting domain id from name %(name)s: %(id)s.")
% {'name': self.protection_domain_name,
'id': domain_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Domain id is %s.", domain_id)
        return domain_id

def _get_storage_pool_id(self):
"""Get the id of the configured storage pool"""
if self.storage_pool_id:
return self.storage_pool_id
        if not self.storage_pool_name:
msg = _("Must specify storage pool name or"
" storage pool id.")
raise exception.VolumeBackendAPIException(data=msg)
domain_id = self._get_protection_domain_id()
pool_name = self.storage_pool_name
encoded_pool_name = urllib.parse.quote(pool_name, '')
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'domain_id': domain_id,
'encoded_pool_name': encoded_pool_name}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Pool/instances/getByName::"
"%(domain_id)s,%(encoded_pool_name)s") % req_vars
LOG.debug("ScaleIO get pool id by name request: %s.", request)
r, pool_id = self._execute_scaleio_get_request(request)
if not pool_id:
msg = (_("Pool with name %(pool_name)s wasn't found in "
"domain %(domain_id)s.")
% {'pool_name': pool_name,
'domain_id': domain_id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != http_client.OK and "errorCode" in pool_id:
msg = (_("Error getting pool id from name %(pool_name)s: "
"%(err_msg)s.")
% {'pool_name': pool_name,
'err_msg': pool_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Pool id is %s.", pool_id)
        return pool_id

def _get_all_scaleio_volumes(self):
"""Gets list of all SIO volumes in PD and SP"""
sp_id = self._get_storage_pool_id()
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'storage_pool_id': sp_id}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/StoragePool::%(storage_pool_id)s"
"/relationships/Volume") % req_vars
LOG.info("ScaleIO get volumes in SP: %s.",
request)
r, volumes = self._execute_scaleio_get_request(request)
if r.status_code != http_client.OK:
msg = (_("Error calling api "
"status code: %d") % r.status_code)
raise exception.VolumeBackendAPIException(data=msg)
        return volumes

def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
"""List volumes on the backend available for management by Cinder.
Rule out volumes that are mapped to an SDC or
are already in the list of cinder_volumes.
Return references of the volume ids for any others.
"""
all_sio_volumes = self._get_all_scaleio_volumes()
# Put together a map of existing cinder volumes on the array
# so we can lookup cinder id's to SIO id
existing_vols = {}
for cinder_vol in cinder_volumes:
provider_id = cinder_vol['provider_id']
existing_vols[provider_id] = cinder_vol.name_id
manageable_volumes = []
for sio_vol in all_sio_volumes:
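            # A match here means Cinder already tracks this backend volume:
            # an existing volume's provider_id equals the ScaleIO volume id.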
cinder_id = existing_vols.get(sio_vol['id'])
is_safe = True
reason = None
if sio_vol['mappedSdcInfo']:
is_safe = False
                num_hosts = len(sio_vol['mappedSdcInfo'])
                reason = _('Volume mapped to %d host(s).') % num_hosts
if cinder_id:
is_safe = False
reason = _("Volume already managed.")
if sio_vol['volumeType'] != 'Snapshot':
manageable_volumes.append({
'reference': {'source-id': sio_vol['id']},
'size': self._convert_kb_to_gib(sio_vol['sizeInKb']),
'safe_to_manage': is_safe,
'reason_not_safe': reason,
'cinder_id': cinder_id,
'extra_info': {'volumeType': sio_vol['volumeType'],
'name': sio_vol['name']}})
return volume_utils.paginate_entries_list(
            manageable_volumes, marker, limit, offset, sort_keys, sort_dirs)

def _is_managed(self, volume_id):
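        # Scan the Cinder volumes on this host; the backend volume is already
        # managed if any of them stores this ScaleIO id as its provider_id.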
lst = objects.VolumeList.get_all_by_host(context.get_admin_context(),
self.host)
for vol in lst:
if vol.provider_id == volume_id:
return True
        return False

def manage_existing(self, volume, existing_ref):
"""Manage an existing ScaleIO volume.
@@ -1246,8 +1410,7 @@ class ScaleIODriver(driver.VolumeDriver):
LOG.info("ScaleIO get volume by id request: %s.", request)
return request
    def _manage_existing_check_legal_response(self, response, existing_ref):
if response.status_code != http_client.OK:
reason = (_("Error managing volume: %s.") % response.json()[
'message'])
@@ -1256,6 +1419,15 @@ class ScaleIODriver(driver.VolumeDriver):
reason=reason
)
# check if it is already managed
if self._is_managed(response.json()['id']):
reason = _("manage_existing cannot manage a volume "
"that is already being managed.")
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=reason
)
if response.json()['mappedSdcInfo'] is not None:
reason = _("manage_existing cannot manage a volume "
"connected to hosts. Please disconnect this volume "


@@ -0,0 +1,3 @@
---
features:
  - Added the ability to list all manageable volumes within the ScaleIO driver.
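
Once deployed, the listing surfaces through Cinder's manageable-volumes API. A hedged usage sketch with python-cinderclient; the credentials, endpoint, and host@backend string below are placeholders, and list_manageable availability depends on the client release:

# Placeholder credentials/endpoint; adjust to the deployment.
from cinderclient import client

cinder = client.Client('3', 'admin', 'secret', 'demo',
                       'http://controller:5000/v3')
# detailed=True includes safe_to_manage/reason_not_safe in each record.
for vol in cinder.volumes.list_manageable('controller@scaleio',
                                          detailed=True):
    print(vol.reference, vol.safe_to_manage, vol.reason_not_safe)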