ScaleIO QoS Support
This patch adds QoS support to the ScaleIO driver by using Cinder QoS specs. It also refactors logging and fixes formatting errors.

DocImpact
Change-Id: I7608192b91010a538027ab456c5ff5bba569214c
Implements: blueprint scaleio-qos-support
parent adf35d0ee1
commit 56cae96a06
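Behavior note: when a volume type carries both a Cinder QoS spec (maxIOPS, maxBWS) and the older extra specs (sio:iops_limit, sio:bandwidth_limit), the QoS spec values win and the extra specs only act as a fallback; the driver also logs that defining QoS through extra specs is deprecated. Below is a minimal standalone sketch of that precedence rule; the pick_limit function and the merged dict are illustrative only and not part of the driver, which implements this in its _find_limit helper.

def pick_limit(storage_type, qos_key, extraspecs_key):
    # The QoS spec key (e.g. 'maxIOPS') wins; the extra-spec key
    # (e.g. 'sio:iops_limit') is only used as a fallback.
    qos_limit = storage_type.get(qos_key)
    extraspecs_limit = storage_type.get(extraspecs_key)
    return qos_limit if qos_limit is not None else extraspecs_limit

# Both sources set limits; the QoS spec values (1000 IOPS, 3000 BWS) are used,
# matching the test_qos_and_extraspecs case in the new unit tests.
merged = {'maxIOPS': 1000, 'maxBWS': 3000,
          'sio:iops_limit': 2000, 'sio:bandwidth_limit': 4000}
assert pick_limit(merged, 'maxIOPS', 'sio:iops_limit') == 1000
assert pick_limit(merged, 'maxBWS', 'sio:bandwidth_limit') == 3000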
@@ -0,0 +1,69 @@
+# Copyright (c) 2015 EMC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import mock
+
+from cinder import context
+from cinder.tests.unit import fake_volume
+from cinder.tests.unit.volume.drivers.emc import scaleio
+
+
+class TestInitializeConnection(scaleio.TestScaleIODriver):
+    def setUp(self):
+        """Setup a test case environment."""
+
+        super(TestInitializeConnection, self).setUp()
+        self.connector = {}
+        self.ctx = (
+            context.RequestContext('fake', 'fake', True, auth_token=True))
+        self.volume = fake_volume.fake_volume_obj(self.ctx)
+
+    def test_only_qos(self):
+        qos = {'maxIOPS': 1000, 'maxBWS': 3000}
+        extraspecs = {}
+        connection_properties = (
+            self._initialize_connection(qos, extraspecs)['data'])
+        self.assertEqual(1000, connection_properties['iopsLimit'])
+        self.assertEqual(3000, connection_properties['bandwidthLimit'])
+
+    def test_no_qos(self):
+        qos = {}
+        extraspecs = {}
+        connection_properties = (
+            self._initialize_connection(qos, extraspecs)['data'])
+        self.assertIsNone(connection_properties['iopsLimit'])
+        self.assertIsNone(connection_properties['bandwidthLimit'])
+
+    def test_only_extraspecs(self):
+        qos = {}
+        extraspecs = {'sio:iops_limit': 2000, 'sio:bandwidth_limit': 4000}
+        connection_properties = (
+            self._initialize_connection(qos, extraspecs)['data'])
+        self.assertEqual(2000, connection_properties['iopsLimit'])
+        self.assertEqual(4000, connection_properties['bandwidthLimit'])
+
+    def test_qos_and_extraspecs(self):
+        qos = {'maxIOPS': 1000, 'maxBWS': 3000}
+        extraspecs = {'sio:iops_limit': 2000, 'sio:bandwidth_limit': 4000}
+        connection_properties = (
+            self._initialize_connection(qos, extraspecs)['data'])
+        self.assertEqual(1000, connection_properties['iopsLimit'])
+        self.assertEqual(3000, connection_properties['bandwidthLimit'])
+
+    def _initialize_connection(self, qos, extraspecs):
+        self.driver._get_volumetype_qos = mock.MagicMock()
+        self.driver._get_volumetype_qos.return_value = qos
+        self.driver._get_volumetype_extraspecs = mock.MagicMock()
+        self.driver._get_volumetype_extraspecs.return_value = extraspecs
+        return self.driver.initialize_connection(self.volume, self.connector)
@@ -35,6 +35,7 @@ from cinder.image import image_utils
 from cinder import utils
 from cinder.volume import driver
 from cinder.volume.drivers.san import san
+from cinder.volume import qos_specs
 from cinder.volume import volume_types
 
 CONF = cfg.CONF
@@ -80,6 +81,8 @@ PROTECTION_DOMAIN_ID = 'sio:pd_id'
 PROVISIONING_KEY = 'sio:provisioning_type'
 IOPS_LIMIT_KEY = 'sio:iops_limit'
 BANDWIDTH_LIMIT = 'sio:bandwidth_limit'
+QOS_IOPS_LIMIT_KEY = 'maxIOPS'
+QOS_BANDWIDTH_LIMIT = 'maxBWS'
 
 BLOCK_SIZE = 8
 OK_STATUS_CODE = 200
@@ -92,6 +95,7 @@ class ScaleIODriver(driver.VolumeDriver):
     """EMC ScaleIO Driver."""
 
     VERSION = "2.0"
+    scaleio_qos_keys = (QOS_IOPS_LIMIT_KEY, QOS_BANDWIDTH_LIMIT)
 
     def __init__(self, *args, **kwargs):
         super(ScaleIODriver, self).__init__(*args, **kwargs)
@@ -110,7 +114,8 @@ class ScaleIODriver(driver.VolumeDriver):
         self.server_certificate_path = (
             self.configuration.sio_server_certificate_path)
         LOG.info(_LI(
-            "REST server IP: %(ip)s, port: %(port)s, username: %(user)s. "
+            "REST server IP: %(ip)s, port: %(port)s, username: %("
+            "user)s. "
             "Verify server's certificate: %(verify_cert)s."),
             {'ip': self.server_ip,
              'port': self.server_port,
@@ -204,9 +209,8 @@ class ScaleIODriver(driver.VolumeDriver):
             raise exception.InvalidInput(reason=msg)
 
         if not self.storage_pools:
-            msg = _(
-                "Must specify storage pools. Option: sio_storage_pools."
-            )
+            msg = (_("Must specify storage pools. Option: "
+                     "sio_storage_pools."))
             raise exception.InvalidInput(reason=msg)
 
     def _find_storage_pool_id_from_storage_type(self, storage_type):
@@ -231,11 +235,17 @@ class ScaleIODriver(driver.VolumeDriver):
     def _find_provisioning_type(self, storage_type):
         return storage_type.get(PROVISIONING_KEY)
 
-    def _find_iops_limit(self, storage_type):
-        return storage_type.get(IOPS_LIMIT_KEY)
-
-    def _find_bandwidth_limit(self, storage_type):
-        return storage_type.get(BANDWIDTH_LIMIT)
+    def _find_limit(self, storage_type, qos_key, extraspecs_key):
+        qos_limit = storage_type.get(qos_key)
+        extraspecs_limit = storage_type.get(extraspecs_key)
+        if extraspecs_limit is not None:
+            if qos_limit is not None:
+                LOG.warning(_LW("QoS specs are overriding extra_specs."))
+            else:
+                LOG.info(_LI("Using extra_specs for defining QoS specs "
+                             "will be deprecated in the N release "
+                             "of OpenStack. Please use QoS specs."))
+        return qos_limit if qos_limit is not None else extraspecs_limit
 
     def _id_to_base64(self, id):
         # Base64 encode the id to get a volume name less than 32 characters due
@@ -274,7 +284,8 @@ class ScaleIODriver(driver.VolumeDriver):
         provisioning_type = self._find_provisioning_type(storage_type)
 
         LOG.info(_LI(
-            "Volume type: %(volume_type)s, storage pool name: %(pool_name)s, "
+            "Volume type: %(volume_type)s, "
+            "storage pool name: %(pool_name)s, "
             "storage pool id: %(pool_id)s, protection domain id: "
             "%(domain_id)s, protection domain name: %(domain_name)s."),
             {'volume_type': storage_type,
@@ -418,7 +429,8 @@ class ScaleIODriver(driver.VolumeDriver):
                 self.configuration.sio_round_volume_capacity)
             if not round_volume_capacity:
                 exception_msg = (_(
-                    "Cannot create volume of size %s: not multiple of 8GB.") %
+                    "Cannot create volume of size %s: "
+                    "not multiple of 8GB.") %
                     size)
                 LOG.error(exception_msg)
                 raise exception.VolumeBackendAPIException(data=exception_msg)
@@ -529,7 +541,8 @@ class ScaleIODriver(driver.VolumeDriver):
         """
         vol_id = volume['provider_id']
         LOG.info(_LI(
-            "ScaleIO extend volume: volume %(volname)s to size %(new_size)s."),
+            "ScaleIO extend volume:"
+            " volume %(volname)s to size %(new_size)s."),
             {'volname': vol_id,
              'new_size': new_size})
 
@@ -577,8 +590,8 @@ class ScaleIODriver(driver.VolumeDriver):
         volume_id = src_vref['provider_id']
         snapname = self._id_to_base64(volume.id)
         LOG.info(_LI(
-            "ScaleIO create cloned volume: source volume %(src)s to target "
-            "volume %(tgt)s."),
+            "ScaleIO create cloned volume: source volume %(src)s to "
+            "target volume %(tgt)s."),
             {'src': volume_id,
              'tgt': snapname})
 
@@ -606,7 +619,8 @@ class ScaleIODriver(driver.VolumeDriver):
                    "/api/instances/Volume::%(vol_id)s"
                    "/action/removeMappedSdc") % req_vars
         LOG.info(_LI(
-            "Trying to unmap volume from all sdcs before deletion: %s."),
+            "Trying to unmap volume from all sdcs"
+            " before deletion: %s."),
             request)
         r = requests.post(
             request,
@@ -641,7 +655,8 @@ class ScaleIODriver(driver.VolumeDriver):
             force_delete = self.configuration.sio_force_delete
             if force_delete:
                 LOG.warning(_LW(
-                    "Ignoring error in delete volume %s: volume not found "
+                    "Ignoring error in delete volume %s:"
+                    " volume not found "
                     "due to force delete settings."), vol_id)
             else:
                 msg = (_("Error deleting volume %s: volume not found.") %
@@ -672,11 +687,16 @@ class ScaleIODriver(driver.VolumeDriver):
 
         volname = self._id_to_base64(volume.id)
         connection_properties['scaleIO_volname'] = volname
-        storage_type = self._get_volumetype_extraspecs(volume)
+        extra_specs = self._get_volumetype_extraspecs(volume)
+        qos_specs = self._get_volumetype_qos(volume)
+        storage_type = extra_specs.copy()
+        storage_type.update(qos_specs)
         LOG.info(_LI("Volume type is %s."), storage_type)
-        iops_limit = self._find_iops_limit(storage_type)
+        iops_limit = self._find_limit(storage_type, QOS_IOPS_LIMIT_KEY,
+                                      IOPS_LIMIT_KEY)
         LOG.info(_LI("iops limit is: %s."), iops_limit)
-        bandwidth_limit = self._find_bandwidth_limit(storage_type)
+        bandwidth_limit = self._find_limit(storage_type, QOS_BANDWIDTH_LIMIT,
+                                           BANDWIDTH_LIMIT)
         LOG.info(_LI("Bandwidth limit is: %s."), bandwidth_limit)
         connection_properties['iopsLimit'] = iops_limit
         connection_properties['bandwidthLimit'] = bandwidth_limit
@@ -698,7 +718,7 @@ class ScaleIODriver(driver.VolumeDriver):
         stats['total_capacity_gb'] = 'unknown'
         stats['free_capacity_gb'] = 'unknown'
         stats['reserved_percentage'] = 0
-        stats['QoS_support'] = False
+        stats['QoS_support'] = True
 
         pools = []
 
@@ -814,7 +834,7 @@ class ScaleIODriver(driver.VolumeDriver):
             pool = {'pool_name': sp_name,
                     'total_capacity_gb': total_capacity_gb,
                     'free_capacity_gb': free_capacity_gb,
-                    'QoS_support': False,
+                    'QoS_support': True,
                     'reserved_percentage': 0
                     }
 
@@ -823,10 +843,6 @@ class ScaleIODriver(driver.VolumeDriver):
                 max_free_capacity = free_capacity_gb
             total_capacity = total_capacity + total_capacity_gb
 
-        stats['volume_backend_name'] = backend_name or 'scaleio'
-        stats['vendor_name'] = 'EMC'
-        stats['driver_version'] = self.VERSION
-        stats['storage_protocol'] = 'scaleio'
         # Use zero capacities here so we always use a pool.
         stats['total_capacity_gb'] = total_capacity
         stats['free_capacity_gb'] = max_free_capacity
@@ -836,8 +852,6 @@ class ScaleIODriver(driver.VolumeDriver):
                  {'free': max_free_capacity,
                   'total': total_capacity})
 
-        stats['reserved_percentage'] = 0
-        stats['QoS_support'] = False
         stats['pools'] = pools
 
         LOG.info(_LI("Backend name is %s."), stats["volume_backend_name"])
@@ -866,6 +880,22 @@ class ScaleIODriver(driver.VolumeDriver):
 
         return specs
 
+    def _get_volumetype_qos(self, volume):
+        qos = {}
+        ctxt = context.get_admin_context()
+        type_id = volume['volume_type_id']
+        if type_id:
+            volume_type = volume_types.get_volume_type(ctxt, type_id)
+            qos_specs_id = volume_type.get('qos_specs_id')
+            if qos_specs_id is not None:
+                specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
+            else:
+                specs = {}
+            for key, value in specs.items():
+                if key in self.scaleio_qos_keys:
+                    qos[key] = value
+        return qos
+
     def _sio_attach_volume(self, volume):
         """Call connector.connect_volume() and return the path. """
         LOG.debug("Calling os-brick to attach ScaleIO volume.")
@@ -0,0 +1,3 @@
+---
+features:
+  - Add QoS support in ScaleIO driver.