Datera driver 2.2 update

This update adds the following changes to the Datera driver

- Capabilities List
- Extended Volume-Type Support
- Naming convention change
- Volume re-type support
- Volume Manage/Unmanage support

Many driver settings that were originally located in StrOpts have been
moved to VolumeType ExtraSpecs.

DocImpact

Change-Id: I469fe29950984e05b70b9399883f2bd73ec856eb
This commit is contained in:
Matt Smith
2016-08-08 10:16:09 -07:00
parent 9daffb12af
commit fff322b3d5
3 changed files with 795 additions and 155 deletions

View File

@@ -15,9 +15,10 @@
import functools
import json
import time
import re
import uuid
import eventlet
import ipaddress
from oslo_config import cfg
from oslo_log import log as logging
@@ -33,12 +34,11 @@ from cinder import interface
from cinder import utils
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import utils as volutils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
DATERA_SI_SLEEP = 4
d_opts = [
cfg.StrOpt('datera_api_port',
default='7717',
@@ -48,6 +48,7 @@ d_opts = [
help='Datera API version.'),
cfg.IntOpt('datera_num_replicas',
default='3',
deprecated_for_removal=True,
help='Number of replicas to create of an inode.'),
cfg.IntOpt('datera_503_timeout',
default='120',
@@ -55,12 +56,18 @@ d_opts = [
cfg.IntOpt('datera_503_interval',
default='5',
help='Interval between 503 retries'),
cfg.BoolOpt('datera_acl_allow_all',
default=False,
help="True to set acl 'allow_all' on volumes created"),
cfg.BoolOpt('datera_debug',
default=False,
help="True to set function arg and return logging")
help="True to set function arg and return logging"),
cfg.BoolOpt('datera_acl_allow_all',
default=False,
deprecated_for_removal=True,
help="True to set acl 'allow_all' on volumes "
"created"),
cfg.BoolOpt('datera_debug_replica_count_override',
default=False,
help="ONLY FOR DEBUG/TESTING PURPOSES\n"
"True to set replica_count to 1")
]
@@ -68,25 +75,41 @@ CONF = cfg.CONF
CONF.import_opt('driver_use_ssl', 'cinder.volume.driver')
CONF.register_opts(d_opts)
DEFAULT_STORAGE_NAME = 'storage-1'
DEFAULT_VOLUME_NAME = 'volume-1'
DEFAULT_SI_SLEEP = 10
INITIATOR_GROUP_PREFIX = "IG-"
OS_PREFIX = "OS-"
UNMANAGE_PREFIX = "UNMANAGED-"
# Taken from this SO post :
# http://stackoverflow.com/a/18516125
# Using old-style string formatting because of the nature of the regex
# conflicting with new-style curly braces
UUID4_STR_RE = ("%s[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]"
"[a-f0-9]{3}-?[a-f0-9]{12}")
UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX)
# Recursive dict to assemble basic url structure for the most common
# API URL endpoints. Most others are constructed from these.
# Each entry is a callable; the parameterized ones take the storage
# instance (and volume) name so non-default names from volume-type
# extra-specs can be substituted.
URL_TEMPLATES = {
    'ai': lambda: 'app_instances',
    'ai_inst': lambda: (URL_TEMPLATES['ai']() + '/{}'),
    'si': lambda: (URL_TEMPLATES['ai_inst']() + '/storage_instances'),
    'si_inst': lambda storage_name: (
        (URL_TEMPLATES['si']() + '/{}').format(
            '{}', storage_name)),
    'vol': lambda storage_name: (
        (URL_TEMPLATES['si_inst'](storage_name) + '/volumes')),
    'vol_inst': lambda storage_name, volume_name: (
        (URL_TEMPLATES['vol'](storage_name) + '/{}').format(
            '{}', volume_name))}
def _get_name(name):
    """Return *name* prefixed with the OS- marker used for managed resources."""
    return OS_PREFIX + name
def _get_unmanaged(name):
    """Return *name* prefixed with the UNMANAGED- marker."""
    return UNMANAGE_PREFIX + name
def _authenticated(func):
@@ -123,56 +146,30 @@ class DateraDriver(san.SanISCSIDriver):
1.1 - Look for lun-0 instead of lun-1.
2.0 - Update For Datera API v2
2.1 - Multipath, ACL and reorg
2.2 - Capabilities List, Extended Volume-Type Support
Naming convention change,
Volume Manage/Unmanage support
"""
VERSION = '2.1'
VERSION = '2.2'
CI_WIKI_NAME = "datera-ci"
def __init__(self, *args, **kwargs):
    """Initialize driver state from the Cinder configuration.

    Most per-volume behavior (replica count, ACLs, names) is now driven
    by volume-type extra-specs, so only connection/retry settings and
    debug flags are cached here.
    """
    super(DateraDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(d_opts)
    self.username = self.configuration.san_login
    self.password = self.configuration.san_password
    self.auth_token = None
    self.cluster_stats = {}
    self.datera_api_token = None
    self.interval = self.configuration.datera_503_interval
    # int() so the retry count is a usable loop bound even under true
    # division (Python 3)
    self.retry_attempts = int(self.configuration.datera_503_timeout /
                              self.interval)
    # Short random prefix to keep initiator names unique per driver run
    self.driver_prefix = str(uuid.uuid4())[:4]
    self.datera_debug = self.configuration.datera_debug

    if self.datera_debug:
        utils.setup_tracing(['method'])
def _login(self):
"""Use the san_login and san_password to set self.auth_token."""
body = {
'name': self.username,
'password': self.password
}
# Unset token now, otherwise potential expired token will be sent
# along to be used for authorization when trying to login.
self.auth_token = None
try:
LOG.debug('Getting Datera auth token.')
results = self._issue_api_request('login', 'put', body=body,
sensitive=True)
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume '
'service again.'))
def _get_lunid(self):
return 0
def do_setup(self, context):
# If we can't authenticate through the old and new method, just fail
# now.
@@ -186,64 +183,65 @@ class DateraDriver(san.SanISCSIDriver):
self._login()
@utils.retry(exception.VolumeDriverException, retries=3)
def _wait_for_resource(self, id, resource_type, policies):
    """Poll the backend until the volume reports op_state 'available'.

    Retried (via utils.retry) up to 3 times on VolumeDriverException.

    :param id: backend identifier of the resource to query
    :param resource_type: API url/type of the resource
    :param policies: policy dict supplying the storage/volume names
    :raises VolumeDriverException: when the resource is not yet ready
    """
    result = self._issue_api_request(resource_type, 'get', id)
    if result['storage_instances'][
            policies['default_storage_name']]['volumes'][
            policies['default_volume_name']]['op_state'] == 'available':
        return
    else:
        raise exception.VolumeDriverException(
            message=_('Resource not ready.'))
def _create_resource(self, resource, resource_type, body):
    """Create a resource on the backend and wait until it is available.

    :param resource: Cinder resource dict (volume/snapshot)
    :param resource_type: API url/type of the resource being created
    :param body: request payload for the create call
    :raises Invalid: re-raised when the backend rejects the request
                     (usually bad volume-type extra-specs)
    """
    result = None
    try:
        result = self._issue_api_request(resource_type, 'post', body=body)
    except exception.Invalid:
        type_id = resource.get('volume_type_id', None)
        if resource_type == 'volumes' and type_id:
            LOG.error(_LE("Creation request failed. Please verify the "
                          "extra-specs set for your volume types are "
                          "entered correctly."))
        raise
    else:
        policies = self._get_policies_for_resource(resource)
        # Handle updating QOS Policies
        if resource_type == URL_TEMPLATES['ai']():
            self._update_qos(resource, policies)
        if result['storage_instances'][policies['default_storage_name']][
                'volumes'][policies['default_volume_name']][
                'op_state'] == 'available':
            return
        self._wait_for_resource(_get_name(resource['id']),
                                resource_type,
                                policies)
def create_volume(self, volume):
    """Create a logical volume."""
    # Generate App Instance, Storage Instance and Volume
    # Volume ID will be used as the App Instance Name
    # Storage Instance and Volume names come from volume-type policies
    policies = self._get_policies_for_resource(volume)

    num_replicas = int(policies['replica_count'])
    storage_name = policies['default_storage_name']
    volume_name = policies['default_volume_name']

    app_params = (
        {
            'create_mode': "openstack",
            'uuid': str(volume['id']),
            'name': _get_name(volume['id']),
            'access_control_mode': 'deny_all',
            'storage_instances': {
                storage_name: {
                    'name': storage_name,
                    'volumes': {
                        volume_name: {
                            'name': volume_name,
                            'size': volume['size'],
                            'replica_count': num_replicas,
                            'snapshot_policies': {
                            }
                        }
                    }
                }
            }
        })
    self._create_resource(volume, URL_TEMPLATES['ai'](), body=app_params)
def extend_volume(self, volume, new_size):
    """Resize a volume; the backend requires it offline during resize."""
    # Offline App Instance, if necessary
    reonline = False
    app_inst = self._issue_api_request(
        URL_TEMPLATES['ai_inst']().format(_get_name(volume['id'])))
    if app_inst['admin_state'] == 'online':
        reonline = True
        self.detach_volume(None, volume, delete_initiator=False)
    # Change Volume Size
    app_inst = _get_name(volume['id'])
    data = {
        'size': new_size
    }
    policies = self._get_policies_for_resource(volume)
    self._issue_api_request(
        URL_TEMPLATES['vol_inst'](
            policies['default_storage_name'],
            policies['default_volume_name']).format(app_inst),
        method='put',
        body=data)
    # Online Volume, if it was online before
    if reonline:
        self.create_export(None, volume, None)
def create_cloned_volume(self, volume, src_vref):
    """Clone *src_vref* into a new volume, extending it if requested larger."""
    policies = self._get_policies_for_resource(volume)
    src = "/" + URL_TEMPLATES['vol_inst'](
        policies['default_storage_name'],
        policies['default_volume_name']).format(_get_name(src_vref['id']))
    data = {
        'create_mode': 'openstack',
        'name': _get_name(volume['id']),
        'uuid': str(volume['id']),
        'clone_src': src,
        # 'access_control_mode': 'allow_all'
    }
    self._issue_api_request(URL_TEMPLATES['ai'](), 'post', body=data)

    if volume['size'] > src_vref['size']:
        self.extend_volume(volume, volume['size'])
def delete_volume(self, volume):
    """Detach then delete the backend app_instance for *volume*.

    A missing app_instance is logged and ignored so deletes stay
    idempotent.
    """
    self.detach_volume(None, volume)
    app_inst = _get_name(volume['id'])
    try:
        self._issue_api_request(URL_TEMPLATES['ai_inst']().format(
            app_inst),
            method='delete')
    except exception.NotFound:
        msg = _LI("Tried to delete volume %s, but it was not found in the "
                  "Datera cluster. Continuing with delete.")
        LOG.info(msg, _get_name(volume['id']))
def ensure_export(self, context, volume, connector):
"""Gets the associated account, retrieves CHAP info and updates."""
@@ -306,7 +310,7 @@ class DateraDriver(san.SanISCSIDriver):
def initialize_connection(self, volume, connector):
# Now online the app_instance (which will online all storage_instances)
multipath = connector.get('multipath', False)
url = URL_TEMPLATES['ai_inst'].format(volume['id'])
url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id']))
data = {
'admin_state': 'online'
}
@@ -349,20 +353,23 @@ class DateraDriver(san.SanISCSIDriver):
def create_export(self, context, volume, connector):
# Online volume in case it hasn't been already
url = URL_TEMPLATES['ai_inst'].format(volume['id'])
url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id']))
data = {
'admin_state': 'online'
}
self._issue_api_request(url, method='put', body=data)
# Check if we've already setup everything for this volume
url = (URL_TEMPLATES['si'].format(volume['id']))
url = (URL_TEMPLATES['si']().format(_get_name(volume['id'])))
storage_instances = self._issue_api_request(url)
# Handle adding initiator to product if necessary
# Then add initiator to ACL
if connector and connector.get('initiator') and not self.allow_all:
policies = self._get_policies_for_resource(volume)
if (connector and
connector.get('initiator') and
not policies['acl_allow_all']):
initiator_name = "OpenStack_{}_{}".format(
self.driver_prefix, str(uuid.uuid4())[:4])
initiator_group = 'IG-' + volume['id']
initiator_group = INITIATOR_GROUP_PREFIX + volume['id']
found = False
initiator = connector['initiator']
current_initiators = self._issue_api_request('initiators')
@@ -390,24 +397,31 @@ class DateraDriver(san.SanISCSIDriver):
conflict_ok=True)
# Create ACL with initiator group as reference for each
# storage_instance in app_instance
# TODO(_alastor_) We need to avoid changing the ACLs if the
# TODO(_alastor_): We need to avoid changing the ACLs if the
# template already specifies an ACL policy.
for si_name in storage_instances.keys():
acl_url = (URL_TEMPLATES['si'] + "/{}/acl_policy").format(
volume['id'], si_name)
acl_url = (URL_TEMPLATES['si']() + "/{}/acl_policy").format(
_get_name(volume['id']), si_name)
data = {'initiator_groups': [initiator_group_path]}
self._issue_api_request(acl_url,
method="put",
body=data)
if connector and connector.get('ip'):
# Determine IP Pool from IP and update storage_instance
try:
initiator_ip_pool_path = self._get_ip_pool_for_string_ip(
connector['ip'])
# Case where volume_type has non default IP Pool info
if policies['ip_pool'] != 'default':
initiator_ip_pool_path = self._issue_api_request(
"access_network_ip_pools/{}".format(
policies['ip_pool']))['path']
# Fallback to trying reasonable IP based guess
else:
initiator_ip_pool_path = self._get_ip_pool_for_string_ip(
connector['ip'])
ip_pool_url = URL_TEMPLATES['si_inst'].format(
volume['id'])
ip_pool_url = URL_TEMPLATES['si_inst'](
policies['default_storage_name']).format(
_get_name(volume['id']))
ip_pool_data = {'ip_pool': initiator_ip_pool_path}
self._issue_api_request(ip_pool_url,
method="put",
@@ -415,13 +429,12 @@ class DateraDriver(san.SanISCSIDriver):
except exception.DateraAPIException:
# Datera product 1.0 support
pass
# Some versions of Datera software require more time to make the
# ISCSI lun available, but don't report that it's unavailable. We
# can remove this when we deprecate those versions
time.sleep(DATERA_SI_SLEEP)
# Check to ensure we're ready for go-time
self._si_poll(volume, policies)
def detach_volume(self, context, volume, attachment=None):
url = URL_TEMPLATES['ai_inst'].format(volume['id'])
url = URL_TEMPLATES['ai_inst']().format(_get_name(volume['id']))
data = {
'admin_state': 'offline',
'force': True
@@ -432,7 +445,7 @@ class DateraDriver(san.SanISCSIDriver):
msg = _LI("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, volume['id'])
# TODO(_alastor_) Make acl cleaning multi-attach aware
# TODO(_alastor_): Make acl cleaning multi-attach aware
self._clean_acl(volume)
def _check_for_acl(self, initiator_path):
@@ -449,8 +462,10 @@ class DateraDriver(san.SanISCSIDriver):
return False
def _clean_acl(self, volume):
acl_url = (URL_TEMPLATES["si_inst"] + "/acl_policy").format(
volume['id'])
policies = self._get_policies_for_resource(volume)
acl_url = (URL_TEMPLATES["si_inst"](
policies['default_storage_name']) + "/acl_policy").format(
_get_name(volume['id']))
try:
initiator_group = self._issue_api_request(acl_url)[
'initiator_groups'][0]
@@ -470,8 +485,11 @@ class DateraDriver(san.SanISCSIDriver):
volume)
def create_snapshot(self, snapshot):
url_template = URL_TEMPLATES['vol_inst'] + '/snapshots'
url = url_template.format(snapshot['volume_id'])
policies = self._get_policies_for_resource(snapshot)
url_template = URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/snapshots'
url = url_template.format(_get_name(snapshot['volume_id']))
snap_params = {
'uuid': snapshot['id'],
@@ -479,8 +497,11 @@ class DateraDriver(san.SanISCSIDriver):
self._issue_api_request(url, method='post', body=snap_params)
def delete_snapshot(self, snapshot):
snap_temp = URL_TEMPLATES['vol_inst'] + '/snapshots'
snapu = snap_temp.format(snapshot['volume_id'])
policies = self._get_policies_for_resource(snapshot)
snap_temp = URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/snapshots'
snapu = snap_temp.format(_get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(snapu, method='get')
try:
@@ -495,11 +516,14 @@ class DateraDriver(san.SanISCSIDriver):
except exception.NotFound:
msg = _LI("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, snapshot['id'])
LOG.info(msg, _get_name(snapshot['id']))
def create_volume_from_snapshot(self, volume, snapshot):
    """Create a new volume cloned from an existing snapshot.

    :raises NotFound: when no backend snapshot matches snapshot['id']
    """
    policies = self._get_policies_for_resource(snapshot)
    snap_temp = URL_TEMPLATES['vol_inst'](
        policies['default_storage_name'],
        policies['default_volume_name']) + '/snapshots'
    snapu = snap_temp.format(_get_name(snapshot['volume_id']))
    snapshots = self._issue_api_request(snapu, method='get')

    # Backend keys snapshots by timestamp; find the one whose uuid
    # matches the Cinder snapshot id
    for ts, snap in snapshots.items():
        if snap['uuid'] == snapshot['id']:
            found_ts = ts
            break
    else:
        raise exception.NotFound

    src = "/" + (snap_temp + '/{}').format(
        _get_name(snapshot['volume_id']), found_ts)
    app_params = (
        {
            'create_mode': 'openstack',
            'uuid': str(volume['id']),
            'name': _get_name(volume['id']),
            'clone_src': src,
        })
    self._issue_api_request(
        URL_TEMPLATES['ai'](),
        method='post',
        body=app_params)
def manage_existing(self, volume, existing_ref):
    """Manage an existing volume on the Datera backend

    The existing_ref must be either the current name or Datera UUID of
    an app_instance on the Datera backend in a colon separated list with
    the storage instance name and volume name. This means only
    single storage instances and single volumes are supported for
    managing by cinder.

    Eg.

    existing_ref['source-name'] == app_inst_name:storage_inst_name:vol_name

    :param volume:       Cinder volume to manage
    :param existing_ref: Driver-specific information used to identify a
                         volume
    :raises ManageExistingInvalidReference: malformed source-name
    """
    existing_ref = existing_ref['source-name']
    if existing_ref.count(":") != 2:
        raise exception.ManageExistingInvalidReference(
            _("existing_ref argument must be of this format: "
              "app_inst_name:storage_inst_name:vol_name"))
    app_inst_name = existing_ref.split(":")[0]
    # NOTE: Logger.debug takes %-interpolation args as a mapping, not as
    # arbitrary keyword arguments (which raise TypeError).
    LOG.debug("Managing existing Datera volume %(volume)s. "
              "Changing name to %(existing)s",
              {'existing': existing_ref,
               'volume': _get_name(volume['id'])})
    data = {'name': _get_name(volume['id'])}
    self._issue_api_request(URL_TEMPLATES['ai_inst']().format(
        app_inst_name), method='put', body=data)
def manage_existing_get_size(self, volume, existing_ref):
    """Get the size of an unmanaged volume on the Datera backend

    The existing_ref must be either the current name or Datera UUID of
    an app_instance on the Datera backend in a colon separated list with
    the storage instance name and volume name. This means only
    single storage instances and single volumes are supported for
    managing by cinder.

    Eg.

    existing_ref == app_inst_name:storage_inst_name:vol_name

    :param volume:       Cinder volume to manage
    :param existing_ref: Driver-specific information used to identify a
                         volume on the Datera backend
    :raises ManageExistingInvalidReference: malformed source-name
    """
    existing_ref = existing_ref['source-name']
    if existing_ref.count(":") != 2:
        raise exception.ManageExistingInvalidReference(
            _("existing_ref argument must be of this format: "
              "app_inst_name:storage_inst_name:vol_name"))
    app_inst_name, si_name, vol_name = existing_ref.split(":")
    app_inst = self._issue_api_request(
        URL_TEMPLATES['ai_inst']().format(app_inst_name))
    return self._get_size(volume, app_inst, si_name, vol_name)
def _get_size(self, volume, app_inst=None, si_name=None, vol_name=None):
    """Helper for reading a backend volume's size.

    When *app_inst* is already fetched, parse it directly instead of
    issuing a separate http request.  Storage/volume names fall back to
    the policy defaults when not supplied.
    """
    policies = self._get_policies_for_resource(volume)
    si_name = si_name or policies['default_storage_name']
    vol_name = vol_name or policies['default_volume_name']
    if not app_inst:
        app_inst = self._issue_api_request(
            URL_TEMPLATES['ai_inst']().format(_get_name(volume['id'])))
    return app_inst['storage_instances'][si_name]['volumes'][
        vol_name]['size']
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                           sort_keys, sort_dirs):
    """List volumes on the backend available for management by Cinder.

    Returns a list of dictionaries, each specifying a volume in the host,
    with the following keys:
    - reference (dictionary): The reference for a volume, which can be
    passed to "manage_existing".
    - size (int): The size of the volume according to the storage
    backend, rounded up to the nearest GB.
    - safe_to_manage (boolean): Whether or not this volume is safe to
    manage according to the storage backend. For example, is the volume
    in use or invalid for any reason.
    - reason_not_safe (string): If safe_to_manage is False, the reason why.
    - cinder_id (string): If already managed, provide the Cinder ID.
    - extra_info (string): Any extra information to return to the user

    :param cinder_volumes: A list of volumes in this host that Cinder
                           currently manages, used to determine if
                           a volume is manageable or not.
    :param marker:    The last item of the previous page; we return the
                      next results after this value (after sorting)
    :param limit:     Maximum number of items to return
    :param offset:    Number of items to skip after marker
    :param sort_keys: List of keys to sort results by (valid keys are
                      'identifier' and 'size')
    :param sort_dirs: List of directions to sort by, corresponding to
                      sort_keys (valid directions are 'asc' and 'desc')
    """
    LOG.debug("Listing manageable Datera volumes")
    app_instances = self._issue_api_request(URL_TEMPLATES['ai']()).values()

    results = []

    cinder_volume_ids = [vol['id'] for vol in cinder_volumes]

    for ai in app_instances:
        ai_name = ai['name']
        reference = None
        size = None
        safe_to_manage = False
        reason_not_safe = None
        cinder_id = None
        extra_info = None
        # NOTE: str.lstrip() strips a character *set*, not a prefix, so
        # it would mangle unprefixed names starting with 'O', 'S' or '-';
        # strip the OS- prefix explicitly instead.
        if ai_name.startswith(OS_PREFIX):
            trimmed_name = ai_name[len(OS_PREFIX):]
        else:
            trimmed_name = ai_name
        if re.match(UUID4_RE, ai_name):
            cinder_id = trimmed_name
        if (not cinder_id and
                trimmed_name not in cinder_volume_ids):
            safe_to_manage = self._is_manageable(ai)
        if safe_to_manage:
            si = list(ai['storage_instances'].values())[0]
            si_name = si['name']
            vol = list(si['volumes'].values())[0]
            vol_name = vol['name']
            size = vol['size']
            reference = {"source-name": "{}:{}:{}".format(
                ai_name, si_name, vol_name)}

        results.append({
            'reference': reference,
            'size': size,
            'safe_to_manage': safe_to_manage,
            'reason_not_safe': reason_not_safe,
            'cinder_id': cinder_id,
            'extra_info': extra_info})

    page_results = volutils.paginate_entries_list(
        results, marker, limit, offset, sort_keys, sort_dirs)

    return page_results
def _is_manageable(self, app_inst):
if len(app_inst['storage_instances']) == 1:
si = list(app_inst['storage_instances'].values())[0]
if len(si['volumes']) == 1:
return True
return False
def unmanage(self, volume):
    """Unmanage a currently managed volume in Cinder

    Renames the backend app_instance from its OS- name to the
    UNMANAGED- name so Cinder no longer owns it.

    :param volume: Cinder volume to unmanage
    """
    new_name = _get_unmanaged(volume['id'])
    LOG.debug("Unmanaging Cinder volume %s. Changing name to %s",
              volume['id'], new_name)
    self._issue_api_request(
        URL_TEMPLATES['ai_inst']().format(_get_name(volume['id'])),
        method='put',
        body={'name': new_name})
def get_volume_stats(self, refresh=False):
"""Get volume stats.
@@ -558,30 +746,258 @@ class DateraDriver(san.SanISCSIDriver):
self.cluster_stats = stats
def _get_policies_by_volume_type(self, type_id):
def _login(self):
    """Use the san_login and san_password to set token."""
    body = {
        'name': self.username,
        'password': self.password
    }

    # Unset token now, otherwise potential expired token will be sent
    # along to be used for authorization when trying to login.
    self.datera_api_token = None

    try:
        LOG.debug('Getting Datera auth token.')
        results = self._issue_api_request('login', 'put', body=body,
                                          sensitive=True)
        self.datera_api_token = results['key']
    except exception.NotAuthorized:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Logging into the Datera cluster failed. Please '
                          'check your username and password set in the '
                          'cinder.conf and start the cinder-volume '
                          'service again.'))
def _get_lunid(self):
return 0
def _init_vendor_properties(self):
    """Create a dictionary of vendor unique properties.

    This method creates a dictionary of vendor unique properties
    and returns both created dictionary and vendor name.
    Returned vendor name is used to check for name of vendor
    unique properties.

    - Vendor name shouldn't include colon(:) because of the separator
      and it is automatically replaced by underscore(_).
      ex. abc:d -> abc_d
    - Vendor prefix is equal to vendor name.
      ex. abcd
    - Vendor unique properties must start with vendor prefix + ':'.
      ex. abcd:maxIOPS

    Each backend driver needs to override this method to expose
    its own properties using _set_property() like this:

    self._set_property(
        properties,
        "vendorPrefix:specific_property",
        "Title of property",
        _("Description of property"),
        "type")

    : return dictionary of vendor unique properties
    : return vendor name

    prefix: DF --> Datera Fabric
    """
    properties = {}

    # Replica count default drops to 1 only under the debug/test override
    if self.configuration.get('datera_debug_replica_count_override'):
        replica_count = 1
    else:
        replica_count = 3

    # (key, title, description, type, extra kwargs) registered in this
    # exact order so the resulting properties dict keeps its layout.
    prop_specs = [
        ("DF:replica_count",
         "Datera Volume Replica Count",
         _("Specifies number of replicas for each volume. Can only be "
           "increased once volume is created"),
         "integer",
         {'minimum': 1, 'default': replica_count}),

        ("DF:acl_allow_all",
         "Datera ACL Allow All",
         _("True to set acl 'allow_all' on volumes created. Cannot be "
           "changed on volume once set"),
         "boolean",
         {'default': False}),

        ("DF:ip_pool",
         "Datera IP Pool",
         _("Specifies IP pool to use for volume"),
         "string",
         {'default': "default"}),

        # ###### QoS Settings ###### #
        ("DF:read_bandwidth_max",
         "Datera QoS Max Bandwidth Read",
         _("Max read bandwidth setting for volume qos, "
           "use 0 for unlimited"),
         "integer",
         {'minimum': 0, 'default': 0}),

        ("DF:default_storage_name",
         "Datera Default Storage Instance Name",
         _("The name to use for storage instances created"),
         "string",
         {'default': "storage-1"}),

        ("DF:default_volume_name",
         "Datera Default Volume Name",
         _("The name to use for volumes created"),
         "string",
         {'default': "volume-1"}),

        ("DF:write_bandwidth_max",
         "Datera QoS Max Bandwidth Write",
         _("Max write bandwidth setting for volume qos, "
           "use 0 for unlimited"),
         "integer",
         {'minimum': 0, 'default': 0}),

        ("DF:total_bandwidth_max",
         "Datera QoS Max Bandwidth Total",
         _("Max total bandwidth setting for volume qos, "
           "use 0 for unlimited"),
         "integer",
         {'minimum': 0, 'default': 0}),

        ("DF:read_iops_max",
         "Datera QoS Max iops Read",
         _("Max read iops setting for volume qos, "
           "use 0 for unlimited"),
         "integer",
         {'minimum': 0, 'default': 0}),

        ("DF:write_iops_max",
         "Datera QoS Max IOPS Write",
         _("Max write iops setting for volume qos, "
           "use 0 for unlimited"),
         "integer",
         {'minimum': 0, 'default': 0}),

        ("DF:total_iops_max",
         "Datera QoS Max IOPS Total",
         _("Max total iops setting for volume qos, "
           "use 0 for unlimited"),
         "integer",
         {'minimum': 0, 'default': 0}),
        # ###### End QoS Settings ###### #
    ]

    for key, title, description, prop_type, extra in prop_specs:
        self._set_property(properties, key, title, description,
                           prop_type, **extra)

    return properties, 'DF'
def _get_policies_for_resource(self, resource):
    """Get extra_specs and qos_specs of a volume_type.

    This fetches the scoped keys from the volume type. Anything set from
    qos_specs will override key/values set from extra_specs.
    """
    type_id = resource.get('volume_type_id', None)
    # Handle case of volume with no type.  We still want the
    # specified defaults from the vendor properties
    if type_id:
        ctxt = context.get_admin_context()
        volume_type = volume_types.get_volume_type(ctxt, type_id)
        specs = volume_type.get('extra_specs')
    else:
        volume_type = None
        specs = {}

    # Set defaults.  NOTE: don't use str.lstrip('DF:') here -- lstrip
    # strips a character *set* ('D', 'F', ':'), not a prefix, which can
    # mangle keys; split off the scope explicitly instead.
    policies = {k.split(':', 1)[-1]: str(v['default']) for (k, v)
                in self._init_vendor_properties()[0].items()}

    if volume_type:
        # Populate updated value
        for key, value in specs.items():
            if ':' in key:
                fields = key.split(':')
                key = fields[1]
            policies[key] = value

        qos_specs_id = volume_type.get('qos_specs_id')
        if qos_specs_id is not None:
            qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
            if qos_kvs:
                policies.update(qos_kvs)

    # Cast everything except booleans int that can be cast
    for k, v in policies.items():
        # Handle String Boolean case
        if v == 'True' or v == 'False':
            policies[k] = policies[k] == 'True'
            continue
        # Int cast
        try:
            policies[k] = int(v)
        except ValueError:
            pass
    return policies
def _si_poll(self, volume, policies):
    """Poll the storage instance until op_state is 'available'.

    :raises VolumeDriverException: if the SI is still not available
                                   after TIMEOUT attempts
    """
    # Initial sleep required for some Datera versions
    eventlet.sleep(DEFAULT_SI_SLEEP)

    TIMEOUT = 10
    check_url = URL_TEMPLATES['si_inst'](
        policies['default_storage_name']).format(_get_name(volume['id']))

    # for/else: only raise when *every* attempt failed.  The previous
    # while-loop raised even when the SI became available on the final
    # attempt (retry == TIMEOUT).
    for _attempt in range(TIMEOUT):
        si = self._issue_api_request(check_url)
        if si['op_state'] == 'available':
            break
        eventlet.sleep(1)
    else:
        raise exception.VolumeDriverException(
            message=_('Resource not ready.'))
def _update_qos(self, resource, policies):
    """Push QoS performance_policy values for *resource* to the backend.

    Only policy keys ending in "max" are QoS knobs; a value of 0 means
    unlimited and is omitted from the request entirely.
    """
    url = URL_TEMPLATES['vol_inst'](
        policies['default_storage_name'],
        policies['default_volume_name']) + '/performance_policy'
    url = url.format(_get_name(resource['id']))
    type_id = resource.get('volume_type_id', None)
    if type_id is not None:
        # Filter for just QOS policies in result. All of their keys
        # should end with "max"; drop zero values rather than sending them.
        fpolicies = {k: int(v)
                     for k, v in policies.items()
                     if k.endswith("max") and int(v) > 0}
        if fpolicies:
            self._issue_api_request(url, 'post', body=fpolicies)
def _get_ip_pool_for_string_ip(self, ip):
"""Takes a string ipaddress and return the ip_pool API object dict """
pool = 'default'
@@ -648,7 +1064,7 @@ class DateraDriver(san.SanISCSIDriver):
current_retry = 0
while current_retry <= self.retry_attempts:
LOG.debug("Datera 503 response, trying request again")
time.sleep(self.interval)
eventlet.sleep(self.interval)
resp = self._request(connection_string,
method,
payload,
@@ -662,16 +1078,13 @@ class DateraDriver(san.SanISCSIDriver):
self._raise_response(response)
@_authenticated
def _issue_api_request(self, resource_type, method='get', resource=None,
body=None, action=None, sensitive=False,
conflict_ok=False):
def _issue_api_request(self, resource_url, method='get', body=None,
sensitive=False, conflict_ok=False):
"""All API requests to Datera cluster go through this method.
:param resource_type: the type of the resource
:param resource_url: the url of the resource
:param method: the request verb
:param resource: the identifier of the resource
:param body: a dict with options for the action_type
:param action: the action to perform
:returns: a dict of the response from the Datera cluster
"""
host = self.configuration.san_ip
@@ -682,7 +1095,8 @@ class DateraDriver(san.SanISCSIDriver):
payload = json.dumps(body, ensure_ascii=False)
payload.encode('utf-8')
header = {'Content-Type': 'application/json; charset=utf-8'}
header = {'Content-Type': 'application/json; charset=utf-8',
'Datera-Driver': 'OpenStack-Cinder-{}'.format(self.VERSION)}
protocol = 'http'
if self.configuration.driver_use_ssl:
@@ -700,12 +1114,7 @@ class DateraDriver(san.SanISCSIDriver):
cert_data = (client_cert, client_cert_key)
connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,
api_version, resource_type)
if resource is not None:
connection_string += '/%s' % resource
if action is not None:
connection_string += '/%s' % action
api_version, resource_url)
response = self._request(connection_string,
method,