Merge "Updating Datera DataFabric Driver to v2 of Datera DataFabric API"

This commit is contained in:
Jenkins 2016-03-02 01:55:04 +00:00 committed by Gerrit Code Review
commit 1a8361ed32
3 changed files with 480 additions and 414 deletions

View File

@@ -23,7 +23,12 @@ from cinder.volume.drivers import datera
from cinder.volume import volume_types
DEFAULT_STORAGE_NAME = datera.DEFAULT_STORAGE_NAME
DEFAULT_VOLUME_NAME = datera.DEFAULT_VOLUME_NAME
class DateraVolumeTestCase(test.TestCase):
def setUp(self):
super(DateraVolumeTestCase, self).setUp()
@@ -51,17 +56,7 @@ class DateraVolumeTestCase(test.TestCase):
self.addCleanup(self.api_patcher.stop)
def test_volume_create_success(self):
self.mock_api.return_value = {
u'status': u'available',
u'name': u'volume-00000001',
u'parent': u'00000000-0000-0000-0000-000000000000',
u'uuid': u'c20aba21-6ef6-446b-b374-45733b4883ba',
u'snapshots': {},
u'targets': {},
u'num_replicas': u'2',
u'sub_type': u'IS_ORIGINAL',
u'size': u'1073741824'
}
self.mock_api.return_value = stub_single_ai
self.assertIsNone(self.driver.create_volume(self.volume))
def test_volume_create_fails(self):
@@ -74,31 +69,14 @@ class DateraVolumeTestCase(test.TestCase):
def _progress_api_return(mock_api):
if mock_api.retry_count == 1:
return {
u'status': u'unavailable',
u'name': u'test',
u'parent': u'00000000-0000-0000-0000-000000000000',
u'uuid': u'9c1666fe-4f1a-4891-b33d-e710549527fe',
u'snapshots': {},
u'targets': {},
u'num_replicas': u'2',
u'sub_type': u'IS_ORIGINAL',
u'size': u'1073741824'
}
_bad_vol_ai = stub_single_ai.copy()
_bad_vol_ai['storage_instances'][
DEFAULT_STORAGE_NAME]['volumes'][DEFAULT_VOLUME_NAME][
'op_state'] = 'unavailable'
return _bad_vol_ai
else:
self.mock_api.retry_count += 1
return {
u'status': u'available',
u'name': u'test',
u'parent': u'00000000-0000-0000-0000-000000000000',
u'uuid': u'9c1666fe-4f1a-4891-b33d-e710549527fe',
u'snapshots': {},
u'targets': {},
u'num_replicas': u'2',
u'sub_type': u'IS_ORIGINAL',
u'size': u'1073741824'
}
return stub_single_ai
self.mock_api.retry_count = 0
self.mock_api.return_value = _progress_api_return(self.mock_api)
self.assertEqual(1, self.mock_api.retry_count)
@@ -106,18 +84,7 @@ class DateraVolumeTestCase(test.TestCase):
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_with_extra_specs(self, mock_get_type):
self.mock_api.return_value = {
u'status': u'available',
u'name': u'volume-00000001',
u'parent': u'00000000-0000-0000-0000-000000000000',
u'uuid': u'c20aba21-6ef6-446b-b374-45733b4883ba',
u'snapshots': {},
u'targets': {},
u'num_replicas': u'2',
u'sub_type': u'IS_ORIGINAL',
u'size': u'1073741824'
}
self.mock_api.return_value = stub_single_ai
mock_get_type.return_value = {
'name': u'The Best',
'qos_specs_id': None,
@@ -140,33 +107,10 @@ class DateraVolumeTestCase(test.TestCase):
volume_type_id='dffb4a83-b8fb-4c19-9f8c-713bb75db3b1'
)
assert_body = {
u'max_iops_read': u'2000',
'numReplicas': '2',
'uuid': u'c20aba21-6ef6-446b-b374-45733b4883ba',
'size': '1073741824',
u'max_iops_write': u'4000',
u'max_iops_total': u'4000',
'name': u'volume-00000001'
}
self.assertIsNone(self.driver.create_volume(mock_volume))
self.mock_api.assert_called_once_with('volumes', 'post',
body=assert_body)
self.assertTrue(mock_get_type.called)
def test_create_cloned_volume_success(self):
self.mock_api.return_value = {
'status': 'available',
'uuid': 'c20aba21-6ef6-446b-b374-45733b4883ba',
'size': '1073741824',
'name': 'volume-00000001',
'parent': '7f91abfa-7964-41ed-88fc-207c3a290b4f',
'snapshots': {},
'targets': {},
'numReplicas': '2',
'subType': 'IS_CLONE'
}
source_volume = _stub_volume(
id='7f91abfa-7964-41ed-88fc-207c3a290b4f',
display_name='foo'
@@ -185,15 +129,6 @@ class DateraVolumeTestCase(test.TestCase):
source_volume)
def test_delete_volume_success(self):
self.mock_api.return_value = {
'uuid': 'c20aba21-6ef6-446b-b374-45733b4883ba',
'size': '1073741824',
'name': 'volume-00000001',
'parent': '00000000-0000-0000-0000-000000000000',
'numReplicas': '2',
'subType': 'IS_ORIGINAL',
'target': None
}
self.assertIsNone(self.driver.delete_volume(self.volume))
def test_delete_volume_not_found(self):
@@ -209,36 +144,38 @@ class DateraVolumeTestCase(test.TestCase):
self.mock_api.side_effect = self._generate_fake_api_request()
ctxt = context.get_admin_context()
expected = {
'provider_location': '172.28.121.10:3260 iqn.2013-05.com.daterain'
'c::01:sn:fc372bc0490b2dbe 0'
}
'provider_location': '172.28.94.11:3260 iqn.2013-05.com.daterainc'
':c20aba21-6ef6-446b-b374-45733b4883ba--ST'
'--storage-1:01:sn:34e5b20fbadd3abb 0'}
self.assertEqual(expected, self.driver.ensure_export(ctxt,
self.volume))
self.volume,
None))
def test_ensure_export_fails(self):
self.mock_api.side_effect = exception.DateraAPIException
ctxt = context.get_admin_context()
self.assertRaises(exception.DateraAPIException,
self.driver.ensure_export, ctxt, self.volume)
self.driver.ensure_export, ctxt, self.volume, None)
def test_create_export_target_does_not_exist_success(self):
self.mock_api.side_effect = self._generate_fake_api_request(
targets_exist=False)
ctxt = context.get_admin_context()
expected = {
'provider_location': '172.28.121.10:3260 iqn.2013-05.com.daterainc'
'::01:sn:fc372bc0490b2dbe 0'
}
'provider_location': '172.28.94.11:3260 iqn.2013-05.com.daterainc'
':c20aba21-6ef6-446b-b374-45733b4883ba--ST'
'--storage-1:01:sn:34e5b20fbadd3abb 0'}
self.assertEqual(expected, self.driver.create_export(ctxt,
self.volume,
{}))
None))
def test_create_export_fails(self):
self.mock_api.side_effect = exception.DateraAPIException
ctxt = context.get_admin_context()
self.assertRaises(exception.DateraAPIException,
self.driver.create_export, ctxt, self.volume, {})
self.driver.create_export, ctxt, self.volume, None)
def test_detach_volume_success(self):
self.mock_api.return_value = {}
@@ -260,17 +197,6 @@ class DateraVolumeTestCase(test.TestCase):
self.assertIsNone(self.driver.detach_volume(ctxt, volume))
def test_create_snapshot_success(self):
self.mock_api.return_value = {
u'status': u'available',
u'uuid': u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c',
u'parent': u'c20aba21-6ef6-446b-b374-45733b4883ba',
u'subType': u'IS_SNAPSHOT',
u'snapshots': {},
u'targets': {},
u'numReplicas': 2,
u'size': u'1073741824',
u'name': u'snapshot-00000001'
}
snapshot = _stub_snapshot(volume_id=self.volume['id'])
self.assertIsNone(self.driver.create_snapshot(snapshot))
@@ -281,19 +207,11 @@ class DateraVolumeTestCase(test.TestCase):
self.driver.create_snapshot, snapshot)
def test_delete_snapshot_success(self):
self.mock_api.return_value = {
u'uuid': u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c',
u'parent': u'c20aba21-6ef6-446b-b374-45733b4883ba',
u'subType': u'IS_SNAPSHOT',
u'numReplicas': 2,
u'size': u'1073741824',
u'name': u'snapshot-00000001'
}
snapshot = _stub_snapshot(volume_id=self.volume['id'])
self.assertIsNone(self.driver.delete_snapshot(snapshot))
def test_delete_snapshot_not_found(self):
self.mock_api.side_effect = exception.NotFound
self.mock_api.side_effect = [stub_return_snapshots, exception.NotFound]
snapshot = _stub_snapshot(self.volume['id'])
self.assertIsNone(self.driver.delete_snapshot(snapshot))
@@ -304,18 +222,8 @@ class DateraVolumeTestCase(test.TestCase):
self.driver.delete_snapshot, snapshot)
def test_create_volume_from_snapshot_success(self):
self.mock_api.return_value = {
u'status': u'available',
u'uuid': u'c20aba21-6ef6-446b-b374-45733b4883ba',
u'parent': u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c',
u'snapshots': {},
u'targets': {},
u'subType': u'IS_ORIGINAL',
u'numReplicas': 2,
u'size': u'1073741824',
u'name': u'volume-00000001'
}
snapshot = _stub_snapshot(volume_id=self.volume['id'])
self.mock_api.side_effect = [stub_return_snapshots, None]
self.assertIsNone(
self.driver.create_volume_from_snapshot(self.volume, snapshot))
@@ -327,14 +235,6 @@ class DateraVolumeTestCase(test.TestCase):
snapshot)
def test_extend_volume_success(self):
self.mock_api.return_value = {
u'uuid': u'c20aba21-6ef6-446b-b374-45733b4883ba',
u'parent': u'00000000-0000-0000-0000-000000000000',
u'subType': u'IS_ORIGINAL',
u'numReplicas': 2,
u'size': u'2147483648',
u'name': u'volume-00000001'
}
volume = _stub_volume(size=1)
self.assertIsNone(self.driver.extend_volume(volume, 2))
@@ -357,91 +257,167 @@ class DateraVolumeTestCase(test.TestCase):
self.assertEqual(1, self.mock_api.call_count)
def _generate_fake_api_request(self, targets_exist=True):
fake_volume = None
if not targets_exist:
fake_volume = _stub_datera_volume(targets={})
else:
fake_volume = _stub_datera_volume()
def _fake_api_request(resource_type, method='get', resource=None,
body=None, action=None, sensitive=False):
if resource_type == 'volumes' and action is None:
return fake_volume
elif resource_type == 'volume' and action == 'export':
return stub_create_export
elif resource_type == 'export_configs':
if resource_type.split('/')[-1] == 'storage-1':
return stub_get_export
elif resource_type == 'app_instances':
return stub_single_ai
elif (resource_type.split('/')[-1] ==
'c20aba21-6ef6-446b-b374-45733b4883ba'):
return stub_app_instance[
'c20aba21-6ef6-446b-b374-45733b4883ba']
return _fake_api_request
stub_create_export = {
u'_ipColl': [u'172.28.121.10', u'172.28.120.10'],
u'active_initiators': [],
u'activeServers': [u'4594953e-f97f-e111-ad85-001e6738c0f0'],
u'admin_state': u'online',
u'atype': u'none',
u'creation_type': u'system_explicit',
u'endpoint_addrs': [u'172.30.128.2'],
u'endpoint_idents': [u'iqn.2013-05.com.daterainc::01:sn:fc372bc0490b2dbe'],
u'initiators': [],
u'name': u'OS-a8b4d666',
u'server_allocation': u'TS_ALLOC_COMPLETED',
u'servers': [u'4594953e-f97f-e111-ad85-001e6738c0f0'],
u'targetIds': {
u'4594953e-f97f-e111-ad85-001e6738c0f0': {
u'ids': [{
u'dev': None,
u'id': u'iqn.2013-05.com.daterainc::01:sn:fc372bc0490b2dbe'
"_ipColl": ["172.28.121.10", "172.28.120.10"],
"acls": {},
"activeServers": {"4594953e-f97f-e111-ad85-001e6738c0f0": "1"},
"ctype": "TC_BLOCK_ISCSI",
"endpointsExt1": {
"4594953e-f97f-e111-ad85-001e6738c0f0": {
"ipHigh": 0,
"ipLow": "192421036",
"ipStr": "172.28.120.11",
"ipV": 4,
"name": "",
"network": 24
}
},
"endpointsExt2": {
"4594953e-f97f-e111-ad85-001e6738c0f0": {
"ipHigh": 0,
"ipLow": "192486572",
"ipStr": "172.28.121.11",
"ipV": 4,
"name": "",
"network": 24
}
},
"inodes": {"c20aba21-6ef6-446b-b374-45733b4883ba": "1"},
"name": "",
"networkPort": 0,
"serverAllocation": "TS_ALLOC_COMPLETED",
"servers": {"4594953e-f97f-e111-ad85-001e6738c0f0": "1"},
"targetAllocation": "TS_ALLOC_COMPLETED",
"targetIds": {
"4594953e-f97f-e111-ad85-001e6738c0f0": {
"ids": [{
"dev": None,
"id": "iqn.2013-05.com.daterainc::01:sn:fc372bc0490b2dbe"
}]
}
},
u'target_allocation': u'TS_ALLOC_COMPLETED',
u'type': u'iscsi',
u'uuid': u'7071efd7-9f22-4996-8f68-47e9ab19d0fd',
u'volumes': []
"typeName": "TargetIscsiConfig",
"uuid": "7071efd7-9f22-4996-8f68-47e9ab19d0fd"
}
stub_get_export = {
"uuid": "744e1bd8-d741-4919-86cd-806037d98c8a",
"active_initiators": [],
"active_servers": [
"472764aa-584b-4c1d-a7b7-e50cf7f5518f"
],
"endpoint_addrs": [
"172.28.121.10",
"172.28.120.10"
],
"endpoint_idents": [
"iqn.2013-05.com.daterainc::01:sn:fc372bc0490b2dbe"
],
"initiators": [],
"servers": [
"472764aa-584b-4c1d-a7b7-e50cf7f5518f"
],
"volumes": [
"10305aa4-1343-4363-86fe-f49eb421a48c"
],
"type": "iscsi",
"creation_type": "system_explicit",
"server_allocation": "TS_ALLOC_COMPLETED",
stub_app_instance = {
"c20aba21-6ef6-446b-b374-45733b4883ba": {
"admin_state": "online",
"target_allocation": "TS_ALLOC_COMPLETED",
"atype": "none",
"name": "OS-10305aa4",
"targetIds": {
"472764aa-584b-4c1d-a7b7-e50cf7f5518f": {
"ids": [{
"dev": "",
"id": ("iqn.2013-05.com.daterainc::01:sn:fc372bc0490b2dbe")
}]
"clone_src": {},
"create_mode": "openstack",
"descr": "",
"health": "ok",
"name": "c20aba21-6ef6-446b-b374-45733b4883ba",
"path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba",
"storage_instances": {
"storage-1": {
"access": {
"ips": [
"172.28.94.11"
],
"iqn": "iqn.2013-05.com.daterainc:c20aba21-6ef6-446b-"
"b374-45733b4883ba--ST--storage-1:01:sn:"
"34e5b20fbadd3abb",
"path": "/app_instances/c20aba21-6ef6-446b-b374"
"-45733b4883ba/storage_instances/storage-1/access"
},
"access_control": {
"initiator_groups": [],
"initiators": [],
"path": "/app_instances/c20aba21-6ef6-446b-b374-"
"45733b4883ba/storage_instances/storage-1"
"/access_control"
},
"access_control_mode": "allow_all",
"active_initiators": [],
"active_storage_nodes": [
"/storage_nodes/1c4feac4-17c7-478b-8928-c76e8ec80b72"
],
"admin_state": "online",
"auth": {
"initiator_pswd": "",
"initiator_user_name": "",
"path": "/app_instances/c20aba21-6ef6-446b-b374-"
"45733b4883ba/storage_instances/storage-1/auth",
"target_pswd": "",
"target_user_name": "",
"type": "none"
},
"creation_type": "user",
"descr": "c20aba21-6ef6-446b-b374-45733b4883ba__ST__storage-1",
"name": "storage-1",
"path": "/app_instances/c20aba21-6ef6-446b-b374-"
"45733b4883ba/storage_instances/storage-1",
"uuid": "b9897b84-149f-43c7-b19c-27d6af8fa815",
"volumes": {
"volume-1": {
"capacity_in_use": 0,
"name": "volume-1",
"op_state": "available",
"path": "/app_instances/c20aba21-6ef6-446b-b374-"
"45733b4883ba/storage_instances/storage-1"
"/volumes/volume-1",
"replica_count": 3,
"size": 500,
"snapshot_policies": {},
"snapshots": {
"1445384931.322468627": {
"op_state": "available",
"path": "/app_instances/c20aba21-6ef6-446b"
"-b374-45733b4883ba/storage_instances"
"/storage-1/volumes/volume-1/snapshots"
"/1445384931.322468627",
"uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c"
}
},
"uuid": "c20aba21-6ef6-446b-b374-45733b4883ba"
}
}
}
},
"uuid": "c20aba21-6ef6-446b-b374-45733b4883ba"
}
}
stub_get_export = stub_app_instance[
'c20aba21-6ef6-446b-b374-45733b4883ba']['storage_instances']['storage-1']
stub_single_ai = stub_app_instance['c20aba21-6ef6-446b-b374-45733b4883ba']
stub_return_snapshots = \
{
"1446076293.118600738": {
"op_state": "available",
"path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba"
"/storage_instances/storage-1/volumes/volume-1/snapshots/"
"1446076293.118600738",
"uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c"
},
"1446076384.00607846": {
"op_state": "available",
"path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba"
"/storage_instances/storage-1/volumes/volume-1/snapshots/"
"1446076384.00607846",
"uuid": "25b4b959-c30a-45f2-a90c-84a40f34f0a1"
}
}
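These timestamp-keyed stubs mirror the mapping the v2 driver iterates when resolving a snapshot by UUID; a small illustration (the UUID is the first stub's):

# Illustration only: how delete_snapshot()/create_volume_from_snapshot()
# later in this diff locate a snapshot inside such a mapping.
target_uuid = '0bb34f0c-fea4-48e0-bf96-591120ac7e3c'
found_ts = next(ts for ts, snap in stub_return_snapshots.items()
                if snap['uuid'] == target_uuid)
# found_ts == '1446076293.118600738'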
def _stub_datera_volume(*args, **kwargs):
return {
"status": "available",

View File

@@ -1,4 +1,4 @@
# Copyright 2015 Datera
# Copyright 2016 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -17,7 +17,6 @@ import json
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import excutils
from oslo_utils import units
import requests
@@ -25,7 +24,7 @@ import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _, _LE, _LI
from cinder import utils
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
@@ -34,28 +33,25 @@ from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
d_opts = [
cfg.StrOpt('datera_api_token',
help='DEPRECATED: This will be removed in the Liberty release. '
'Use san_login and san_password instead. This directly '
'sets the Datera API token.'),
cfg.StrOpt('datera_api_port',
default='7717',
help='Datera API port.'),
cfg.StrOpt('datera_api_version',
default='1',
default='2',
help='Datera API version.'),
cfg.StrOpt('datera_num_replicas',
default='3',
default='1',
help='Number of replicas to create of an inode.')
]
CONF = cfg.CONF
CONF.import_opt('driver_client_cert_key', 'cinder.volume.driver')
CONF.import_opt('driver_client_cert', 'cinder.volume.driver')
CONF.import_opt('driver_use_ssl', 'cinder.volume.driver')
CONF.register_opts(d_opts)
DEFAULT_STORAGE_NAME = 'storage-1'
DEFAULT_VOLUME_NAME = 'volume-1'
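These two constants pin the fixed v2 resource hierarchy: every per-volume endpoint below is assembled from them, with the Cinder volume ID doubling as the app_instance name. A minimal sketch (the UUID is the sample value from the tests):

# Sketch of the endpoint pattern used by the methods below.
volume_id = 'c20aba21-6ef6-446b-b374-45733b4883ba'
url = 'app_instances/{}/storage_instances/{}/volumes/{}'.format(
    volume_id, DEFAULT_STORAGE_NAME, DEFAULT_VOLUME_NAME)
# -> 'app_instances/<volume_id>/storage_instances/storage-1/volumes/volume-1'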
def _authenticated(func):
"""Ensure the driver is authenticated to make a request.
@@ -63,6 +59,7 @@ def _authenticated(func):
In do_setup() we fetch an auth token and store it. If that expires when
we do API request, we'll fetch a new one.
"""
def func_wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
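The hunk truncates the wrapper here; based on the docstring above, a plausible completion (an assumption, not part of this diff):

        except exception.NotAuthorized:
            # Assumed from the docstring: the token expired, so fetch a
            # new one and retry the request once.
            self._login()
            return func(self, *args, **kwargs)
    return func_wrapper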
@@ -80,13 +77,15 @@ def _authenticated(func):
class DateraDriver(san.SanISCSIDriver):
"""The OpenStack Datera Driver
Version history:
1.0 - Initial driver
1.1 - Look for lun-0 instead of lun-1.
2.0 - Update For Datera API v2
"""
VERSION = '1.1'
VERSION = '2.0'
def __init__(self, *args, **kwargs):
super(DateraDriver, self).__init__(*args, **kwargs)
@@ -96,202 +95,7 @@ class DateraDriver(san.SanISCSIDriver):
self.password = self.configuration.san_password
self.auth_token = None
self.cluster_stats = {}
def do_setup(self, context):
# If any of the deprecated options are set, we'll warn the operator to
# use the new authentication method.
DEPRECATED_OPTS = [
self.configuration.driver_client_cert_key,
self.configuration.driver_client_cert,
self.configuration.datera_api_token
]
if any(DEPRECATED_OPTS):
msg = _LW("Client cert verification and datera_api_token are "
"deprecated in the Datera driver, and will be removed "
"in the Liberty release. Please set the san_login and "
"san_password in your cinder.conf instead.")
versionutils.report_deprecated_feature(LOG, msg)
return
# If we can't authenticate through the old and new method, just fail
# now.
if not all([self.username, self.password]):
msg = _("san_login and/or san_password is not set for Datera "
"driver in the cinder.conf. Set this information and "
"start the cinder-volume service again.")
LOG.error(msg)
raise exception.InvalidInput(msg)
self._login()
@utils.retry(exception.VolumeDriverException, retries=3)
def _wait_for_resource(self, id, resource_type):
result = self._issue_api_request(resource_type, 'get', id)
if result['status'] == 'available':
return
else:
raise exception.VolumeDriverException(message=
_('Resource not ready.'))
def _create_resource(self, resource, resource_type, body):
type_id = resource.get('volume_type_id', None)
if resource_type == 'volumes':
if type_id is not None:
policies = self._get_policies_by_volume_type(type_id)
if policies:
body.update(policies)
result = None
try:
result = self._issue_api_request(resource_type, 'post', body=body)
except exception.Invalid:
if resource_type == 'volumes' and type_id:
LOG.error(_LE("Creation request failed. Please verify the "
"extra-specs set for your volume types are "
"entered correctly."))
raise
else:
if result['status'] == 'available':
return
self._wait_for_resource(resource['id'], resource_type)
def create_volume(self, volume):
"""Create a logical volume."""
body = {
'name': volume['display_name'] or volume['id'],
'size': str(volume['size'] * units.Gi),
'uuid': volume['id'],
'numReplicas': self.num_replicas
}
self._create_resource(volume, 'volumes', body)
def create_cloned_volume(self, volume, src_vref):
body = {
'name': volume['display_name'] or volume['id'],
'uuid': volume['id'],
'clone_uuid': src_vref['id'],
'numReplicas': self.num_replicas
}
self._create_resource(volume, 'volumes', body)
def delete_volume(self, volume):
try:
self._issue_api_request('volumes', 'delete', volume['id'])
except exception.NotFound:
LOG.info(_LI("Tried to delete volume %s, but it was not found in "
"the Datera cluster. Continuing with delete."),
volume['id'])
def _do_export(self, context, volume):
"""Gets the associated account, retrieves CHAP info and updates."""
portal = None
iqn = None
datera_volume = self._issue_api_request('volumes',
resource=volume['id'])
if len(datera_volume['targets']) == 0:
export = self._issue_api_request(
'volumes', action='export', method='post',
body={'ctype': 'TC_BLOCK_ISCSI'}, resource=volume['id'])
portal = "%s:3260" % export['endpoint_addrs'][0]
iqn = export['endpoint_idents'][0]
else:
export = self._issue_api_request(
'export_configs',
resource=datera_volume['targets'][0]
)
portal = export['endpoint_addrs'][0] + ':3260'
iqn = export['endpoint_idents'][0]
provider_location = '%s %s %s' % (portal, iqn, 0)
return {'provider_location': provider_location}
def ensure_export(self, context, volume):
return self._do_export(context, volume)
def create_export(self, context, volume, connector):
return self._do_export(context, volume)
def detach_volume(self, context, volume, attachment=None):
try:
self._issue_api_request('volumes', 'delete', resource=volume['id'],
action='export')
except exception.NotFound:
LOG.info(_LI("Tried to delete export for volume %s, but it was "
"not found in the Datera cluster. Continuing with "
"volume detach"), volume['id'])
def delete_snapshot(self, snapshot):
try:
self._issue_api_request('snapshots', 'delete', snapshot['id'])
except exception.NotFound:
LOG.info(_LI("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete."),
snapshot['id'])
def create_snapshot(self, snapshot):
body = {
'uuid': snapshot['id'],
'parentUUID': snapshot['volume_id']
}
self._create_resource(snapshot, 'snapshots', body)
def create_volume_from_snapshot(self, volume, snapshot):
body = {
'name': volume['display_name'] or volume['id'],
'uuid': volume['id'],
'snapshot_uuid': snapshot['id'],
'numReplicas': self.num_replicas
}
self._create_resource(volume, 'volumes', body)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data.
"""
if refresh:
try:
self._update_cluster_stats()
except exception.DateraAPIException:
LOG.error(_LE('Failed to get updated stats from Datera '
'cluster.'))
pass
return self.cluster_stats
def extend_volume(self, volume, new_size):
body = {
'size': str(new_size * units.Gi)
}
self._issue_api_request('volumes', 'put', body=body,
resource=volume['id'])
def _update_cluster_stats(self):
LOG.debug("Updating cluster stats info.")
results = self._issue_api_request('cluster')
if 'uuid' not in results:
LOG.error(_LE('Failed to get updated stats from Datera Cluster.'))
backend_name = self.configuration.safe_get('volume_backend_name')
stats = {
'volume_backend_name': backend_name or 'Datera',
'vendor_name': 'Datera',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': int(results['totalRawSpace']),
'free_capacity_gb': int(results['availableSpace']),
'reserved_percentage': 0,
}
self.cluster_stats = stats
self.datera_api_token = None
def _login(self):
"""Use the san_login and san_password to set self.auth_token."""
@@ -306,16 +110,285 @@ class DateraDriver(san.SanISCSIDriver):
try:
LOG.debug('Getting Datera auth token.')
results = self._issue_api_request('login', 'post', body=body,
results = self._issue_api_request('login', 'put', body=body,
sensitive=True)
self.auth_token = results['key']
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume'
'cinder.conf and start the cinder-volume '
'service again.'))
def _get_lunid(self):
return 0
def do_setup(self, context):
# If we can't authenticate through the old and new method, just fail
# now.
if not all([self.username, self.password]):
msg = _("san_login and/or san_password is not set for Datera "
"driver in the cinder.conf. Set this information and "
"start the cinder-volume service again.")
LOG.error(msg)
raise exception.InvalidInput(msg)
self._login()
@utils.retry(exception.VolumeDriverException, retries=3)
def _wait_for_resource(self, id, resource_type):
result = self._issue_api_request(resource_type, 'get', id)
if result['storage_instances'][DEFAULT_STORAGE_NAME]['volumes'][
DEFAULT_VOLUME_NAME]['op_state'] == 'available':
return
else:
raise exception.VolumeDriverException(
message=_('Resource not ready.'))
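A note on the decorator above, assuming cinder.utils.retry's standard semantics:

# utils.retry re-invokes _wait_for_resource each time it raises
# VolumeDriverException, up to retries=3, turning the single op_state
# check above into a bounded polling loop before the error propagates.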
def _create_resource(self, resource, resource_type, body):
type_id = resource.get('volume_type_id', None)
result = None
try:
result = self._issue_api_request(resource_type, 'post', body=body)
except exception.Invalid:
if resource_type == 'volumes' and type_id:
LOG.error(_LE("Creation request failed. Please verify the "
"extra-specs set for your volume types are "
"entered correctly."))
raise
else:
# Handle updating QOS Policies
if resource_type == 'app_instances':
url = ('app_instances/{}/storage_instances/{}/volumes/{'
'}/performance_policy')
url = url.format(
resource['id'],
DEFAULT_STORAGE_NAME,
DEFAULT_VOLUME_NAME)
if type_id is not None:
policies = self._get_policies_by_volume_type(type_id)
if policies:
self._issue_api_request(url, 'post', body=policies)
if result['storage_instances'][DEFAULT_STORAGE_NAME]['volumes'][
DEFAULT_VOLUME_NAME]['op_state'] == 'available':
return
self._wait_for_resource(resource['id'], resource_type)
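To make the performance_policy branch concrete, a hypothetical payload; the key names are borrowed from the superseded v1 test body earlier in this diff, not from a v2 API reference:

# Hypothetical QoS dict as might be returned by
# _get_policies_by_volume_type() and posted to the url built above:
policies = {
    'max_iops_read': '2000',
    'max_iops_write': '4000',
    'max_iops_total': '4000',
}
# self._issue_api_request(url, 'post', body=policies)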
def create_volume(self, volume):
"""Create a logical volume."""
# Generate App Instance, Storage Instance and Volume
# Volume ID will be used as the App Instance Name
# Storage Instance and Volumes will have standard names
app_params = (
{
'create_mode': "openstack",
'uuid': str(volume['id']),
'name': str(volume['id']),
'access_control_mode': 'allow_all',
'storage_instances': {
DEFAULT_STORAGE_NAME: {
'name': DEFAULT_STORAGE_NAME,
'volumes': {
DEFAULT_VOLUME_NAME: {
'name': DEFAULT_VOLUME_NAME,
'size': volume['size'],
'replica_count': int(self.num_replicas),
'snapshot_policies': {
}
}
}
}
}
})
self._create_resource(volume, 'app_instances', body=app_params)
def extend_volume(self, volume, new_size):
# Offline App Instance, if necessary
reonline = False
app_inst = self._issue_api_request(
"app_instances/{}".format(volume['id']))
if app_inst['admin_state'] == 'online':
reonline = True
self.detach_volume(None, volume)
# Change Volume Size
app_inst = volume['id']
storage_inst = DEFAULT_STORAGE_NAME
data = {
'size': new_size
}
self._issue_api_request(
'app_instances/{}/storage_instances/{}/volumes/{}'.format(
app_inst, storage_inst, DEFAULT_VOLUME_NAME),
method='put', body=data)
# Online Volume, if it was online before
if reonline:
self.create_export(None, volume)
def create_cloned_volume(self, volume, src_vref):
clone_src_template = ("/app_instances/{}/storage_instances/{"
"}/volumes/{}")
src = clone_src_template.format(src_vref['id'], DEFAULT_STORAGE_NAME,
DEFAULT_VOLUME_NAME)
data = {
'create_mode': 'openstack',
'name': str(volume['id']),
'uuid': str(volume['id']),
'clone_src': src,
'access_control_mode': 'allow_all'
}
self._issue_api_request('app_instances', 'post', body=data)
def delete_volume(self, volume):
self.detach_volume(None, volume)
app_inst = volume['id']
try:
self._issue_api_request('app_instances/{}'.format(app_inst),
method='delete')
except exception.NotFound:
msg = _LI("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, volume['id'])
def ensure_export(self, context, volume, connector):
"""Gets the associated account, retrieves CHAP info and updates."""
return self.create_export(context, volume, connector)
def create_export(self, context, volume, connector):
url = "app_instances/{}".format(volume['id'])
data = {
'admin_state': 'online'
}
app_inst = self._issue_api_request(url, method='put', body=data)
storage_instance = app_inst['storage_instances'][
DEFAULT_STORAGE_NAME]
portal = storage_instance['access']['ips'][0] + ':3260'
iqn = storage_instance['access']['iqn']
# Portal, IQN, LUNID
provider_location = '%s %s %s' % (portal, iqn, self._get_lunid())
return {'provider_location': provider_location}
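The returned string follows the 'portal iqn lun' layout the updated tests expect; a consumer-side sketch:

# e.g. provider_location ==
# '172.28.94.11:3260 iqn.2013-05.com.daterainc:c20aba21-6ef6-446b-'
# 'b374-45733b4883ba--ST--storage-1:01:sn:34e5b20fbadd3abb 0'
portal, iqn, lun_id = provider_location.split()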
def detach_volume(self, context, volume, attachment=None):
url = "app_instances/{}".format(volume['id'])
data = {
'admin_state': 'offline',
'force': True
}
try:
self._issue_api_request(url, method='put', body=data)
except exception.NotFound:
msg = _LI("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, volume['id'])
def create_snapshot(self, snapshot):
url_template = ('app_instances/{}/storage_instances/{}/volumes/{'
'}/snapshots')
url = url_template.format(snapshot['volume_id'],
DEFAULT_STORAGE_NAME,
DEFAULT_VOLUME_NAME)
snap_params = {
'uuid': snapshot['id'],
}
self._issue_api_request(url, method='post', body=snap_params)
def delete_snapshot(self, snapshot):
snap_temp = ('app_instances/{}/storage_instances/{}/volumes/{'
'}/snapshots')
snapu = snap_temp.format(snapshot['volume_id'],
DEFAULT_STORAGE_NAME,
DEFAULT_VOLUME_NAME)
snapshots = self._issue_api_request(snapu, method='get')
try:
for ts, snap in snapshots.items():
if snap['uuid'] == snapshot['id']:
url_template = snapu + '/{}'
url = url_template.format(ts)
self._issue_api_request(url, method='delete')
break
else:
raise exception.NotFound
except exception.NotFound:
msg = _LI("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, snapshot['id'])
def create_volume_from_snapshot(self, volume, snapshot):
snap_temp = ('app_instances/{}/storage_instances/{}/volumes/{'
'}/snapshots')
snapu = snap_temp.format(snapshot['volume_id'],
DEFAULT_STORAGE_NAME,
DEFAULT_VOLUME_NAME)
snapshots = self._issue_api_request(snapu, method='get')
for ts, snap in snapshots.items():
if snap['uuid'] == snapshot['id']:
found_ts = ts
break
else:
raise exception.NotFound
src = ('/app_instances/{}/storage_instances/{}/volumes/{'
'}/snapshots/{}'.format(
snapshot['volume_id'],
DEFAULT_STORAGE_NAME,
DEFAULT_VOLUME_NAME,
found_ts))
app_params = (
{
'create_mode': 'openstack',
'uuid': str(volume['id']),
'name': str(volume['id']),
'clone_src': src,
'access_control_mode': 'allow_all'
})
self._issue_api_request(
'app_instances',
method='post',
body=app_params)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data.
"""
if refresh or not self.cluster_stats:
try:
self._update_cluster_stats()
except exception.DateraAPIException:
LOG.error(_LE('Failed to get updated stats from Datera '
'cluster.'))
return self.cluster_stats
def _update_cluster_stats(self):
LOG.debug("Updating cluster stats info.")
results = self._issue_api_request('system')
if 'uuid' not in results:
LOG.error(_LE('Failed to get updated stats from Datera Cluster.'))
backend_name = self.configuration.safe_get('volume_backend_name')
stats = {
'volume_backend_name': backend_name or 'Datera',
'vendor_name': 'Datera',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': int(results['total_capacity']) / units.Gi,
'free_capacity_gb': int(results['available_capacity']) / units.Gi,
'reserved_percentage': 0,
}
self.cluster_stats = stats
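A worked example of the byte-to-GB conversion above (oslo_utils units are binary multiples):

from oslo_utils import units
# A cluster reporting total_capacity of 2 TiB, in bytes:
total_gb = int(2 * units.Ti) / units.Gi  # -> 2048.0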
def _get_policies_by_volume_type(self, type_id):
"""Get extra_specs and qos_specs of a volume_type.
@@ -354,7 +427,7 @@ class DateraDriver(san.SanISCSIDriver):
"""
host = self.configuration.san_ip
port = self.configuration.datera_api_port
api_token = self.configuration.datera_api_token
api_token = self.datera_api_token
api_version = self.configuration.datera_api_version
payload = json.dumps(body, ensure_ascii=False)
@@ -363,10 +436,7 @@ class DateraDriver(san.SanISCSIDriver):
if not sensitive:
LOG.debug("Payload for Datera API call: %s", payload)
header = {
'Content-Type': 'application/json; charset=utf-8',
'auth-token': self.auth_token
}
header = {'Content-Type': 'application/json; charset=utf-8'}
protocol = 'http'
if self.configuration.driver_use_ssl:
@@ -399,8 +469,10 @@ class DateraDriver(san.SanISCSIDriver):
data=payload, headers=header,
verify=False, cert=cert_data)
except requests.exceptions.RequestException as ex:
msg = _('Failed to make a request to Datera cluster endpoint due '
'to the following reason: %s') % six.text_type(ex.message)
msg = _(
'Failed to make a request to Datera cluster endpoint due '
'to the following reason: %s') % six.text_type(
ex.message)
LOG.error(msg)
raise exception.DateraAPIException(msg)
@@ -409,6 +481,12 @@ class DateraDriver(san.SanISCSIDriver):
LOG.debug("Results of Datera API call: %s", data)
if not response.ok:
LOG.debug(("Datera Response URL: %s\n"
"Datera Response Payload: %s\n"
"Response Object: %s\n"),
response.url,
payload,
vars(response))
if response.status_code == 404:
raise exception.NotFound(data['message'])
elif response.status_code in [403, 401]:

View File

@@ -0,0 +1,12 @@
---
features:
  - All Datera DataFabric backed volume-types now use
    version 2 of the Datera DataFabric API.
upgrade:
  - Users of the Datera Cinder driver are now required to run
    Datera DataFabric version 1.0 or greater. Versions before 1.0
    cannot use this new driver because they only support v1 of the
    Datera DataFabric API.
deprecations:
  - datera_api_token -- this option has been replaced by
    san_login and san_password.
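For operators, a hedged sketch of the matching cinder.conf change; the section name and values are placeholders, and only the option names come from this change and Cinder's standard SAN options:

[datera-backend]
volume_driver = cinder.volume.drivers.datera.DateraDriver
san_ip = <Datera management IP>
san_login = admin
san_password = password
# datera_api_token = ...   # deprecated; replaced by san_login/san_password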