Merge "Remove Blockbridge volume driver"

Jenkins 2017-09-13 23:57:23 +00:00 committed by Gerrit Code Review
commit d43849d22c
7 changed files with 1 addition and 1470 deletions

@@ -70,8 +70,6 @@ from cinder import ssh_utils as cinder_sshutils
from cinder.transfer import api as cinder_transfer_api
from cinder.volume import api as cinder_volume_api
from cinder.volume import driver as cinder_volume_driver
from cinder.volume.drivers import blockbridge as \
cinder_volume_drivers_blockbridge
from cinder.volume.drivers.coprhd import common as \
cinder_volume_drivers_coprhd_common
from cinder.volume.drivers.coprhd import scaleio as \
@@ -297,7 +295,6 @@ def list_opts():
itertools.chain(
cinder_volume_driver.volume_opts,
cinder_volume_driver.iser_opts,
cinder_volume_drivers_blockbridge.blockbridge_opts,
cinder_volume_drivers_coprhd_common.volume_opts,
cinder_volume_drivers_coprhd_scaleio.scaleio_opts,
cinder_volume_drivers_datera_dateraiscsi.d_opts,

@@ -1,582 +0,0 @@
# Copyright 2015 Blockbridge Networks, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Blockbridge EPS iSCSI Volume Driver Tests
"""
import base64
import mock
from oslo_serialization import jsonutils
from oslo_utils import units
import six
from six.moves import http_client
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
import cinder.volume.drivers.blockbridge as bb
DEFAULT_POOL_NAME = "OpenStack"
DEFAULT_POOL_QUERY = "+openstack"
FIXTURE_VOL_EXPORT_OK = """{
"target_ip":"127.0.0.1",
"target_port":3260,
"target_iqn":"iqn.2009-12.com.blockbridge:t-pjxczxh-t001",
"target_lun":0,
"initiator_login":"mock-user-abcdef123456"
}
"""
POOL_STATS_WITHOUT_USAGE = {
'driver_version': '1.3.0',
'pools': [{
'filter_function': None,
'free_capacity_gb': 'unknown',
'goodness_function': None,
'location_info': 'BlockbridgeDriver:unknown:OpenStack',
'max_over_subscription_ratio': None,
'pool_name': 'OpenStack',
'thin_provisioning_support': True,
'reserved_percentage': 0,
'total_capacity_gb': 'unknown'},
],
'storage_protocol': 'iSCSI',
'vendor_name': 'Blockbridge',
'volume_backend_name': 'BlockbridgeISCSIDriver',
}
def common_mocks(f):
"""Decorator to set mocks common to all tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
@mock.patch("six.moves.http_client.HTTPSConnection", autospec=True)
def _common_inner_inner2(mock_conn):
inst.mock_httplib = mock_conn
inst.mock_conn = mock_conn.return_value
inst.mock_response = mock.Mock()
inst.mock_response.read.return_value = '{}'
inst.mock_response.status = http_client.OK
inst.mock_conn.request.return_value = True
inst.mock_conn.getresponse.return_value = inst.mock_response
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
class BlockbridgeISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(BlockbridgeISCSIDriverTestCase, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.blockbridge_api_host = 'ut-api.blockbridge.com'
self.cfg.blockbridge_api_port = None
self.cfg.blockbridge_auth_scheme = 'token'
self.cfg.blockbridge_auth_token = '0//kPIw7Ck7PUkPSKY...'
self.cfg.blockbridge_pools = {DEFAULT_POOL_NAME: DEFAULT_POOL_QUERY}
self.cfg.blockbridge_default_pool = None
self.cfg.filter_function = None
self.cfg.goodness_function = None
def _cfg_safe_get(arg):
return getattr(self.cfg, arg, None)
self.cfg.safe_get.side_effect = _cfg_safe_get
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
self.real_client = bb.BlockbridgeAPIClient(configuration=self.cfg)
self.mock_client = mock.Mock(wraps=self.real_client)
self.driver = bb.BlockbridgeISCSIDriver(execute=mock_exec,
client=self.mock_client,
configuration=self.cfg)
self.user_id = '2c13bc8ef717015fda1e12e70dab24654cb6a6da'
self.project_id = '62110b9d37f1ff3ea1f51e75812cb92ed9a08b28'
self.volume_name = u'testvol-1'
self.volume_id = '6546b9e9-1980-4241-a4e9-0ad9d382c032'
self.volume_size = 1
self.volume = dict(
name=self.volume_name,
size=self.volume_size,
id=self.volume_id,
user_id=self.user_id,
project_id=self.project_id,
host='fake-host')
self.snapshot_name = u'testsnap-1'
self.snapshot_id = '207c12af-85a7-4da6-8d39-a7457548f965'
self.snapshot = dict(
volume_name=self.volume_name,
name=self.snapshot_name,
id=self.snapshot_id,
volume_id='55ff8a46-c35f-4ca3-9991-74e1697b220e',
user_id=self.user_id,
project_id=self.project_id)
self.connector = dict(
initiator='iqn.1994-05.com.redhat:6a528422b61')
self.driver.do_setup(context.get_admin_context())
@common_mocks
def test_http_mock_success(self):
self.mock_response.read.return_value = '{}'
self.mock_response.status = http_client.OK
conn = http_client.HTTPSConnection('whatever', None)
conn.request('GET', '/blah', '{}', {})
rsp = conn.getresponse()
self.assertEqual('{}', rsp.read())
self.assertEqual(http_client.OK, rsp.status)
@common_mocks
def test_http_mock_failure(self):
mock_body = '{"error": "no results matching query", "status": 413}'
self.mock_response.read.return_value = mock_body
self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE
conn = http_client.HTTPSConnection('whatever', None)
conn.request('GET', '/blah', '{}', {})
rsp = conn.getresponse()
self.assertEqual(mock_body, rsp.read())
self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, rsp.status)
@common_mocks
def test_cfg_api_host(self):
with mock.patch.object(self.cfg, 'blockbridge_api_host', 'test.host'):
self.driver.get_volume_stats(True)
self.mock_httplib.assert_called_once_with('test.host', None)
@common_mocks
def test_cfg_api_port(self):
with mock.patch.object(self.cfg, 'blockbridge_api_port', 1234):
self.driver.get_volume_stats(True)
self.mock_httplib.assert_called_once_with(
self.cfg.blockbridge_api_host, 1234)
@common_mocks
def test_cfg_api_auth_scheme_password(self):
self.cfg.blockbridge_auth_scheme = 'password'
self.cfg.blockbridge_auth_user = 'mock-user'
self.cfg.blockbridge_auth_password = 'mock-password'
with mock.patch.object(self.driver, 'hostname', 'mock-hostname'):
self.driver.get_volume_stats(True)
creds = "%s:%s" % (self.cfg.blockbridge_auth_user,
self.cfg.blockbridge_auth_password)
if six.PY3:
creds = creds.encode('utf-8')
b64_creds = base64.encodestring(creds).decode('ascii')
else:
b64_creds = base64.encodestring(creds)
params = dict(
hostname='mock-hostname',
version=self.driver.VERSION,
backend_name='BlockbridgeISCSIDriver',
pool='OpenStack',
query='+openstack')
headers = {
'Accept': 'application/vnd.blockbridge-3+json',
'Authorization': "Basic %s" % b64_creds.replace("\n", ""),
'User-Agent': "cinder-volume/%s" % self.driver.VERSION,
}
self.mock_conn.request.assert_called_once_with(
'GET', mock.ANY, None, headers)
# Parse the URL instead of comparing directly both URLs.
# On Python 3, parameters are formatted in a random order because
# of the hash randomization.
conn_url = self.mock_conn.request.call_args[0][1]
conn_params = dict(urllib.parse.parse_qsl(conn_url.split("?", 1)[1]))
self.assertTrue(conn_url.startswith("/api/cinder/status?"),
repr(conn_url))
self.assertEqual(params, conn_params)
@common_mocks
def test_create_volume(self):
self.driver.create_volume(self.volume)
url = "/volumes/%s" % self.volume_id
create_params = dict(
name=self.volume_name,
query=DEFAULT_POOL_QUERY,
capacity=self.volume_size * units.Gi)
kwargs = dict(
method='PUT',
params=create_params,
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(url, **kwargs)
full_url = "/api/cinder" + url
tsk_header = "ext_auth=keystone/%(project_id)s/%(user_id)s" % kwargs
authz_header = "Bearer %s" % self.cfg.blockbridge_auth_token
headers = {
'X-Blockbridge-Task': tsk_header,
'Accept': 'application/vnd.blockbridge-3+json',
'Content-Type': 'application/json',
'Authorization': authz_header,
'User-Agent': "cinder-volume/%s" % self.driver.VERSION,
}
# This is split up because assert_called_once_with won't handle
# randomly ordered dictionaries.
args, kwargs = self.mock_conn.request.call_args
self.assertEqual(args[0], 'PUT')
self.assertEqual(args[1], full_url)
self.assertDictEqual(jsonutils.loads(args[2]), create_params)
self.assertDictEqual(args[3], headers)
@common_mocks
def test_create_volume_no_results(self):
mock_body = '{"message": "no results matching query", "status": 413}'
self.mock_response.read.return_value = mock_body
self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE
self.assertRaisesRegex(exception.VolumeBackendAPIException,
"no results matching query",
self.driver.create_volume,
self.volume)
create_params = dict(
name=self.volume_name,
query=DEFAULT_POOL_QUERY,
capacity=self.volume_size * units.Gi)
kwargs = dict(
method='PUT',
params=create_params,
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(
"/volumes/%s" % self.volume_id, **kwargs)
@common_mocks
def test_create_volume_from_snapshot(self):
self.driver.create_volume_from_snapshot(self.volume, self.snapshot)
vol_src = dict(
snapshot_id=self.snapshot_id,
volume_id=self.snapshot['volume_id'])
create_params = dict(
name=self.volume_name,
capacity=self.volume_size * units.Gi,
src=vol_src)
kwargs = dict(
method='PUT',
params=create_params,
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(
"/volumes/%s" % self.volume_id, **kwargs)
@common_mocks
def test_create_volume_from_snapshot_overquota(self):
mock_body = '{"message": "over quota", "status": 413}'
self.mock_response.read.return_value = mock_body
self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE
self.assertRaisesRegex(exception.VolumeBackendAPIException,
"over quota",
self.driver.create_volume_from_snapshot,
self.volume,
self.snapshot)
vol_src = dict(
snapshot_id=self.snapshot_id,
volume_id=self.snapshot['volume_id'])
create_params = dict(
name=self.volume_name,
capacity=self.volume_size * units.Gi,
src=vol_src)
kwargs = dict(
method='PUT',
params=create_params,
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(
"/volumes/%s" % self.volume_id, **kwargs)
@common_mocks
def test_create_cloned_volume(self):
src_vref = dict(
name='cloned_volume_source',
size=self.volume_size,
id='5d734467-5d77-461c-b5ac-5009dbeaa5d5',
user_id=self.user_id,
project_id=self.project_id)
self.driver.create_cloned_volume(self.volume, src_vref)
create_params = dict(
name=self.volume_name,
capacity=self.volume_size * units.Gi,
src=dict(volume_id=src_vref['id']))
kwargs = dict(
method='PUT',
params=create_params,
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(
"/volumes/%s" % self.volume_id, **kwargs)
@common_mocks
def test_create_cloned_volume_overquota(self):
mock_body = '{"message": "over quota", "status": 413}'
self.mock_response.read.return_value = mock_body
self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE
src_vref = dict(
name='cloned_volume_source',
size=self.volume_size,
id='5d734467-5d77-461c-b5ac-5009dbeaa5d5',
user_id=self.user_id,
project_id=self.project_id)
self.assertRaisesRegex(exception.VolumeBackendAPIException,
"over quota",
self.driver.create_cloned_volume,
self.volume,
src_vref)
create_params = dict(
name=self.volume_name,
capacity=self.volume_size * units.Gi,
src=dict(volume_id=src_vref['id']))
kwargs = dict(
method='PUT',
params=create_params,
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(
"/volumes/%s" % self.volume_id, **kwargs)
@common_mocks
def test_extend_volume(self):
self.driver.extend_volume(self.volume, 2)
url = "/volumes/%s" % self.volume_id
kwargs = dict(
action='grow',
method='POST',
params=dict(capacity=(2 * units.Gi)),
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(url, **kwargs)
@common_mocks
def test_extend_volume_overquota(self):
mock_body = '{"message": "over quota", "status": 413}'
self.mock_response.read.return_value = mock_body
self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE
self.assertRaisesRegex(exception.VolumeBackendAPIException,
"over quota",
self.driver.extend_volume,
self.volume,
2)
url = "/volumes/%s" % self.volume_id
kwargs = dict(
action='grow',
method='POST',
params=dict(capacity=(2 * units.Gi)),
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(url, **kwargs)
@common_mocks
def test_delete_volume(self):
self.driver.delete_volume(self.volume)
url = "/volumes/%s" % self.volume_id
kwargs = dict(
method='DELETE',
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(url, **kwargs)
@common_mocks
def test_create_snapshot(self):
self.driver.create_snapshot(self.snapshot)
url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'],
self.snapshot['id'])
create_params = dict(
name=self.snapshot_name)
kwargs = dict(
method='PUT',
params=create_params,
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(url, **kwargs)
@common_mocks
def test_create_snapshot_overquota(self):
mock_body = '{"message": "over quota", "status": 413}'
self.mock_response.read.return_value = mock_body
self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE
self.assertRaisesRegex(exception.VolumeBackendAPIException,
"over quota",
self.driver.create_snapshot,
self.snapshot)
url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'],
self.snapshot['id'])
create_params = dict(
name=self.snapshot_name)
kwargs = dict(
method='PUT',
params=create_params,
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(url, **kwargs)
@common_mocks
def test_delete_snapshot(self):
self.driver.delete_snapshot(self.snapshot)
url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'],
self.snapshot['id'])
kwargs = dict(
method='DELETE',
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(url, **kwargs)
@common_mocks
@mock.patch('cinder.volume.utils.generate_username')
@mock.patch('cinder.volume.utils.generate_password')
def test_initialize_connection(self,
mock_generate_password,
mock_generate_username):
mock_generate_username.return_value = 'mock-user-abcdef123456'
mock_generate_password.return_value = 'mock-password-abcdef123456'
self.mock_response.read.return_value = FIXTURE_VOL_EXPORT_OK
self.mock_response.status = http_client.OK
props = self.driver.initialize_connection(self.volume, self.connector)
expected_props = dict(
driver_volume_type="iscsi",
data=dict(
auth_method="CHAP",
auth_username='mock-user-abcdef123456',
auth_password='mock-password-abcdef123456',
target_discovered=False,
target_iqn="iqn.2009-12.com.blockbridge:t-pjxczxh-t001",
target_lun=0,
target_portal="127.0.0.1:3260",
volume_id=self.volume_id))
self.assertEqual(expected_props, props)
ini_name = urllib.parse.quote(self.connector["initiator"], "")
url = "/volumes/%s/exports/%s" % (self.volume_id, ini_name)
params = dict(
chap_user="mock-user-abcdef123456",
chap_secret="mock-password-abcdef123456")
kwargs = dict(
method='PUT',
params=params,
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(url, **kwargs)
@common_mocks
def test_terminate_connection(self):
self.driver.terminate_connection(self.volume, self.connector)
ini_name = urllib.parse.quote(self.connector["initiator"], "")
url = "/volumes/%s/exports/%s" % (self.volume_id, ini_name)
kwargs = dict(
method='DELETE',
user_id=self.user_id,
project_id=self.project_id)
self.mock_client.submit.assert_called_once_with(url, **kwargs)
@common_mocks
def test_get_volume_stats_without_usage(self):
with mock.patch.object(self.driver, 'hostname', 'mock-hostname'):
self.driver.get_volume_stats(True)
p = {
'query': '+openstack',
'pool': 'OpenStack',
'hostname': 'mock-hostname',
'version': '1.3.0',
'backend_name': 'BlockbridgeISCSIDriver',
}
self.mock_client.submit.assert_called_once_with('/status', params=p)
self.assertEqual(POOL_STATS_WITHOUT_USAGE, self.driver._stats)
@common_mocks
def test_get_volume_stats_forbidden(self):
self.mock_response.status = http_client.FORBIDDEN
self.assertRaisesRegex(exception.NotAuthorized,
"Insufficient privileges",
self.driver.get_volume_stats,
True)
@common_mocks
def test_get_volume_stats_unauthorized(self):
self.mock_response.status = http_client.UNAUTHORIZED
self.assertRaisesRegex(exception.NotAuthorized,
"Invalid credentials",
self.driver.get_volume_stats,
True)

@@ -1,604 +0,0 @@
# Copyright 2013-2015 Blockbridge Networks, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Blockbridge EPS iSCSI Volume Driver
"""
import base64
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import units
import six
from six.moves import http_client
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
blockbridge_opts = [
cfg.StrOpt("blockbridge_api_host",
help="IP address/hostname of Blockbridge API."),
cfg.IntOpt("blockbridge_api_port",
help="Override HTTPS port to connect to Blockbridge "
"API server."),
cfg.StrOpt("blockbridge_auth_scheme",
default='token',
choices=['token', 'password'],
help="Blockbridge API authentication scheme (token "
"or password)"),
cfg.StrOpt("blockbridge_auth_token",
help="Blockbridge API token (for auth scheme 'token')",
secret=True),
cfg.StrOpt("blockbridge_auth_user",
help="Blockbridge API user (for auth scheme 'password')"),
cfg.StrOpt("blockbridge_auth_password",
help="Blockbridge API password (for auth scheme 'password')",
secret=True),
cfg.DictOpt("blockbridge_pools",
default={'OpenStack': '+openstack'},
help="Defines the set of exposed pools and their associated "
"backend query strings"),
cfg.StrOpt("blockbridge_default_pool",
help="Default pool name if unspecified."),
]
CONF = cfg.CONF
CONF.register_opts(blockbridge_opts, group=configuration.SHARED_CONF_GROUP)
class BlockbridgeAPIClient(object):
_api_cfg = None
def __init__(self, configuration=None):
self.configuration = configuration
def _get_api_cfg(self):
if self._api_cfg:
# return cached configuration
return self._api_cfg
if self.configuration.blockbridge_auth_scheme == 'password':
user = self.configuration.safe_get('blockbridge_auth_user')
pw = self.configuration.safe_get('blockbridge_auth_password')
creds = "%s:%s" % (user, pw)
if six.PY3:
creds = creds.encode('utf-8')
b64_creds = base64.encodestring(creds).decode('ascii')
else:
b64_creds = base64.encodestring(creds)
authz = "Basic %s" % b64_creds.replace("\n", "")
elif self.configuration.blockbridge_auth_scheme == 'token':
token = self.configuration.blockbridge_auth_token or ''
authz = "Bearer %s" % token
# set and return cached api cfg
self._api_cfg = {
'host': self.configuration.blockbridge_api_host,
'port': self.configuration.blockbridge_api_port,
'base_url': '/api/cinder',
'default_headers': {
'User-Agent': ("cinder-volume/%s" %
BlockbridgeISCSIDriver.VERSION),
'Accept': 'application/vnd.blockbridge-3+json',
'Authorization': authz,
},
}
return self._api_cfg
def submit(self, rel_url, method='GET', params=None, user_id=None,
project_id=None, req_id=None, action=None, **kwargs):
"""Submit a request to the configured API endpoint."""
cfg = self._get_api_cfg()
if cfg is None:
msg = _("Failed to determine blockbridge API configuration")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# alter the url appropriately if an action is requested
if action:
rel_url += "/actions/%s" % action
headers = cfg['default_headers'].copy()
url = cfg['base_url'] + rel_url
body = None
# include user, project and req-id, if supplied
tsk_ctx = []
if user_id and project_id:
tsk_ctx.append("ext_auth=keystone/%s/%s" % (project_id, user_id))
if req_id:
tsk_ctx.append("id=%s" % req_id)
if tsk_ctx:
headers['X-Blockbridge-Task'] = ','.join(tsk_ctx)
# encode params based on request method
if method in ['GET', 'DELETE']:
# For GET method add parameters to the URL
if params:
url += '?' + urllib.parse.urlencode(params)
elif method in ['POST', 'PUT', 'PATCH']:
body = jsonutils.dumps(params)
headers['Content-Type'] = 'application/json'
else:
raise exception.UnknownCmd(cmd=method)
# connect and execute the request
connection = http_client.HTTPSConnection(cfg['host'], cfg['port'])
connection.request(method, url, body, headers)
response = connection.getresponse()
# read response data
rsp_body = response.read()
rsp_data = jsonutils.loads(rsp_body)
connection.close()
code = response.status
if code in [200, 201, 202, 204]:
pass
elif code == 401:
raise exception.NotAuthorized(_("Invalid credentials"))
elif code == 403:
raise exception.NotAuthorized(_("Insufficient privileges"))
else:
raise exception.VolumeBackendAPIException(data=rsp_data['message'])
return rsp_data
@interface.volumedriver
class BlockbridgeISCSIDriver(driver.ISCSIDriver):
"""Manages volumes hosted on Blockbridge EPS."""
VERSION = '1.3.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Blockbridge_EPS_CI"
# TODO(smcginnis) Either remove this if CI requirements are met, or
# remove this driver in the Queens release per normal deprecation
SUPPORTED = False
def __init__(self, *args, **kwargs):
super(BlockbridgeISCSIDriver, self).__init__(*args, **kwargs)
self.client = kwargs.get('client', None) or (
BlockbridgeAPIClient(configuration=self.configuration))
self.configuration.append_config_values(blockbridge_opts)
self.hostname = socket.gethostname()
def do_setup(self, context):
"""Set up the Blockbridge volume driver."""
pass
def check_for_setup_error(self):
"""Verify configuration is valid."""
# ensure the host is configured
if self.configuration.safe_get('blockbridge_api_host') is None:
raise exception.InvalidInput(
reason=_("Blockbridge api host not configured"))
# ensure the auth scheme is valid and has the necessary configuration.
auth_scheme = self.configuration.safe_get("blockbridge_auth_scheme")
if auth_scheme == 'password':
auth_user = self.configuration.safe_get('blockbridge_auth_user')
auth_pw = self.configuration.safe_get('blockbridge_auth_password')
if auth_user is None:
raise exception.InvalidInput(
reason=_("Blockbridge user not configured (required for "
"auth scheme 'password')"))
if auth_pw is None:
raise exception.InvalidInput(
reason=_("Blockbridge password not configured (required "
"for auth scheme 'password')"))
elif auth_scheme == 'token':
token = self.configuration.safe_get('blockbridge_auth_token')
if token is None:
raise exception.InvalidInput(
reason=_("Blockbridge token not configured (required "
"for auth scheme 'token')"))
else:
raise exception.InvalidInput(
reason=(_("Blockbridge configured with invalid auth scheme "
"'%(auth_scheme)s'") % {'auth_scheme': auth_scheme}))
# ensure at least one pool is defined
pools = self.configuration.safe_get('blockbridge_pools')
if pools is None:
raise exception.InvalidInput(
reason=_("Blockbridge pools not configured"))
default_pool = self.configuration.safe_get('blockbridge_default_pool')
if default_pool and default_pool not in pools:
raise exception.InvalidInput(
reason=_("Blockbridge default pool does not exist"))
def _vol_api_submit(self, vol_id, **kwargs):
vol_id = urllib.parse.quote(vol_id, '')
rel_url = "/volumes/%s" % vol_id
return self.client.submit(rel_url, **kwargs)
def _create_volume(self, vol_id, params, **kwargs):
"""Execute a backend volume create operation."""
self._vol_api_submit(vol_id, method='PUT', params=params, **kwargs)
def _delete_volume(self, vol_id, **kwargs):
"""Execute a backend volume delete operation."""
self._vol_api_submit(vol_id, method='DELETE', **kwargs)
def _extend_volume(self, vol_id, capacity, **kwargs):
"""Execute a backend volume grow operation."""
params = kwargs.get('params', {})
params['capacity'] = capacity
self._vol_api_submit(vol_id, method='POST', action='grow',
params=params, **kwargs)
def _snap_api_submit(self, vol_id, snap_id, **kwargs):
vol_id = urllib.parse.quote(vol_id, '')
snap_id = urllib.parse.quote(snap_id, '')
rel_url = "/volumes/%s/snapshots/%s" % (vol_id, snap_id)
return self.client.submit(rel_url, **kwargs)
def _create_snapshot(self, vol_id, snap_id, params, **kwargs):
"""Execute a backend snapshot create operation."""
self._snap_api_submit(vol_id, snap_id, method='PUT',
params=params, **kwargs)
def _delete_snapshot(self, vol_id, snap_id, **kwargs):
"""Execute a backend snapshot delete operation."""
return self._snap_api_submit(vol_id, snap_id, method='DELETE',
**kwargs)
def _export_api_submit(self, vol_id, ini_name, **kwargs):
vol_id = urllib.parse.quote(vol_id, '')
ini_name = urllib.parse.quote(ini_name, '')
rel_url = "/volumes/%s/exports/%s" % (vol_id, ini_name)
return self.client.submit(rel_url, **kwargs)
def _create_export(self, vol_id, ini_name, params, **kwargs):
"""Execute a backend volume export operation."""
return self._export_api_submit(vol_id, ini_name, method='PUT',
params=params, **kwargs)
def _delete_export(self, vol_id, ini_name, **kwargs):
"""Remove a previously created volume export."""
self._export_api_submit(vol_id, ini_name, method='DELETE',
**kwargs)
def _get_pool_stats(self, pool, query, **kwargs):
"""Retrieve pool statistics and capabilities."""
pq = {
'pool': pool,
'query': query,
}
pq.update(kwargs)
return self.client.submit('/status', params=pq)
def _get_dbref_name(self, ref):
display_name = ref.get('display_name')
if not display_name:
return ref.get('name')
return display_name
def _get_query_string(self, ctxt, volume):
pools = self.configuration.blockbridge_pools
default_pool = self.configuration.blockbridge_default_pool
explicit_pool = volume_utils.extract_host(volume['host'], 'pool')
pool_name = explicit_pool or default_pool
if pool_name:
return pools[pool_name]
else:
# no pool specified or defaulted -- just pick whatever comes out of
# the dictionary first.
return list(pools.values())[0]
def create_volume(self, volume):
"""Create a volume on a Blockbridge EPS backend.
:param volume: volume reference
"""
ctxt = context.get_admin_context()
create_params = {
'name': self._get_dbref_name(volume),
'query': self._get_query_string(ctxt, volume),
'capacity': int(volume['size'] * units.Gi),
}
LOG.debug("Provisioning %(capacity)s byte volume "
"with query '%(query)s'", create_params, resource=volume)
return self._create_volume(volume['id'],
create_params,
user_id=volume['user_id'],
project_id=volume['project_id'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
create_params = {
'name': self._get_dbref_name(volume),
'capacity': int(volume['size'] * units.Gi),
'src': {
'volume_id': src_vref['id'],
},
}
LOG.debug("Cloning source volume %(id)s", src_vref, resource=volume)
return self._create_volume(volume['id'],
create_params,
user_id=volume['user_id'],
project_id=volume['project_id'])
def delete_volume(self, volume):
"""Remove an existing volume.
:param volume: volume reference
"""
LOG.debug("Removing volume %(id)s", volume, resource=volume)
return self._delete_volume(volume['id'],
user_id=volume['user_id'],
project_id=volume['project_id'])
def create_snapshot(self, snapshot):
"""Create snapshot of existing volume.
:param snapshot: snapshot reference
"""
create_params = {
'name': self._get_dbref_name(snapshot),
}
LOG.debug("Creating snapshot of volume %(volume_id)s", snapshot,
resource=snapshot)
return self._create_snapshot(snapshot['volume_id'],
snapshot['id'],
create_params,
user_id=snapshot['user_id'],
project_id=snapshot['project_id'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from existing snapshot.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
create_params = {
'name': self._get_dbref_name(volume),
'capacity': int(volume['size'] * units.Gi),
'src': {
'volume_id': snapshot['volume_id'],
'snapshot_id': snapshot['id'],
},
}
LOG.debug("Creating volume from snapshot %(id)s", snapshot,
resource=volume)
return self._create_volume(volume['id'],
create_params,
user_id=volume['user_id'],
project_id=volume['project_id'])
def delete_snapshot(self, snapshot):
"""Delete volume's snapshot.
:param snapshot: snapshot reference
"""
LOG.debug("Deleting snapshot of volume %(volume_id)s", snapshot,
resource=snapshot)
self._delete_snapshot(snapshot['volume_id'],
snapshot['id'],
user_id=snapshot['user_id'],
project_id=snapshot['project_id'])
def create_export(self, _ctx, volume, connector):
"""Do nothing: target created during instance attachment."""
pass
def ensure_export(self, _ctx, volume):
"""Do nothing: target created during instance attachment."""
pass
def remove_export(self, _ctx, volume):
"""Do nothing: target created during instance attachment."""
pass
def initialize_connection(self, volume, connector, **kwargs):
"""Attach volume to initiator/host.
Creates a profile for the initiator, and adds the new profile to the
target ACL.
"""
# generate a CHAP secret here -- there is no way to retrieve an
# existing CHAP secret over the Blockbridge API, so it must be
# supplied by the volume driver.
export_params = {
'chap_user': (
kwargs.get('user', volume_utils.generate_username(16))),
'chap_secret': (
kwargs.get('password', volume_utils.generate_password(32))),
}
LOG.debug("Configuring export for %(initiator)s", connector,
resource=volume)
rsp = self._create_export(volume['id'],
connector['initiator'],
export_params,
user_id=volume['user_id'],
project_id=volume['project_id'])
# combine locally generated chap credentials with target iqn/lun to
# present the attach properties.
target_portal = "%s:%s" % (rsp['target_ip'], rsp['target_port'])
properties = {
'target_discovered': False,
'target_portal': target_portal,
'target_iqn': rsp['target_iqn'],
'target_lun': rsp['target_lun'],
'volume_id': volume['id'],
'auth_method': 'CHAP',
'auth_username': rsp['initiator_login'],
'auth_password': export_params['chap_secret'],
}
LOG.debug("Attach properties: %(properties)s",
{'properties': properties})
return {
'driver_volume_type': 'iscsi',
'data': properties,
}
def terminate_connection(self, volume, connector, **kwargs):
"""Detach volume from the initiator.
Removes initiator profile entry from target ACL.
"""
LOG.debug("Unconfiguring export for %(initiator)s", connector,
resource=volume)
self._delete_export(volume['id'],
connector['initiator'],
user_id=volume['user_id'],
project_id=volume['project_id'])
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
capacity = new_size * units.Gi
LOG.debug("Extending volume to %(capacity)s bytes",
{'capacity': capacity}, resource=volume)
self._extend_volume(volume['id'],
int(new_size * units.Gi),
user_id=volume['user_id'],
project_id=volume['project_id'])
def get_volume_stats(self, refresh=False):
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
if self.configuration:
cfg_name = self.configuration.safe_get('volume_backend_name')
backend_name = cfg_name or self.__class__.__name__
driver_cfg = {
'hostname': self.hostname,
'version': self.VERSION,
'backend_name': backend_name,
}
filter_function = self.get_filter_function()
goodness_function = self.get_goodness_function()
pools = []
LOG.debug("Updating volume driver statistics",
resource={'type': 'driver', 'id': backend_name})
for pool_name, query in self.configuration.blockbridge_pools.items():
stats = self._get_pool_stats(pool_name, query, **driver_cfg)
system_serial = stats.get('system_serial', 'unknown')
free_capacity = stats.get('free_capacity', None)
total_capacity = stats.get('total_capacity', None)
provisioned_capacity = stats.get('provisioned_capacity', None)
if free_capacity is None:
free_capacity = 'unknown'
else:
free_capacity = int(free_capacity / units.Gi)
if total_capacity is None:
total_capacity = 'unknown'
else:
total_capacity = int(total_capacity / units.Gi)
pool = {
'pool_name': pool_name,
'location_info': ('BlockbridgeDriver:%(sys_id)s:%(pool)s' %
{'sys_id': system_serial,
'pool': pool_name}),
'max_over_subscription_ratio': (
self.configuration.safe_get('max_over_subscription_ratio')
),
'free_capacity_gb': free_capacity,
'total_capacity_gb': total_capacity,
'reserved_percentage': 0,
'thin_provisioning_support': True,
'filter_function': filter_function,
'goodness_function': goodness_function,
}
if provisioned_capacity is not None:
pool['provisioned_capacity_gb'] = int(
provisioned_capacity / units.Gi
)
pools.append(pool)
self._stats = {
'volume_backend_name': backend_name,
'vendor_name': 'Blockbridge',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'pools': pools,
}

@@ -1,244 +0,0 @@
===============
Blockbridge EPS
===============
Introduction
~~~~~~~~~~~~
Blockbridge is software that transforms commodity infrastructure into
secure multi-tenant storage that operates as a programmable service. It
provides automatic encryption, secure deletion, quality of service (QoS),
replication, and programmable security capabilities on your choice of
hardware. Blockbridge uses micro-segmentation to provide isolation that allows
you to concurrently operate OpenStack, Docker, and bare-metal workflows on
shared resources. When used with OpenStack, isolated management domains are
dynamically created on a per-project basis. All volumes and clones, within and
between projects, are automatically cryptographically isolated and implement
secure deletion.
Architecture reference
~~~~~~~~~~~~~~~~~~~~~~
**Blockbridge architecture**
.. figure:: ../../figures/bb-cinder-fig1.png
:width: 100%
Control paths
-------------
The Blockbridge driver is packaged with the core distribution of
OpenStack. Operationally, it executes in the context of the Block
Storage service. The driver communicates with an OpenStack-specific API
provided by the Blockbridge EPS platform. Blockbridge optionally
communicates with Identity, Compute, and Block Storage
services.
Block storage API
-----------------
Blockbridge is API-driven software-defined storage. The system
implements a native HTTP API that is tailored to the specific needs of
OpenStack. Each Block Storage service operation maps to a single
back-end API request that provides ACID semantics. The API is
specifically designed to reduce, if not eliminate, the possibility of
inconsistencies between the Block Storage service and external storage
infrastructure in the event of hardware, software or data center
failure.
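For example, creating a 1 GiB volume maps to a single request of roughly
the following shape; the volume identifier, name, and body shown here are
illustrative only:
.. code-block:: none
PUT /api/cinder/volumes/<volume-id>
{"name": "vol-1", "query": "+openstack", "capacity": 1073741824}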
Extended management
-------------------
OpenStack users may utilize Blockbridge interfaces to manage
replication, auditing, statistics, and performance information on a
per-project and per-volume basis. In addition, they can manage low-level
data security functions including verification of data authenticity and
encryption key delegation. Native integration with the Identity Service
allows tenants to use a single set of credentials. Integration with
Block Storage and Compute services provides dynamic metadata mapping
when using Blockbridge management APIs and tools.
Attribute-based provisioning
----------------------------
Blockbridge organizes resources using descriptive identifiers called
*attributes*. Attributes are assigned by administrators of the
infrastructure. They are used to describe the characteristics of storage
in an application-friendly way. Applications construct queries that
describe storage provisioning constraints and the Blockbridge storage
stack assembles the resources as described.
Any given instance of a Blockbridge volume driver specifies a *query*
for resources. For example, a query could specify
``'+ssd +10.0.0.0 +6nines -production iops.reserve=1000
capacity.reserve=30%'``. This query is satisfied by selecting SSD
resources, accessible on the 10.0.0.0 network, with high resiliency, for
non-production workloads, with guaranteed IOPS of 1000 and a storage
reservation for 30% of the volume capacity specified at create time.
Queries and parameters are completely administrator defined: they
reflect the layout, resource, and organizational goals of a specific
deployment.
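For illustration only (the pool name ``Performance`` is hypothetical), a
query such as the one above could be exposed as a pool through the
``blockbridge_pools`` option described later in this guide:
.. code-block:: ini
blockbridge_pools = Performance: +ssd +10.0.0.0 +6nines -production iops.reserve=1000 capacity.reserve=30%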
Supported operations
~~~~~~~~~~~~~~~~~~~~
- Create, delete, clone, attach, and detach volumes
- Create and delete volume snapshots
- Create a volume from a snapshot
- Copy an image to a volume
- Copy a volume to an image
- Extend a volume
- Get volume statistics
Supported protocols
~~~~~~~~~~~~~~~~~~~
Blockbridge provides iSCSI access to storage. A unique iSCSI data fabric
is programmatically assembled when a volume is attached to an instance.
A fabric is disassembled when a volume is detached from an instance.
Each volume is an isolated SCSI device that supports persistent
reservations.
Configuration steps
~~~~~~~~~~~~~~~~~~~
.. _cg_create_an_authentication_token:
Create an authentication token
------------------------------
Whenever possible, avoid using password-based authentication. Even if
you have created a role-restricted administrative user via Blockbridge,
token-based authentication is preferred. You can generate persistent
authentication tokens using the Blockbridge command-line tool as
follows:
.. code-block:: console
$ bb -H bb-mn authorization create --notes "OpenStack" --restrict none
Authenticating to https://bb-mn/api
Enter user or access token: system
Password for system:
Authenticated; token expires in 3599 seconds.
== Authorization: ATH4762894C40626410
notes OpenStack
serial ATH4762894C40626410
account system (ACT0762594C40626440)
user system (USR1B62094C40626440)
enabled yes
created at 2015-10-24 22:08:48 +0000
access type online
token suffix xaKUy3gw
restrict none
== Access Token
access token 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw
*** Remember to record your access token!
Create volume type
------------------
Before configuring and enabling the Blockbridge volume driver, register
an OpenStack volume type and associate it with a
``volume_backend_name``. In this example, a volume type, 'Production',
is associated with the ``volume_backend_name`` 'blockbridge\_prod':
.. code-block:: console
$ openstack volume type create Production
$ openstack volume type set --property volume_backend_name=blockbridge_prod Production
Specify volume driver
---------------------
Configure the Blockbridge volume driver in ``/etc/cinder/cinder.conf``.
Your ``volume_backend_name`` must match the value specified in the
:command:`openstack volume type set` command in the previous step.
.. code-block:: ini
volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver
volume_backend_name = blockbridge_prod
Specify API endpoint and authentication
---------------------------------------
Configure the API endpoint and authentication. The following example
uses an authentication token. You must create your own as described in
:ref:`cg_create_an_authentication_token`.
.. code-block:: ini
blockbridge_api_host = [ip or dns of management cluster]
blockbridge_auth_token = 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw
Specify resource query
----------------------
By default, a single pool is configured (implied) with a default
resource query of ``'+openstack'``. Within Blockbridge, datastore
resources that advertise the 'openstack' attribute will be selected to
fulfill OpenStack provisioning requests. If you prefer a more specific
query, define a custom pool configuration.
.. code-block:: ini
blockbridge_pools = Production: +production +qos iops.reserve=5000
Pools support storage systems that offer multiple classes of service.
You may wish to configure multiple pools to implement more sophisticated
scheduling capabilities.
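As a sketch of such a configuration, two pools with different service
levels could be declared in a single ``blockbridge_pools`` option; the
pool names and queries below are illustrative, and entries are written as
comma-separated ``name: query`` pairs:
.. code-block:: ini
blockbridge_pools = Production: +production +qos iops.reserve=5000, Development: +development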
Configuration options
~~~~~~~~~~~~~~~~~~~~~
.. include:: ../../tables/cinder-blockbridge.inc
.. _cg_configuration_example:
Configuration example
~~~~~~~~~~~~~~~~~~~~~
``cinder.conf`` example file
.. code-block:: ini
[DEFAULT]
enabled_backends = bb_devel bb_prod
[bb_prod]
volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver
volume_backend_name = blockbridge_prod
blockbridge_api_host = [ip or dns of management cluster]
blockbridge_auth_token = 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw
blockbridge_pools = Production: +production +qos iops.reserve=5000
[bb_devel]
volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver
volume_backend_name = blockbridge_devel
blockbridge_api_host = [ip or dns of management cluster]
blockbridge_auth_token = 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw
blockbridge_pools = Development: +development
Multiple volume types
~~~~~~~~~~~~~~~~~~~~~
Volume *types* are exposed to tenants; *pools* are not. To offer
multiple classes of storage to OpenStack tenants, you should define
multiple volume types. Simply repeat the process above for each desired
type. Be sure to specify a unique ``volume_backend_name`` and pool
configuration for each type. The
:ref:`cinder.conf <cg_configuration_example>` example included with
this documentation illustrates configuration of multiple types.
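For example, a second type for the ``bb_devel`` backend in that
configuration example could be registered as follows; the type name
``Development`` is illustrative:
.. code-block:: console
$ openstack volume type create Development
$ openstack volume type set --property volume_backend_name=blockbridge_devel Development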
Testing resources
~~~~~~~~~~~~~~~~~
Blockbridge is freely available for testing purposes and deploys in
seconds as a Docker container. This is the same container used to run
continuous integration for OpenStack. For more information visit
`www.blockbridge.io <http://www.blockbridge.io>`__.

@@ -13,7 +13,6 @@ Volume drivers
drivers/nfs-volume-driver.rst
drivers/sheepdog-driver.rst
drivers/smbfs-volume-driver.rst
drivers/blockbridge-eps-driver.rst
drivers/cloudbyte-driver.rst
drivers/coprhd-driver.rst
drivers/datera-volume-driver.rst

@@ -1,36 +0,0 @@
..
Warning: Do not edit this file. It is automatically generated from the
software project's code and your changes will be overwritten.
The tool to generate this file lives in openstack-doc-tools repository.
Please make any changes needed in the code, then run the
autogenerate-config-doc tool from the openstack-doc-tools repository, or
ask for help on the documentation mailing list, IRC channel or meeting.
.. _cinder-blockbridge:
.. list-table:: Description of BlockBridge EPS volume driver configuration options
:header-rows: 1
:class: config-ref-table
* - Configuration option = Default value
- Description
* - **[DEFAULT]**
-
* - ``blockbridge_api_host`` = ``None``
- (String) IP address/hostname of Blockbridge API.
* - ``blockbridge_api_port`` = ``None``
- (Integer) Override HTTPS port to connect to Blockbridge API server.
* - ``blockbridge_auth_password`` = ``None``
- (String) Blockbridge API password (for auth scheme 'password')
* - ``blockbridge_auth_scheme`` = ``token``
- (String) Blockbridge API authentication scheme (token or password)
* - ``blockbridge_auth_token`` = ``None``
- (String) Blockbridge API token (for auth scheme 'token')
* - ``blockbridge_auth_user`` = ``None``
- (String) Blockbridge API user (for auth scheme 'password')
* - ``blockbridge_default_pool`` = ``None``
- (String) Default pool name if unspecified.
* - ``blockbridge_pools`` = ``{'OpenStack': '+openstack'}``
- (Dict) Defines the set of exposed pools and their associated backend query strings

@@ -5,5 +5,6 @@ upgrade:
now been removed:
* Block device driver
* Blockbridge
* Coho