Reintroduce the StorPool driver

Change-Id: I5188e66d9a9204fc9fa15785d0ef3cf10e57ffb5
Implements: blueprint storpool-block-driver-reintroduce
DocImpact
Peter Penchev 2015-08-11 01:34:58 +03:00
parent 016431808c
commit b5832afb3a
7 changed files with 1097 additions and 0 deletions


@@ -1211,6 +1211,12 @@ class XtremIOSnapshotsLimitExceeded(VolumeDriverException):
    message = _("Exceeded the limit of snapshots per volume")


# StorPool driver
class StorPoolConfigurationInvalid(CinderException):
    message = _("Invalid parameter %(param)s in the %(section)s section "
                "of the /etc/storpool.conf file: %(error)s")


# DOTHILL drivers
class DotHillInvalidBackend(VolumeDriverException):
    message = _("Backend doesn't exist (%(backend)s)")


@@ -155,6 +155,7 @@ from cinder.volume.drivers.san.hp import hpmsa_common as \
from cinder.volume.drivers.san import san as cinder_volume_drivers_san_san
from cinder.volume.drivers import sheepdog as cinder_volume_drivers_sheepdog
from cinder.volume.drivers import solidfire as cinder_volume_drivers_solidfire
from cinder.volume.drivers import storpool as cinder_volume_drivers_storpool
from cinder.volume.drivers.synology import synology_common as \
    cinder_volume_drivers_synology_synologycommon
from cinder.volume.drivers import tintri as cinder_volume_drivers_tintri

@@ -257,6 +258,7 @@ def list_opts():
                instorage_mcs_opts,
                cinder_volume_drivers_inspur_instorage_instorageiscsi.
                instorage_mcs_iscsi_opts,
                cinder_volume_drivers_storpool.storpool_opts,
                cinder_volume_manager.volume_manager_opts,
                cinder_wsgi_eventletserver.socket_opts,
            )),


@@ -0,0 +1,510 @@
# Copyright 2014 - 2017 StorPool
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re
import sys

import ddt
import mock
from oslo_utils import units


fakeStorPool = mock.Mock()
fakeStorPool.spopenstack = mock.Mock()
fakeStorPool.spapi = mock.Mock()
fakeStorPool.spconfig = mock.Mock()
fakeStorPool.sptypes = mock.Mock()
sys.modules['storpool'] = fakeStorPool


from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers import storpool as driver


volume_types = {
    1: {},
    2: {'storpool_template': 'ssd'},
    3: {'storpool_template': 'hdd'}
}
volumes = {}
snapshots = {}


def MockExtraSpecs(vtype):
    return volume_types[vtype]


def mock_volume_types(f):
    def _types_inner_inner1(inst, *args, **kwargs):
        @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs',
                    new=MockExtraSpecs)
        def _types_inner_inner2():
            return f(inst, *args, **kwargs)

        return _types_inner_inner2()

    return _types_inner_inner1


def volumeName(vid):
    return 'os--volume--{id}'.format(id=vid)


def snapshotName(vtype, vid):
    return 'os--snap--{t}--{id}'.format(t=vtype, id=vid)


class MockDisk(object):
    def __init__(self, diskId):
        self.id = diskId
        self.generationLeft = -1
        self.agCount = 14
        self.agFree = 12
        self.agAllocated = 1


class MockVolume(object):
    def __init__(self, v):
        self.name = v['name']


class MockTemplate(object):
    def __init__(self, name):
        self.name = name


class MockApiError(Exception):
    def __init__(self, msg):
        super(MockApiError, self).__init__(msg)


class MockAPI(object):
    def __init__(self):
        self._disks = {diskId: MockDisk(diskId) for diskId in (1, 2, 3, 4)}
        self._disks[3].generationLeft = 42

        self._templates = [MockTemplate(name) for name in ('ssd', 'hdd')]

    def setlog(self, log):
        self._log = log

    def disksList(self):
        return self._disks

    def snapshotCreate(self, vname, snap):
        snapshots[snap['name']] = dict(volumes[vname])

    def snapshotDelete(self, name):
        del snapshots[name]

    def volumeCreate(self, v):
        if v['name'] in volumes:
            raise MockApiError('volume already exists')
        volumes[v['name']] = v

    def volumeDelete(self, name):
        del volumes[name]

    def volumesList(self):
        return [MockVolume(v[1]) for v in volumes.items()]

    def volumeTemplatesList(self):
        return self._templates

    def volumesReassign(self, json):
        pass

    def volumeUpdate(self, name, data):
        if 'size' in data:
            volumes[name]['size'] = data['size']

        if 'rename' in data and data['rename'] != name:
            volumes[data['rename']] = volumes[name]
            del volumes[name]


class MockAttachDB(object):
    def __init__(self, log):
        self._api = MockAPI()

    def api(self):
        return self._api

    def volumeName(self, vid):
        return volumeName(vid)

    def snapshotName(self, vtype, vid):
        return snapshotName(vtype, vid)


def MockVolumeUpdateDesc(size):
    return {'size': size}


def MockSPConfig(section='s01'):
    res = {}
    m = re.match('^s0*([A-Za-z0-9]+)$', section)
    if m:
        res['SP_OURID'] = m.group(1)
    return res


fakeStorPool.spapi.ApiError = MockApiError
fakeStorPool.spconfig.SPConfig = MockSPConfig
fakeStorPool.spopenstack.AttachDB = MockAttachDB
fakeStorPool.sptypes.VolumeUpdateDesc = MockVolumeUpdateDesc


@ddt.ddt
class StorPoolTestCase(test.TestCase):

    def setUp(self):
        super(StorPoolTestCase, self).setUp()

        self.cfg = mock.Mock(spec=conf.Configuration)
        self.cfg.volume_backend_name = 'storpool_test'
        self.cfg.storpool_template = None
        self.cfg.storpool_replication = 3

        mock_exec = mock.Mock()
        mock_exec.return_value = ('', '')

        self.driver = driver.StorPoolDriver(execute=mock_exec,
                                            configuration=self.cfg)
        self.driver.check_for_setup_error()

    @ddt.data(
        (5, TypeError),
        ({'no-host': None}, KeyError),
        ({'host': 'sbad'}, exception.StorPoolConfigurationInvalid),
        ({'host': 's01'}, None),
        ({'host': 'none'}, None),
    )
    @ddt.unpack
    def test_validate_connector(self, conn, exc):
        if exc is None:
            self.assertTrue(self.driver.validate_connector(conn))
        else:
            self.assertRaises(exc,
                              self.driver.validate_connector,
                              conn)

    @ddt.data(
        (5, TypeError),
        ({'no-host': None}, KeyError),
        ({'host': 'sbad'}, exception.StorPoolConfigurationInvalid),
    )
    @ddt.unpack
    def test_initialize_connection_bad(self, conn, exc):
        self.assertRaises(exc,
                          self.driver.initialize_connection,
                          None, conn)

    @ddt.data(
        (1, '42', 's01'),
        (2, '616', 's02'),
        (65, '1610', 'none'),
    )
    @ddt.unpack
    def test_initialize_connection_good(self, cid, hid, name):
        c = self.driver.initialize_connection({'id': hid}, {'host': name})
        self.assertEqual('storpool', c['driver_volume_type'])
        self.assertDictEqual({'client_id': cid, 'volume': hid}, c['data'])

    def test_noop_functions(self):
        self.driver.terminate_connection(None, None)
        self.driver.create_export(None, None, {})
        self.driver.remove_export(None, None)

    def test_stats(self):
        stats = self.driver.get_volume_stats(refresh=True)
        self.assertEqual('StorPool', stats['vendor_name'])
        self.assertEqual('storpool', stats['storage_protocol'])
        self.assertListEqual(['default', 'template_hdd', 'template_ssd'],
                             sorted([p['pool_name'] for p in stats['pools']]))
        r = re.compile('^template_([A-Za-z0-9_]+)$')
        for pool in stats['pools']:
            self.assertEqual(21, pool['total_capacity_gb'])
            self.assertEqual(5, int(pool['free_capacity_gb']))

            if pool['pool_name'] != 'default':
                m = r.match(pool['pool_name'])
                self.assertIsNotNone(m)
                self.assertIsNotNone(m.group(1))
                self.assertEqual(m.group(1), pool['storpool_template'])

    def assertVolumeNames(self, names):
        self.assertListEqual(sorted([volumeName(n) for n in names]),
                             sorted(volumes.keys()))

    @mock_volume_types
    def test_create_delete_volume(self):
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

        self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertListEqual([volumeName('1')], list(volumes.keys()))
        self.assertVolumeNames(('1',))
        v = volumes[volumeName('1')]
        self.assertEqual(1 * units.Gi, v['size'])
        self.assertNotIn('template', v.keys())
        self.assertEqual(3, v['replication'])

        caught = False
        try:
            self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 0,
                                       'volume_type': None})
        except exception.VolumeBackendAPIException:
            caught = True
        self.assertTrue(caught)

        self.driver.delete_volume({'id': '1'})
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)

        self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 2,
                                   'volume_type': None})
        self.assertVolumeNames(('1',))
        v = volumes[volumeName('1')]
        self.assertEqual(2 * units.Gi, v['size'])
        self.assertNotIn('template', v.keys())
        self.assertEqual(3, v['replication'])

        self.driver.create_volume({'id': '2', 'name': 'v2', 'size': 3,
                                   'volume_type': {'id': 1}})
        self.assertVolumeNames(('1', '2'))
        v = volumes[volumeName('2')]
        self.assertEqual(3 * units.Gi, v['size'])
        self.assertNotIn('template', v.keys())
        self.assertEqual(3, v['replication'])

        self.driver.create_volume({'id': '3', 'name': 'v2', 'size': 4,
                                   'volume_type': {'id': 2}})
        self.assertVolumeNames(('1', '2', '3'))
        v = volumes[volumeName('3')]
        self.assertEqual(4 * units.Gi, v['size'])
        self.assertEqual('ssd', v['template'])
        self.assertNotIn('replication', v.keys())

        self.driver.create_volume({'id': '4', 'name': 'v2', 'size': 5,
                                   'volume_type': {'id': 3}})
        self.assertVolumeNames(('1', '2', '3', '4'))
        v = volumes[volumeName('4')]
        self.assertEqual(5 * units.Gi, v['size'])
        self.assertEqual('hdd', v['template'])
        self.assertNotIn('replication', v.keys())

        # Make sure the dictionary is not corrupted somehow...
        v = volumes[volumeName('1')]
        self.assertEqual(2 * units.Gi, v['size'])
        self.assertNotIn('template', v.keys())
        self.assertEqual(3, v['replication'])

        for vid in ('1', '2', '3', '4'):
            self.driver.delete_volume({'id': vid})
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

    @mock_volume_types
    def test_update_migrated_volume(self):
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

        # Create two volumes
        self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.driver.create_volume({'id': '2', 'name': 'v2', 'size': 1,
                                   'volume_type': None})
        self.assertListEqual([volumeName('1'), volumeName('2')],
                             list(volumes.keys()))
        self.assertVolumeNames(('1', '2',))

        # Failure: the "migrated" volume does not even exist
        res = self.driver.update_migrated_volume(None, {'id': '1'},
                                                 {'id': '3', '_name_id': '1'},
                                                 'available')
        self.assertDictEqual({'_name_id': '1'}, res)

        # Failure: a volume with the original volume's name already exists
        res = self.driver.update_migrated_volume(None, {'id': '1'},
                                                 {'id': '2', '_name_id': '1'},
                                                 'available')
        self.assertDictEqual({'_name_id': '1'}, res)

        # Success: rename the migrated volume to match the original
        res = self.driver.update_migrated_volume(None, {'id': '3'},
                                                 {'id': '2', '_name_id': '3'},
                                                 'available')
        self.assertDictEqual({'_name_id': None}, res)
        self.assertListEqual([volumeName('1'), volumeName('3')],
                             list(volumes.keys()))
        self.assertVolumeNames(('1', '3',))

        for vid in ('1', '3'):
            self.driver.delete_volume({'id': vid})
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

    def test_clone_extend_volume(self):
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

        self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('1',))
        self.driver.extend_volume({'id': '1'}, 2)
        self.assertEqual(2 * units.Gi, volumes[volumeName('1')]['size'])

        self.driver.create_cloned_volume({'id': '2', 'name': 'clo', 'size': 3},
                                         {'id': 1})
        self.assertVolumeNames(('1', '2'))
        self.assertDictEqual({}, snapshots)
        # Note: this would not be true in a real environment (the snapshot
        # will have been deleted, the volume would have no parent), but with
        # this fake implementation it helps us make sure that the second
        # volume was created with the proper options.
        self.assertEqual(volumes[volumeName('2')]['parent'],
                         snapshotName('clone', '2'))

        self.driver.delete_volume({'id': 1})
        self.driver.delete_volume({'id': 2})

        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

    @mock_volume_types
    def test_config_replication(self):
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

        save_repl = self.driver.configuration.storpool_replication

        self.driver.configuration.storpool_replication = 3
        stats = self.driver.get_volume_stats(refresh=True)
        pool = stats['pools'][0]
        self.assertEqual(21, pool['total_capacity_gb'])
        self.assertEqual(5, int(pool['free_capacity_gb']))

        self.driver.create_volume({'id': 'cfgrepl1', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('cfgrepl1',))
        v = volumes[volumeName('cfgrepl1')]
        self.assertEqual(3, v['replication'])
        self.assertNotIn('template', v)
        self.driver.delete_volume({'id': 'cfgrepl1'})

        self.driver.configuration.storpool_replication = 2
        stats = self.driver.get_volume_stats(refresh=True)
        pool = stats['pools'][0]
        self.assertEqual(21, pool['total_capacity_gb'])
        self.assertEqual(8, int(pool['free_capacity_gb']))

        self.driver.create_volume({'id': 'cfgrepl2', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('cfgrepl2',))
        v = volumes[volumeName('cfgrepl2')]
        self.assertEqual(2, v['replication'])
        self.assertNotIn('template', v)
        self.driver.delete_volume({'id': 'cfgrepl2'})

        self.driver.create_volume({'id': 'cfgrepl3', 'name': 'v1', 'size': 1,
                                   'volume_type': {'id': 2}})
        self.assertVolumeNames(('cfgrepl3',))
        v = volumes[volumeName('cfgrepl3')]
        self.assertNotIn('replication', v)
        self.assertEqual('ssd', v['template'])
        self.driver.delete_volume({'id': 'cfgrepl3'})

        self.driver.configuration.storpool_replication = save_repl

        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

    @mock_volume_types
    def test_config_template(self):
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

        save_template = self.driver.configuration.storpool_template

        self.driver.configuration.storpool_template = None

        self.driver.create_volume({'id': 'cfgtempl1', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('cfgtempl1',))
        v = volumes[volumeName('cfgtempl1')]
        self.assertEqual(3, v['replication'])
        self.assertNotIn('template', v)
        self.driver.delete_volume({'id': 'cfgtempl1'})

        self.driver.create_volume({'id': 'cfgtempl2', 'name': 'v1', 'size': 1,
                                   'volume_type': {'id': 2}})
        self.assertVolumeNames(('cfgtempl2',))
        v = volumes[volumeName('cfgtempl2')]
        self.assertNotIn('replication', v)
        self.assertEqual('ssd', v['template'])
        self.driver.delete_volume({'id': 'cfgtempl2'})

        self.driver.configuration.storpool_template = 'hdd'

        self.driver.create_volume({'id': 'cfgtempl3', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('cfgtempl3',))
        v = volumes[volumeName('cfgtempl3')]
        self.assertNotIn('replication', v)
        self.assertEqual('hdd', v['template'])
        self.driver.delete_volume({'id': 'cfgtempl3'})

        self.driver.create_volume({'id': 'cfgtempl4', 'name': 'v1', 'size': 1,
                                   'volume_type': {'id': 2}})
        self.assertVolumeNames(('cfgtempl4',))
        v = volumes[volumeName('cfgtempl4')]
        self.assertNotIn('replication', v)
        self.assertEqual('ssd', v['template'])
        self.driver.delete_volume({'id': 'cfgtempl4'})

        self.driver.configuration.storpool_template = save_template

        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

    @ddt.data(
        # No volume type at all: 'default'
        ('default', None),
        # No storpool_template in the type extra specs: 'default'
        ('default', {'id': 1}),
        # An actual template specified: 'template_*'
        ('template_ssd', {'id': 2}),
        ('template_hdd', {'id': 3}),
    )
    @ddt.unpack
    @mock_volume_types
    def test_get_pool(self, pool, volume_type):
        self.assertEqual(pool,
                         self.driver.get_pool({
                             'volume_type': volume_type
                         }))


@@ -0,0 +1,504 @@
# Copyright (c) 2014 - 2017 StorPool
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""StorPool block device driver"""

from __future__ import absolute_import

import platform

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
import six

from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
from cinder.volume import volume_types


LOG = logging.getLogger(__name__)

storpool = importutils.try_import('storpool')
if storpool:
    from storpool import spapi
    from storpool import spconfig
    from storpool import spopenstack
    from storpool import sptypes


storpool_opts = [
    cfg.StrOpt('storpool_template',
               default=None,
               help='The StorPool template for volumes with no type.'),
    cfg.IntOpt('storpool_replication',
               default=3,
               help='The default StorPool chain replication value. '
                    'Used when creating a volume with no specified type if '
                    'storpool_template is not set. Also used for calculating '
                    'the apparent free space reported in the stats.'),
]

CONF = cfg.CONF
CONF.register_opts(storpool_opts)


@interface.volumedriver
class StorPoolDriver(driver.VolumeDriver):
    """The StorPool block device driver.

    Version history:

    .. code-block:: none

        0.1.0 - Initial driver
        0.2.0 - Bring the driver up to date with Kilo and Liberty:
                - implement volume retyping and migrations
                - use the driver.*VD ABC metaclasses
                - bugfix: fall back to the configured StorPool template
        1.0.0 - Imported into OpenStack Liberty with minor fixes
        1.1.0 - Bring the driver up to date with Liberty and Mitaka:
                - drop the CloneableVD and RetypeVD base classes
                - enable faster volume copying by specifying
                  sparse_volume_copy=true in the stats report
        1.1.1 - Fix the internal _storpool_client_id() method to
                not break on an unknown host name or UUID; thus,
                remove the StorPoolConfigurationMissing exception.
        1.1.2 - Bring the driver up to date with Pike: do not
                translate the error messages
        1.2.0 - Inherit from VolumeDriver, implement get_pool()
        1.2.1 - Implement interface.volumedriver, add CI_WIKI_NAME,
                fix the docstring formatting
        1.2.2 - Reintroduce the driver into OpenStack Queens,
                add ignore_errors to the internal _detach_volume() method
    """

    VERSION = '1.2.2'
    CI_WIKI_NAME = 'StorPool_CI'

    def __init__(self, *args, **kwargs):
        super(StorPoolDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(storpool_opts)
        self._sp_config = None
        self._ourId = None
        self._ourIdInt = None
        self._attach = None

    def _backendException(self, e):
        return exception.VolumeBackendAPIException(data=six.text_type(e))

    def _template_from_volume(self, volume):
        default = self.configuration.storpool_template
        vtype = volume['volume_type']
        if vtype is not None:
            specs = volume_types.get_volume_type_extra_specs(vtype['id'])
            if specs is not None:
                return specs.get('storpool_template', default)
        return default

    def get_pool(self, volume):
        template = self._template_from_volume(volume)
        if template is None:
            return 'default'
        else:
            return 'template_' + template

    def create_volume(self, volume):
        size = int(volume['size']) * units.Gi
        name = self._attach.volumeName(volume['id'])
        template = self._template_from_volume(volume)
        try:
            if template is None:
                self._attach.api().volumeCreate({
                    'name': name,
                    'size': size,
                    'replication': self.configuration.storpool_replication
                })
            else:
                self._attach.api().volumeCreate({
                    'name': name,
                    'size': size,
                    'template': template
                })
        except spapi.ApiError as e:
            raise self._backendException(e)

    def _storpool_client_id(self, connector):
        hostname = connector['host']
        if hostname == self.host or hostname == CONF.host:
            hostname = platform.node()
        try:
            cfg = spconfig.SPConfig(section=hostname)
            return int(cfg['SP_OURID'])
        except KeyError:
            return 65
        except Exception as e:
            raise exception.StorPoolConfigurationInvalid(
                section=hostname, param='SP_OURID', error=e)

    def validate_connector(self, connector):
        return self._storpool_client_id(connector) >= 0

    def initialize_connection(self, volume, connector):
        return {'driver_volume_type': 'storpool',
                'data': {
                    'client_id': self._storpool_client_id(connector),
                    'volume': volume['id'],
                }}

    def terminate_connection(self, volume, connector, **kwargs):
        pass

    def create_snapshot(self, snapshot):
        volname = self._attach.volumeName(snapshot['volume_id'])
        name = self._attach.snapshotName('snap', snapshot['id'])
        try:
            self._attach.api().snapshotCreate(volname, {'name': name})
        except spapi.ApiError as e:
            raise self._backendException(e)

    def create_volume_from_snapshot(self, volume, snapshot):
        size = int(volume['size']) * units.Gi
        volname = self._attach.volumeName(volume['id'])
        name = self._attach.snapshotName('snap', snapshot['id'])
        try:
            self._attach.api().volumeCreate({
                'name': volname,
                'size': size,
                'parent': name
            })
        except spapi.ApiError as e:
            raise self._backendException(e)

    def create_cloned_volume(self, volume, src_vref):
        refname = self._attach.volumeName(src_vref['id'])
        snapname = self._attach.snapshotName('clone', volume['id'])
        try:
            self._attach.api().snapshotCreate(refname, {'name': snapname})
        except spapi.ApiError as e:
            raise self._backendException(e)

        size = int(volume['size']) * units.Gi
        volname = self._attach.volumeName(volume['id'])
        try:
            self._attach.api().volumeCreate({
                'name': volname,
                'size': size,
                'parent': snapname
            })
        except spapi.ApiError as e:
            raise self._backendException(e)
        finally:
            try:
                self._attach.api().snapshotDelete(snapname)
            except spapi.ApiError as e:
                # ARGH!
                LOG.error("Could not delete the temp snapshot %(name)s: "
                          "%(msg)s",
                          {'name': snapname, 'msg': e})

    def create_export(self, context, volume, connector):
        pass

    def remove_export(self, context, volume):
        pass

    def delete_volume(self, volume):
        name = self._attach.volumeName(volume['id'])
        try:
            self._attach.api().volumesReassign(
                json=[{"volume": name, "detach": "all"}])
            self._attach.api().volumeDelete(name)
        except spapi.ApiError as e:
            if e.name == 'objectDoesNotExist':
                pass
            else:
                raise self._backendException(e)

    def delete_snapshot(self, snapshot):
        name = self._attach.snapshotName('snap', snapshot['id'])
        try:
            self._attach.api().volumesReassign(
                json=[{"snapshot": name, "detach": "all"}])
            self._attach.api().snapshotDelete(name)
        except spapi.ApiError as e:
            if e.name == 'objectDoesNotExist':
                pass
            else:
                raise self._backendException(e)

    def check_for_setup_error(self):
        if storpool is None:
            msg = _('storpool libraries not found')
            raise exception.VolumeBackendAPIException(data=msg)

        self._attach = spopenstack.AttachDB(log=LOG)
        try:
            self._attach.api()
        except Exception as e:
            LOG.error("StorPoolDriver API initialization failed: %s", e)
            raise

    def get_volume_stats(self, refresh=False):
        if refresh:
            self._update_volume_stats()

        return self._stats

    def _update_volume_stats(self):
        try:
            dl = self._attach.api().disksList()
            templates = self._attach.api().volumeTemplatesList()
        except spapi.ApiError as e:
            raise self._backendException(e)
        total = 0
        used = 0
        free = 0
        agSize = 512 * units.Mi
        for (id, desc) in dl.items():
            if desc.generationLeft != -1:
                continue
            total += desc.agCount * agSize
            used += desc.agAllocated * agSize
            free += desc.agFree * agSize * 4096 / (4096 + 128)

        # Report the free space as if all new volumes will be created
        # with StorPool replication 3; anything else is rare.
        free /= self.configuration.storpool_replication

        space = {
            'total_capacity_gb': total / units.Gi,
            'free_capacity_gb': free / units.Gi,
            'reserved_percentage': 0,
            'QoS_support': False,
        }

        pools = [dict(space, pool_name='default')]
        pools += [dict(space,
                       pool_name='template_' + t.name,
                       storpool_template=t.name
                       ) for t in templates]

        self._stats = {
            'volume_backend_name': self.configuration.safe_get(
                'volume_backend_name') or 'storpool',
            'vendor_name': 'StorPool',
            'driver_version': self.VERSION,
            'storage_protocol': 'storpool',
            'sparse_copy_volume': True,
            'pools': pools
        }

    def _attach_volume(self, context, volume, properties, remote=False):
        if remote:
            return super(StorPoolDriver, self)._attach_volume(
                context, volume, properties, remote=remote)
        req_id = context.request_id
        req = self._attach.get().get(req_id, None)
        if req is None:
            req = {
                'volume': self._attach.volumeName(volume['id']),
                'type': 'cinder-attach',
                'id': context.request_id,
                'rights': 2,
                'volsnap': False,
                'remove_on_detach': True
            }
            self._attach.add(req_id, req)
        name = req['volume']
        self._attach.sync(req_id, None)
        return {'device': {'path': '/dev/storpool/' + name,
                           'storpool_attach_req': req_id}}, volume

    def _detach_volume(self, context, attach_info, volume, properties,
                       force=False, remote=False, ignore_errors=False):
        if remote:
            return super(StorPoolDriver, self)._detach_volume(
                context, attach_info, volume, properties,
                force=force, remote=remote, ignore_errors=ignore_errors)
        try:
            req_id = attach_info.get('device', {}).get(
                'storpool_attach_req', context.request_id)
            req = self._attach.get()[req_id]
            name = req['volume']
            self._attach.sync(req_id, name)
            if req.get('remove_on_detach', False):
                self._attach.remove(req_id)
        except BaseException:
            if not ignore_errors:
                raise

    def backup_volume(self, context, backup, backup_service):
        volume = self.db.volume_get(context, backup['volume_id'])
        req_id = context.request_id
        volname = self._attach.volumeName(volume['id'])
        name = self._attach.volsnapName(volume['id'], req_id)
        try:
            self._attach.api().snapshotCreate(volname, {'name': name})
        except spapi.ApiError as e:
            raise self._backendException(e)
        self._attach.add(req_id, {
            'volume': name,
            'type': 'backup',
            'id': req_id,
            'rights': 1,
            'volsnap': True
        })
        try:
            return super(StorPoolDriver, self).backup_volume(
                context, backup, backup_service)
        finally:
            self._attach.remove(req_id)
            try:
                self._attach.api().snapshotDelete(name)
            except spapi.ApiError as e:
                LOG.error(
                    'Could not remove the temp snapshot %(name)s for '
                    '%(vol)s: %(err)s',
                    {'name': name, 'vol': volname, 'err': e})

    def copy_volume_to_image(self, context, volume, image_service,
                             image_meta):
        req_id = context.request_id
        volname = self._attach.volumeName(volume['id'])
        name = self._attach.volsnapName(volume['id'], req_id)
        try:
            self._attach.api().snapshotCreate(volname, {'name': name})
        except spapi.ApiError as e:
            raise self._backendException(e)
        self._attach.add(req_id, {
            'volume': name,
            'type': 'copy-from',
            'id': req_id,
            'rights': 1,
            'volsnap': True
        })
        try:
            return super(StorPoolDriver, self).copy_volume_to_image(
                context, volume, image_service, image_meta)
        finally:
            self._attach.remove(req_id)
            try:
                self._attach.api().snapshotDelete(name)
            except spapi.ApiError as e:
                LOG.error(
                    'Could not remove the temp snapshot %(name)s for '
                    '%(vol)s: %(err)s',
                    {'name': name, 'vol': volname, 'err': e})

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        req_id = context.request_id
        name = self._attach.volumeName(volume['id'])
        self._attach.add(req_id, {
            'volume': name,
            'type': 'copy-to',
            'id': req_id,
            'rights': 2
        })
        try:
            return super(StorPoolDriver, self).copy_image_to_volume(
                context, volume, image_service, image_id)
        finally:
            self._attach.remove(req_id)

    def extend_volume(self, volume, new_size):
        size = int(new_size) * units.Gi
        name = self._attach.volumeName(volume['id'])
        try:
            upd = sptypes.VolumeUpdateDesc(size=size)
            self._attach.api().volumeUpdate(name, upd)
        except spapi.ApiError as e:
            raise self._backendException(e)

    def ensure_export(self, context, volume):
        # Already handled by Nova's AttachDB, we hope.
        # Maybe it should move here, but oh well.
        pass

    def retype(self, context, volume, new_type, diff, host):
        update = {}

        if diff['encryption']:
            LOG.error('Retype of encryption type not supported.')
            return False

        templ = self.configuration.storpool_template
        repl = self.configuration.storpool_replication
        if diff['extra_specs']:
            for (k, v) in diff['extra_specs'].items():
                if k == 'volume_backend_name':
                    if v[0] != v[1]:
                        # Retype of a volume backend not supported yet,
                        # the volume needs to be migrated.
                        return False
                elif k == 'storpool_template':
                    if v[0] != v[1]:
                        if v[1] is not None:
                            update['template'] = v[1]
                        elif templ is not None:
                            update['template'] = templ
                        else:
                            update['replication'] = repl
                elif v[0] != v[1]:
                    LOG.error('Retype of extra_specs "%s" not '
                              'supported yet.', k)
                    return False

        if update:
            name = self._attach.volumeName(volume['id'])
            try:
                upd = sptypes.VolumeUpdateDesc(**update)
                self._attach.api().volumeUpdate(name, upd)
            except spapi.ApiError as e:
                raise self._backendException(e)

        return True

    def update_migrated_volume(self, context, volume, new_volume,
                               original_volume_status):
        orig_id = volume['id']
        orig_name = self._attach.volumeName(orig_id)
        temp_id = new_volume['id']
        temp_name = self._attach.volumeName(temp_id)
        vols = {v.name: True for v in self._attach.api().volumesList()}
        if temp_name not in vols:
            LOG.error('StorPool update_migrated_volume(): it seems '
                      'that the StorPool volume "%(tid)s" was not '
                      'created as part of the migration from '
                      '"%(oid)s".', {'tid': temp_id, 'oid': orig_id})
            return {'_name_id': new_volume['_name_id'] or new_volume['id']}
        elif orig_name in vols:
            LOG.error('StorPool update_migrated_volume(): both '
                      'the original volume "%(oid)s" and the migrated '
                      'StorPool volume "%(tid)s" seem to exist on '
                      'the StorPool cluster.',
                      {'oid': orig_id, 'tid': temp_id})
            return {'_name_id': new_volume['_name_id'] or new_volume['id']}
        else:
            try:
                self._attach.api().volumeUpdate(temp_name,
                                                {'rename': orig_name})
                return {'_name_id': None}
            except spapi.ApiError as e:
                LOG.error('StorPool update_migrated_volume(): '
                          'could not rename %(tname)s to %(oname)s: '
                          '%(err)s',
                          {'tname': temp_name, 'oname': orig_name, 'err': e})
                return {'_name_id': new_volume['_name_id'] or new_volume['id']}


@@ -0,0 +1,71 @@
======================
StorPool volume driver
======================

StorPool is distributed data storage software running on standard x86
servers. StorPool aggregates the performance and capacity of all drives
into a shared pool of storage distributed among the servers. Within
this storage pool the user creates thin-provisioned volumes that are
exposed to the clients as block devices. StorPool consists of two parts
wrapped in one package - a server and a client. The StorPool server
allows a hypervisor to act as a storage node, while the StorPool client
allows a hypervisor node to access the storage pool and act as a compute
node. In OpenStack terms the StorPool solution allows each hypervisor
node to be both a storage and a compute node simultaneously.

Prerequisites
-------------

* The controller and all the compute nodes must have access to the StorPool
  API service.

* All nodes where StorPool-backed volumes will be attached must have access to
  the StorPool data network and run the ``storpool_block`` service.

* If StorPool-backed Cinder volumes need to be created directly from Glance
  images, then the node running the ``cinder-volume`` service must also have
  access to the StorPool data network and run the ``storpool_block`` service.

* All nodes that need to access the StorPool API (the compute nodes and
  the node running the ``cinder-volume`` service) must have the following
  packages installed:

  * storpool-config (part of the StorPool installation)
  * the storpool Python bindings package
  * the storpool.spopenstack Python helper package
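
The exact installation procedure depends on the StorPool version and the
distribution; assuming the two Python packages are published under their
usual PyPI names (``storpool`` and ``storpool.spopenstack`` - an assumption,
check with the StorPool support team for the recommended method), installing
them could be as simple as:

.. code-block:: console

   # pip install storpool storpool.spopenstack
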
Configuring the StorPool volume driver
--------------------------------------

A valid ``/etc/storpool.conf`` file is required; please contact the StorPool
support team for assistance.
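
For reference, the only setting that the Cinder driver itself looks up in
that file is the ``SP_OURID`` value in the section named after each host
(see the driver's internal ``_storpool_client_id()`` method); a purely
illustrative sketch of that part of the file might be:

.. code-block:: ini

   # Illustrative fragment only - the real /etc/storpool.conf is
   # generated and maintained by the StorPool installation.
   [s01]
   SP_OURID=1
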
The StorPool Cinder volume driver has two configuration options that may
be specified both in the global configuration (e.g. in a ``cinder.conf``
volume backend definition) and per volume type:

- ``storpool_template``: specifies the StorPool template (replication,
  placement, etc. specifications defined once and used for multiple
  volumes and snapshots) to use for the Cinder volume type or, if
  specified globally, as a default value for Cinder volumes. There is
  no default value for this option, see ``storpool_replication``.

- ``storpool_replication``: if ``storpool_template`` is not set,
  the volume will be created with the specified chain replication and
  with the default placement constraints for the StorPool cluster.
  The default value for the chain replication is 3.
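
As an illustrative sketch, a ``cinder.conf`` backend definition using these
options might look like the following (the section and backend names are
arbitrary placeholders):

.. code-block:: ini

   [DEFAULT]
   enabled_backends = storpool-backend

   [storpool-backend]
   volume_driver = cinder.volume.drivers.storpool.StorPoolDriver
   volume_backend_name = storpool-backend
   # Either pin typeless volumes to a StorPool template...
   #storpool_template = ssd
   # ...or let them use plain chain replication (the default is 3).
   storpool_replication = 3
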
Using the StorPool volume driver
--------------------------------

The most common use for the Cinder StorPool volume driver is probably
attaching volumes to Nova instances. For this to work, the ``nova-compute``
service and the ``os-brick`` library must recognize the "storpool" volume
attachment driver; please contact the StorPool support team for more
information.

Currently there is no StorPool driver for Nova ephemeral volumes; to run
Nova instances with a StorPool-backed volume as a root device, create
a Cinder volume with the root filesystem image, make a snapshot, and let
Nova create the instance with a root device as a new volume created from
that snapshot.
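
For example, with the ``openstack`` command-line client the volume type
definition and the boot-from-volume workflow described above might look
roughly like this (the type, image, flavor, and size values are
placeholders; the ``storpool_template`` extra spec corresponds to the
driver's volume type handling):

.. code-block:: console

   $ # Define a volume type backed by a specific StorPool template.
   $ openstack volume type create storpool-ssd
   $ openstack volume type set --property storpool_template=ssd storpool-ssd

   $ # Image -> volume -> snapshot -> new volume -> instance.
   $ openstack volume create --image cirros --size 1 --type storpool-ssd root-vol
   $ openstack volume snapshot create --volume root-vol root-snap
   $ openstack volume create --snapshot root-snap --size 1 boot-vol
   $ openstack server create --flavor m1.small --volume boot-vol vm01
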


@@ -62,6 +62,7 @@ Driver Configuration Reference
   drivers/pure-storage-driver
   drivers/quobyte-driver
   drivers/solidfire-volume-driver
   drivers/storpool-volume-driver
   drivers/synology-dsm-driver
   drivers/tintri-volume-driver
   drivers/vzstorage-driver


@@ -0,0 +1,3 @@
---
features:
  - The StorPool backend driver was added.