Re-add the StorPool distributed storage driver

Re-add the driver as it was at the time of its removal, then make the
following six changes (bug fixes and updates):

1. Move the test case to cinder/tests/unit/

2. Pass the H402 docstring check

Add a period at the end of a single-line docstring.
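
For instance, the driver's single-line module docstring in the diff below
now ends with a period:

    """StorPool block device driver."""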

3. Fall back to the configured template

If the volume type does not specify a template, properly fall back to
the default template from the configuration instead of creating the
volume with no template at all; see the snippet below.
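
The resulting lookup order in create_volume() (taken from the driver diff
below):

    template = None
    if volume['volume_type'] is not None:
        template = self._template_from_volume_type(volume['volume_type'])
    if template is None:
        template = self.configuration.storpool_template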

4. Return volume object in _attach_volume()

Catch up with rev. bbf1b49 of volume/driver.py.
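
_attach_volume() now returns the attachment information together with the
volume object, as in the driver diff below:

    return {'device': {'path': '/dev/storpool/{v}'.format(v=name),
                       'storpool_attach_req': req_id}}, volume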

5. Use the ABC metaclasses

Declare the existing StorPool volume driver functionality by
deriving from the appropriate driver.*VD classes.
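
The resulting class declaration (see the driver diff below):

    class StorPoolDriver(driver.TransferVD, driver.ExtendVD, driver.CloneableVD,
                         driver.SnapshotVD, driver.RetypeVD, driver.BaseVD):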

6. Implement volume retyping and migrations

initialize_connection(): store the StorPool client ID in the connection
data to be used by the "StorPool" Brick connector.
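
The connection data now carries the client ID alongside the volume ID (see
initialize_connection() in the driver diff below):

    return {'driver_volume_type': 'storpool',
            'data': {
                'client_id': self._storpool_client_id(connector),
                'volume': volume['id'],
            }}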

validate_connector(): actually check the connector data for a valid
hostname defined in the StorPool configuration.
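
The check itself is a one-liner on top of _storpool_client_id():

    return self._storpool_client_id(connector) >= 0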

_storpool_client_id(): new method to get the numeric ID of a host in
the StorPool cluster from the /etc/storpool.conf file.
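
The core of the lookup, converting the failure modes into the new Cinder
exceptions (full method in the driver diff below):

    try:
        cfg = spconfig.SPConfig(section=hostname)
        return int(cfg['SP_OURID'])
    except KeyError:
        raise exception.StorPoolConfigurationMissing(
            section=hostname, param='SP_OURID')
    except Exception as e:
        raise exception.StorPoolConfigurationInvalid(
            section=hostname, param='SP_OURID', error=e)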

_attach_volume(), _detach_volume(): if the "remote" flag is set, pass
the request on to the default Cinder driver implementation; it knows
how to make remote volumes accessible.
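
The pass-through at the top of both methods (shown here for
_attach_volume()):

    if remote:
        return super(StorPoolDriver, self)._attach_volume(
            context, volume, properties, remote=remote)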

_attach_volume(), _detach_volume(): support the case when these methods
are invoked from a method that is not overridden in the StorPool Cinder
driver: create a storpool.spopenstack.AttachDB attachment record on the
fly and destroy it upon detaching.
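
If no attachment record exists for the request yet, _attach_volume()
creates one (see the driver diff below):

    req = {
        'volume': self._attach.volumeName(volume['id']),
        'type': 'cinder-attach',
        'id': context.request_id,
        'rights': 2,
        'volsnap': False,
        'remove_on_detach': True
    }
    self._attach.add(req_id, req)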

ensure_export(): add a stub method; the actual work is handled by
the Nova volume attachment driver.

retype(): new method; handle a StorPool-to-StorPool retyping where
at most the volume's StorPool template or replication is changed.
For anything else, return False to indicate an unsupported request so
that a full volume migration is triggered.
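
The per-key handling of the extra_specs diff, falling back to the
configured template or replication when the new type does not specify one
(excerpt from retype() in the driver diff below):

    elif k == 'storpool_template':
        if v[0] != v[1]:
            if v[1] is not None:
                update['template'] = v[1]
            elif templ is not None:
                update['template'] = templ
            else:
                update['replication'] = repl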

update_migrated_volume(): rename the StorPool volume to correspond to
the real Cinder volume's name.
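
After the sanity checks, the rename itself is a single API call (see the
driver diff below):

    self._attach.api().volumeUpdate(temp_name, {'rename': orig_name})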

Add the StorPoolConfigurationMissing and StorPoolConfigurationInvalid
exceptions.

Flesh out the validate_connector() and initialize_connection() unit tests.

Implements: blueprint storpool-block-driver
Implements: blueprint abc-driver-update
Implements: blueprint volume-retype
Change-Id: I5c29cb5f679b08b8db664c9f20cf24cbcd7c6c60
Peter Penchev 2015-05-28 16:32:15 +03:00
parent 918b3c137c
commit 7ac5d50a5c
3 changed files with 898 additions and 0 deletions

cinder/exception.py

@@ -901,3 +901,14 @@ class WebDAVClientError(CinderException):
# XtremIO Drivers
class XtremIOAlreadyMappedError(CinderException):
    message = _("Volume to Initiator Group mapping already exists")


# StorPool driver
class StorPoolConfigurationMissing(CinderException):
    message = _("Missing parameter %(param)s in the %(section)s section "
                "of the /etc/storpool.conf file")


class StorPoolConfigurationInvalid(CinderException):
    message = _("Invalid parameter %(param)s in the %(section)s section "
                "of the /etc/storpool.conf file: %(error)s")

cinder/tests/unit/test_storpool.py

@@ -0,0 +1,432 @@
# Copyright 2014, 2015 StorPool
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
import mock
from oslo_utils import units
fakeStorPool = mock.Mock()
fakeStorPool.spopenstack = mock.Mock()
fakeStorPool.spapi = mock.Mock()
fakeStorPool.spconfig = mock.Mock()
fakeStorPool.sptypes = mock.Mock()
sys.modules['storpool'] = fakeStorPool
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers import storpool as driver
volume_types = {
1: {},
2: {'storpool_template': 'ssd'},
3: {'storpool_template': 'hdd'}
}
volumes = {}
snapshots = {}
def MockExtraSpecs(vtype):
return volume_types[vtype]
def mock_volume_types(f):
def _types_inner_inner1(inst, *args, **kwargs):
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs',
new=MockExtraSpecs)
def _types_inner_inner2():
return f(inst, *args, **kwargs)
return _types_inner_inner2()
return _types_inner_inner1
def volumeName(vid):
return 'os--volume--{id}'.format(id=vid)
def snapshotName(vtype, vid):
return 'os--snap--{t}--{id}'.format(t=vtype, id=vid)
class MockDisk(object):
def __init__(self, diskId):
self.id = diskId
self.generationLeft = -1
self.agCount = 13
self.agFree = 12
self.agAllocated = 1
class MockTemplate(object):
def __init__(self, name):
self.name = name
class MockApiError(Exception):
def __init__(self, msg):
super(MockApiError, self).__init__(msg)
class MockAPI(object):
def __init__(self):
self._disks = {diskId: MockDisk(diskId) for diskId in (1, 2, 3, 4)}
self._disks[3].generationLeft = 42
self._templates = [MockTemplate(name) for name in ('ssd', 'hdd')]
def setlog(self, log):
self._log = log
def disksList(self):
return self._disks
def snapshotCreate(self, vname, snap):
snapshots[snap['name']] = dict(volumes[vname])
def snapshotDelete(self, name):
del snapshots[name]
def volumeCreate(self, v):
if v['name'] in volumes:
raise MockApiError('volume already exists')
volumes[v['name']] = v
def volumeDelete(self, name):
del volumes[name]
def volumeTemplatesList(self):
return self._templates
def volumesReassign(self, json):
pass
def volumeUpdate(self, name, size):
volumes[name]['size'] = size['size']
class MockAttachDB(object):
def __init__(self, log):
self._api = MockAPI()
def api(self):
return self._api
def volumeName(self, vid):
return volumeName(vid)
def snapshotName(self, vtype, vid):
return snapshotName(vtype, vid)
def MockVolumeUpdateDesc(size):
return {'size': size}
def MockSPConfig(section='s01'):
res = {}
m = re.match('^s0*([A-Za-z0-9]+)$', section)
if m:
res['SP_OURID'] = m.group(1)
return res
fakeStorPool.spapi.ApiError = MockApiError
fakeStorPool.spconfig.SPConfig = MockSPConfig
fakeStorPool.spopenstack.AttachDB = MockAttachDB
fakeStorPool.sptypes.VolumeUpdateDesc = MockVolumeUpdateDesc
class StorPoolTestCase(test.TestCase):
def setUp(self):
super(StorPoolTestCase, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.volume_backend_name = 'storpool_test'
self.cfg.storpool_template = None
self.cfg.storpool_replication = 3
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
self.driver = driver.StorPoolDriver(execute=mock_exec,
configuration=self.cfg)
self.driver.check_for_setup_error()
def test_initialized(self):
self.assertRaises(TypeError,
self.driver.validate_connector,
5)
self.assertRaises(KeyError,
self.driver.validate_connector,
{'no-host': None})
self.assertRaises(exception.StorPoolConfigurationMissing,
self.driver.validate_connector,
{'host': 'none'})
self.assertRaises(exception.StorPoolConfigurationInvalid,
self.driver.validate_connector,
{'host': 'sbad'})
self.assertTrue(self.driver.validate_connector({'host': 's01'}))
self.assertRaises(TypeError,
self.driver.initialize_connection,
None, 5)
self.assertRaises(KeyError,
self.driver.initialize_connection,
None, {'no-host': None})
self.assertRaises(exception.StorPoolConfigurationMissing,
self.driver.initialize_connection,
None, {'host': 'none'})
self.assertRaises(exception.StorPoolConfigurationInvalid,
self.driver.initialize_connection,
None, {'host': 'sbad'})
c = self.driver.initialize_connection({'id': '42'}, {'host': 's01'})
self.assertEqual('storpool', c['driver_volume_type'])
self.assertDictEqual({'client_id': 1, 'volume': '42'}, c['data'])
c = self.driver.initialize_connection({'id': '616'}, {'host': 's02'})
self.assertEqual('storpool', c['driver_volume_type'])
self.assertDictEqual({'client_id': 2, 'volume': '616'}, c['data'])
self.driver.terminate_connection(None, None)
self.driver.create_export(None, None)
self.driver.remove_export(None, None)
def test_stats(self):
stats = self.driver.get_volume_stats(refresh=True)
self.assertEqual('StorPool', stats['vendor_name'])
self.assertEqual('storpool', stats['storage_protocol'])
self.assertListEqual(['default', 'template_hdd', 'template_ssd'],
sorted([p['pool_name'] for p in stats['pools']]))
r = re.compile('^template_([A-Za-z0-9_]+)$')
for pool in stats['pools']:
self.assertEqual(19, pool['total_capacity_gb'])
self.assertEqual(5, pool['free_capacity_gb'])
if pool['pool_name'] != 'default':
m = r.match(pool['pool_name'])
self.assertIsNotNone(m)
self.assertIsNotNone(m.group(1))
self.assertEqual(m.group(1), pool['storpool_template'])
def assertVolumeNames(self, names):
self.assertListEqual(sorted([volumeName(n) for n in names]),
sorted(volumes.keys()))
@mock_volume_types
def test_create_delete_volume(self):
self.assertVolumeNames([])
self.assertDictEqual({}, volumes)
self.assertDictEqual({}, snapshots)
self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1,
'volume_type': None})
self.assertListEqual([volumeName('1')], volumes.keys())
self.assertVolumeNames(('1',))
v = volumes[volumeName('1')]
self.assertEqual(1 * units.Gi, v['size'])
self.assertNotIn('template', v.keys())
self.assertEqual(3, v['replication'])
caught = False
try:
self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 0,
'volume_type': None})
except exception.VolumeBackendAPIException:
caught = True
self.assertTrue(caught)
self.driver.delete_volume({'id': '1'})
self.assertVolumeNames([])
self.assertDictEqual({}, volumes)
self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 2,
'volume_type': None})
self.assertVolumeNames(('1',))
v = volumes[volumeName('1')]
self.assertEqual(2 * units.Gi, v['size'])
self.assertNotIn('template', v.keys())
self.assertEqual(3, v['replication'])
self.driver.create_volume({'id': '2', 'name': 'v2', 'size': 3,
'volume_type': {'id': 1}})
self.assertVolumeNames(('1', '2'))
v = volumes[volumeName('2')]
self.assertEqual(3 * units.Gi, v['size'])
self.assertNotIn('template', v.keys())
self.assertEqual(3, v['replication'])
self.driver.create_volume({'id': '3', 'name': 'v2', 'size': 4,
'volume_type': {'id': 2}})
self.assertVolumeNames(('1', '2', '3'))
v = volumes[volumeName('3')]
self.assertEqual(4 * units.Gi, v['size'])
self.assertEqual('ssd', v['template'])
self.assertNotIn('replication', v.keys())
self.driver.create_volume({'id': '4', 'name': 'v2', 'size': 5,
'volume_type': {'id': 3}})
self.assertVolumeNames(('1', '2', '3', '4'))
v = volumes[volumeName('4')]
self.assertEqual(5 * units.Gi, v['size'])
self.assertEqual('hdd', v['template'])
self.assertNotIn('replication', v.keys())
# Make sure the dictionary is not corrupted somehow...
v = volumes[volumeName('1')]
self.assertEqual(2 * units.Gi, v['size'])
self.assertNotIn('template', v.keys())
self.assertEqual(3, v['replication'])
for vid in ('1', '2', '3', '4'):
self.driver.delete_volume({'id': vid})
self.assertVolumeNames([])
self.assertDictEqual({}, volumes)
self.assertDictEqual({}, snapshots)
def test_clone_extend_volume(self):
self.assertVolumeNames([])
self.assertDictEqual({}, volumes)
self.assertDictEqual({}, snapshots)
self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1,
'volume_type': None})
self.assertVolumeNames(('1',))
self.driver.extend_volume({'id': '1'}, 2)
self.assertEqual(2 * units.Gi, volumes[volumeName('1')]['size'])
self.driver.create_cloned_volume({'id': '2', 'name': 'clo', 'size': 3},
{'id': 1})
self.assertVolumeNames(('1', '2'))
self.assertDictEqual({}, snapshots)
# Note: this would not be true in a real environment (the snapshot would
# have been deleted and the volume would have no parent), but with this
# fake implementation it helps us make sure that the second volume was
# created with the proper options.
self.assertEqual(volumes[volumeName('2')]['parent'],
snapshotName('clone', '2'))
self.driver.delete_volume({'id': 1})
self.driver.delete_volume({'id': 2})
self.assertDictEqual({}, volumes)
self.assertDictEqual({}, snapshots)
@mock_volume_types
def test_config_replication(self):
self.assertVolumeNames([])
self.assertDictEqual({}, volumes)
self.assertDictEqual({}, snapshots)
save_repl = self.driver.configuration.storpool_replication
self.driver.configuration.storpool_replication = 3
stats = self.driver.get_volume_stats(refresh=True)
pool = stats['pools'][0]
self.assertEqual(19, pool['total_capacity_gb'])
self.assertEqual(5, pool['free_capacity_gb'])
self.driver.create_volume({'id': 'cfgrepl1', 'name': 'v1', 'size': 1,
'volume_type': None})
self.assertVolumeNames(('cfgrepl1',))
v = volumes[volumeName('cfgrepl1')]
self.assertEqual(3, v['replication'])
self.assertNotIn('template', v)
self.driver.delete_volume({'id': 'cfgrepl1'})
self.driver.configuration.storpool_replication = 2
stats = self.driver.get_volume_stats(refresh=True)
pool = stats['pools'][0]
self.assertEqual(19, pool['total_capacity_gb'])
self.assertEqual(8, pool['free_capacity_gb'])
self.driver.create_volume({'id': 'cfgrepl2', 'name': 'v1', 'size': 1,
'volume_type': None})
self.assertVolumeNames(('cfgrepl2',))
v = volumes[volumeName('cfgrepl2')]
self.assertEqual(2, v['replication'])
self.assertNotIn('template', v)
self.driver.delete_volume({'id': 'cfgrepl2'})
self.driver.create_volume({'id': 'cfgrepl3', 'name': 'v1', 'size': 1,
'volume_type': {'id': 2}})
self.assertVolumeNames(('cfgrepl3',))
v = volumes[volumeName('cfgrepl3')]
self.assertNotIn('replication', v)
self.assertEqual('ssd', v['template'])
self.driver.delete_volume({'id': 'cfgrepl3'})
self.driver.configuration.storpool_replication = save_repl
self.assertVolumeNames([])
self.assertDictEqual({}, volumes)
self.assertDictEqual({}, snapshots)
@mock_volume_types
def test_config_template(self):
self.assertVolumeNames([])
self.assertDictEqual({}, volumes)
self.assertDictEqual({}, snapshots)
save_template = self.driver.configuration.storpool_template
self.driver.configuration.storpool_template = None
self.driver.create_volume({'id': 'cfgtempl1', 'name': 'v1', 'size': 1,
'volume_type': None})
self.assertVolumeNames(('cfgtempl1',))
v = volumes[volumeName('cfgtempl1')]
self.assertEqual(3, v['replication'])
self.assertNotIn('template', v)
self.driver.delete_volume({'id': 'cfgtempl1'})
self.driver.create_volume({'id': 'cfgtempl2', 'name': 'v1', 'size': 1,
'volume_type': {'id': 2}})
self.assertVolumeNames(('cfgtempl2',))
v = volumes[volumeName('cfgtempl2')]
self.assertNotIn('replication', v)
self.assertEqual('ssd', v['template'])
self.driver.delete_volume({'id': 'cfgtempl2'})
self.driver.configuration.storpool_template = 'hdd'
self.driver.create_volume({'id': 'cfgtempl3', 'name': 'v1', 'size': 1,
'volume_type': None})
self.assertVolumeNames(('cfgtempl3',))
v = volumes[volumeName('cfgtempl3')]
self.assertNotIn('replication', v)
self.assertEqual('hdd', v['template'])
self.driver.delete_volume({'id': 'cfgtempl3'})
self.driver.create_volume({'id': 'cfgtempl4', 'name': 'v1', 'size': 1,
'volume_type': {'id': 2}})
self.assertVolumeNames(('cfgtempl4',))
v = volumes[volumeName('cfgtempl4')]
self.assertNotIn('replication', v)
self.assertEqual('ssd', v['template'])
self.driver.delete_volume({'id': 'cfgtempl4'})
self.driver.configuration.storpool_template = save_template
self.assertVolumeNames([])
self.assertDictEqual({}, volumes)
self.assertDictEqual({}, snapshots)

cinder/volume/drivers/storpool.py

@@ -0,0 +1,455 @@
# Copyright (c) 2014, 2015 StorPool
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""StorPool block device driver"""
from __future__ import absolute_import
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE
from cinder.volume import driver
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
storpool = importutils.try_import('storpool')
if storpool:
from storpool import spapi
from storpool import spconfig
from storpool import spopenstack
from storpool import sptypes
storpool_opts = [
cfg.StrOpt('storpool_template',
default=None,
help='The StorPool template for volumes with no type.'),
cfg.IntOpt('storpool_replication',
default=3,
help='The default StorPool chain replication value. '
'Used when creating a volume with no specified type if '
'storpool_template is not set. Also used for calculating '
'the apparent free space reported in the stats.'),
]
CONF = cfg.CONF
CONF.register_opts(storpool_opts)
class StorPoolDriver(driver.TransferVD, driver.ExtendVD, driver.CloneableVD,
driver.SnapshotVD, driver.RetypeVD, driver.BaseVD):
"""The StorPool block device driver.
Version history:
0.1.0 - Initial driver
0.2.0 - Bring the driver up to date with Kilo and Liberty:
- implement volume retyping and migrations
- use the driver.*VD ABC metaclasses
- bugfix: fall back to the configured StorPool template
1.0.0 - Imported into OpenStack Liberty with minor fixes
"""
VERSION = '1.0.0'
def __init__(self, *args, **kwargs):
super(StorPoolDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(storpool_opts)
self._sp_config = None
self._ourId = None
self._ourIdInt = None
self._attach = None
def _backendException(self, e):
return exception.VolumeBackendAPIException(data=six.text_type(e))
def _template_from_volume_type(self, vtype):
specs = volume_types.get_volume_type_extra_specs(vtype['id'])
if specs is None:
return None
return specs.get('storpool_template', None)
def create_volume(self, volume):
size = int(volume['size']) * units.Gi
name = self._attach.volumeName(volume['id'])
template = None
if volume['volume_type'] is not None:
template = self._template_from_volume_type(volume['volume_type'])
if template is None:
template = self.configuration.storpool_template
try:
if template is None:
self._attach.api().volumeCreate({
'name': name,
'size': size,
'replication': self.configuration.storpool_replication
})
else:
self._attach.api().volumeCreate({
'name': name,
'size': size,
'template': template
})
except spapi.ApiError as e:
raise self._backendException(e)
def _storpool_client_id(self, connector):
hostname = connector['host']
try:
cfg = spconfig.SPConfig(section=hostname)
return int(cfg['SP_OURID'])
except KeyError:
raise exception.StorPoolConfigurationMissing(
section=hostname, param='SP_OURID')
except Exception as e:
raise exception.StorPoolConfigurationInvalid(
section=hostname, param='SP_OURID', error=e)
def validate_connector(self, connector):
return self._storpool_client_id(connector) >= 0
def initialize_connection(self, volume, connector):
return {'driver_volume_type': 'storpool',
'data': {
'client_id': self._storpool_client_id(connector),
'volume': volume['id'],
}}
def terminate_connection(self, volume, connector, **kwargs):
pass
def create_snapshot(self, snapshot):
volname = self._attach.volumeName(snapshot['volume_id'])
name = self._attach.snapshotName('snap', snapshot['id'])
try:
self._attach.api().snapshotCreate(volname, {'name': name})
except spapi.ApiError as e:
raise self._backendException(e)
def create_volume_from_snapshot(self, volume, snapshot):
size = int(volume['size']) * units.Gi
volname = self._attach.volumeName(volume['id'])
name = self._attach.snapshotName('snap', snapshot['id'])
try:
self._attach.api().volumeCreate({
'name': volname,
'size': size,
'parent': name
})
except spapi.ApiError as e:
raise self._backendException(e)
def create_cloned_volume(self, volume, src_vref):
refname = self._attach.volumeName(src_vref['id'])
snapname = self._attach.snapshotName('clone', volume['id'])
try:
self._attach.api().snapshotCreate(refname, {'name': snapname})
except spapi.ApiError as e:
raise self._backendException(e)
size = int(volume['size']) * units.Gi
volname = self._attach.volumeName(volume['id'])
try:
self._attach.api().volumeCreate({
'name': volname,
'size': size,
'parent': snapname
})
except spapi.ApiError as e:
raise self._backendException(e)
finally:
try:
self._attach.api().snapshotDelete(snapname)
except spapi.ApiError as e:
# ARGH!
LOG.error(_LE("Could not delete the temp snapshot {n}: {msg}").
format(n=snapname, msg=six.text_type(e)))
def create_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def delete_volume(self, volume):
name = self._attach.volumeName(volume['id'])
try:
self._attach.api().volumesReassign(
json=[{"volume": name, "detach": "all"}])
self._attach.api().volumeDelete(name)
except spapi.ApiError as e:
if e.name == 'objectDoesNotExist':
pass
else:
raise self._backendException(e)
def delete_snapshot(self, snapshot):
name = self._attach.snapshotName('snap', snapshot['id'])
try:
self._attach.api().volumesReassign(
json=[{"snapshot": name, "detach": "all"}])
self._attach.api().snapshotDelete(name)
except spapi.ApiError as e:
if e.name == 'objectDoesNotExist':
pass
else:
raise self._backendException(e)
def check_for_setup_error(self):
if storpool is None:
msg = _('storpool libraries not found')
raise exception.VolumeBackendAPIException(data=msg)
self._attach = spopenstack.AttachDB(log=LOG)
try:
self._attach.api()
except Exception as e:
LOG.error(_LE("StorPoolDriver API initialization failed: {e}").
format(e=e))
raise
def get_volume_stats(self, refresh=False):
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
try:
dl = self._attach.api().disksList()
templates = self._attach.api().volumeTemplatesList()
except spapi.ApiError as e:
raise self._backendException(e)
total = 0
used = 0
free = 0
agSize = 512 * units.Mi
for (id, desc) in dl.iteritems():
if desc.generationLeft != -1:
continue
total += desc.agCount * agSize
used += desc.agAllocated * agSize
free += desc.agFree * agSize * 4096 / (4096 + 128)
# Report the free space as if all new volumes will be created
# with StorPool replication 3; anything else is rare.
free /= self.configuration.storpool_replication
space = {
'total_capacity_gb': total / units.Gi,
'free_capacity_gb': free / units.Gi,
'reserved_percentage': 0,
'QoS_support': False,
}
pools = [dict(space, pool_name='default')]
pools += [dict(space,
pool_name='template_' + t.name,
storpool_template=t.name
) for t in templates]
self._stats = {
'volume_backend_name': self.configuration.safe_get(
'volume_backend_name') or 'storpool',
'vendor_name': 'StorPool',
'driver_version': self.VERSION,
'storage_protocol': 'storpool',
'pools': pools
}
def _attach_volume(self, context, volume, properties, remote=False):
if remote:
return super(StorPoolDriver, self)._attach_volume(
context, volume, properties, remote=remote)
req_id = context.request_id
req = self._attach.get().get(req_id, None)
if req is None:
req = {
'volume': self._attach.volumeName(volume['id']),
'type': 'cinder-attach',
'id': context.request_id,
'rights': 2,
'volsnap': False,
'remove_on_detach': True
}
self._attach.add(req_id, req)
name = req['volume']
self._attach.sync(req_id, None)
return {'device': {'path': '/dev/storpool/{v}'.format(v=name),
'storpool_attach_req': req_id}}, volume
def _detach_volume(self, context, attach_info, volume, properties,
force=False, remote=False):
if remote:
return super(StorPoolDriver, self)._detach_volume(
context, attach_info, volume, properties,
force=force, remote=remote)
req_id = attach_info.get('device', {}).get(
'storpool_attach_req', context.request_id)
req = self._attach.get()[req_id]
name = req['volume']
self._attach.sync(req_id, name)
if req.get('remove_on_detach', False):
self._attach.remove(req_id)
def backup_volume(self, context, backup, backup_service):
volume = self.db.volume_get(context, backup['volume_id'])
req_id = context.request_id
volname = self._attach.volumeName(volume['id'])
name = self._attach.volsnapName(volume['id'], req_id)
try:
self._attach.api().snapshotCreate(volname, {'name': name})
except spapi.ApiError as e:
raise self._backendException(e)
self._attach.add(req_id, {
'volume': name,
'type': 'backup',
'id': req_id,
'rights': 1,
'volsnap': True
})
try:
return super(StorPoolDriver, self).backup_volume(
context, backup, backup_service)
finally:
self._attach.remove(req_id)
try:
self._attach.api().snapshotDelete(name)
except spapi.ApiError as e:
LOG.error(
_LE('Could not remove the temp snapshot {n} for {v}: {e}').
format(n=name, v=volname, e=six.text_type(e)))
def copy_volume_to_image(self, context, volume, image_service, image_meta):
req_id = context.request_id
volname = self._attach.volumeName(volume['id'])
name = self._attach.volsnapName(volume['id'], req_id)
try:
self._attach.api().snapshotCreate(volname, {'name': name})
except spapi.ApiError as e:
raise self._backendException(e)
self._attach.add(req_id, {
'volume': name,
'type': 'copy-from',
'id': req_id,
'rights': 1,
'volsnap': True
})
try:
return super(StorPoolDriver, self).copy_volume_to_image(
context, volume, image_service, image_meta)
finally:
self._attach.remove(req_id)
try:
self._attach.api().snapshotDelete(name)
except spapi.ApiError as e:
LOG.error(
_LE('Could not remove the temp snapshot {n} for {v}: {e}').
format(n=name, v=volname, e=six.text_type(e)))
def copy_image_to_volume(self, context, volume, image_service, image_id):
req_id = context.request_id
name = self._attach.volumeName(volume['id'])
self._attach.add(req_id, {
'volume': name,
'type': 'copy-to',
'id': req_id,
'rights': 2
})
try:
return super(StorPoolDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
finally:
self._attach.remove(req_id)
def extend_volume(self, volume, new_size):
size = int(new_size) * units.Gi
name = self._attach.volumeName(volume['id'])
try:
upd = sptypes.VolumeUpdateDesc(size=size)
self._attach.api().volumeUpdate(name, upd)
except spapi.ApiError as e:
raise self._backendException(e)
def ensure_export(self, context, volume):
# Already handled by Nova's AttachDB, we hope.
# Maybe it should move here, but oh well.
pass
def retype(self, context, volume, new_type, diff, host):
update = {}
if diff['encryption']:
LOG.error(_LE('Retype of encryption type not supported.'))
return False
templ = self.configuration.storpool_template
repl = self.configuration.storpool_replication
if diff['extra_specs']:
for (k, v) in diff['extra_specs'].iteritems():
if k == 'volume_backend_name':
if v[0] != v[1]:
# Retype of a volume backend not supported yet,
# the volume needs to be migrated.
return False
elif k == 'storpool_template':
if v[0] != v[1]:
if v[1] is not None:
update['template'] = v[1]
elif templ is not None:
update['template'] = templ
else:
update['replication'] = repl
elif v[0] != v[1]:
LOG.error(_LE('Retype of extra_specs "%s" not '
'supported yet.'), k)
return False
if update:
name = self._attach.volumeName(volume['id'])
try:
upd = sptypes.VolumeUpdateDesc(**update)
self._attach.api().volumeUpdate(name, upd)
except spapi.ApiError as e:
raise self._backendException(e)
return True
def update_migrated_volume(self, context, volume, new_volume):
orig_id = volume['id']
orig_name = self._attach.volumeName(orig_id)
temp_id = new_volume['id']
temp_name = self._attach.volumeName(temp_id)
vols = {v.name: True for v in self._attach.api().volumesList()}
if temp_name not in vols:
LOG.error(_LE('StorPool update_migrated_volume(): it seems '
'that the StorPool volume "%(tid)s" was not '
'created as part of the migration from '
'"%(oid)s"'), {'tid': temp_id, 'oid': orig_id})
elif orig_name in vols:
LOG.error(_LE('StorPool update_migrated_volume(): both '
'the original volume "%(oid)s" and the migrated '
'StorPool volume "%(tid)s" seem to exist on '
'the StorPool cluster'),
{'oid': orig_id, 'tid': temp_id})
else:
self._attach.api().volumeUpdate(temp_name, {'rename': orig_name})