Merge "ProphetStor with pool aware cinder scheduler"

This commit is contained in:
Jenkins 2014-11-24 04:58:54 +00:00 committed by Gerrit Code Review
commit 0c8abb4556
4 changed files with 236 additions and 180 deletions

View File

@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import httplib
import re
@@ -35,12 +36,16 @@ DATA_SERVER_INFO = 0, {
'metadata': {'vendor': 'ProphetStor',
'version': '1.5'}}
DATA_POOLS = 0, {
'children': [POOLUUID]
}
DATA_POOLINFO = 0, {
'capabilitiesURI': '',
'children': [],
'childrenrange': '',
'completionStatus': 'Complete',
'metadata': {'available_capacity': 4194074624,
'metadata': {'available_capacity': 4294967296,
'ctime': 1390551362349,
'vendor': 'prophetstor',
'version': '1.5',
@@ -56,7 +61,8 @@ DATA_POOLINFO = 0, {
'pool_uuid': POOLUUID,
'properties': {'raid_level': 'raid0'},
'state': 'Online',
'total_capacity': 4194828288,
'used_capacity': 0,
'total_capacity': 4294967296,
'zpool_guid': '8173612007304181810'},
'objectType': 'application/cdmi-container',
'percentComplete': 100}
@@ -98,7 +104,8 @@ DATA_IN_GROUP = {'id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
DATA_IN_VOLUME = {'id': 'abc123',
'display_name': 'abc123',
'display_description': '',
'size': 1}
'size': 1,
'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_VOLUME_VG = {'id': 'abc123',
'display_name': 'abc123',
@@ -106,12 +113,14 @@ DATA_IN_VOLUME_VG = {'id': 'abc123',
'size': 1,
'consistencygroup_id':
'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
'status': 'available'}
'status': 'available',
'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_VOLUME1 = {'id': 'abc456',
'display_name': 'abc456',
'display_description': '',
'size': 1}
'size': 1,
'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_CG_SNAPSHOT = {
'consistencygroup_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
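The 'host' values added to these fixtures use Cinder's pool-aware host string format, "host@backend#pool", which the driver changes later in this diff consume via cinder.volume.utils.extract_host. A minimal stand-in sketch of the parsing rule (illustration only, not the driver's code):

def extract_pool(host):
    # Return the pool part of 'host@backend#pool', or None when the
    # scheduler did not embed a pool in the host field.
    if host and '#' in host:
        return host.split('#', 1)[1]
    return None

assert extract_pool('hostname@backend#pool_uuid') == 'pool_uuid'
assert extract_pool('hostname@backend') is None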
@@ -447,12 +456,13 @@ class TestProphetStorDPLDriver(test.TestCase):
self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO
self.DPL_MOCK.get_server_info.return_value = DATA_SERVER_INFO
res = self.dpldriver.get_volume_stats(True)
self.assertEqual(res['vendor_name'], 'ProphetStor')
self.assertEqual(res['driver_version'], '1.5')
self.assertEqual(res['total_capacity_gb'], 3.91)
self.assertEqual(res['free_capacity_gb'], 3.91)
self.assertEqual(res['reserved_percentage'], 0)
self.assertEqual(res['QoS_support'], False)
self.assertEqual('ProphetStor', res['vendor_name'])
self.assertEqual('1.5', res['driver_version'])
pool = res["pools"][0]
self.assertEqual(4, pool['total_capacity_gb'])
self.assertEqual(4, pool['free_capacity_gb'])
self.assertEqual(0, pool['reserved_percentage'])
self.assertEqual(False, pool['QoS_support'])
def test_create_volume(self):
self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
@@ -465,6 +475,28 @@ class TestProphetStorDPLDriver(test.TestCase):
int(DATA_IN_VOLUME['size']) * units.Gi,
True)
def test_create_volume_without_pool(self):
fake_volume = copy.deepcopy(DATA_IN_VOLUME)
self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
self.configuration.dpl_pool = ""
fake_volume['host'] = "host@backend" # missing pool
self.assertRaises(exception.InvalidHost, self.dpldriver.create_volume,
volume=fake_volume)
def test_create_volume_with_configuration_pool(self):
fake_volume = copy.deepcopy(DATA_IN_VOLUME)
fake_volume['host'] = "host@backend" # missing pool
self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
self.dpldriver.create_volume(fake_volume)
self.DPL_MOCK.create_vdev.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']),
DATA_IN_VOLUME['display_name'],
DATA_IN_VOLUME['display_description'],
self.configuration.dpl_pool,
int(DATA_IN_VOLUME['size']) * units.Gi,
True)
def test_create_volume_of_group(self):
self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT
@@ -544,13 +576,13 @@ class TestProphetStorDPLDriver(test.TestCase):
self.DPL_MOCK.get_vdev.return_value = DATA_ASSIGNVDEV
res = self.dpldriver.initialize_connection(DATA_IN_VOLUME,
DATA_IN_CONNECTOR)
self.assertEqual(res['driver_volume_type'], 'iscsi')
self.assertEqual(res['data']['target_lun'], '101')
self.assertEqual(res['data']['target_discovered'], True)
self.assertEqual(res['data']['target_portal'], '172.31.1.210:3260')
self.assertEqual(res['data']['target_iqn'], 'iqn.2013-09.com.'
'prophetstor:hypervisor.'
'886423051816')
self.assertEqual('iscsi', res['driver_volume_type'])
self.assertEqual('101', res['data']['target_lun'])
self.assertEqual(True, res['data']['target_discovered'])
self.assertEqual('172.31.1.210:3260', res['data']['target_portal'])
self.assertEqual(
'iqn.2013-09.com.prophetstor:hypervisor.886423051816',
res['data']['target_iqn'])
def test_terminate_connection(self):
self.DPL_MOCK.unassign_vdev.return_value = DATA_OUTPUT
@@ -578,25 +610,27 @@ class TestProphetStorDPLDriver(test.TestCase):
def test_get_pool_info(self):
self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO
_, res = self.dpldriver._get_pool_info(POOLUUID)
self.assertEqual(res['metadata']['available_capacity'], 4194074624)
self.assertEqual(res['metadata']['ctime'], 1390551362349)
self.assertEqual(res['metadata']['display_description'],
'Default Pool')
self.assertEqual(res['metadata']['display_name'],
'default_pool')
self.assertEqual(res['metadata']['event_uuid'],
'4f7c4d679a664857afa4d51f282a516a')
self.assertEqual(res['metadata']['physical_device'], {
'cache': [],
'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'],
'log': [],
'spare': []})
self.assertEqual(res['metadata']['pool_uuid'], POOLUUID)
self.assertEqual(res['metadata']['properties'], {
'raid_level': 'raid0'})
self.assertEqual(res['metadata']['state'], 'Online')
self.assertEqual(res['metadata']['total_capacity'], 4194828288)
self.assertEqual(res['metadata']['zpool_guid'], '8173612007304181810')
self.assertEqual(4294967296, res['metadata']['available_capacity'])
self.assertEqual(1390551362349, res['metadata']['ctime'])
self.assertEqual('Default Pool',
res['metadata']['display_description'])
self.assertEqual('default_pool',
res['metadata']['display_name'])
self.assertEqual('4f7c4d679a664857afa4d51f282a516a',
res['metadata']['event_uuid'])
self.assertEqual(
{'cache': [],
'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'],
'log': [],
'spare': []},
res['metadata']['physical_device'])
self.assertEqual(POOLUUID, res['metadata']['pool_uuid'])
self.assertEqual(
{'raid_level': 'raid0'},
res['metadata']['properties'])
self.assertEqual('Online', res['metadata']['state'])
self.assertEqual(4294967296, res['metadata']['total_capacity'])
self.assertEqual('8173612007304181810', res['metadata']['zpool_guid'])
def test_create_consistency_group(self):
self.DPL_MOCK.create_vg.return_value = DATA_OUTPUT

View File

@@ -18,7 +18,7 @@ import errno
import six
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _, _LI, _LE
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.prophetstor import dplcommon
@@ -56,8 +56,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
if fcInfo['type'] == 'fc':
fcInfos[fcUuid] = fcInfo
except Exception as e:
msg = _("Failed to get fiber channel info from storage due "
"to %(stat)s") % {'stat': six.string_types(e)}
msg = _LE("Failed to get fiber channel info from storage due "
"to %(stat)s") % {'stat': six.text_type(e)}
LOG.error(msg)
return fcInfos
@@ -80,8 +80,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
targetI['targetAddr'] = targetInfo[2]
targetInfos[str(targetInfo[0])] = targetI
except Exception as e:
msg = _("Failed to get fiber channel target from storage server"
" due to %(stat)s") % {'stat': six.text_type(e)}
msg = _LE("Failed to get fiber channel target from storage server"
" due to %(stat)s") % {'stat': six.text_type(e)}
targetInfos = {}
LOG.error(msg)
return targetInfos
@@ -99,8 +99,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
targetWwpns = fc_info.get('target_identifier', '')
lstargetWwpns.append(targetWwpns)
except Exception as e:
msg = _("Failed to get target wwpns from storage due "
"to %(stat)s") % {'stat': six.text_type(e)}
msg = _LE("Failed to get target wwpns from storage due "
"to %(stat)s") % {'stat': six.text_type(e)}
LOG.error(msg)
lstargetWwpns = []
return lstargetWwpns
@@ -118,7 +118,7 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
fActive = True
break
except Exception:
LOG.error(_('Failed to get sns table'))
LOG.error(_LE('Failed to get sns table'))
return fActive
def _convertHex2String(self, wwpns):
@@ -147,8 +147,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
self._conver_uuid2hex(volumeid), targetwwpns,
initiatorwwpns, volumename)
except Exception:
msg = _('Volume %(volumeid)s failed to send assign command, '
'ret: %(status)s output: %(output)s') % \
msg = _LE('Volume %(volumeid)s failed to send assign command, '
'ret: %(status)s output: %(output)s') % \
{'volumeid': volumeid, 'status': ret, 'output': output}
LOG.error(msg)
ret = errno.EFAULT
@@ -165,19 +165,16 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
msg = _('Flexvisor failed to assign volume %(id)s: '
'%(status)s.') % {'id': volumeid,
'status': status}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
ret = errno.EFAULT
msg = _('Flexvisor failed to assign volume %(id)s due to '
'unable to query status by event '
'id.') % {'id': volumeid}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor assign volume failed:%(id)s:'
'%(status)s.') % {'id': volumeid, 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return ret
@@ -198,21 +195,18 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
msg = _('Flexvisor failed to unassign volume %(id)s:'
' %(status)s.') % {'id': volumeid,
'status': status}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to unassign volume (get event) '
'%(id)s.') % {'id': volumeid}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor unassign volume failed:%(id)s:'
'%(status)s.') % {'id': volumeid, 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor succeed to unassign volume '
'%(id)s.') % {'id': volumeid}
msg = _LI('Flexvisor succeed to unassign volume '
'%(id)s.') % {'id': volumeid}
LOG.info(msg)
return ret
@@ -246,15 +240,18 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
ret = 0
targetIdentifier = []
szwwpns = []
LOG.info('initialize_connection volume: %s,'
' connector: %s' % (volume, connector))
msg = _LI('initialize_connection volume: %(volume)s, connector:'
' %(connector)s') % {"volume": volume,
"connector": connector}
LOG.info(msg)
# Get Storage Fiber channel controller
dc_fc = self._get_fc_channel()
# Get existed FC target list to decide target wwpn
dc_target = self._get_targets()
if len(dc_target) == 0:
msg = _('Backend storage did not configure fiber channel target.')
msg = _('Backend storage did not configure fiber channel '
'target.')
raise exception.VolumeBackendAPIException(data=msg)
for keyFc in dc_fc.keys():
@@ -271,7 +268,6 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
msg = _('Invalid wwpns format %(wwpns)s') % \
{'wwpns': connector['wwpns']}
raise exception.VolumeBackendAPIException(data=msg)
LOG.error(msg)
szwwpns.append(szwwpn)
if len(szwwpns):
@@ -281,7 +277,7 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
{}).get('targetAddr', '')
lsTargetWwpn.append(targetWwpn)
# Use wwpns to assign volume.
msg = _('Prefer use target wwpn %(wwpn)s') % {'wwpn': lsTargetWwpn}
msg = _LI('Prefer use target wwpn %(wwpn)s') % {'wwpn': lsTargetWwpn}
LOG.info(msg)
# Start to create export in all FC target node.
assignedTarget = []
@@ -294,8 +290,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
else:
assignedTarget.append(pTarget)
except Exception as e:
msg = _('Failed to export fiber channel target '
'due to %s') % (six.text_type(e))
msg = _LE('Failed to export fiber channel target '
'due to %s') % (six.text_type(e))
LOG.error(msg)
ret = errno.EFAULT
break
@@ -334,18 +330,17 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
properties['target_lun'] = int(nLun)
properties['volume_id'] = volume['id']
properties['initiator_target_map'] = init_targ_map
msg = _('%(volume)s assign type fibre_channel, properties '
'%(properties)s') % {'volume': volume['id'],
'properties': properties}
msg = _LI('%(volume)s assign type fibre_channel, properties '
'%(properties)s') % {'volume': volume['id'],
'properties': properties}
LOG.info(msg)
else:
msg = _('Invalid connection initialization response of '
'volume %(name)s') % {'name': volume['name']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
msg = _('Connect initialization info: '
'{driver_volume_type: fibre_channel, '
'data: %(properties)s') % {'properties': properties}
msg = _LI('Connect initialization info: '
'{driver_volume_type: fibre_channel, '
'data: %(properties)s') % {'properties': properties}
LOG.info(msg)
return {'driver_volume_type': 'fibre_channel',
'data': properties}
@@ -365,8 +360,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
szwwpns = []
ret = 0
info = {'driver_volume_type': 'fibre_channel', 'data': {}}
msg = _('terminate_connection volume: %(volume)s, '
'connector: %(con)s') % {'volume': volume, 'con': connector}
msg = _LI('terminate_connection volume: %(volume)s, '
'connector: %(con)s') % {'volume': volume, 'con': connector}
LOG.info(msg)
# Query targetwwpns.
# Get all target list of volume.
@@ -376,14 +371,12 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
msg = _('Invalid wwpns format %(wwpns)s') % \
{'wwpns': connector['wwpns']}
raise exception.VolumeBackendAPIException(data=msg)
LOG.error(msg)
szwwpns.append(szwwpn)
if len(szwwpns) == 0:
ret = errno.EFAULT
msg = _('Invalid wwpns format %(wwpns)s') % \
{'wwpns': connector['wwpns']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
for szwwpn in szwwpns:
@@ -402,7 +395,6 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
finally:
if ret:
msg = _('Failed to unassign %(volume)s.') % {'volume': volume['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Failed to delete export with fibre channel
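The recurring _() to _LE()/_LI()/_LW() substitutions in this file apply the oslo i18n log-marker convention: plain _() stays on user-facing strings such as exception payloads, while log-only strings carry the marker for their level. A condensed sketch of the pattern, assuming the same Juno-era cinder.i18n and logging modules imported above:

from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging

LOG = logging.getLogger(__name__)

def fail_assign(volume_id):
    # Log text carries the level marker so translators can maintain
    # separate log message catalogs.
    LOG.error(_LE('Flexvisor failed to assign volume %(id)s.'),
              {'id': volume_id})
    # Exception text reaches the API user, so it keeps plain _().
    msg = _('Flexvisor failed to assign volume %(id)s.') % {'id': volume_id}
    raise exception.VolumeBackendAPIException(data=msg)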

View File

@@ -16,7 +16,7 @@
import errno
from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _LW, _LI, _
from cinder.openstack.common import log as logging
import cinder.volume.driver
from cinder.volume.drivers.prophetstor import dplcommon
@@ -57,19 +57,16 @@ class DPLISCSIDriver(dplcommon.DPLCOMMONDriver,
msg = _('Flexvisor failed to assign volume %(id)s: '
'%(status)s.') % {'id': volume['id'],
'status': status}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
ret = errno.EFAULT
msg = _('Flexvisor failed to assign volume %(id)s due to '
'unable to query status by event '
'id.') % {'id': volume['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor assign volume failed.:%(id)s:'
'%(status)s.') % {'id': volume['id'], 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if ret == 0:
@@ -105,9 +102,10 @@ class DPLISCSIDriver(dplcommon.DPLCOMMONDriver,
break
if not (ret == 0 or properties['target_portal']):
raise exception.VolumeBackendAPIException(
data='Flexvisor failed to assign volume %s iqn %s.'
% (volume['id'], connector['initiator']))
msg = _('Flexvisor failed to assign volume %(volume)s '
'iqn %(iqn)s.') % {'volume': volume['id'],
'iqn': connector['initiator']}
raise exception.VolumeBackendAPIException(data=msg)
return {'driver_volume_type': 'iscsi', 'data': properties}
@@ -127,21 +125,18 @@ class DPLISCSIDriver(dplcommon.DPLCOMMONDriver,
msg = _('Flexvisor failed to unassign volume %(id)s:'
' %(status)s.') % {'id': volume['id'],
'status': status}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to unassign volume (get event) '
'%(id)s.') % {'id': volume['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret == errno.ENODATA:
msg = _('Flexvisor already unassigned volume '
'%(id)s.') % {'id': volume['id']}
msg = _LI('Flexvisor already unassigned volume '
'%(id)s.') % {'id': volume['id']}
LOG.info(msg)
elif ret != 0:
msg = _('Flexvisor failed to unassign volume:%(id)s:'
'%(status)s.') % {'id': volume['id'], 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def get_volume_stats(self, refresh=False):
@@ -156,6 +151,7 @@ class DPLISCSIDriver(dplcommon.DPLCOMMONDriver,
(backend_name or 'DPLISCSIDriver')
self._stats = data
except Exception as exc:
LOG.warning(_LW('Cannot get volume status '
'%(exc)%s.') % {'exc': exc})
msg = _LW('Cannot get volume status '
'%(exc)s.') % {'exc': exc}
LOG.warning(msg)
return self._stats

View File

@@ -15,6 +15,7 @@
"""
Implementation of the class of ProphetStor DPL storage adapter of Federator.
# v2.0.1 Consistency group support
# v2.0.2 Pool aware scheduler
"""
import base64
@@ -28,12 +29,13 @@ from oslo.utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.i18n import _, _LI, _LW, _LE
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume import driver
from cinder.volume.drivers.prophetstor import options
from cinder.volume.drivers.san import san
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
@@ -330,10 +332,14 @@ class DPLVolume(object):
return self._execute(method, url, params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def get_pools(self):
method = 'GET'
url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL)
return self._execute(method, url, None, [httplib.OK])
def get_pool(self, poolid):
method = 'GET'
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL, poolid)
return self._execute(method, url, None, [httplib.OK, httplib.ACCEPTED])
def clone_vdev(self, SourceVolumeID, NewVolumeID, poolID, volumeName,
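The new get_pools() call backs the pool discovery in _get_pools() further down. A hedged sketch of consuming its response; the shape is inferred from this diff alone (_get_pools() unpacks 'children' entries as (uuid, name) pairs, while the DATA_POOLS test fixture lists bare UUIDs, so treat the pair form as an assumption):

def list_pool_uuids(dpl_client):
    # dpl_client is a DPLVolume instance; get_pools() returns
    # (ret, body) like the other REST helpers in this class.
    ret, output = dpl_client.get_pools()
    if ret != 0:
        return []
    return [pool_uuid for pool_uuid, _name in output.get('children', [])]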
@@ -786,12 +792,11 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Flexvisor failed to add volume %(id)s '
'to group %(cgid)s.') % {'id': volume['id'],
'cgid': cgId}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor succeeded to add volume %(id)s to '
'group %(cgid)s.') % {'id': volume['id'],
'cgid': cgId}
msg = _LI('Flexvisor succeeded to add volume %(id)s to '
'group %(cgid)s.') % {'id': volume['id'],
'cgid': cgId}
LOG.info(msg)
def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID):
@@ -805,7 +810,6 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Flexvisor failed to get snapshot id of volume '
'%(id)s from group %(vgid)s.') % {'id': volumeID,
'vgid': vgID}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not snapshotID:
msg = _('Flexvisor could not find volume %(id)s snapshot in'
@@ -826,9 +830,10 @@ class DPLCOMMONDriver(driver.VolumeDriver):
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
LOG.info(_LI('Start to create consistency group: %(group_name)s '
'id: %(id)s') %
{'group_name': group['name'], 'id': group['id']})
msg = _LI('Start to create consistency group: %(group_name)s '
'id: %(id)s') % {'group_name': group['name'],
'id': group['id']}
LOG.info(msg)
model_update = {'status': 'available'}
try:
ret, output = self.dpl.create_vg(
@@ -839,7 +844,6 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Failed to create consistency group '
'%(id)s:%(ret)s.') % {'id': group['id'],
'ret': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
return model_update
@@ -847,7 +851,6 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Failed to create consistency group '
'%(id)s due to %(reason)s.') % {'id': group['id'],
'reason': six.text_type(e)}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def delete_consistencygroup(self, context, group):
@@ -865,7 +868,6 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Failed to delete consistency group %(id)s '
'due to %(reason)s.') % {'id': group['id'],
'reason': six.text_type(e)}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
for volume_ref in volumes:
@@ -904,7 +906,6 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Failed to create cg snapshot %(id)s '
'due to %(reason)s.') % {'id': cgsnapshot_id,
'reason': six.text_type(e)}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
model_update['status'] = 'available'
@@ -934,7 +935,6 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Failed to delete cgsnapshot %(id)s due to '
'%(reason)s.') % {'id': cgsnapshot_id,
'reason': six.text_type(e)}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
model_update['status'] = 'deleted'
@@ -942,7 +942,15 @@ class DPLCOMMONDriver(driver.VolumeDriver):
def create_volume(self, volume):
"""Create a volume."""
pool = self.configuration.dpl_pool
pool = volume_utils.extract_host(volume['host'],
level='pool')
if not pool:
if not self.configuration.dpl_pool:
msg = _("Pool is not available in the volume host fields.")
raise exception.InvalidHost(reason=msg)
else:
pool = self.configuration.dpl_pool
ret, output = self.dpl.create_vdev(
self._conver_uuid2hex(volume['id']),
volume.get('display_name', ''),
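The pool-resolution block added above recurs with the same fallback rule in create_volume_from_snapshot and create_cloned_volume below. A hypothetical shared helper (not part of this commit) that captures the rule:

from cinder import exception
from cinder.i18n import _
from cinder.volume import utils as volume_utils

def _resolve_pool(configuration, volume):
    # Prefer the pool the scheduler embedded in volume['host'];
    # fall back to the configured dpl_pool; otherwise fail fast.
    pool = volume_utils.extract_host(volume['host'], level='pool')
    if not pool:
        pool = configuration.dpl_pool
    if not pool:
        msg = _("Pool is not available in the volume host fields.")
        raise exception.InvalidHost(reason=msg)
    return pool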
@@ -960,23 +968,21 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Flexvisor failed to create volume %(volume)s: '
'%(status)s.') % {'volume': volume['id'],
'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to create volume (get event) '
'%s.') % (volume['id'])
LOG.error(msg)
raise exception.VolumeBackendAPIException(
data=msg)
elif ret != 0:
msg = _('Flexvisor create volume failed.:%(volumeid)s:'
'%(status)s.') % {'volumeid': volume['id'], 'status': ret}
LOG.error(msg)
'%(status)s.') % {'volumeid': volume['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor succeed to create volume '
'%(id)s.') % {'id': volume['id']}
msg = _LI('Flexvisor succeed to create volume '
'%(id)s.') % {'id': volume['id']}
LOG.info(msg)
if volume.get('consistencygroup_id', None):
@@ -1006,7 +1012,6 @@ class DPLCOMMONDriver(driver.VolumeDriver):
except Exception:
msg = _("Flexvisor unable to find the source volume "
"%(id)s info.") % {'id': src_volumeID}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if src_volume:
vgID = src_volume.get('consistencygroup_id', None)
@@ -1019,7 +1024,15 @@ class DPLCOMMONDriver(driver.VolumeDriver):
self._conver_uuid2hex(cgsnapshotID),
self._conver_uuid2hex(src_volumeID))
pool = self.configuration.dpl_pool
pool = volume_utils.extract_host(volume['host'],
level='pool')
if not pool:
if not self.configuration.dpl_pool:
msg = _("Pool is not available in the volume host fields.")
raise exception.InvalidHost(reason=msg)
else:
pool = self.configuration.dpl_pool
ret, output = self.dpl.create_vdev_from_snapshot(
self._conver_uuid2hex(volume['id']),
volume.get('display_name', ''),
@@ -1034,28 +1047,26 @@ class DPLCOMMONDriver(driver.VolumeDriver):
volume['id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to create volume from snapshot '
'%(id)s:%(status)s.') % {'id': snapshot['id'],
'status': ret}
LOG.error(msg)
msg = _('Flexvisor failed to create volume from '
'snapshot %(id)s:'
'%(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor failed to create volume from snapshot '
'(failed to get event) '
'%(id)s.') % {'id': snapshot['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create volume from snapshot '
'%(id)s: %(status)s.') % {'id': snapshot['id'],
'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor succeed to create volume %(id)s '
'from snapshot.') % {'id': volume['id']}
msg = _LI('Flexvisor succeed to create volume %(id)s '
'from snapshot.') % {'id': volume['id']}
LOG.info(msg)
if volume.get('consistencygroup_id', None):
@@ -1085,29 +1096,35 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Flexvisor failed to spawn volume from snapshot '
'%(id)s:%(status)s.') % {'id': snapshot['id'],
'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to spawn volume from snapshot '
'(failed to get event) '
'%(id)s.') % {'id': snapshot['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create volume from snapshot '
'%(id)s: %(status)s.') % {'id': snapshot['id'],
'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor succeed to create volume %(id)s '
'from snapshot.') % {'id': volume['id']}
msg = _LI('Flexvisor succeed to create volume %(id)s '
'from snapshot.') % {'id': volume['id']}
LOG.info(msg)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
pool = self.configuration.dpl_pool
pool = volume_utils.extract_host(volume['host'],
level='pool')
if not pool:
if not self.configuration.dpl_pool:
msg = _("Pool is not available in the volume host fields.")
raise exception.InvalidHost(reason=msg)
else:
pool = self.configuration.dpl_pool
ret, output = self.dpl.clone_vdev(
self._conver_uuid2hex(src_vref['id']),
self._conver_uuid2hex(volume['id']),
@@ -1126,23 +1143,20 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Flexvisor failed to clone volume %(id)s: '
'%(status)s.') % {'id': src_vref['id'],
'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to clone volume (failed to get event'
') %(id)s.') % {'id': src_vref['id']}
LOG.error(msg)
msg = _('Flexvisor failed to clone volume (failed to'
' get event) %(id)s.') % {'id': src_vref['id']}
raise exception.VolumeBackendAPIException(
data=msg)
elif ret != 0:
msg = _('Flexvisor failed to clone volume %(id)s: '
'%(status)s.') % {'id': src_vref['id'], 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor succeed to clone '
'volume %(id)s.') % {'id': volume['id']}
msg = _LI('Flexvisor succeed to clone '
'volume %(id)s.') % {'id': volume['id']}
LOG.info(msg)
if volume.get('consistencygroup_id', None):
@@ -1166,20 +1180,20 @@ class DPLCOMMONDriver(driver.VolumeDriver):
self._conver_uuid2hex(volume['id']),
self._conver_uuid2hex(volume['consistencygroup_id']))
if ret:
msg = _('Flexvisor failed to delete volume %(id)s from'
' the group %(vgid)s.') % {
msg = _LW('Flexvisor failed to delete volume %(id)s from'
' the group %(vgid)s.') % {
'id': volume['id'],
'vgid': volume['consistencygroup_id']}
except Exception as e:
msg = _('Flexvisor failed to delete volume %(id)s from '
'group %(vgid)s due to %(status)s.') % {
msg = _LW('Flexvisor failed to delete volume %(id)s from '
'group %(vgid)s due to %(status)s.') % {
'id': volume['id'],
'vgid': volume['consistencygroup_id'],
'status': six.text_type(e)}
if ret:
ret = 0
LOG.warn(msg)
LOG.warning(msg)
ret, output = self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
if ret == errno.EAGAIN:
@@ -1187,17 +1201,15 @@ class DPLCOMMONDriver(driver.VolumeDriver):
if status['state'] == 'error':
msg = _('Flexvisor failed deleting volume %(id)s: '
'%(status)s.') % {'id': volume['id'], 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret == errno.ENODATA:
ret = 0
msg = _('Flexvisor volume %(id)s does not '
'exist.') % {'id': volume['id']}
msg = _LI('Flexvisor volume %(id)s does not '
'exist.') % {'id': volume['id']}
LOG.info(msg)
elif ret != 0:
msg = _('Flexvisor failed to delete volume %(id)s: '
'%(status)s.') % {'id': volume['id'], 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(
data=msg)
@@ -1217,25 +1229,22 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Flexvisor failed to extend volume '
'%(id)s:%(status)s.') % {'id': volume,
'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor failed to extend volume '
'(failed to get event) '
'%(id)s.') % {'id': volume['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to extend volume '
'%(id)s: %(status)s.') % {'id': volume['id'],
'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor succeed to extend volume'
' %(id)s.') % {'id': volume['id']}
msg = _LI('Flexvisor succeed to extend volume'
' %(id)s.') % {'id': volume['id']}
LOG.info(msg)
def create_snapshot(self, snapshot):
@@ -1256,19 +1265,16 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Flexvisor failed to create snapshot for volume '
'%(id)s: %(status)s.') % \
{'id': snapshot['volume_id'], 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to create snapshot for volume '
'(failed to get event) %(id)s.') % \
{'id': snapshot['volume_id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create snapshot for volume %(id)s: '
'%(status)s.') % {'id': snapshot['volume_id'],
'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def delete_snapshot(self, snapshot):
@@ -1286,25 +1292,22 @@ class DPLCOMMONDriver(driver.VolumeDriver):
msg = _('Flexvisor failed to delete snapshot %(id)s: '
'%(status)s.') % {'id': snapshot['id'],
'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to delete snapshot (failed to '
'get event) %(id)s.') % {'id': snapshot['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
elif ret == errno.ENODATA:
msg = _('Flexvisor snapshot %(id)s not existed.') % \
msg = _LI('Flexvisor snapshot %(id)s not existed.') % \
{'id': snapshot['id']}
LOG.info(msg)
elif ret != 0:
msg = _('Flexvisor failed to delete snapshot %(id)s: '
'%(status)s.') % {'id': snapshot['id'], 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor succeed to delete '
'snapshot %(id)s.') % {'id': snapshot['id']}
msg = _LI('Flexvisor succeed to delete '
'snapshot %(id)s.') % {'id': snapshot['id']}
LOG.info(msg)
def get_volume_stats(self, refresh=False):
@@ -1317,47 +1320,80 @@ class DPLCOMMONDriver(driver.VolumeDriver):
return self._stats
def _get_pools(self):
pools = []
qpools = []
# Prefer the access pool defined in the cinder configuration.
defined_pool = self.configuration.dpl_pool
if defined_pool:
qpools.append(defined_pool)
else:
try:
ret, output = self.dpl.get_pools()
if ret == 0:
for poolUuid, poolName in output.get('children', []):
qpools.append(poolUuid)
else:
msg = _LE("Flexvisor failed to get pool list."
"(Error: %d)") % (ret)
LOG.error(msg)
except Exception as e:
msg = _LE("Flexvisor failed to get pool list due to "
"%s.") % (six.text_type(e))
LOG.error(msg)
# Query pool detail information
for poolid in qpools:
ret, output = self._get_pool_info(poolid)
if ret == 0:
pool = {}
pool['pool_name'] = output['metadata']['pool_uuid']
pool['total_capacity_gb'] = \
self._convert_size_GB(
int(output['metadata']['total_capacity']))
pool['free_capacity_gb'] = \
self._convert_size_GB(
int(output['metadata']['available_capacity']))
pool['allocated_capacity_gb'] = \
self._convert_size_GB(
int(output['metadata']['used_capacity']))
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
pools.append(pool)
else:
msg = _LW("Failed to query pool %(id)s status "
"%(ret)d.") % {'id': poolid,
'ret': ret}
LOG.warning(msg)
continue
return pools
def _update_volume_stats(self, refresh=False):
"""Return the current state of the volume service. If 'refresh' is
True, run the update first.
"""
data = {}
totalSize = 0
availableSize = 0
ret, output = self._get_pool_info(self.configuration.dpl_pool)
if ret == 0:
totalSize = int(output['metadata']['total_capacity'])
availableSize = int(output['metadata']['available_capacity'])
else:
totalSize = 0
availableSize = 0
pools = self._get_pools()
data['volume_backend_name'] = \
self.configuration.safe_get('volume_backend_name')
location_info = '%(driver)s:%(host)s:%(volume)s' % {
'driver': self.__class__.__name__,
'host': self.configuration.san_ip,
'volume': self.configuration.dpl_pool
}
try:
ret, output = self.dpl.get_server_info()
if ret == 0:
data['vendor_name'] = output['metadata']['vendor']
data['driver_version'] = output['metadata']['version']
data['storage_protocol'] = 'iSCSI'
data['total_capacity_gb'] = self._convert_size_GB(totalSize)
data['free_capacity_gb'] = self._convert_size_GB(availableSize)
data['reserved_percentage'] = 0
data['QoS_support'] = False
data['location_info'] = location_info
data['consistencygroup_support'] = True
data['pools'] = pools
self._stats = data
except Exception as e:
msg = _('Failed to get server info due to '
'%(state)s.') % {'state': six.text_type(e)}
msg = _LE('Failed to get server info due to '
'%(state)s.') % {'state': six.text_type(e)}
LOG.error(msg)
return self._stats
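With this change the driver reports per-pool capabilities instead of one flat capacity pair. A sketch of the stats dictionary the scheduler now receives; values are illustrative and taken from the test fixtures above (POOLUUID is the fixture constant; 4294967296 bytes converts to 4 GiB):

stats = {
    'volume_backend_name': 'backend',
    'vendor_name': 'ProphetStor',
    'driver_version': '1.5',
    'storage_protocol': 'iSCSI',
    'consistencygroup_support': True,
    'pools': [{
        'pool_name': POOLUUID,       # pool UUID, as set by _get_pools()
        'total_capacity_gb': 4,
        'free_capacity_gb': 4,
        'allocated_capacity_gb': 0,
        'reserved_percentage': 0,
        'QoS_support': False,
    }],
}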
@@ -1381,22 +1417,20 @@ class DPLCOMMONDriver(driver.VolumeDriver):
if status['state'] != 'available':
msg = _('Flexvisor failed to get pool info %(id)s: '
'%(status)s.') % {'id': poolid, 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
ret = 0
output = status.get('output', {})
else:
LOG.error(_LE('Flexvisor failed to get pool info '
'(failed to get event)%s.') % (poolid))
LOG.error(_('Flexvisor failed to get pool info '
'(failed to get event)%s.') % (poolid))
raise exception.VolumeBackendAPIException(
data="failed to get event")
elif ret != 0:
msg = _('Flexvisor failed to get pool info %(id)s: '
'%(status)s.') % {'id': poolid, 'status': ret}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = 'Flexvisor succeed to get pool info.'
msg = _('Flexvisor succeed to get pool info.')
LOG.debug(msg)
return ret, output