Volume manage/unmanage support to ZFSSA drivers
This change for the ZFSSA iSCSI and NFS drivers allows Cinder to manage existing volumes on a ZFSSA backend, and to unmanage them.

Change-Id: I2f280a2dfdb9cc6ce739b340eefa562f98d4038f
Implements: blueprint oracle-zfssa-volume-manage-unmanage
DocImpact
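For reviewers unfamiliar with the feature, the drivers gain Cinder's standard manage/unmanage entry points. The sketch below is illustrative only: a mock stands in for a configured ZFSSA driver instance, and the volume and LUN names are assumptions, not values taken from this change.

    # Minimal sketch of the call pattern the Cinder volume manager uses
    # against the new entry points (mock stands in for a configured driver).
    from unittest import mock

    driver = mock.Mock()
    driver.manage_existing_get_size.return_value = 3

    volume = {'name': 'volume-123'}               # new Cinder-side volume record
    existing_ref = {'source-name': 'volume-567'}  # LUN/file already on the appliance

    size_gb = driver.manage_existing_get_size(volume, existing_ref)  # size in GiB
    driver.manage_existing(volume, existing_ref)  # rename to the Cinder name, set cinder_managed
    driver.unmanage(volume)                       # rename to 'unmanaged-volume-123', clear the flag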
parent c4770f7466
commit a75a528fd2
@@ -1,4 +1,4 @@
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -169,6 +169,7 @@ class TestZFSSAISCSIDriver(test.TestCase):
        self.configuration.zfssa_cache_project = zfssa_cache_dir
        self.configuration.safe_get = self.fake_safe_get
        self.configuration.zfssa_replication_ip = '1.1.1.1'
        self.configuration.zfssa_manage_policy = 'loose'

    def _util_migrate_volume_exceptions(self):
        self.drv.zfssa.get_lun.return_value = (
@@ -739,6 +740,120 @@ class TestZFSSAISCSIDriver(test.TestCase):
            lcfg.zfssa_cache_project,
            volname)

    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol')
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_volume_to_manage')
    def test_volume_manage(self, _get_existing_vol, _verify_volume_to_manage):
        lcfg = self.configuration
        lcfg.zfssa_manage_policy = 'loose'
        test_vol = self.test_vol
        self.drv._get_existing_vol.return_value = test_vol
        self.drv._verify_volume_to_manage.return_value = None
        self.drv.zfssa.set_lun_props.return_value = True
        self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'},
                                                   {'source-name':
                                                    'volume-567'}))
        self.drv._get_existing_vol.assert_called_once_with({'source-name':
                                                            'volume-567'})
        self.drv._verify_volume_to_manage.assert_called_once_with(test_vol)
        self.drv.zfssa.set_lun_props.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            test_vol['name'],
            name='volume-123',
            schema={"custom:cinder_managed": True})

        # Case when zfssa_manage_policy is 'loose' and 'cinder_managed' is
        # set to False.
        test_vol.update({'cinder_managed': False})
        self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'},
                                                   {'source-name':
                                                    'volume-567'}))

        # Another case is when the zfssa_manage_policy is set to 'strict'
        lcfg.zfssa_manage_policy = 'strict'
        test_vol.update({'cinder_managed': False})
        self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'},
                                                   {'source-name':
                                                    'volume-567'}))

    def test_volume_manage_negative(self):
        lcfg = self.configuration
        lcfg.zfssa_manage_policy = 'strict'
        test_vol = self.test_vol

        if 'cinder_managed' in test_vol:
            del test_vol['cinder_managed']

        self.drv.zfssa.get_lun.return_value = test_vol
        self.assertRaises(exception.InvalidInput,
                          self.drv.manage_existing, {'name': 'cindervol'},
                          {'source-name': 'volume-567'})

        test_vol.update({'cinder_managed': True})
        self.drv.zfssa.get_lun.return_value = test_vol
        self.assertRaises(exception.ManageExistingAlreadyManaged,
                          self.drv.manage_existing, {'name': 'cindervol'},
                          {'source-name': 'volume-567'})

        test_vol.update({'cinder_managed': False})
        self.drv.zfssa.get_lun.return_value = test_vol
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.drv.manage_existing, {'name': 'cindervol'},
                          {'source-id': 'volume-567'})

        lcfg.zfssa_manage_policy = 'loose'
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.drv.manage_existing, {'name': 'cindervol'},
                          {'source-id': 'volume-567'})

    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_volume_to_manage')
    def test_volume_manage_negative_api_exception(self,
                                                  _verify_volume_to_manage):
        lcfg = self.configuration
        lcfg.zfssa_manage_policy = 'loose'
        self.drv.zfssa.get_lun.return_value = self.test_vol
        self.drv._verify_volume_to_manage.return_value = None
        self.drv.zfssa.set_lun_props.side_effect = \
            exception.VolumeBackendAPIException(data='fake exception')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.manage_existing, {'name': 'volume-123'},
                          {'source-name': 'volume-567'})

    def test_volume_unmanage(self):
        lcfg = self.configuration
        self.drv.zfssa.set_lun_props.return_value = True
        self.assertIsNone(self.drv.unmanage({'name': 'volume-123'}))
        self.drv.zfssa.set_lun_props.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            'volume-123',
            name='unmanaged-volume-123',
            schema={"custom:cinder_managed": False})

    def test_volume_unmanage_negative(self):
        self.drv.zfssa.set_lun_props.side_effect = \
            exception.VolumeBackendAPIException(data='fake exception')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.unmanage, {'name': 'volume-123'})

    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol')
    def test_manage_existing_get_size(self, _get_existing_vol):
        test_vol = self.test_vol
        test_vol['size'] = 3 * units.Gi
        self.drv._get_existing_vol.return_value = test_vol
        self.assertEqual(3, self.drv.manage_existing_get_size(
            {'name': 'volume-123'},
            {'source-name': 'volume-567'}))

    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol')
    def test_manage_existing_get_size_negative(self, _get_existing_vol):
        self.drv._get_existing_vol.side_effect = \
            exception.VolumeNotFound(volume_id='123')
        self.assertRaises(exception.VolumeNotFound,
                          self.drv.manage_existing_get_size,
                          {'name': 'volume-123'},
                          {'source-name': 'volume-567'})


class TestZFSSANFSDriver(test.TestCase):

@@ -791,6 +906,7 @@ class TestZFSSANFSDriver(test.TestCase):
        self.configuration.zfssa_enable_local_cache = True
        self.configuration.zfssa_cache_directory = zfssa_cache_dir
        self.configuration.nfs_sparsed_volumes = 'true'
        self.configuration.zfssa_manage_policy = 'strict'

    def test_migrate_volume(self):
        self.drv.zfssa.get_asn.return_value = (
@@ -1082,6 +1198,152 @@ class TestZFSSANFSDriver(test.TestCase):
        self.drv.zfssa.delete_file.assert_called_once_with(
            img_props_nfs['name'])

    def test_volume_manage(self):
        lcfg = self.configuration
        lcfg.zfssa_manage_policy = 'loose'
        test_vol = self.test_vol

        self.drv.zfssa.get_volume.return_value = test_vol
        self.drv.zfssa.rename_volume.return_value = None
        self.drv.zfssa.set_file_props.return_value = None
        self.drv.mount_path = lcfg.zfssa_data_ip + ':' + 'fake_mountpoint'
        self.assertEqual({'provider_location': self.drv.mount_path},
                         self.drv.manage_existing({'name': 'volume-123'},
                                                  {'source-name':
                                                   'volume-567'}))

        self.drv.zfssa.get_volume.assert_called_once_with('volume-567')
        self.drv.zfssa.rename_volume.assert_called_once_with('volume-567',
                                                             'volume-123')
        self.drv.zfssa.set_file_props.assert_called_once_with(
            'volume-123', {'cinder_managed': 'True'})
        # Test when 'zfssa_manage_policy' is set to 'strict'.
        lcfg.zfssa_manage_policy = 'strict'
        test_vol.update({'cinder_managed': 'False'})
        self.drv.zfssa.get_volume.return_value = test_vol
        self.assertEqual({'provider_location': self.drv.mount_path},
                         self.drv.manage_existing({'name': 'volume-123'},
                                                  {'source-name':
                                                   'volume-567'}))

    def test_volume_manage_negative_no_source_name(self):
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.drv.manage_existing,
                          {'name': 'volume-123'},
                          {'source-id': 'volume-567'})

    def test_volume_manage_negative_backend_exception(self):
        self.drv.zfssa.get_volume.side_effect = \
            exception.VolumeNotFound(volume_id='volume-567')
        self.assertRaises(exception.InvalidInput,
                          self.drv.manage_existing,
                          {'name': 'volume-123'},
                          {'source-name': 'volume-567'})

    def test_volume_manage_negative_verify_fail(self):
        lcfg = self.configuration
        lcfg.zfssa_manage_policy = 'strict'
        test_vol = self.test_vol
        test_vol['cinder_managed'] = ''

        self.drv.zfssa.get_volume.return_value = test_vol
        self.assertRaises(exception.InvalidInput,
                          self.drv.manage_existing,
                          {'name': 'volume-123'},
                          {'source-name': 'volume-567'})

        test_vol.update({'cinder_managed': 'True'})
        self.drv.zfssa.get_volume.return_value = test_vol
        self.assertRaises(exception.ManageExistingAlreadyManaged,
                          self.drv.manage_existing,
                          {'name': 'volume-123'},
                          {'source-name': 'volume-567'})

    @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_volume_to_manage')
    def test_volume_manage_negative_rename_fail(self,
                                                _verify_volume_to_manage):
        test_vol = self.test_vol
        test_vol.update({'cinder_managed': 'False'})
        self.drv.zfssa.get_volume.return_value = test_vol
        self.drv._verify_volume_to_manage.return_value = None
        self.drv.zfssa.rename_volume.side_effect = \
            exception.VolumeBackendAPIException(data="fake exception")
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.manage_existing, {'name': 'volume-123'},
                          {'source-name': 'volume-567'})

    @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_volume_to_manage')
    def test_volume_manage_negative_set_prop_fail(self,
                                                  _verify_volume_to_manage):
        test_vol = self.test_vol
        test_vol.update({'cinder_managed': 'False'})
        self.drv.zfssa.get_volume.return_value = test_vol
        self.drv._verify_volume_to_manage.return_value = None
        self.drv.zfssa.rename_volume.return_value = None
        self.drv.zfssa.set_file_props.side_effect = \
            exception.VolumeBackendAPIException(data="fake exception")
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.manage_existing, {'name': 'volume-123'},
                          {'source-name': 'volume-567'})

    def test_volume_unmanage(self):
        test_vol = self.test_vol
        test_vol.update({'cinder_managed': 'True'})
        self.drv.zfssa.rename_volume.return_value = None
        self.drv.zfssa.set_file_props.return_value = None
        self.assertIsNone(self.drv.unmanage(test_vol))
        new_vol_name = 'unmanaged-' + test_vol['name']
        self.drv.zfssa.rename_volume.assert_called_once_with(test_vol['name'],
                                                             new_vol_name)
        self.drv.zfssa.set_file_props.assert_called_once_with(
            new_vol_name, {'cinder_managed': 'False'})

    def test_volume_unmanage_negative_rename_fail(self):
        test_vol = self.test_vol
        test_vol.update({'cinder_managed': 'True'})
        self.drv.zfssa.rename_volume.side_effect = \
            exception.VolumeBackendAPIException(data="fake exception")
        self.drv.zfssa.set_file_props.return_value = None
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.unmanage, test_vol)

    def test_volume_unmanage_negative_set_prop_fail(self):
        test_vol = self.test_vol
        test_vol.update({'cinder_managed': 'True'})
        self.drv.zfssa.rename_volume.return_value = None
        self.drv.zfssa.set_file_props.side_effect = \
            exception.VolumeBackendAPIException(data="fake exception")
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.unmanage, test_vol)

    @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_get_mount_point_for_share')
    def test_manage_existing_get_size(self, _get_mount_point_for_share):
        self.drv._get_mount_point_for_share.return_value = \
            '/fake/mnt/fake_share/'
        self.drv._mounted_shares = []
        self.drv._mounted_shares.append('fake_share')
        file = mock.Mock(st_size=123 * units.Gi)
        with mock.patch('os.path.isfile', return_value=True):
            with mock.patch('os.stat', return_value=file):
                self.assertEqual(float(file.st_size / units.Gi),
                                 self.drv.manage_existing_get_size(
                                     {'name': 'volume-123'},
                                     {'source-name': 'volume-567'}))

    @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_get_mount_point_for_share')
    def test_manage_existing_get_size_negative(self,
                                               _get_mount_point_for_share):
        self.drv._get_mount_point_for_share.return_value = \
            '/fake/mnt/fake_share/'
        self.drv._mounted_shares = []
        self.drv._mounted_shares.append('fake_share')
        with mock.patch('os.path.isfile', return_value=True):
            with mock.patch('os.stat', side_effect=OSError):
                self.assertRaises(exception.VolumeBackendAPIException,
                                  self.drv.manage_existing_get_size,
                                  {'name': 'volume-123'},
                                  {'source-name': 'volume-567'})


class TestZFSSAApi(test.TestCase):

@@ -1,4 +1,4 @@
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -20,6 +20,7 @@ import math
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import base64
from oslo_utils import excutils
from oslo_utils import units
import six

@@ -84,8 +85,10 @@ ZFSSA_OPTS = [
    cfg.BoolOpt('zfssa_enable_local_cache', default=True,
                help='Flag to enable local caching: True, False.'),
    cfg.StrOpt('zfssa_cache_project', default='os-cinder-cache',
               help='Name of ZFSSA project where cache volumes are stored.')
               help='Name of ZFSSA project where cache volumes are stored.'),
    cfg.StrOpt('zfssa_manage_policy', default='loose',
               choices=['loose', 'strict'],
               help='Driver policy for volume manage.')
]

CONF.register_opts(ZFSSA_OPTS)
@@ -109,8 +112,10 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):
    1.0.1:
        Backend enabled volume migration.
        Local cache feature.
    1.0.2:
        Volume manage/unmanage support.
    """
    VERSION = '1.0.1'
    VERSION = '1.0.2'
    protocol = 'iSCSI'

    def __init__(self, *args, **kwargs):
@@ -144,19 +149,25 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):
            compression=lcfg.zfssa_lun_compression,
            logbias=lcfg.zfssa_lun_logbias)

        schemas = [
            {'property': 'cinder_managed',
             'description': 'Managed by Cinder',
             'type': 'Boolean'}]

        if lcfg.zfssa_enable_local_cache:
            self.zfssa.create_project(lcfg.zfssa_pool,
                                      lcfg.zfssa_cache_project,
                                      compression=lcfg.zfssa_lun_compression,
                                      logbias=lcfg.zfssa_lun_logbias)
            schemas = [
            schemas.extend([
                {'property': 'image_id',
                 'description': 'OpenStack image ID',
                 'type': 'String'},
                {'property': 'updated_at',
                 'description': 'Most recent updated time of image',
                 'type': 'String'}]
            self.zfssa.create_schemas(schemas)
                 'type': 'String'}])

        self.zfssa.create_schemas(schemas)

        if (lcfg.zfssa_initiator_config != ''):
            initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
@@ -224,6 +235,13 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):

            self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)

        if lcfg.zfssa_manage_policy not in ("loose", "strict"):
            err_msg = (_("zfssa_manage_policy property needs to be set to"
                         " 'strict' or 'loose'. Current value is: %s.") %
                       lcfg.zfssa_manage_policy)
            LOG.error(err_msg)
            raise exception.InvalidInput(reason=err_msg)

    def check_for_setup_error(self):
        """Check that driver can login.

@@ -286,6 +304,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):
        lcfg = self.configuration
        volsize = str(volume['size']) + 'g'
        specs = self._get_voltype_specs(volume)
        specs.update({'custom:cinder_managed': True})
        self.zfssa.create_lun(lcfg.zfssa_pool,
                              lcfg.zfssa_project,
                              volume['name'],
@@ -995,6 +1014,103 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):
            LOG.warning(_LW("Volume %s exists but can't be deleted"),
                        cache['share'])

    def manage_existing(self, volume, existing_ref):
        """Manage an existing volume in the ZFSSA backend.

        :param volume: Reference to the new volume.
        :param existing_ref: Reference to the existing volume to be managed.
        """
        lcfg = self.configuration

        existing_vol = self._get_existing_vol(existing_ref)
        self._verify_volume_to_manage(existing_vol)

        new_vol_name = volume['name']

        try:
            self.zfssa.set_lun_props(lcfg.zfssa_pool,
                                     lcfg.zfssa_project,
                                     existing_vol['name'],
                                     name=new_vol_name,
                                     schema={"custom:cinder_managed": True})
        except exception.VolumeBackendAPIException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to rename volume %(existing)s to "
                              "%(new)s. Volume manage failed."),
                          {'existing': existing_vol['name'],
                           'new': new_vol_name})
        return None

    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of the volume to be managed by manage_existing."""
        existing_vol = self._get_existing_vol(existing_ref)

        size = existing_vol['size']
        return int(math.ceil(float(size) / units.Gi))

    def unmanage(self, volume):
        """Remove an existing volume from cinder management.

        :param volume: Reference to the volume to be unmanaged.
        """
        lcfg = self.configuration
        new_name = 'unmanaged-' + volume['name']
        try:
            self.zfssa.set_lun_props(lcfg.zfssa_pool,
                                     lcfg.zfssa_project,
                                     volume['name'],
                                     name=new_name,
                                     schema={"custom:cinder_managed": False})
        except exception.VolumeBackendAPIException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to rename volume %(existing)s to"
                              " %(new)s. Volume unmanage failed."),
                          {'existing': volume['name'],
                           'new': new_name})
        return None

    def _verify_volume_to_manage(self, volume):
        lcfg = self.configuration
        if lcfg.zfssa_manage_policy == 'loose':
            return

        vol_name = volume['name']

        if 'cinder_managed' not in volume:
            err_msg = (_("Unknown if the volume: %s to be managed is "
                         "already being managed by Cinder. Aborting manage "
                         "volume. Please add 'cinder_managed' custom schema "
                         "property to the volume and set its value to False."
                         " Alternatively, set the value of cinder config "
                         "policy 'zfssa_manage_policy' to 'loose' to "
                         "remove this restriction.") % vol_name)
            LOG.error(err_msg)
            raise exception.InvalidInput(reason=err_msg)

        if volume['cinder_managed'] is True:
            msg = (_("Volume: %s is already being managed by Cinder.")
                   % vol_name)
            LOG.error(msg)
            raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name)

    def _get_existing_vol(self, existing_ref):
        lcfg = self.configuration
        if 'source-name' not in existing_ref:
            msg = (_("Reference to volume: %s to be managed must contain "
                     "source-name.") % existing_ref)
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=msg)
        try:
            existing_vol = self.zfssa.get_lun(lcfg.zfssa_pool,
                                              lcfg.zfssa_project,
                                              existing_ref['source-name'])
        except exception.VolumeNotFound:
            err_msg = (_("Volume %s doesn't exist on the ZFSSA "
                         "backend.") % existing_ref['source-name'])
            LOG.error(err_msg)
            raise exception.InvalidInput(reason=err_msg)
        return existing_vol


class MigrateVolumeInit(task.Task):
    def execute(self, src_zfssa, volume, src_pool, src_project):

@@ -1,4 +1,4 @@
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -17,6 +17,7 @@ ZFS Storage Appliance NFS Cinder Volume Driver
import datetime as dt
import errno
import math
import os

from oslo_config import cfg
from oslo_log import log
@@ -59,7 +60,10 @@ ZFSSA_OPTS = [
                help='Flag to enable local caching: True, False.'),
    cfg.StrOpt('zfssa_cache_directory', default='os-cinder-cache',
               help='Name of directory inside zfssa_nfs_share where cache '
                    'volumes are stored.')
                    'volumes are stored.'),
    cfg.StrOpt('zfssa_manage_policy', default='loose',
               choices=['loose', 'strict'],
               help='Driver policy for volume manage.')
]

LOG = log.getLogger(__name__)
@@ -79,8 +83,10 @@ class ZFSSANFSDriver(nfs.NfsDriver):
    1.0.1:
        Backend enabled volume migration.
        Local cache feature.
    1.0.2:
        Volume manage/unmanage support.
    """
    VERSION = '1.0.1'
    VERSION = '1.0.2'
    volume_backend_name = 'ZFSSA_NFS'
    protocol = driver_prefix = driver_volume_type = 'nfs'

@@ -201,6 +207,10 @@ class ZFSSANFSDriver(nfs.NfsDriver):
        self.zfssa.verify_service('http')
        self.zfssa.verify_service('nfs')

    def create_volume(self, volume):
        super(ZFSSANFSDriver, self).create_volume(volume)
        self.zfssa.set_file_props(volume['name'], {'cinder_managed': 'True'})

    def create_snapshot(self, snapshot):
        """Creates a snapshot of a volume."""
        LOG.info(_LI('Creating snapshot: %s'), snapshot['name'])
@@ -635,3 +645,119 @@ class ZFSSANFSDriver(nfs.NfsDriver):
                                         method='MOVE')
        provider_location = new_volume['provider_location']
        return {'_name_id': None, 'provider_location': provider_location}

    def manage_existing(self, volume, existing_ref):
        """Manage an existing volume in the ZFSSA backend.

        :param volume: Reference to the new volume.
        :param existing_ref: Reference to the existing volume to be managed.
        """
        existing_vol_name = self._get_existing_vol_name(existing_ref)
        try:
            vol_props = self.zfssa.get_volume(existing_vol_name)
        except exception.VolumeNotFound:
            err_msg = (_("Volume %s doesn't exist on the ZFSSA backend.") %
                       existing_vol_name)
            LOG.error(err_msg)
            raise exception.InvalidInput(reason=err_msg)

        self._verify_volume_to_manage(existing_vol_name, vol_props)

        try:
            self.zfssa.rename_volume(existing_vol_name, volume['name'])
        except Exception:
            LOG.error(_LE("Failed to rename volume %(existing)s to %(new)s. "
                          "Volume manage failed."),
                      {'existing': existing_vol_name,
                       'new': volume['name']})
            raise

        try:
            self.zfssa.set_file_props(volume['name'],
                                      {'cinder_managed': 'True'})
        except Exception:
            self.zfssa.rename_volume(volume['name'], existing_vol_name)
            LOG.error(_LE("Failed to set properties for volume %(existing)s. "
                          "Volume manage failed."),
                      {'existing': volume['name']})
            raise

        return {'provider_location': self.mount_path}

    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of the volume to be managed by manage_existing."""
        existing_vol_name = self._get_existing_vol_name(existing_ref)

        # The ZFSSA NFS driver only has one mounted share.
        local_share_mount = self._get_mount_point_for_share(
            self._mounted_shares[0])
        local_vol_path = os.path.join(local_share_mount, existing_vol_name)

        try:
            if os.path.isfile(local_vol_path):
                size = int(math.ceil(float(
                    utils.get_file_size(local_vol_path)) / units.Gi))
        except (OSError, ValueError):
            err_msg = (_("Failed to get size of existing volume: %(vol)s. "
                         "Volume Manage failed.") % {'vol': existing_vol_name})
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

        LOG.debug("Size of volume: %(vol)s to be managed is: %(size)s.",
                  {'vol': existing_vol_name, 'size': size})

        return size

    def _verify_volume_to_manage(self, name, vol_props):
        lcfg = self.configuration

        if lcfg.zfssa_manage_policy != 'strict':
            return

        if vol_props['cinder_managed'] == "":
            err_msg = (_("Unknown if the volume: %s to be managed is "
                         "already being managed by Cinder. Aborting manage "
                         "volume. Please add 'cinder_managed' custom schema "
                         "property to the volume and set its value to False. "
                         "Alternatively, set the value of cinder config "
                         "policy 'zfssa_manage_policy' to 'loose' to "
                         "remove this restriction.") % name)
            LOG.error(err_msg)
            raise exception.InvalidInput(reason=err_msg)

        if vol_props['cinder_managed'] == 'True':
            msg = (_("Volume: %s is already being managed by Cinder.") % name)
            LOG.error(msg)
            raise exception.ManageExistingAlreadyManaged(volume_ref=name)

    def unmanage(self, volume):
        """Remove an existing volume from cinder management.

        :param volume: Reference to the volume to be unmanaged.
        """
        new_name = 'unmanaged-' + volume['name']
        try:
            self.zfssa.rename_volume(volume['name'], new_name)
        except Exception:
            LOG.error(_LE("Failed to rename volume %(existing)s to %(new)s. "
                          "Volume unmanage failed."),
                      {'existing': volume['name'],
                       'new': new_name})
            raise

        try:
            self.zfssa.set_file_props(new_name, {'cinder_managed': 'False'})
        except Exception:
            self.zfssa.rename_volume(new_name, volume['name'])
            LOG.error(_LE("Failed to set properties for volume %(existing)s. "
                          "Volume unmanage failed."),
                      {'existing': volume['name']})
            raise

    def _get_existing_vol_name(self, existing_ref):
        if 'source-name' not in existing_ref:
            msg = _("Reference to volume to be managed must contain "
                    "source-name.")
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=msg)
        return existing_ref['source-name']

@@ -1,4 +1,4 @@
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -747,6 +747,7 @@ class ZFSSAApi(object):

        val = json.loads(ret.data)
        ret = {
            'name': val['lun']['name'],
            'guid': val['lun']['lunguid'],
            'number': val['lun']['assignednumber'],
            'initiatorgroup': val['lun']['initiatorgroup'],
@@ -759,6 +760,8 @@ class ZFSSAApi(object):
        if 'custom:image_id' in val['lun']:
            ret.update({'image_id': val['lun']['custom:image_id']})
            ret.update({'updated_at': val['lun']['custom:updated_at']})
        if 'custom:cinder_managed' in val['lun']:
            ret.update({'cinder_managed': val['lun']['custom:cinder_managed']})

        return ret

@@ -923,6 +926,9 @@ class ZFSSAApi(object):
        if kargs is None:
            return

        if 'schema' in kargs:
            kargs.update(kargs.pop('schema'))

        ret = self.rclient.put(svc, kargs)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error Setting props '
@@ -1251,6 +1257,7 @@ class ZFSSANfsApi(ZFSSAApi):
            'updated_at': self._parse_prop(resp, 'updated_at'),
            'image_id': self._parse_prop(resp, 'image_id'),
            'origin': self._parse_prop(resp, 'origin'),
            'cinder_managed': self._parse_prop(resp, 'cinder_managed'),
        }
        return result

@@ -1288,3 +1295,7 @@ class ZFSSANfsApi(ZFSSAApi):
        except Exception:
            exception_msg = (_('Cannot create directory %s.'), dirname)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def rename_volume(self, src, dst):
        return self.webdavclient.request(src_file=src, dst_file=dst,
                                         method='MOVE')
@@ -0,0 +1,2 @@
features:
  - Volume manage/unmanage support for Oracle ZFSSA iSCSI and NFS drivers.
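As the strict-policy error messages in the drivers note, a backend volume is only accepted for manage when it carries a 'cinder_managed' property set to False. A rough, illustrative sketch of pre-flagging an existing iSCSI LUN through the driver's REST helper is shown below; the pool, project, and LUN names are assumptions, a mock stands in for the REST client, and the property can equally be set on the appliance directly.

    # Illustrative only: mark an existing LUN as not yet managed so that
    # zfssa_manage_policy = 'strict' will accept it (mocked REST client).
    from unittest import mock

    zfssa = mock.Mock()   # stands in for the driver's ZFSSAApi REST client
    zfssa.set_lun_props('mypool', 'myproject', 'volume-567',
                        schema={'custom:cinder_managed': False})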