Merge "Implement retype in IBM GPFS Driver and refactor"

This commit is contained in:
Jenkins 2014-02-25 06:59:11 +00:00 committed by Gerrit Code Review
commit 91431b724f
6 changed files with 685 additions and 184 deletions

View File

@ -31,6 +31,7 @@ XIV_DS8K_MODULE = "cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver"
NETAPP_MODULE = "cinder.volume.drivers.netapp.common.Deprecated"
LEFTHAND_REST_MODULE = ("cinder.volume.drivers.san.hp.hp_lefthand_iscsi."
"HPLeftHandISCSIDriver")
GPFS_MODULE = "cinder.volume.drivers.ibm.gpfs.GPFSDriver"
class VolumeDriverCompatibility(test.TestCase):
@ -135,3 +136,11 @@ class VolumeDriverCompatibility(test.TestCase):
def test_hp_lefthand_rest_new(self):
self._load_driver(LEFTHAND_REST_MODULE)
self.assertEqual(self._driver_module_name(), LEFTHAND_REST_MODULE)
def test_gpfs_old(self):
    # Loading the deprecated pre-refactor module path must transparently
    # resolve to the relocated GPFS_MODULE class.
    self._load_driver('cinder.volume.drivers.gpfs.GPFSDriver')
    self.assertEqual(self._driver_module_name(), GPFS_MODULE)
def test_gpfs_new(self):
    # Loading the canonical module path is an identity mapping.
    self._load_driver(GPFS_MODULE)
    self.assertEqual(self._driver_module_name(), GPFS_MODULE)

View File

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import mox as mox_lib
import mock
import os
import tempfile
@ -24,7 +24,6 @@ from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import imageutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import test
@ -32,7 +31,8 @@ from cinder.tests import utils as test_utils
from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.gpfs import GPFSDriver
from cinder.volume.drivers.ibm import gpfs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
@ -82,32 +82,32 @@ class GPFSDriverTestCase(test.TestCase):
os.mkdir(self.images_dir)
self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
self.driver = GPFSDriver(configuration=conf.Configuration(None))
self.driver = gpfs.GPFSDriver(configuration=conf.Configuration(None))
self.driver.set_execute(self._execute_wrapper)
self.driver._cluster_id = '123456'
self.driver._gpfs_device = '/dev/gpfs'
self.driver._storage_pool = 'system'
self.flags(volume_driver=self.driver_name,
gpfs_mount_point_base=self.volumes_path)
self.volume = importutils.import_object(CONF.volume_manager)
self.volume.driver.set_execute(self._execute_wrapper)
self.volume.driver.set_initialized()
self.volume.stats = dict(allocated_capacity_gb=0)
self.stubs.Set(GPFSDriver, '_create_gpfs_snap',
self.stubs.Set(gpfs.GPFSDriver, '_create_gpfs_snap',
self._fake_gpfs_snap)
self.stubs.Set(GPFSDriver, '_create_gpfs_copy',
self.stubs.Set(gpfs.GPFSDriver, '_create_gpfs_copy',
self._fake_gpfs_copy)
self.stubs.Set(GPFSDriver, '_gpfs_redirect',
self.stubs.Set(gpfs.GPFSDriver, '_gpfs_redirect',
self._fake_gpfs_redirect)
self.stubs.Set(GPFSDriver, '_is_gpfs_parent_file',
self.stubs.Set(gpfs.GPFSDriver, '_is_gpfs_parent_file',
self._fake_is_gpfs_parent)
self.stubs.Set(GPFSDriver, '_is_gpfs_path',
self.stubs.Set(gpfs.GPFSDriver, '_is_gpfs_path',
self._fake_is_gpfs_path)
self.stubs.Set(GPFSDriver, '_delete_gpfs_file',
self.stubs.Set(gpfs.GPFSDriver, '_delete_gpfs_file',
self._fake_delete_gpfs_file)
self.stubs.Set(GPFSDriver, '_create_sparse_file',
self.stubs.Set(gpfs.GPFSDriver, '_create_sparse_file',
self._fake_create_sparse_file)
self.stubs.Set(GPFSDriver, '_allocate_file_blocks',
self.stubs.Set(gpfs.GPFSDriver, '_allocate_file_blocks',
self._fake_allocate_file_blocks)
self.stubs.Set(GPFSDriver, '_get_available_capacity',
self.stubs.Set(gpfs.GPFSDriver, '_get_available_capacity',
self._fake_get_available_capacity)
self.stubs.Set(image_utils, 'qemu_img_info',
self._fake_qemu_qcow2_image_info)
@ -135,26 +135,25 @@ class GPFSDriverTestCase(test.TestCase):
vol = test_utils.create_volume(self.context, host=CONF.host)
volume_id = vol['id']
self.assertTrue(os.path.exists(self.volumes_path))
self.volume.create_volume(self.context, volume_id)
self.driver.create_volume(vol)
path = self.volumes_path + '/' + vol['name']
self.assertTrue(os.path.exists(path))
self.volume.delete_volume(self.context, volume_id)
self.driver.delete_volume(vol)
self.assertFalse(os.path.exists(path))
def test_create_delete_volume_sparse_backing_file(self):
"""Create and delete vol with default sparse creation method."""
CONF.gpfs_sparse_volumes = True
vol = test_utils.create_volume(self.context, host=CONF.host)
volume_id = vol['id']
self.assertTrue(os.path.exists(self.volumes_path))
self.volume.create_volume(self.context, volume_id)
self.driver.create_volume(vol)
path = self.volumes_path + '/' + vol['name']
self.assertTrue(os.path.exists(path))
self.volume.delete_volume(self.context, volume_id)
self.driver.delete_volume(vol)
self.assertFalse(os.path.exists(path))
def test_create_volume_with_attributes(self):
self.stubs.Set(GPFSDriver, '_gpfs_change_attributes',
self.stubs.Set(gpfs.GPFSDriver, '_gpfs_change_attributes',
self._fake_gpfs_change_attributes)
attributes = {'dio': 'yes', 'data_pool_name': 'ssd_pool',
'replicas': '2', 'write_affinity_depth': '1',
@ -163,23 +162,220 @@ class GPFSDriverTestCase(test.TestCase):
'1,1,1:2;2,1,1:2;2,0,3:4'}
vol = test_utils.create_volume(self.context, host=CONF.host,
metadata=attributes)
volume_id = vol['id']
self.assertTrue(os.path.exists(self.volumes_path))
self.volume.create_volume(self.context, volume_id)
self.driver.create_volume(vol)
path = self.volumes_path + '/' + vol['name']
self.assertTrue(os.path.exists(path))
self.volume.delete_volume(self.context, volume_id)
self.driver.delete_volume(vol)
self.assertFalse(os.path.exists(path))
def test_migrate_volume(self):
"""Test volume migration done by driver."""
loc = 'GPFSDriver:cindertest:openstack'
def test_migrate_volume_local(self):
"""Verify volume migration performed locally by driver."""
ctxt = self.context
migrated_by_driver = True
volume = test_utils.create_volume(ctxt, host=CONF.host)
with mock.patch('cinder.utils.execute'):
LOG.debug('Migrate same cluster, different path, '
'move file to new path.')
loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
cap = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
self.driver.create_volume(volume)
migr, updt = self.driver.migrate_volume(ctxt, volume, host)
self.assertEqual(migr, migrated_by_driver)
self.driver.delete_volume(volume)
LOG.debug('Migrate same cluster, different path, '
'move file to new path, rv = %s.' % migr)
LOG.debug('Migrate same cluster, same path, no action taken.')
gpfs_base = self.driver.configuration.gpfs_mount_point_base
loc = 'GPFSDriver:%s:%s' % (self.driver._cluster_id, gpfs_base)
cap = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
self.driver.create_volume(volume)
migr, updt = self.driver.migrate_volume(ctxt, volume, host)
self.assertEqual(migr, migrated_by_driver)
self.driver.delete_volume(volume)
LOG.debug('Migrate same cluster, same path, no action taken, '
'rv = %s' % migr)
def test_migrate_volume_generic(self):
    """Verify cases where driver cannot perform migration locally."""
    ctxt = self.context
    # In all three scenarios the driver must decline (False) so the
    # manager falls back to generic host-to-host migration.
    migrated_by_driver = False
    volume = test_utils.create_volume(ctxt, host=CONF.host)
    with mock.patch('cinder.utils.execute'):
        LOG.debug('Migrate request for different cluster, return false '
                  'for generic migration.')
        # Scenario 1: destination advertises a foreign GPFS cluster id.
        other_cluster_id = '000000'
        loc = 'GPFSDriver:%s:testpath' % other_cluster_id
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.driver.create_volume(volume)
        migr, updt = self.driver.migrate_volume(ctxt, volume, host)
        self.assertEqual(migr, migrated_by_driver)
        self.driver.delete_volume(volume)
        LOG.debug('Migrate request for different cluster, rv = %s.' % migr)
        LOG.debug('Migrate request with no location info, return false '
                  'for generic migration.')
        # Scenario 2: destination has no location_info capability at all.
        host = {'host': 'foo', 'capabilities': {}}
        self.driver.create_volume(volume)
        migr, updt = self.driver.migrate_volume(ctxt, volume, host)
        self.assertEqual(migr, migrated_by_driver)
        self.driver.delete_volume(volume)
        LOG.debug('Migrate request with no location info, rv = %s.' % migr)
        LOG.debug('Migrate request with bad location info, return false '
                  'for generic migration.')
        # Scenario 3: malformed location_info (two fields, not three).
        bad_loc = 'GPFSDriver:testpath'
        cap = {'location_info': bad_loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.driver.create_volume(volume)
        migr, updt = self.driver.migrate_volume(ctxt, volume, host)
        self.assertEqual(migr, migrated_by_driver)
        self.driver.delete_volume(volume)
        LOG.debug('Migrate request with bad location info, rv = %s.' %
                  migr)
def test_retype_volume_different_pool(self):
    """Retype between two storage pools on the same host succeeds."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}
    # NOTE(review): this create/migrate/delete warm-up looks like it only
    # seeds driver state; confirm it is actually required by the test.
    volume = test_utils.create_volume(self.context, host=CONF.host)
    self.driver.create_volume(volume)
    self.driver.migrate_volume(self.context, volume, host)
    self.driver.delete_volume(volume)
    # Two types that differ only in the requested storage pool.
    key_specs_old = {'capabilities:storage_pool': 'bronze',
                     'volume_backend_name': 'backend1'}
    key_specs_new = {'capabilities:storage_pool': 'gold',
                     'volume_backend_name': 'backend1'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 new_type_ref['id'])
    # set volume host to match target host
    volume = test_utils.create_volume(ctxt, host=host['host'])
    volume['volume_type_id'] = old_type['id']
    with mock.patch('cinder.utils.execute'):
        LOG.debug('Retype different pools, expected rv = True.')
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, new_type, diff, host)
        self.assertTrue(rv)
        self.driver.delete_volume(volume)
        LOG.debug('Retype different pools, rv = %s.' % rv)
def test_retype_volume_different_host(self):
    """Retype with host change: same cluster succeeds, foreign fails."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}
    # Destination that belongs to a different GPFS cluster (id 000000).
    newloc = 'GPFSDriver:000000:testpath'
    newcap = {'location_info': newloc}
    newhost = {'host': 'foo', 'capabilities': newcap}
    key_specs_old = {'capabilities:storage_pool': 'bronze',
                     'volume_backend_name': 'backend1'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    # Same type on both sides: only the host is changing.
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 old_type_ref['id'])
    # set volume host to be different from target host
    volume = test_utils.create_volume(ctxt, host=CONF.host)
    volume['volume_type_id'] = old_type['id']
    with mock.patch('cinder.utils.execute'):
        LOG.debug('Retype different hosts same cluster, '
                  'expected rv = True.')
        self.driver.db = mock.Mock()
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, old_type, diff, host)
        self.assertTrue(rv)
        self.driver.delete_volume(volume)
        LOG.debug('Retype different hosts same cluster, rv = %s.' % rv)
        LOG.debug('Retype different hosts, different cluster, '
                  'cannot migrate. Expected rv = False.')
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, old_type, diff, newhost)
        self.assertFalse(rv)
        self.driver.delete_volume(volume)
        LOG.debug('Retype different hosts, different cluster, '
                  'cannot migrate, rv = %s.' % rv)
def test_retype_volume_different_pool_and_host(self):
    """Retype changing both pool and host within the same cluster."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}
    key_specs_old = {'capabilities:storage_pool': 'bronze',
                     'volume_backend_name': 'backend1'}
    key_specs_new = {'capabilities:storage_pool': 'gold',
                     'volume_backend_name': 'backend1'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 new_type_ref['id'])
    # set volume host to be different from target host
    volume = test_utils.create_volume(ctxt, host=CONF.host)
    volume['volume_type_id'] = old_type['id']
    with mock.patch('cinder.utils.execute'):
        # different host different pool
        LOG.debug('Retype different pools and hosts, expected rv = True.')
        self.driver.db = mock.Mock()
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, new_type, diff, host)
        self.assertTrue(rv)
        self.driver.delete_volume(volume)
        LOG.debug('Retype different pools and hosts, rv = %s.' % rv)
def test_retype_volume_different_backend(self):
    """Retype to a different volume_backend_name is refused."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}
    key_specs_old = {'capabilities:storage_pool': 'bronze',
                     'volume_backend_name': 'backend1'}
    key_specs_new = {'capabilities:storage_pool': 'gold',
                     'volume_backend_name': 'backend2'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 new_type_ref['id'])
    # set volume host to match target host
    volume = test_utils.create_volume(ctxt, host=host['host'])
    volume['volume_type_id'] = old_type['id']
    with mock.patch('cinder.utils.execute'):
        LOG.debug('Retype different backends, cannot migrate. '
                  'Expected rv = False.')
        self.driver.create_volume(volume)
        # NOTE(review): new_type is built above but old_type is passed to
        # retype here; confirm that is intentional.
        rv = self.driver.retype(ctxt, volume, old_type, diff, host)
        self.assertFalse(rv)
        self.driver.delete_volume(volume)
        LOG.debug('Retype different backends, cannot migrate, '
                  'rv = %s.' % rv)
def _create_snapshot(self, volume_id, size='0'):
"""Create a snapshot object."""
@ -193,53 +389,43 @@ class GPFSDriverTestCase(test.TestCase):
def test_create_delete_snapshot(self):
volume_src = test_utils.create_volume(self.context, host=CONF.host)
self.volume.create_volume(self.context, volume_src['id'])
self.driver.create_volume(volume_src)
snapCount = len(db.snapshot_get_all_for_volume(self.context,
volume_src['id']))
self.assertEqual(snapCount, 0)
snapshot = self._create_snapshot(volume_src['id'])
snapshot_id = snapshot['id']
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_id)
self.driver.create_snapshot(snapshot)
self.assertTrue(os.path.exists(os.path.join(self.volumes_path,
snapshot['name'])))
snapCount = len(db.snapshot_get_all_for_volume(self.context,
volume_src['id']))
self.assertEqual(snapCount, 1)
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_src['id'])
self.driver.delete_snapshot(snapshot)
self.driver.delete_volume(volume_src)
self.assertFalse(os.path.exists(os.path.join(self.volumes_path,
snapshot['name'])))
snapCount = len(db.snapshot_get_all_for_volume(self.context,
volume_src['id']))
self.assertEqual(snapCount, 0)
def test_create_volume_from_snapshot(self):
volume_src = test_utils.create_volume(self.context, host=CONF.host)
self.volume.create_volume(self.context, volume_src['id'])
self.driver.create_volume(volume_src)
snapshot = self._create_snapshot(volume_src['id'])
snapshot_id = snapshot['id']
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_id)
self.driver.create_snapshot(snapshot)
self.assertTrue(os.path.exists(os.path.join(self.volumes_path,
snapshot['name'])))
volume_dst = test_utils.create_volume(self.context, host=CONF.host,
snapshot_id=snapshot_id)
self.volume.create_volume(self.context, volume_dst['id'], snapshot_id)
self.driver.create_volume_from_snapshot(volume_dst, snapshot)
self.assertEqual(volume_dst['id'], db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_id, db.volume_get(
context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst['id'])
self.driver.delete_volume(volume_dst)
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_src['id'])
self.driver.delete_snapshot(snapshot)
self.driver.delete_volume(volume_src)
def test_create_cloned_volume(self):
volume_src = test_utils.create_volume(self.context, host=CONF.host)
self.volume.create_volume(self.context, volume_src['id'])
self.driver.create_volume(volume_src)
volume_dst = test_utils.create_volume(self.context, host=CONF.host)
volumepath = os.path.join(self.volumes_path, volume_dst['name'])
@ -251,18 +437,13 @@ class GPFSDriverTestCase(test.TestCase):
volume_dst['id']).id)
self.assertTrue(os.path.exists(volumepath))
self.volume.delete_volume(self.context, volume_src['id'])
self.volume.delete_volume(self.context, volume_dst['id'])
self.driver.delete_volume(volume_src)
self.driver.delete_volume(volume_dst)
def test_create_volume_from_snapshot_method(self):
volume_src = test_utils.create_volume(self.context, host=CONF.host)
self.volume.create_volume(self.context, volume_src['id'])
snapshot = self._create_snapshot(volume_src['id'])
snapshot_id = snapshot['id']
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_id)
volume_dst = test_utils.create_volume(self.context, host=CONF.host)
self.driver.create_volume_from_snapshot(volume_dst, snapshot)
self.assertEqual(volume_dst['id'], db.volume_get(
@ -271,10 +452,7 @@ class GPFSDriverTestCase(test.TestCase):
volumepath = os.path.join(self.volumes_path, volume_dst['name'])
self.assertTrue(os.path.exists(volumepath))
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_volume(self.context, volume_src['id'])
self.driver.delete_volume(volume_dst)
def test_clone_image_to_volume_with_copy_on_write_mode(self):
"""Test the function of copy_image_to_volume
@ -295,7 +473,7 @@ class GPFSDriverTestCase(test.TestCase):
{})
self.assertTrue(os.path.exists(volumepath))
self.volume.delete_volume(self.context, volume['id'])
self.driver.delete_volume(volume)
self.assertFalse(os.path.exists(volumepath))
def test_clone_image_to_volume_with_copy_mode(self):
@ -317,7 +495,6 @@ class GPFSDriverTestCase(test.TestCase):
{})
self.assertTrue(os.path.exists(volumepath))
self.volume.delete_volume(self.context, volume['id'])
def test_copy_image_to_volume_with_non_gpfs_image_dir(self):
"""Test the function of copy_image_to_volume
@ -338,7 +515,6 @@ class GPFSDriverTestCase(test.TestCase):
FakeImageService(),
self.image_id)
self.assertTrue(os.path.exists(volumepath))
self.volume.delete_volume(self.context, volume['id'])
def test_copy_image_to_volume_with_illegal_image_format(self):
"""Test the function of copy_image_to_volume
@ -359,8 +535,6 @@ class GPFSDriverTestCase(test.TestCase):
FakeImageService(),
self.image_id)
self.volume.delete_volume(self.context, volume['id'])
def test_get_volume_stats(self):
stats = self.driver.get_volume_stats()
self.assertEqual(stats['volume_backend_name'], 'GPFS')
@ -368,7 +542,27 @@ class GPFSDriverTestCase(test.TestCase):
def test_extend_volume(self):
    """Extend succeeds when resize_image and qemu_img_info are mocked.

    The rendered span carried a stray pre-diff `mox = mox_lib.Mox()`
    line from the removed mox-based version; this is the resolved
    mock-based post-image.
    """
    new_vol_size = 15
    volume = test_utils.create_volume(self.context, host=CONF.host)
    with mock.patch('cinder.image.image_utils.resize_image'):
        with mock.patch('cinder.image.image_utils.qemu_img_info'):
            self.driver.extend_volume(volume, new_vol_size)
def test_extend_volume_with_failure(self):
    """A failing resize must surface as VolumeBackendAPIException."""
    new_vol_size = 15
    volume = test_utils.create_volume(self.context, host=CONF.host)
    # NOTE(review): volpath is computed but never used below — consider
    # removing it.
    volpath = os.path.join(self.volumes_path, volume['name'])
    with mock.patch('cinder.image.image_utils.resize_image') as resize:
        with mock.patch('cinder.image.image_utils.qemu_img_info'):
            resize.side_effect = processutils.ProcessExecutionError('err')
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.extend_volume,
                              volume,
                              new_vol_size)
def test_resize_volume(self):
new_vol_size = 15
new_vol_size_bytes = new_vol_size * units.GiB
volume = test_utils.create_volume(self.context, host=CONF.host)
volpath = os.path.join(self.volumes_path, volume['name'])
@ -376,74 +570,56 @@ class GPFSDriverTestCase(test.TestCase):
file format: raw
virtual size: %sG (%s bytes)
backing file: %s
""" % (volume['name'], new_vol_size, new_vol_size * units.GiB, volpath)
mox.StubOutWithMock(image_utils, 'resize_image')
image_utils.resize_image(volpath, new_vol_size, run_as_root=True)
mox.StubOutWithMock(image_utils, 'qemu_img_info')
""" % (volume['name'], new_vol_size, new_vol_size_bytes, volpath)
img_info = imageutils.QemuImgInfo(qemu_img_info_output)
image_utils.qemu_img_info(volpath).AndReturn(img_info)
mox.ReplayAll()
self.driver.extend_volume(volume, new_vol_size)
mox.VerifyAll()
def test_extend_volume_with_failure(self):
new_vol_size = 15
mox = mox_lib.Mox()
volume = test_utils.create_volume(self.context, host=CONF.host)
volpath = os.path.join(self.volumes_path, volume['name'])
mox.StubOutWithMock(image_utils, 'resize_image')
image_utils.resize_image(volpath, new_vol_size, run_as_root=True).\
AndRaise(processutils.ProcessExecutionError('error'))
mox.ReplayAll()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume, volume, new_vol_size)
mox.VerifyAll()
with mock.patch('cinder.image.image_utils.resize_image'):
with mock.patch('cinder.image.image_utils.qemu_img_info') as info:
info.return_value = img_info
rv = self.driver._resize_volume_file(volume, new_vol_size)
self.assertEqual(rv, new_vol_size_bytes)
def test_check_for_setup_error_ok(self):
self.stubs.Set(GPFSDriver, '_get_gpfs_state',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_state',
self._fake_gpfs_get_state_active)
self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_cluster_release_level',
self._fake_gpfs_compatible_cluster_release_level)
self.stubs.Set(GPFSDriver, '_get_gpfs_filesystem_release_level',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_fs_release_level',
self._fake_gpfs_compatible_filesystem_release_level)
self.driver.check_for_setup_error()
def test_check_for_setup_error_gpfs_not_active(self):
self.stubs.Set(GPFSDriver, '_get_gpfs_state',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_state',
self._fake_gpfs_get_state_not_active)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
def test_check_for_setup_error_not_gpfs_path(self):
self.stubs.Set(GPFSDriver, '_get_gpfs_state',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_state',
self._fake_gpfs_get_state_active)
self.stubs.Set(GPFSDriver, '_is_gpfs_path',
self.stubs.Set(gpfs.GPFSDriver, '_is_gpfs_path',
self._fake_is_not_gpfs_path)
self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_cluster_release_level',
self._fake_gpfs_compatible_cluster_release_level)
self.stubs.Set(GPFSDriver, '_get_gpfs_filesystem_release_level',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_fs_release_level',
self._fake_gpfs_compatible_filesystem_release_level)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
def test_check_for_setup_error_incompatible_cluster_version(self):
self.stubs.Set(GPFSDriver, '_get_gpfs_state',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_state',
self._fake_gpfs_get_state_active)
self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_cluster_release_level',
self._fake_gpfs_incompatible_cluster_release_level)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
def test_check_for_setup_error_incompatible_filesystem_version(self):
self.stubs.Set(GPFSDriver, '_get_gpfs_state',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_state',
self._fake_gpfs_get_state_active)
self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_cluster_release_level',
self._fake_gpfs_compatible_cluster_release_level)
self.stubs.Set(GPFSDriver, '_get_gpfs_filesystem_release_level',
self.stubs.Set(gpfs.GPFSDriver, '_get_gpfs_fs_release_level',
self._fake_gpfs_incompatible_filesystem_release_level)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)

View File

@ -35,6 +35,7 @@ GPFS_CLONE_MIN_RELEASE = 1200
LOG = logging.getLogger(__name__)
gpfs_opts = [
cfg.StrOpt('gpfs_mount_point_base',
default=None,
@ -69,67 +70,218 @@ gpfs_opts = [
'which initially consume no space. If set to False, the '
'volume is created as a fully allocated file, in which '
'case, creation may take a significantly longer time.')),
cfg.StrOpt('gpfs_storage_pool',
default=None,
help=('Specifies the storage pool that volumes are assigned '
'to. By default, the system storage pool is used.')),
]
CONF = cfg.CONF
CONF.register_opts(gpfs_opts)
def _different(difference_tuple):
"""Return true if two elements of a tuple are different."""
if difference_tuple:
member1, member2 = difference_tuple
return member1 != member2
else:
return False
def _same_filesystem(path1, path2):
"""Return true if the two paths are in the same GPFS file system."""
return os.lstat(path1).st_dev == os.lstat(path2).st_dev
def _sizestr(size_in_g):
"""Convert the specified size into a string value."""
if int(size_in_g) == 0:
# return 100M size on zero input for testing
return '100M'
return '%sG' % size_in_g
class GPFSDriver(driver.VolumeDriver):
    """Implements volume functions using GPFS primitives.

    Version history:
    1.0.0 - Initial driver
    1.1.0 - Add volume retype, refactor volume migration
    """

    # Resolved from the diff rendering: the old one-line docstring and
    # the superseded VERSION = "1.0.0" line are dropped.
    VERSION = "1.1.0"

    def __init__(self, *args, **kwargs):
        super(GPFSDriver, self).__init__(*args, **kwargs)
        # Register the gpfs_* options on this backend's configuration.
        self.configuration.append_config_values(gpfs_opts)
def _get_gpfs_state(self):
    """Return GPFS state information (raw 'mmgetstate -Y' output).

    :raises: VolumeBackendAPIException if the command fails

    Resolved from the diff rendering: the superseded unguarded
    execute/return pair is dropped in favor of the error-handled form.
    """
    try:
        (out, _) = self._execute('mmgetstate', '-Y', run_as_root=True)
        return out
    except processutils.ProcessExecutionError as exc:
        LOG.error(_('Failed to issue mmgetstate command, error: %s.') %
                  exc.stderr)
        raise exception.VolumeBackendAPIException(data=exc.stderr)
def _check_gpfs_state(self):
    """Raise VolumeBackendAPIException if GPFS daemon is not active.

    Resolved from the diff rendering: the superseded pre-image log and
    message lines are dropped in favor of the post-image wording.
    """
    out = self._get_gpfs_state()
    lines = out.splitlines()
    # mmgetstate -Y emits colon-delimited records; row 0 is the header,
    # row 1 the data record for this node.
    state_token = lines[0].split(':').index('state')
    gpfs_state = lines[1].split(':')[state_token]
    if gpfs_state != 'active':
        LOG.error(_('GPFS is not active. Detailed output: %s.') % out)
        exception_message = (_('GPFS is not running, state: %s.') %
                             gpfs_state)
        raise exception.VolumeBackendAPIException(data=exception_message)
def _get_filesystem_from_path(self, path):
    """Return the filesystem (device) containing the specified path.

    :raises: VolumeBackendAPIException if df fails

    Resolved from the diff rendering: the superseded unguarded df/parse
    lines are dropped in favor of the error-handled form.
    """
    try:
        (out, _) = self._execute('df', path, run_as_root=True)
        lines = out.splitlines()
        # Row 0 is the df header; column 0 of row 1 is the device name.
        filesystem = lines[1].split()[0]
        return filesystem
    except processutils.ProcessExecutionError as exc:
        LOG.error(_('Failed to issue df command for path %(path)s, '
                    'error: %(error)s.') %
                  {'path': path,
                   'error': exc.stderr})
        raise exception.VolumeBackendAPIException(data=exc.stderr)
def _get_gpfs_cluster_id(self):
    """Return the id for GPFS cluster being used."""
    try:
        (out, _) = self._execute('mmlsconfig', 'clusterId', '-Y',
                                 run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_('Failed to issue mmlsconfig command, error: %s.') %
                  exc.stderr)
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    # -Y output is colon-delimited; locate the 'value' column by header.
    header, record = out.splitlines()[:2]
    col = header.split(':').index('value')
    return record.split(':')[col]
def _get_fileset_from_path(self, path):
    """Return the GPFS fileset that contains the specified path.

    :param path: any path inside the fileset of interest
    :raises: VolumeBackendAPIException if mmlsattr fails or no fileset
             name can be parsed from its output
    """
    fs_regex = re.compile(r'.*fileset.name:\s+(?P<fileset>\w+)', re.S)
    try:
        (out, _) = self._execute('mmlsattr', '-L', path, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # Fix: the format spec was '%(error)' without the trailing 's',
        # which raises ValueError while trying to report the failure.
        LOG.error(_('Failed to issue mmlsattr command on path %(path)s, '
                    'error: %(error)s.') %
                  {'path': path,
                   'error': exc.stderr})
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    match = fs_regex.match(out)
    if match is None:
        # Fix: the previous AttributeError handler read exc.stderr, an
        # attribute AttributeError instances do not have, so the error
        # path itself crashed.  Report the parse failure explicitly.
        msg = (_('Failed to find fileset for path %(path)s, error: '
                 '%(error)s.') %
               {'path': path,
                'error': 'fileset name not found in mmlsattr output'})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    return match.group('fileset')
def _verify_gpfs_pool(self, storage_pool):
    """Return true if the specified pool is a valid GPFS storage pool."""
    try:
        self._execute('mmlspool', self._gpfs_device, storage_pool,
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        # mmlspool exits non-zero for an unknown pool name.
        return False
    return True
def _update_volume_storage_pool(self, local_path, new_pool):
    """Set the storage pool for a volume to the specified value.

    :param local_path: path of the volume file to reassign
    :param new_pool: target pool name; None selects the 'system' pool
    :returns: True if the pool was updated, False if mmchattr failed
    :raises: VolumeBackendAPIException if new_pool is not a valid pool
    """
    if new_pool is None:
        new_pool = 'system'
    if not self._verify_gpfs_pool(new_pool):
        msg = (_('Invalid storage pool %s requested. Retype failed.') %
               new_pool)
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    try:
        self._execute('mmchattr', '-P', new_pool, local_path,
                      run_as_root=True)
        LOG.debug('Updated storage pool with mmchattr to %s.' % new_pool)
        return True
    except processutils.ProcessExecutionError as exc:
        # Consistency fix: wrap the message in _() like every other
        # translated log message in this module.
        LOG.info(_('Could not update storage pool with mmchattr to '
                   '%(pool)s, error: %(error)s') %
                 {'pool': new_pool,
                  'error': exc.stderr})
        return False
def _get_gpfs_fs_release_level(self, path):
    """Return the filesystem name and GPFS version of a file system.

    The file system is specified by any valid path it contains.

    :returns: (filesystem, release level as a whole number, e.g. 1323)
    :raises: VolumeBackendAPIException if mmlsfs fails
    """
    filesystem = self._get_filesystem_from_path(path)
    try:
        (out, _) = self._execute('mmlsfs', filesystem, '-V', '-Y',
                                 run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # Fix: '%(path)' was missing its 's' conversion specifier, which
        # raises ValueError instead of logging the underlying failure.
        LOG.error(_('Failed to issue mmlsfs command for path %(path)s, '
                    'error: %(error)s.') %
                  {'path': path,
                   'error': exc.stderr})
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    lines = out.splitlines()
    value_token = lines[0].split(':').index('data')
    fs_release_level_str = lines[1].split(':')[value_token]
    # At this point, release string looks like "13.23 (3.5.0.7)";
    # extract the first token and convert to a whole number value.
    fs_release_level = int(float(fs_release_level_str.split()[0]) * 100)
    return filesystem, fs_release_level
def _get_gpfs_cluster_release_level(self):
    """Return the GPFS version of current cluster.

    :raises: VolumeBackendAPIException if mmlsconfig fails

    Resolved from the diff rendering: the superseded unguarded execute
    lines are dropped in favor of the error-handled form.
    """
    try:
        (out, _) = self._execute('mmlsconfig', 'minreleaseLeveldaemon',
                                 '-Y', run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_('Failed to issue mmlsconfig command, error: %s.') %
                  exc.stderr)
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    lines = out.splitlines()
    # -Y output is colon-delimited; locate the 'value' column by header.
    value_token = lines[0].split(':').index('value')
    min_release_level = lines[1].split(':')[value_token]
    return int(min_release_level)
def _is_gpfs_path(self, directory):
    """Determine if the specified path is in a gpfs file system.

    Fix: the previous docstring claimed ProcessExecutionError is raised,
    but the handler converts it; callers see VolumeBackendAPIException.

    :raises: VolumeBackendAPIException if the path is not in a GPFS
             file system (mmlsattr fails)
    """
    try:
        self._execute('mmlsattr', directory, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # Fix: '%(path)' was missing its 's' conversion specifier, which
        # raises ValueError instead of logging the underlying failure.
        LOG.error(_('Failed to issue mmlsattr command for path %(path)s, '
                    'error: %(error)s.') %
                  {'path': directory,
                   'error': exc.stderr})
        raise exception.VolumeBackendAPIException(data=exc.stderr)
def _is_same_fileset(self, path1, path2):
"""Return true if the two paths are in the same GPFS fileset."""
if self._get_fileset_from_path(path1) == \
self._get_fileset_from_path(path2):
return True
return False
def _same_cluster(self, host):
"""Return true if the host is a member of the same GPFS cluster."""
dest_location = host['capabilities'].get('location_info')
if self._stats['location_info'] == dest_location:
return True
return False
@ -137,36 +289,94 @@ class GPFSDriver(driver.VolumeDriver):
"""Set permission bits for the path."""
self._execute('chmod', modebits, path, run_as_root=True)
def _can_migrate_locally(self, host):
    """Return the destination base path if a local migration is possible.

    Returns None when the destination host publishes no location info,
    the info is malformed, or it names a different driver or GPFS
    cluster; otherwise returns the destination mount path.
    """
    capabilities = host['capabilities']
    if 'location_info' not in capabilities:
        LOG.debug('Evaluate migration: no location info, '
                  'cannot migrate locally.')
        return None
    info = capabilities['location_info']
    try:
        dest_type, dest_id, dest_path = info.split(':')
    except ValueError:
        LOG.debug('Evaluate migration: unexpected location info, '
                  'cannot migrate locally: %s.' % info)
        return None
    if dest_type != 'GPFSDriver' or dest_id != self._cluster_id:
        LOG.debug('Evaluate migration: different destination driver or '
                  'cluster id in location info: %s.' % info)
        return None
    LOG.debug('Evaluate migration: use local migration.')
    return dest_path
def do_setup(self, ctxt):
    """Determine storage back end capabilities.

    Caches the GPFS cluster id, the file system device backing the
    configured mount point, and the storage pool to place volumes in.

    :raises: VolumeBackendAPIException when the cluster id, device, or
             storage pool cannot be determined/validated
    """
    try:
        self._cluster_id = self._get_gpfs_cluster_id()
    except Exception as setup_exception:
        msg = (_('Could not find GPFS cluster id: %s.') %
               str(setup_exception))
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    try:
        gpfs_base = self.configuration.gpfs_mount_point_base
        self._gpfs_device = self._get_filesystem_from_path(gpfs_base)
    except Exception as setup_exception:
        msg = (_('Could not find GPFS file system device: %s.') %
               str(setup_exception))
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    # Default to the GPFS 'system' pool when none is configured.
    pool = self.configuration.safe_get('gpfs_storage_pool')
    self._storage_pool = pool or 'system'
    if not self._verify_gpfs_pool(self._storage_pool):
        # fixed typo: 'specificed' -> 'specified'
        msg = (_('Invalid storage pool %s specified.') %
               self._storage_pool)
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
self._check_gpfs_state()
if(self.configuration.gpfs_mount_point_base is None):
if self.configuration.gpfs_mount_point_base is None:
msg = _('Option gpfs_mount_point_base is not set correctly.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if(self.configuration.gpfs_images_share_mode and
self.configuration.gpfs_images_share_mode not in ['copy_on_write',
'copy']):
if (self.configuration.gpfs_images_share_mode and
self.configuration.gpfs_images_share_mode not in ['copy_on_write',
'copy']):
msg = _('Option gpfs_images_share_mode is not set correctly.')
LOG.warn(msg)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if(self.configuration.gpfs_images_share_mode and
self.configuration.gpfs_images_dir is None):
msg = _('Option gpfs_images_dir is not set correctly.')
LOG.warn(msg)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and
not self._is_samefs(self.configuration.gpfs_mount_point_base,
self.configuration.gpfs_images_dir)):
not _same_filesystem(self.configuration.gpfs_mount_point_base,
self.configuration.gpfs_images_dir)):
msg = (_('gpfs_images_share_mode is set to copy_on_write, but '
'%(vol)s and %(img)s belong to different file systems') %
'%(vol)s and %(img)s belong to different file '
'systems.') %
{'vol': self.configuration.gpfs_mount_point_base,
'img': self.configuration.gpfs_images_dir})
LOG.warn(msg)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and
not self._is_same_fileset(self.configuration.gpfs_mount_point_base,
self.configuration.gpfs_images_dir)):
msg = (_('gpfs_images_share_mode is set to copy_on_write, but '
'%(vol)s and %(img)s belong to different filesets.') %
{'vol': self.configuration.gpfs_mount_point_base,
'img': self.configuration.gpfs_images_dir})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
_gpfs_cluster_release_level = self._get_gpfs_cluster_release_level()
@ -197,12 +407,13 @@ class GPFSDriver(driver.VolumeDriver):
# Check if GPFS is mounted
self._verify_gpfs_path_state(directory)
fs, fslevel = self._get_gpfs_filesystem_release_level(directory)
filesystem, fslevel = \
self._get_gpfs_fs_release_level(directory)
if not fslevel >= GPFS_CLONE_MIN_RELEASE:
msg = (_('The GPFS filesystem %(fs)s is not at the required '
'release level. Current level is %(cur)s, must be '
'at least %(min)s.') %
{'fs': fs,
{'fs': filesystem,
'cur': fslevel,
'min': GPFS_CLONE_MIN_RELEASE})
LOG.error(msg)
@ -211,7 +422,7 @@ class GPFSDriver(driver.VolumeDriver):
def _create_sparse_file(self, path, size):
"""Creates file with 0 disk usage."""
sizestr = self._sizestr(size)
sizestr = _sizestr(size)
self._execute('truncate', '-s', sizestr, path, run_as_root=True)
def _allocate_file_blocks(self, path, size):
@ -226,18 +437,23 @@ class GPFSDriver(driver.VolumeDriver):
run_as_root=True)
def _gpfs_change_attributes(self, options, path):
    """Update GPFS attributes on the specified file."""
    cmd = ['mmchattr'] + list(options) + [path]
    LOG.debug('Update volume attributes with mmchattr to %s.' % options)
    self._execute(*cmd, run_as_root=True)
def _set_volume_attributes(self, path, metadata):
"""Set various GPFS attributes for this volume."""
set_pool = False
options = []
for item in metadata:
if item['key'] == 'data_pool_name':
options.extend(['-P', item['value']])
set_pool = True
elif item['key'] == 'replicas':
options.extend(['-r', item['value'], '-m', item['value']])
elif item['key'] == 'dio':
@ -250,6 +466,10 @@ class GPFSDriver(driver.VolumeDriver):
options.extend(['--write-affinity-failure-group',
item['value']])
# metadata value has precedence over value set in volume type
if self.configuration.gpfs_storage_pool and not set_pool:
options.extend(['-P', self.configuration.gpfs_storage_pool])
if options:
self._gpfs_change_attributes(options, path)
@ -284,6 +504,7 @@ class GPFSDriver(driver.VolumeDriver):
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a GPFS volume from a snapshot."""
volume_path = self.local_path(volume)
snapshot_path = self.local_path(snapshot)
self._create_gpfs_copy(src=snapshot_path, dest=volume_path)
@ -293,6 +514,8 @@ class GPFSDriver(driver.VolumeDriver):
return {'size': math.ceil(virt_size / units.GiB)}
def create_cloned_volume(self, volume, src_vref):
"""Create a GPFS volume from another volume."""
src = self.local_path(src_vref)
dest = self.local_path(volume)
self._create_gpfs_clone(src, dest)
@ -301,13 +524,15 @@ class GPFSDriver(driver.VolumeDriver):
return {'size': math.ceil(virt_size / units.GiB)}
def _delete_gpfs_file(self, fchild):
"""Delete a GPFS file and cleanup clone children."""
if not os.path.exists(fchild):
return
(out, err) = self._execute('mmclone', 'show', fchild, run_as_root=True)
fparent = None
reInode = re.compile(
'.*\s+(?:yes|no)\s+\d+\s+(?P<inode>\d+)', re.M | re.S)
match = reInode.match(out)
inode_regex = re.compile(
r'.*\s+(?:yes|no)\s+\d+\s+(?P<inode>\d+)', re.M | re.S)
match = inode_regex.match(out)
if match:
inode = match.group('inode')
path = os.path.dirname(fchild)
@ -328,7 +553,7 @@ class GPFSDriver(driver.VolumeDriver):
# would succeed and the snapshot is deleted.
if not os.path.exists(fchild) and fparent:
fpbase = os.path.basename(fparent)
if (fpbase.endswith('.snap') or fpbase.endswith('.ts')):
if fpbase.endswith('.snap') or fpbase.endswith('.ts'):
self._delete_gpfs_file(fparent)
def delete_volume(self, volume):
@ -350,8 +575,8 @@ class GPFSDriver(driver.VolumeDriver):
if max_depth == 0:
return False
(out, err) = self._execute('mmclone', 'show', src, run_as_root=True)
reDepth = re.compile('.*\s+no\s+(?P<depth>\d+)', re.M | re.S)
match = reDepth.match(out)
depth_regex = re.compile(r'.*\s+no\s+(?P<depth>\d+)', re.M | re.S)
match = depth_regex.match(out)
if match:
depth = int(match.group('depth'))
if depth > max_depth:
@ -360,22 +585,26 @@ class GPFSDriver(driver.VolumeDriver):
return False
def _create_gpfs_clone(self, src, dest):
"""Create a GPFS file clone parent for the specified file."""
snap = dest + ".snap"
self._create_gpfs_snap(src, snap)
self._create_gpfs_copy(snap, dest)
if(self._gpfs_redirect(src) and self._gpfs_redirect(dest)):
if self._gpfs_redirect(src) and self._gpfs_redirect(dest):
self._execute('rm', '-f', snap, run_as_root=True)
def _create_gpfs_copy(self, src, dest):
    """Create a GPFS file clone copy for the specified file.

    'mmclone copy' makes dest a clone child of the clone parent src
    (see _create_gpfs_clone for how parents are produced).
    """
    self._execute('mmclone', 'copy', src, dest, run_as_root=True)
def _create_gpfs_snap(self, src, dest=None):
"""Create a GPFS file clone snapshot for the specified file."""
if dest is None:
self._execute('mmclone', 'snap', src, run_as_root=True)
else:
self._execute('mmclone', 'snap', src, dest, run_as_root=True)
def _is_gpfs_parent_file(self, gpfs_file):
"""Return true if the specified file is a gpfs clone parent."""
out, _ = self._execute('mmclone', 'show', gpfs_file, run_as_root=True)
ptoken = out.splitlines().pop().split()[0]
return ptoken == 'yes'
@ -403,6 +632,7 @@ class GPFSDriver(driver.VolumeDriver):
check_exit_code=False, run_as_root=True)
def local_path(self, volume):
    """Return the local path for the specified volume."""
    mount_base = self.configuration.gpfs_mount_point_base
    return os.path.join(mount_base, volume['name'])
@ -444,7 +674,8 @@ class GPFSDriver(driver.VolumeDriver):
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
LOG.debug("Updating volume stats.")
gpfs_base = self.configuration.gpfs_mount_point_base
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or 'GPFS'
@ -457,17 +688,20 @@ class GPFSDriver(driver.VolumeDriver):
data['free_capacity_gb'] = math.ceil(free / units.GiB)
data['reserved_percentage'] = 0
data['QoS_support'] = False
data['storage_pool'] = self._storage_pool
data['location_info'] = ('GPFSDriver:%(cluster_id)s:%(root_path)s' %
{'cluster_id': self._cluster_id,
'root_path': gpfs_base})
data['reserved_percentage'] = 0
self._stats = data
def _sizestr(self, size_in_g):
if int(size_in_g) == 0:
return '100M'
return '%sG' % size_in_g
def clone_image(self, volume, image_location, image_id, image_meta):
    """Create a volume from the specified image.

    Thin wrapper delegating to _clone_image; image_meta is accepted to
    satisfy the driver interface but is not used here.
    """
    return self._clone_image(volume, image_location, image_id)
def _is_cloneable(self, image_id):
"""Return true if the specified image can be cloned by GPFS."""
if not((self.configuration.gpfs_images_dir and
self.configuration.gpfs_images_share_mode)):
reason = 'glance repository not configured to use GPFS'
@ -495,7 +729,7 @@ class GPFSDriver(driver.VolumeDriver):
cloneable_image, reason, image_path = self._is_cloneable(image_id)
if not cloneable_image:
LOG.debug('Image %(img)s not cloneable: %(reas)s' %
LOG.debug('Image %(img)s not cloneable: %(reas)s.' %
{'img': image_id, 'reas': reason})
return (None, False)
@ -511,17 +745,17 @@ class GPFSDriver(driver.VolumeDriver):
if data.file_format == 'raw':
if (self.configuration.gpfs_images_share_mode ==
'copy_on_write'):
LOG.debug('Clone image to vol %s using mmclone' %
LOG.debug('Clone image to vol %s using mmclone.' %
volume['id'])
self._create_gpfs_copy(image_path, vol_path)
elif self.configuration.gpfs_images_share_mode == 'copy':
LOG.debug('Clone image to vol %s using copyfile' %
LOG.debug('Clone image to vol %s using copyfile.' %
volume['id'])
shutil.copyfile(image_path, vol_path)
# if image is not raw convert it to raw into vol_path destination
else:
LOG.debug('Clone image to vol %s using qemu convert' %
LOG.debug('Clone image to vol %s using qemu convert.' %
volume['id'])
image_utils.convert_image(image_path, vol_path, 'raw')
@ -542,7 +776,7 @@ class GPFSDriver(driver.VolumeDriver):
# Check if GPFS is mounted
self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
LOG.debug('Copy image to vol %s using image_utils fetch_to_raw' %
LOG.debug('Copy image to vol %s using image_utils fetch_to_raw.' %
volume['id'])
image_utils.fetch_to_raw(context, image_service, image_id,
self.local_path(volume),
@ -557,7 +791,7 @@ class GPFSDriver(driver.VolumeDriver):
image_utils.resize_image(vol_path, new_size, run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_("Failed to resize volume "
"%(volume_id)s, error: %(error)s") %
"%(volume_id)s, error: %(error)s.") %
{'volume_id': volume['id'],
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
@ -604,16 +838,91 @@ class GPFSDriver(driver.VolumeDriver):
with fileutils.file_open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def _mkfs(self, volume, fs, label=None):
if fs == 'swap':
def _migrate_volume(self, volume, host):
    """Migrate vol if source and dest are managed by same GPFS cluster.

    :returns: tuple (migrated, model_update); migrated is False when a
              local move is not possible or the move failed, in which
              case the caller should fall back to generic migration.
    """
    LOG.debug('Migrate volume request %(vol)s to %(host)s.' %
              {'vol': volume['name'],
               'host': host['host']})
    dest_path = self._can_migrate_locally(host)

    if dest_path is None:
        LOG.debug('Cannot migrate volume locally, use generic migration.')
        return (False, None)

    if dest_path == self.configuration.gpfs_mount_point_base:
        LOG.debug('Migration target is same cluster and path, '
                  'no work needed.')
        return (True, None)

    LOG.debug('Migration target is same cluster but different path, '
              'move the volume file.')
    local_path = self.local_path(volume)
    new_path = os.path.join(dest_path, volume['name'])
    try:
        self._execute('mv', local_path, new_path, run_as_root=True)
        return (True, None)
    except processutils.ProcessExecutionError as exc:
        # %(vol)s: a bare %(vol) has no conversion character and would
        # break this error message at format time.
        LOG.error(_('Driver-based migration of volume %(vol)s failed. '
                    'Move from %(src)s to %(dst)s failed with error: '
                    '%(error)s.') %
                  {'vol': volume['name'],
                   'src': local_path,
                   'dst': new_path,
                   'error': exc.stderr})
        return (False, None)
def migrate_volume(self, context, volume, host):
    """Attempt to migrate a volume to specified host.

    Delegates to _migrate_volume and returns its (migrated,
    model_update) tuple; context is unused here but is part of the
    driver interface.
    """
    return self._migrate_volume(volume, host)
def retype(self, context, volume, new_type, diff, host):
    """Modify volume to be of new type.

    Handles the retype in place when only the storage pool and/or host
    (within the same cluster) differ; returns False for a backend
    change so that generic migration creates a new volume instead.
    """
    LOG.debug('Retype volume request %(vol)s to be %(type)s '
              '(host: %(host)s), diff %(diff)s.' %
              {'vol': volume['name'],
               'type': new_type,
               'host': host,
               'diff': diff})

    retyped = False
    migrated = False

    # Extract the (old, new) pairs this retype may act on.
    pools = diff['extra_specs'].get('capabilities:storage_pool')
    backends = diff['extra_specs'].get('volume_backend_name')
    hosts = (volume['host'], host['host'])

    # if different backends let migration create a new volume and copy
    # data because the volume is considered to be substantially different
    if _different(backends):
        LOG.debug('Retype request is for different backends, '
                  'use migration: %s %s.' % backends)
        return False

    if _different(pools):
        old, new = pools
        LOG.debug('Retype pool attribute from %s to %s.' % pools)
        # Change the storage pool of the existing volume file in place.
        retyped = self._update_volume_storage_pool(self.local_path(volume),
                                                   new)

    if _different(hosts):
        LOG.debug('Retype hosts migrate from: %s to %s.' % hosts)
        migrated, mdl_update = self._migrate_volume(volume, host)
        if migrated:
            # Record the new owning host for the moved volume.
            updates = {'host': host['host']}
            self.db.volume_update(context, volume['id'], updates)

    return retyped or migrated
def _mkfs(self, volume, filesystem, label=None):
"""Initialize volume to be specified filesystem type."""
if filesystem == 'swap':
cmd = ['mkswap']
else:
cmd = ['mkfs', '-t', fs]
cmd = ['mkfs', '-t', filesystem]
if fs in ('ext3', 'ext4'):
if filesystem in ('ext3', 'ext4'):
cmd.append('-F')
if label:
if fs in ('msdos', 'vfat'):
if filesystem in ('msdos', 'vfat'):
label_opt = '-n'
else:
label_opt = '-L'
@ -625,7 +934,7 @@ class GPFSDriver(driver.VolumeDriver):
self._execute(*cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
exception_message = (_("mkfs failed on volume %(vol)s, "
"error message was: %(err)s")
"error message was: %(err)s.")
% {'vol': volume['name'], 'err': exc.stderr})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(

View File

@ -112,7 +112,9 @@ MAPPING = {
'cinder.volume.drivers.huawei.HuaweiISCSIDriver':
'cinder.volume.drivers.huawei.HuaweiVolumeDriver',
'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver':
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver'}
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver',
'cinder.volume.drivers.gpfs.GPFSDriver':
'cinder.volume.drivers.ibm.gpfs.GPFSDriver', }
def locked_volume_operation(f):

View File

@ -1111,7 +1111,24 @@
#
# Options defined in cinder.volume.drivers.gpfs
# Options defined in cinder.volume.drivers.hds.hds
#
# configuration file for HDS cinder plugin for HUS (string
# value)
#hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml
#
# Options defined in cinder.volume.drivers.huawei
#
# config data for cinder huawei plugin (string value)
#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml
#
# Options defined in cinder.volume.drivers.ibm.gpfs
#
# Specifies the path of the GPFS directory where Block Storage
@ -1147,22 +1164,9 @@
# may take a significantly longer time. (boolean value)
#gpfs_sparse_volumes=true
#
# Options defined in cinder.volume.drivers.hds.hds
#
# configuration file for HDS cinder plugin for HUS (string
# value)
#hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml
#
# Options defined in cinder.volume.drivers.huawei
#
# config data for cinder huawei plugin (string value)
#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml
# Specifies the storage pool that volumes are assigned to. By
# default, the system storage pool is used. (string value)
#gpfs_storage_pool=<None>
#

View File

@ -91,6 +91,7 @@ mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root
mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root
mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root
mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root
mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root
find: CommandFilter, find, root
mkfs: CommandFilter, mkfs, root