Add SMB Volume Driver

Introduces a volume driver that uses SMB shares to host volumes
as disk images, following a workflow similar to that of the other
NFS-like drivers.

The driver uses Samba to access the SMB shares. It takes SMB share
information, such as the share path, credentials and other mount
flags, from a shares config file whose location can be configured
in the Cinder config file.

It includes all the features required by the Juno release.

Driver cert results:
http://paste.openstack.org/show/100915/

The online snapshot related tests have been skipped as this
feature is not supported yet.

Implements: blueprint smbfs-volume-driver

DocImpact

Co-Authored-By: Adelina Tuvenie <atuvenie@cloudbasesolutions.com>

Change-Id: I3b4bebf73122d5b784228af18f9b1eddfc02b643
This commit is contained in:
Lucian Petrut 2014-07-10 15:45:31 +03:00 committed by Eric Harney
parent 2bd9a8e710
commit 240b0c02df
9 changed files with 2020 additions and 948 deletions

View File

@ -42,6 +42,12 @@ class RemoteFsClient(object):
err=_('nfs_mount_point_base required'))
self._mount_options = kwargs.get('nfs_mount_options', None)
self._check_nfs_options()
elif mount_type == "cifs":
self._mount_base = kwargs.get('smbfs_mount_point_base', None)
if not self._mount_base:
raise exception.InvalidParameterValue(
err=_('smbfs_mount_point_base required'))
self._mount_options = kwargs.get('smbfs_mount_options', None)
elif mount_type == "glusterfs":
self._mount_base = kwargs.get('glusterfs_mount_point_base', None)
if not self._mount_base:

View File

@ -726,6 +726,19 @@ class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG")
# Smbfs driver
class SmbfsException(RemoteFSException):
    # Catch-all SMBFS driver error; base class for the more specific
    # SMBFS exceptions below.
    message = _("Unknown SMBFS exception.")
class SmbfsNoSharesMounted(RemoteFSNoSharesMounted):
    # Raised when the driver has no mounted SMBFS shares to place volumes on.
    message = _("No mounted SMBFS shares found.")
class SmbfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
    # Raised when no mounted share has enough room for the requested volume.
    message = _("There is no share which can host %(volume_size)sG.")
# Gluster driver
class GlusterfsException(RemoteFSException):
    # Catch-all GlusterFS driver error.
    message = _("Unknown Gluster exception")

View File

@ -979,263 +979,6 @@ class GlusterFsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_create_snapshot(self):
    """Offline snapshot creation.

    Verifies (via mox record/replay) that create_snapshot() creates the
    qcow2 overlay file and records the new snapshot as 'active' in the
    volume's .info file.
    """
    (mox, drv) = self._mox, self._driver

    self.stub_out_not_replaying(drv, '_ensure_share_mounted')
    mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
    mox.StubOutWithMock(drv, '_read_info_file')
    mox.StubOutWithMock(drv, '_write_info_file')

    volume = self._simple_volume()
    snap_ref = {'name': 'test snap',
                'volume_id': self.VOLUME_UUID,
                'volume': volume,
                'id': self.SNAP_UUID}

    mox.StubOutWithMock(drv, '_execute')

    vol_filename = 'volume-%s' % self.VOLUME_UUID

    hashed = drv._get_hash_str(self.TEST_EXPORT1)
    vol_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
                             hashed,
                             vol_filename)
    snap_path = '%s.%s' % (vol_path, self.SNAP_UUID)
    info_path = '%s%s' % (vol_path, '.info')

    # Info file initially only knows about the base image.
    info_dict = {'active': vol_filename}
    drv._read_info_file(info_path, empty_if_missing=True).\
        AndReturn(info_dict)

    drv._create_qcow2_snap_file(snap_ref, vol_filename, snap_path)

    drv._read_info_file(info_path, empty_if_missing=True).\
        AndReturn(info_dict)

    # SNAP_UUID_2 has been removed from dict.
    info_file_dict = {'active': 'volume-%s.%s' %
                      (self.VOLUME_UUID, self.SNAP_UUID),
                      self.SNAP_UUID: 'volume-%s.%s' %
                      (self.VOLUME_UUID, self.SNAP_UUID)}
    drv._write_info_file(info_path, info_file_dict)

    mox.ReplayAll()

    drv.create_snapshot(snap_ref)

    mox.VerifyAll()
def test_delete_snapshot_bottom(self):
    """Multiple snapshots exist.

    In this test, path (volume-<uuid>) is backed by
    snap_path (volume-<uuid>.<snap_uuid>) which is backed by
    snap_path_2 (volume-<uuid>.<snap_uuid_2>).

    Delete the snapshot identified by SNAP_UUID_2.

    Chain goes from
        (SNAP_UUID)      (SNAP_UUID_2)
    volume-abc -> volume-abc.baca -> volume-abc.bebe
    to
        (SNAP_UUID)
    volume-abc -> volume-abc.baca
    """
    (mox, drv) = self._mox, self._driver

    hashed = drv._get_hash_str(self.TEST_EXPORT1)
    volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed)
    volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
                                       hashed,
                                       self.VOLUME_UUID)
    volume_filename = 'volume-%s' % self.VOLUME_UUID
    snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2)
    snap_file = '%s.%s' % (volume_filename, self.SNAP_UUID)
    snap_file_2 = '%s.%s' % (volume_filename, self.SNAP_UUID_2)
    info_path = '%s%s' % (volume_path, '.info')

    # Canned `qemu-img info` output for the top overlay file.
    qemu_img_info_output = """image: volume-%s.%s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (self.VOLUME_UUID, self.SNAP_UUID, volume_filename)

    mox.StubOutWithMock(drv, '_execute')
    mox.StubOutWithMock(drv, '_read_file')
    mox.StubOutWithMock(drv, '_read_info_file')
    mox.StubOutWithMock(drv, '_get_backing_chain_for_path')
    mox.StubOutWithMock(drv, '_get_matching_backing_file')
    mox.StubOutWithMock(drv, '_write_info_file')
    mox.StubOutWithMock(drv, '_ensure_share_writable')
    mox.StubOutWithMock(image_utils, 'qemu_img_info')

    drv._ensure_share_writable(volume_dir)

    img_info = imageutils.QemuImgInfo(qemu_img_info_output)
    image_utils.qemu_img_info(snap_path_2).AndReturn(img_info)

    info_file_dict = {'active': snap_file_2,
                      self.SNAP_UUID_2: snap_file_2,
                      self.SNAP_UUID: snap_file}

    snap_ref = {'name': 'test snap',
                'volume_id': self.VOLUME_UUID,
                'volume': self._simple_volume(),
                'id': self.SNAP_UUID_2}

    drv._read_info_file(info_path, empty_if_missing=True).\
        AndReturn(info_file_dict)

    # The top file is committed into its backing file and then removed.
    drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True)

    drv._execute('rm', '-f', snap_path_2, run_as_root=True)

    drv._read_info_file(info_path, empty_if_missing=True).\
        AndReturn(info_file_dict)

    drv._read_info_file(info_path).AndReturn(info_file_dict)

    drv._write_info_file(info_path, info_file_dict)

    mox.ReplayAll()

    drv.delete_snapshot(snap_ref)

    mox.VerifyAll()
def test_delete_snapshot_middle(self):
    """Multiple snapshots exist.

    In this test, path (volume-<uuid>) is backed by
    snap_path (volume-<uuid>.<snap_uuid>) which is backed by
    snap_path_2 (volume-<uuid>.<snap_uuid_2>).

    Delete the snapshot identified with SNAP_UUID.

    Chain goes from
        (SNAP_UUID)      (SNAP_UUID_2)
    volume-abc -> volume-abc.baca -> volume-abc.bebe
    to           (SNAP_UUID_2)
    volume-abc -> volume-abc.bebe
    """
    (mox, drv) = self._mox, self._driver

    volume = self._simple_volume()

    hashed = drv._get_hash_str(self.TEST_EXPORT1)
    volume_file = 'volume-%s' % self.VOLUME_UUID
    volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed)
    volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
                                hashed,
                                volume_file)
    snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
    snap_file = 'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID)
    snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2)
    snap_file_2 = 'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID_2)

    # Canned `qemu-img info` output for the middle snapshot file.
    qemu_img_info_output_snap_1 = """image: volume-%s.%s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 122K
backing file: %s
""" % (self.VOLUME_UUID, self.SNAP_UUID,
       'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID))

    mox.StubOutWithMock(drv, '_execute')
    mox.StubOutWithMock(drv, '_read_info_file')
    mox.StubOutWithMock(drv, '_write_info_file')
    mox.StubOutWithMock(drv, '_get_backing_chain_for_path')
    mox.StubOutWithMock(drv, 'get_active_image_from_info')
    mox.StubOutWithMock(drv, '_ensure_share_writable')
    mox.StubOutWithMock(image_utils, 'qemu_img_info')

    info_file_dict = {self.SNAP_UUID_2: 'volume-%s.%s' %
                      (self.VOLUME_UUID, self.SNAP_UUID_2),
                      self.SNAP_UUID: 'volume-%s.%s' %
                      (self.VOLUME_UUID, self.SNAP_UUID)}

    drv._ensure_share_writable(volume_dir)

    info_path = drv._local_path_volume(volume) + '.info'
    drv._read_info_file(info_path, empty_if_missing=True).\
        AndReturn(info_file_dict)

    img_info = imageutils.QemuImgInfo(qemu_img_info_output_snap_1)
    image_utils.qemu_img_info(snap_path).AndReturn(img_info)

    snap_ref = {'name': 'test snap',
                'volume_id': self.VOLUME_UUID,
                'volume': volume,
                'id': self.SNAP_UUID}

    snap_path_chain = [{'filename': snap_file_2,
                        'backing-filename': snap_file},
                       {'filename': snap_file,
                        'backing-filename': volume_file}]

    drv.get_active_image_from_info(volume).AndReturn(snap_file_2)
    drv._get_backing_chain_for_path(volume, snap_path_2).\
        AndReturn(snap_path_chain)

    drv._read_info_file(info_path).AndReturn(info_file_dict)

    # Commit the upper file into its backing file, then remove it.
    drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True)

    drv._execute('rm', '-f', snap_path_2, run_as_root=True)

    drv._read_info_file(info_path).AndReturn(info_file_dict)

    drv._write_info_file(info_path, info_file_dict)

    mox.ReplayAll()

    drv.delete_snapshot(snap_ref)

    mox.VerifyAll()
def test_delete_snapshot_not_in_info(self):
    """Snapshot not in info file / info file doesn't exist.

    Snapshot creation failed so nothing is on-disk. Driver
    should allow operation to succeed so the manager can
    remove the snapshot record.

    (Scenario: Snapshot object created in Cinder db but not
    on backing storage.)
    """
    (mox, drv) = self._mox, self._driver

    hashed = drv._get_hash_str(self.TEST_EXPORT1)
    volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed)
    volume_filename = 'volume-%s' % self.VOLUME_UUID
    volume_path = os.path.join(volume_dir, volume_filename)
    info_path = '%s%s' % (volume_path, '.info')

    mox.StubOutWithMock(drv, '_read_file')
    mox.StubOutWithMock(drv, '_read_info_file')
    mox.StubOutWithMock(drv, '_ensure_share_writable')

    snap_ref = {'name': 'test snap',
                'volume_id': self.VOLUME_UUID,
                'volume': self._simple_volume(),
                'id': self.SNAP_UUID_2}

    drv._ensure_share_writable(volume_dir)

    # Empty info dict: the snapshot is unknown, so no file operations
    # are expected beyond reading the info file.
    drv._read_info_file(info_path, empty_if_missing=True).AndReturn({})

    mox.ReplayAll()

    drv.delete_snapshot(snap_ref)

    mox.VerifyAll()
def test_read_info_file(self):
(mox, drv) = self._mox, self._driver
@ -1308,7 +1051,6 @@ class GlusterFsDriverTestCase(test.TestCase):
volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
hashed,
volume_file)
info_path = '%s.info' % volume_path
ctxt = context.RequestContext('fake_user', 'fake_project')
@ -1324,14 +1066,13 @@ class GlusterFsDriverTestCase(test.TestCase):
snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
mox.StubOutWithMock(drv, '_do_create_snapshot')
mox.StubOutWithMock(db, 'snapshot_get')
mox.StubOutWithMock(drv, '_write_info_file')
mox.StubOutWithMock(drv, '_nova')
# Stub out the busy wait.
self.stub_out_not_replaying(time, 'sleep')
drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)
drv._do_create_snapshot(snap_ref, snap_file, snap_path)
create_info = {'snapshot_id': snap_ref['id'],
'type': 'qcow2',
@ -1354,14 +1095,9 @@ class GlusterFsDriverTestCase(test.TestCase):
snap_ref_progress_90p['progress'] = '90%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_90p)
snap_info = {'active': snap_file,
self.SNAP_UUID: snap_file}
drv._write_info_file(info_path, snap_info)
mox.ReplayAll()
drv.create_snapshot(snap_ref)
drv._create_snapshot_online(snap_ref, snap_file, snap_path)
mox.VerifyAll()
@ -1389,14 +1125,13 @@ class GlusterFsDriverTestCase(test.TestCase):
snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
mox.StubOutWithMock(drv, '_do_create_snapshot')
mox.StubOutWithMock(drv, '_nova')
# Stub out the busy wait.
self.stub_out_not_replaying(time, 'sleep')
mox.StubOutWithMock(db, 'snapshot_get')
mox.StubOutWithMock(drv, '_write_info_file')
drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)
drv._do_create_snapshot(snap_ref, snap_file, snap_path)
create_info = {'snapshot_id': snap_ref['id'],
'type': 'qcow2',
@ -1423,10 +1158,10 @@ class GlusterFsDriverTestCase(test.TestCase):
mox.ReplayAll()
self.assertRaisesAndMessageMatches(
exception.GlusterfsException,
exception.RemoteFSException,
'Nova returned "error" status while creating snapshot.',
drv.create_snapshot,
snap_ref)
drv._create_snapshot_online,
snap_ref, snap_file, snap_path)
mox.VerifyAll()
@ -1507,8 +1242,6 @@ class GlusterFsDriverTestCase(test.TestCase):
drv._read_info_file(info_path).AndReturn(snap_info)
drv._read_info_file(info_path).AndReturn(snap_info)
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'deleting'
@ -1610,8 +1343,6 @@ class GlusterFsDriverTestCase(test.TestCase):
drv._read_info_file(info_path).AndReturn(snap_info)
drv._read_info_file(info_path).AndReturn(snap_info)
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'deleting'
@ -1713,8 +1444,6 @@ class GlusterFsDriverTestCase(test.TestCase):
drv._read_info_file(info_path).AndReturn(snap_info)
drv._read_info_file(info_path).AndReturn(snap_info)
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'deleting'
@ -1740,136 +1469,6 @@ class GlusterFsDriverTestCase(test.TestCase):
mox.VerifyAll()
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_delete_stale_snapshot')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            'get_active_image_from_info')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_qemu_img_info')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_read_info_file')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_local_path_volume')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_local_volume_dir')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_ensure_share_writable')
def test_delete_snapshot_online_stale_snapshot(self,
                                               mock_ensure_share_writable,
                                               mock_local_volume_dir,
                                               mock_local_path_volume,
                                               mock_read_info_file,
                                               mock_qemu_img_info,
                                               mock_get_active_image,
                                               mock_delete_stale_snap):
    """Deleting a stale snapshot of an in-use volume is delegated to
    _delete_stale_snapshot (asserted at the end) instead of the regular
    online deletion path."""
    volume = self._simple_volume()
    ctxt = context.RequestContext('fake_user', 'fake_project')
    volume['status'] = 'in-use'

    volume_filename = 'volume-%s' % self.VOLUME_UUID
    volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename)
    info_path = volume_path + '.info'

    stale_snapshot = {'name': 'fake-volume',
                      'volume_id': self.VOLUME_UUID,
                      'volume': volume,
                      'id': self.SNAP_UUID_2,
                      'context': ctxt}

    active_snap_file = volume['name'] + '.' + self.SNAP_UUID_2
    stale_snap_file = volume['name'] + '.' + stale_snapshot['id']
    stale_snap_path = '%s/%s' % (self.TEST_MNT_POINT, stale_snap_file)

    snap_info = {'active': active_snap_file,
                 stale_snapshot['id']: stale_snap_file}
    # The fake image info has no backing file set, marking the snapshot
    # file as stale.
    qemu_img_info = imageutils.QemuImgInfo()
    qemu_img_info.file_format = 'qcow2'

    mock_local_path_volume.return_value = volume_path
    mock_read_info_file.return_value = snap_info
    mock_local_volume_dir.return_value = self.TEST_MNT_POINT
    mock_qemu_img_info.return_value = qemu_img_info
    mock_get_active_image.return_value = active_snap_file

    self._driver.delete_snapshot(stale_snapshot)

    mock_ensure_share_writable.assert_called_once_with(
        self.TEST_MNT_POINT)
    mock_local_path_volume.assert_called_once_with(
        stale_snapshot['volume'])
    mock_read_info_file.assert_called_once_with(info_path,
                                                empty_if_missing=True)
    mock_qemu_img_info.assert_called_once_with(stale_snap_path)
    mock_get_active_image.assert_called_once_with(
        stale_snapshot['volume'])
    mock_delete_stale_snap.assert_called_once_with(stale_snapshot)
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_write_info_file')
@mock.patch('cinder.openstack.common.fileutils.delete_if_exists')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            'get_active_image_from_info')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_local_volume_dir')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_read_info_file')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
            '_local_path_volume')
def test_delete_stale_snapshot(self, mock_local_path_volume,
                               mock_read_info_file,
                               mock_local_volume_dir,
                               mock_get_active_image,
                               mock_delete_if_exists,
                               mock_write_info_file):
    """_delete_stale_snapshot: no-op when the snapshot file is the
    active image; otherwise the file is deleted and its entry dropped
    from the info file."""
    volume = self._simple_volume()
    volume['status'] = 'in-use'
    volume_filename = 'volume-%s' % self.VOLUME_UUID
    volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename)
    info_path = volume_path + '.info'

    # Test case where snapshot_file = active_file
    snapshot = {'name': 'fake-volume',
                'volume_id': self.VOLUME_UUID,
                'volume': volume,
                'id': self.SNAP_UUID_2}
    active_snap_file = volume['name'] + '.' + self.SNAP_UUID_2
    stale_snap_file = volume['name'] + '.' + snapshot['id']
    stale_snap_path = '%s/%s' % (self.TEST_MNT_POINT, stale_snap_file)
    snap_info = {'active': active_snap_file,
                 snapshot['id']: stale_snap_file}

    mock_local_path_volume.return_value = volume_path
    mock_read_info_file.return_value = snap_info
    mock_get_active_image.return_value = active_snap_file
    mock_local_volume_dir.return_value = self.TEST_MNT_POINT

    self._driver._delete_stale_snapshot(snapshot)

    mock_local_path_volume.assert_called_with(snapshot['volume'])
    mock_read_info_file.assert_called_with(info_path)
    # Active file must never be removed.
    self.assertFalse(mock_delete_if_exists.called)
    self.assertFalse(mock_write_info_file.called)

    # Test case where snapshot_file != active_file
    snapshot = {'name': 'fake-volume',
                'volume_id': self.VOLUME_UUID,
                'volume': volume,
                'id': self.SNAP_UUID}
    active_snap_file = volume['name'] + '.' + self.SNAP_UUID_2
    stale_snap_file = volume['name'] + '.' + snapshot['id']
    stale_snap_path = '%s/%s' % (self.TEST_MNT_POINT, stale_snap_file)
    snap_info = {'active': active_snap_file,
                 snapshot['id']: stale_snap_file}

    mock_local_path_volume.return_value = volume_path
    mock_read_info_file.return_value = snap_info
    mock_get_active_image.return_value = active_snap_file
    mock_local_volume_dir.return_value = self.TEST_MNT_POINT

    self._driver._delete_stale_snapshot(snapshot)

    mock_local_path_volume.assert_called_with(snapshot['volume'])
    mock_read_info_file.assert_called_with(info_path)
    mock_delete_if_exists.assert_called_once_with(stale_snap_path)
    snap_info.pop(snapshot['id'], None)
    mock_write_info_file.assert_called_once_with(info_path, snap_info)
def test_get_backing_chain_for_path(self):
(mox, drv) = self._mox, self._driver

View File

@ -0,0 +1,297 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import mock
from cinder import exception
from cinder import test
from cinder.volume.drivers import remotefs
class RemoteFsSnapDriverTestCase(test.TestCase):
    """Tests for the snapshot handling shared by the RemoteFS
    snapshot-capable drivers (remotefs.RemoteFSSnapDriver)."""

    # Shared fake fixtures used by the tests below.
    _FAKE_CONTEXT = 'fake_context'
    _FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc'
    _FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc',
                    'size': 1,
                    'provider_location': 'fake_share',
                    'name': _FAKE_VOLUME_NAME,
                    'status': 'available'}
    _FAKE_MNT_POINT = '/mnt/fake_hash'
    _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT,
                                     _FAKE_VOLUME_NAME)
    _FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba'
    _FAKE_SNAPSHOT = {'context': _FAKE_CONTEXT,
                      'id': _FAKE_SNAPSHOT_ID,
                      'volume': _FAKE_VOLUME,
                      'status': 'available',
                      'volume_size': 1}
    _FAKE_SNAPSHOT_PATH = (_FAKE_VOLUME_PATH + '.' + _FAKE_SNAPSHOT_ID)

    def setUp(self):
        super(RemoteFsSnapDriverTestCase, self).setUp()
        # Helpers that would touch the filesystem or shell out are
        # replaced with mocks; individual tests stub what they exercise.
        self._driver = remotefs.RemoteFSSnapDriver()
        self._driver._remotefsclient = mock.Mock()
        self._driver._execute = mock.Mock()
        self._driver._delete = mock.Mock()
def _test_delete_snapshot(self, volume_in_use=False,
                          stale_snapshot=False,
                          is_active_image=True,
                          highest_file_exists=False):
    """Template for the _delete_snapshot scenarios exercised below.

    Covers: the snapshot being the active image (commit into base),
    in-use volumes (delegation to the online/stale deletion paths), and
    snapshots sitting below one or more other files in the qcow2
    backing chain (commit of the upper file, optional rebase).
    """
    # If the snapshot is not the active image, it is guaranteed that
    # another snapshot exists having it as backing file.
    # If yet another file is backed by the file from the next level,
    # it means that the 'highest file' exists and it needs to be rebased.
    fake_snapshot_name = os.path.basename(self._FAKE_SNAPSHOT_PATH)
    fake_info = {'active': fake_snapshot_name,
                 self._FAKE_SNAPSHOT['id']: fake_snapshot_name}

    fake_snap_img_info = mock.Mock()
    fake_base_img_info = mock.Mock()
    if stale_snapshot:
        # A stale snapshot file reports no backing file.
        fake_snap_img_info.backing_file = None
    else:
        fake_snap_img_info.backing_file = self._FAKE_VOLUME_NAME
    fake_snap_img_info.file_format = 'qcow2'
    fake_base_img_info.backing_file = None

    self._driver._local_path_volume_info = mock.Mock(
        return_value=mock.sentinel.fake_info_path)
    self._driver._qemu_img_info = mock.Mock(
        side_effect=[fake_snap_img_info, fake_base_img_info])
    self._driver._local_volume_dir = mock.Mock(
        return_value=self._FAKE_MNT_POINT)

    self._driver._read_info_file = mock.Mock()
    self._driver._write_info_file = mock.Mock()
    self._driver._img_commit = mock.Mock()
    self._driver._rebase_img = mock.Mock()
    self._driver._ensure_share_writable = mock.Mock()
    self._driver._delete_stale_snapshot = mock.Mock()
    self._driver._delete_snapshot_online = mock.Mock()

    expected_info = {
        'active': fake_snapshot_name,
        self._FAKE_SNAPSHOT_ID: fake_snapshot_name
    }

    if volume_in_use:
        fake_snapshot = copy.deepcopy(self._FAKE_SNAPSHOT)
        fake_snapshot['volume']['status'] = 'in-use'

        self._driver._read_info_file.return_value = fake_info

        self._driver._delete_snapshot(fake_snapshot)
        if stale_snapshot:
            self._driver._delete_stale_snapshot.assert_called_once_with(
                fake_snapshot)
        else:
            expected_online_delete_info = {
                'active_file': fake_snapshot_name,
                'snapshot_file': fake_snapshot_name,
                'base_file': self._FAKE_VOLUME_NAME,
                'base_id': None,
                'new_base_file': None
            }
            self._driver._delete_snapshot_online.assert_called_once_with(
                self._FAKE_CONTEXT, fake_snapshot,
                expected_online_delete_info)
    elif is_active_image:
        self._driver._read_info_file.return_value = fake_info

        self._driver._delete_snapshot(self._FAKE_SNAPSHOT)

        self._driver._img_commit.assert_called_once_with(
            self._FAKE_SNAPSHOT_PATH)
        self._driver._write_info_file.assert_called_once_with(
            mock.sentinel.fake_info_path, fake_info)
    else:
        # The deleted snapshot sits below at least one other file in
        # the backing chain.
        fake_upper_snap_id = 'fake_upper_snap_id'
        fake_upper_snap_path = (
            self._FAKE_VOLUME_PATH + '-snapshot' + fake_upper_snap_id)
        fake_upper_snap_name = os.path.basename(fake_upper_snap_path)

        fake_backing_chain = [
            {'filename': fake_upper_snap_name,
             'backing-filename': fake_snapshot_name},
            {'filename': fake_snapshot_name,
             'backing-filename': self._FAKE_VOLUME_NAME},
            {'filename': self._FAKE_VOLUME_NAME,
             'backing-filename': None}]

        fake_info[fake_upper_snap_id] = fake_upper_snap_name
        fake_info[self._FAKE_SNAPSHOT_ID] = fake_snapshot_name

        if highest_file_exists:
            fake_highest_snap_id = 'fake_highest_snap_id'
            fake_highest_snap_path = (
                self._FAKE_VOLUME_PATH + '-snapshot' +
                fake_highest_snap_id)
            fake_highest_snap_name = os.path.basename(
                fake_highest_snap_path)

            fake_highest_snap_info = {
                'filename': fake_highest_snap_name,
                'backing-filename': fake_upper_snap_name,
            }
            fake_backing_chain.insert(0, fake_highest_snap_info)

            fake_info['active'] = fake_highest_snap_name
            fake_info[fake_highest_snap_id] = fake_highest_snap_name
        else:
            fake_info['active'] = fake_upper_snap_name

        # After deletion, the upper snapshot entry points at the old
        # snapshot file and the deleted snapshot's entry is gone.
        expected_info = copy.deepcopy(fake_info)
        expected_info[fake_upper_snap_id] = fake_snapshot_name
        del expected_info[self._FAKE_SNAPSHOT_ID]
        if not highest_file_exists:
            expected_info['active'] = fake_snapshot_name

        self._driver._read_info_file.return_value = fake_info
        self._driver._get_backing_chain_for_path = mock.Mock(
            return_value=fake_backing_chain)

        self._driver._delete_snapshot(self._FAKE_SNAPSHOT)

        self._driver._img_commit.assert_any_call(
            fake_upper_snap_path)
        if highest_file_exists:
            self._driver._rebase_img.assert_called_once_with(
                fake_highest_snap_path, fake_snapshot_name, 'qcow2')

        self._driver._write_info_file.assert_called_once_with(
            mock.sentinel.fake_info_path, expected_info)
# Thin wrappers driving the _test_delete_snapshot template above.
def test_delete_snapshot_when_active_file(self):
    self._test_delete_snapshot()

def test_delete_snapshot_in_use(self):
    self._test_delete_snapshot(volume_in_use=True)

def test_delete_snapshot_in_use_stale_snapshot(self):
    self._test_delete_snapshot(volume_in_use=True,
                               stale_snapshot=True)

def test_delete_snapshot_with_one_upper_file(self):
    self._test_delete_snapshot(is_active_image=False)

def test_delete_snapshot_with_two_or_more_upper_files(self):
    self._test_delete_snapshot(is_active_image=False,
                               highest_file_exists=True)
def test_delete_stale_snapshot(self):
    """A stale (non-active) snapshot file is deleted and its entry is
    removed from the volume's info file."""
    drv = self._driver
    snap_basename = os.path.basename(self._FAKE_SNAPSHOT_PATH)
    info_before = {
        'active': self._FAKE_VOLUME_NAME,
        self._FAKE_SNAPSHOT_ID: snap_basename
    }
    info_after = {'active': self._FAKE_VOLUME_NAME}

    drv._local_path_volume_info = mock.Mock(
        return_value=mock.sentinel.fake_info_path)
    drv._read_info_file = mock.Mock(return_value=info_before)
    drv._local_volume_dir = mock.Mock(return_value=self._FAKE_MNT_POINT)
    drv._write_info_file = mock.Mock()

    drv._delete_stale_snapshot(self._FAKE_SNAPSHOT)

    drv._delete.assert_called_once_with(self._FAKE_SNAPSHOT_PATH)
    drv._write_info_file.assert_called_once_with(
        mock.sentinel.fake_info_path, info_after)
def test_do_create_snapshot(self):
    """_do_create_snapshot creates a qcow2 overlay backed by the volume
    image, then runs an unsafe rebase ('-u') so the backing file is
    referenced by file name with an explicit backing format."""
    self._driver._local_volume_dir = mock.Mock(
        return_value=self._FAKE_VOLUME_PATH)
    fake_backing_path = os.path.join(
        self._driver._local_volume_dir(),
        self._FAKE_VOLUME_NAME)

    self._driver._execute = mock.Mock()
    self._driver._set_rw_permissions_for_all = mock.Mock()
    self._driver._qemu_img_info = mock.Mock(
        return_value=mock.Mock(file_format=mock.sentinel.backing_fmt))

    self._driver._do_create_snapshot(self._FAKE_SNAPSHOT,
                                     self._FAKE_VOLUME_NAME,
                                     self._FAKE_SNAPSHOT_PATH)

    command1 = ['qemu-img', 'create', '-f', 'qcow2', '-o',
                'backing_file=%s' % fake_backing_path,
                self._FAKE_SNAPSHOT_PATH]
    command2 = ['qemu-img', 'rebase', '-u',
                '-b', self._FAKE_VOLUME_NAME,
                '-F', mock.sentinel.backing_fmt,
                self._FAKE_SNAPSHOT_PATH]

    self._driver._execute.assert_any_call(*command1, run_as_root=True)
    self._driver._execute.assert_any_call(*command2, run_as_root=True)
def _test_create_snapshot(self, volume_in_use=False):
    """_create_snapshot dispatches to _do_create_snapshot for available
    volumes and to _create_snapshot_online for in-use volumes, then
    records the new snapshot file as 'active' in the info file."""
    fake_snapshot = copy.deepcopy(self._FAKE_SNAPSHOT)
    fake_snapshot_info = {}
    fake_snapshot_file_name = os.path.basename(self._FAKE_SNAPSHOT_PATH)

    self._driver._local_path_volume_info = mock.Mock(
        return_value=mock.sentinel.fake_info_path)
    self._driver._read_info_file = mock.Mock(
        return_value=fake_snapshot_info)
    self._driver._do_create_snapshot = mock.Mock()
    self._driver._create_snapshot_online = mock.Mock()
    self._driver._write_info_file = mock.Mock()
    self._driver.get_active_image_from_info = mock.Mock(
        return_value=self._FAKE_VOLUME_NAME)
    self._driver._get_new_snap_path = mock.Mock(
        return_value=self._FAKE_SNAPSHOT_PATH)

    expected_snapshot_info = {
        'active': fake_snapshot_file_name,
        self._FAKE_SNAPSHOT_ID: fake_snapshot_file_name
    }

    if volume_in_use:
        fake_snapshot['volume']['status'] = 'in-use'
        expected_method_called = '_create_snapshot_online'
    else:
        fake_snapshot['volume']['status'] = 'available'
        expected_method_called = '_do_create_snapshot'

    self._driver._create_snapshot(fake_snapshot)

    fake_method = getattr(self._driver, expected_method_called)
    fake_method.assert_called_with(
        fake_snapshot, self._FAKE_VOLUME_NAME,
        self._FAKE_SNAPSHOT_PATH)
    self._driver._write_info_file.assert_called_with(
        mock.sentinel.fake_info_path,
        expected_snapshot_info)
def test_create_snapshot_volume_available(self):
    self._test_create_snapshot()

def test_create_snapshot_volume_in_use(self):
    self._test_create_snapshot(volume_in_use=True)

def test_create_snapshot_invalid_volume(self):
    # A volume in 'error' state cannot be snapshotted.
    fake_snapshot = copy.deepcopy(self._FAKE_SNAPSHOT)
    fake_snapshot['volume']['status'] = 'error'
    self.assertRaises(exception.InvalidVolume,
                      self._driver._create_snapshot,
                      fake_snapshot)

537
cinder/tests/test_smbfs.py Normal file
View File

@ -0,0 +1,537 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import os
import mock
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.volume.drivers import smbfs
class SmbFsTestCase(test.TestCase):
    """Tests for the SMBFS volume driver (smbfs.SmbfsDriver)."""

    # Shared fake fixtures used by the tests below.
    _FAKE_SHARE = '//1.2.3.4/share1'
    _FAKE_MNT_BASE = '/mnt'
    _FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc'
    _FAKE_TOTAL_SIZE = '2048'
    _FAKE_TOTAL_AVAILABLE = '1024'
    _FAKE_TOTAL_ALLOCATED = 1024
    _FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc',
                    'size': 1,
                    'provider_location': _FAKE_SHARE,
                    'name': _FAKE_VOLUME_NAME,
                    'status': 'available'}
    _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, 'fake_hash')
    _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME)
    _FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba'
    _FAKE_SNAPSHOT = {'id': _FAKE_SNAPSHOT_ID,
                      'volume': _FAKE_VOLUME,
                      'status': 'available',
                      'volume_size': 1}
    _FAKE_SNAPSHOT_PATH = (
        _FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID)
    _FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
    _FAKE_OPTIONS_DICT = {'username': 'Administrator',
                          'password': '12345'}
    _FAKE_LISTDIR = [_FAKE_VOLUME_NAME, _FAKE_VOLUME_NAME + '.vhd',
                     _FAKE_VOLUME_NAME + '.vhdx', 'fake_folder']
    # Fake driver configuration used by the setup/create tests.
    _FAKE_SMBFS_CONFIG = mock.MagicMock()
    _FAKE_SMBFS_CONFIG.smbfs_oversub_ratio = 2
    _FAKE_SMBFS_CONFIG.smbfs_used_ratio = 0.5
    _FAKE_SMBFS_CONFIG.smbfs_shares_config = '/fake/config/path'
    _FAKE_SMBFS_CONFIG.smbfs_default_volume_format = 'raw'
    _FAKE_SMBFS_CONFIG.smbfs_sparsed_volumes = False
def setUp(self):
    super(SmbFsTestCase, self).setUp()
    # NOTE(review): this permanently replaces SmbfsDriver.__init__ for
    # the rest of the test run instead of using mock.patch with cleanup;
    # confirm it does not leak into other test classes.
    smbfs.SmbfsDriver.__init__ = lambda x: None
    self._smbfs_driver = smbfs.SmbfsDriver()
    self._smbfs_driver._remotefsclient = mock.Mock()
    self._smbfs_driver._local_volume_dir = mock.Mock(
        return_value=self._FAKE_MNT_POINT)
    self._smbfs_driver._execute = mock.Mock()
    self._smbfs_driver.base = self._FAKE_MNT_BASE
def test_delete_volume(self):
    """delete_volume() mounts the share, then removes both the active
    image file and the volume's .info file."""
    driver = self._smbfs_driver
    expected_info_path = self._FAKE_VOLUME_PATH + '.info'

    mocked_ensure_mounted = mock.MagicMock()
    driver._ensure_share_mounted = mocked_ensure_mounted
    driver._local_volume_dir = mock.Mock(
        return_value=self._FAKE_MNT_POINT)
    driver.get_active_image_from_info = mock.Mock(
        return_value=self._FAKE_VOLUME_NAME)
    driver._delete = mock.Mock()
    driver._local_path_volume_info = mock.Mock(
        return_value=expected_info_path)

    # Pretend every path exists so both delete branches are taken.
    with mock.patch('os.path.exists', lambda x: True):
        driver.delete_volume(self._FAKE_VOLUME)

        mocked_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
        driver._delete.assert_any_call(self._FAKE_VOLUME_PATH)
        driver._delete.assert_any_call(expected_info_path)
def _test_setup(self, config, share_config_exists=True):
    """Template exercising do_setup() config validation.

    Runs do_setup() with the given configuration and asserts that it
    raises SmbfsException when any validated option is invalid (missing
    shares config option or file, non-positive oversubscription ratio,
    used ratio outside [0, 1]) and succeeds otherwise.
    """
    fake_exists = mock.Mock(return_value=share_config_exists)
    fake_ensure_mounted = mock.MagicMock()
    self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
    self._smbfs_driver.configuration = config

    with mock.patch('os.path.exists', fake_exists):
        if not (config.smbfs_shares_config and share_config_exists and
                config.smbfs_oversub_ratio > 0 and
                0 <= config.smbfs_used_ratio <= 1):
            self.assertRaises(exception.SmbfsException,
                              self._smbfs_driver.do_setup,
                              None)
        else:
            self._smbfs_driver.do_setup(None)
            self.assertEqual(self._smbfs_driver.shares, {})
            # Bug fix: `fake_ensure_mounted.assert_called_once()` was not
            # a real assertion on the mock library of the time -- the
            # attribute lookup silently created a child mock, so the call
            # was never actually verified. Check the count explicitly.
            self.assertEqual(1, fake_ensure_mounted.call_count)
def test_setup_missing_shares_config_option(self):
    # smbfs_shares_config unset -> do_setup must fail.
    fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
    fake_config.smbfs_shares_config = None
    self._test_setup(fake_config, None)

def test_setup_missing_shares_config_file(self):
    # Option set but the shares config file does not exist -> failure.
    self._test_setup(self._FAKE_SMBFS_CONFIG, False)

def test_setup_invlid_oversub_ratio(self):
    # NOTE(review): method name has a typo ('invlid'); kept as-is to
    # avoid renaming a test that runners discover by name.
    fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
    fake_config.smbfs_oversub_ratio = -1
    self._test_setup(fake_config)

def test_setup_invalid_used_ratio(self):
    # Used ratio outside [0, 1] -> failure.
    fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
    fake_config.smbfs_used_ratio = -1
    self._test_setup(fake_config)
def _test_create_volume(self, volume_exists=False, volume_format=None):
    """Exercise _do_create_volume for a given image format.

    Verifies that the format-specific creation helper is invoked with
    the expected arguments and that the resulting file is made
    world-readable/writable.  If the volume file already exists,
    InvalidVolume is expected instead.
    """
    fake_method = mock.MagicMock()
    self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
    self._smbfs_driver._set_rw_permissions_for_all = mock.MagicMock()
    fake_set_permissions = self._smbfs_driver._set_rw_permissions_for_all
    self._smbfs_driver.get_volume_format = mock.MagicMock()

    windows_image_format = False
    fake_vol_path = self._FAKE_VOLUME_PATH
    self._smbfs_driver.get_volume_format.return_value = volume_format

    if volume_format:
        if volume_format in ('vhd', 'vhdx'):
            # VHD/VHDX images go through a dedicated helper; qemu
            # refers to the legacy VHD format as 'vpc'.
            windows_image_format = volume_format
            if volume_format == 'vhd':
                windows_image_format = 'vpc'
            method = '_create_windows_image'
            fake_vol_path += '.' + volume_format
        else:
            method = '_create_%s_file' % volume_format
            if volume_format == 'sparsed':
                self._smbfs_driver.configuration.smbfs_sparsed_volumes = (
                    True)
    else:
        # No format requested: a regular (preallocated) file.
        method = '_create_regular_file'

    # Replace the expected creation helper with our mock.
    setattr(self._smbfs_driver, method, fake_method)

    with mock.patch('os.path.exists', new=lambda x: volume_exists):
        if volume_exists:
            self.assertRaises(exception.InvalidVolume,
                              self._smbfs_driver._do_create_volume,
                              self._FAKE_VOLUME)
            return

        self._smbfs_driver._do_create_volume(self._FAKE_VOLUME)
        if windows_image_format:
            fake_method.assert_called_once_with(
                fake_vol_path,
                self._FAKE_VOLUME['size'],
                windows_image_format)
        else:
            fake_method.assert_called_once_with(
                fake_vol_path, self._FAKE_VOLUME['size'])
        fake_set_permissions.assert_called_once_with(fake_vol_path)
def test_create_existing_volume(self):
    # Creating over an existing image file must fail.
    self._test_create_volume(volume_exists=True)

def test_create_vhdx(self):
    # VHDX volumes go through the Windows image helper.
    self._test_create_volume(volume_format='vhdx')

def test_create_qcow2(self):
    # qcow2 volumes use the qcow2-specific creation helper.
    self._test_create_volume(volume_format='qcow2')

def test_create_sparsed(self):
    # Sparse files are created when smbfs_sparsed_volumes is enabled.
    self._test_create_volume(volume_format='sparsed')

def test_create_regular(self):
    # Default: a regular preallocated file.
    self._test_create_volume()
def _test_find_share(self, existing_mounted_shares=True,
                     eligible_shares=True):
    """Check share selection along with its failure modes."""
    drv = self._smbfs_driver
    if existing_mounted_shares:
        drv._mounted_shares = ('fake_share1', 'fake_share2',
                               'fake_share3')
    else:
        drv._mounted_shares = None
    drv._is_share_eligible = mock.Mock(return_value=eligible_shares)
    # One capacity tuple returned per mounted share, in order.
    drv._get_capacity_info = mock.Mock(
        side_effect=((2, 1, 5), (2, 1, 4), (2, 1, 1)))

    requested_size = self._FAKE_VOLUME['size']
    if not drv._mounted_shares:
        self.assertRaises(exception.SmbfsNoSharesMounted,
                          drv._find_share,
                          requested_size)
    elif not eligible_shares:
        self.assertRaises(exception.SmbfsNoSuitableShareFound,
                          drv._find_share,
                          requested_size)
    else:
        # The eligible share with the minimum allocated space
        # will be selected
        self.assertEqual(drv._find_share(requested_size),
                         'fake_share3')
def test_find_share(self):
    # All shares are eligible: the least allocated one is picked.
    self._test_find_share()

def test_find_share_missing_mounted_shares(self):
    # No mounted shares: SmbfsNoSharesMounted is expected.
    self._test_find_share(existing_mounted_shares=False)

def test_find_share_missing_eligible_shares(self):
    # No share can host the volume: SmbfsNoSuitableShareFound.
    self._test_find_share(eligible_shares=False)
def _test_is_share_eligible(self, capacity_info, volume_size):
    """Run _is_share_eligible against fake capacity data (GB figures)."""
    drv = self._smbfs_driver
    # Convert the GB figures to bytes, matching the real helper's
    # return value.
    capacity_bytes = [float(gb << 30) for gb in capacity_info]
    drv._get_capacity_info = mock.Mock(return_value=capacity_bytes)
    drv.configuration = self._FAKE_SMBFS_CONFIG
    return drv._is_share_eligible(self._FAKE_SHARE, volume_size)
def test_share_volume_above_used_ratio(self):
    """Reject a share whose used-space ratio would be exceeded."""
    fake_capacity_info = (4, 1, 1)
    fake_volume_size = 2
    # assertFalse/assertTrue instead of assertEqual(ret, False): the
    # idiomatic form, also avoiding the reversed (observed, expected)
    # argument order used nowhere else in this file.
    self.assertFalse(self._test_is_share_eligible(fake_capacity_info,
                                                  fake_volume_size))

def test_eligible_share(self):
    """Accept a share with enough free, unallocated space."""
    fake_capacity_info = (4, 4, 0)
    fake_volume_size = 1
    self.assertTrue(self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size))

def test_share_volume_above_oversub_ratio(self):
    """Reject a volume exceeding the share's oversubscribed size."""
    fake_capacity_info = (4, 4, 7)
    fake_volume_size = 2
    self.assertFalse(self._test_is_share_eligible(fake_capacity_info,
                                                  fake_volume_size))

def test_share_reserved_above_oversub_ratio(self):
    """Reject a share already oversubscribed beyond the allowed ratio."""
    fake_capacity_info = (4, 4, 10)
    fake_volume_size = 1
    self.assertFalse(self._test_is_share_eligible(fake_capacity_info,
                                                  fake_volume_size))
def test_parse_options(self):
    """parse_options splits mount flags into a list and a dict."""
    parsed = self._smbfs_driver.parse_options(self._FAKE_SHARE_OPTS)
    self.assertEqual(([], self._FAKE_OPTIONS_DICT), parsed)
def test_parse_credentials(self):
    """Domain prefixes are stripped and 'user' becomes 'username'."""
    fake_smb_options = r'-o user=MyDomain\Administrator,noperm'
    expected_flags = '-o username=Administrator,noperm'
    self.assertEqual(
        expected_flags,
        self._smbfs_driver.parse_credentials(fake_smb_options))
def test_get_volume_path(self):
    """local_path appends the image format extension to the base name."""
    drv = self._smbfs_driver
    drv.get_volume_format = mock.Mock(return_value='vhd')
    drv._local_volume_dir = mock.Mock(
        return_value=self._FAKE_MNT_POINT)

    self.assertEqual(self._FAKE_VOLUME_PATH + '.vhd',
                     drv.local_path(self._FAKE_VOLUME))
def test_initialize_connection(self):
    """The connection info must expose the share, the active image
    name and format, and the share's mount options/base dir.
    """
    self._smbfs_driver.get_active_image_from_info = mock.Mock(
        return_value=self._FAKE_VOLUME_NAME)
    self._smbfs_driver._get_mount_point_base = mock.Mock(
        return_value=self._FAKE_MNT_BASE)
    self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
    # The image on disk is reported as raw by qemu-img.
    self._smbfs_driver._qemu_img_info = mock.Mock(
        return_value=mock.Mock(file_format='raw'))

    fake_data = {'export': self._FAKE_SHARE,
                 'format': 'raw',
                 'name': self._FAKE_VOLUME_NAME,
                 'options': self._FAKE_SHARE_OPTS}
    expected = {
        'driver_volume_type': 'smbfs',
        'data': fake_data,
        'mount_point_base': self._FAKE_MNT_BASE}
    ret_val = self._smbfs_driver.initialize_connection(
        self._FAKE_VOLUME, None)
    self.assertEqual(expected, ret_val)
def _test_extend_volume(self, extend_failed=False, image_format='raw'):
    """Exercise _extend_volume for a given on-disk image format.

    Raw-like images are resized in place; VHD/VHDX images are first
    converted to a temporary raw file, resized, then converted back.
    If the post-resize size check fails, ExtendVolumeError is
    expected.
    """
    drv = self._smbfs_driver

    drv.local_path = mock.Mock(
        return_value=self._FAKE_VOLUME_PATH)
    drv._check_extend_volume_support = mock.Mock(
        return_value=True)
    # Simulates whether the image reached the requested size.
    drv._is_file_size_equal = mock.Mock(
        return_value=not extend_failed)
    drv._qemu_img_info = mock.Mock(
        return_value=mock.Mock(file_format=image_format))

    with contextlib.nested(
            mock.patch.object(image_utils, 'resize_image'),
            mock.patch.object(image_utils, 'convert_image')) as (
            fake_resize, fake_convert):
        if extend_failed:
            self.assertRaises(exception.ExtendVolumeError,
                              drv._extend_volume,
                              self._FAKE_VOLUME, mock.sentinel.new_size)
        else:
            drv._extend_volume(
                self._FAKE_VOLUME,
                mock.sentinel.new_size)
            if image_format in (drv._DISK_FORMAT_VHDX,
                                drv._DISK_FORMAT_VHD_LEGACY):
                # VHD(X) path: convert to a temporary raw image,
                # resize it, then convert back to the original format.
                fake_tmp_path = self._FAKE_VOLUME_PATH + '.tmp'
                fake_convert.assert_any_call(self._FAKE_VOLUME_PATH,
                                             fake_tmp_path, 'raw')
                fake_resize.assert_called_once_with(
                    fake_tmp_path, mock.sentinel.new_size)
                fake_convert.assert_any_call(fake_tmp_path,
                                             self._FAKE_VOLUME_PATH,
                                             image_format)
            else:
                # Raw-like image: resized directly.
                fake_resize.assert_called_once_with(
                    self._FAKE_VOLUME_PATH, mock.sentinel.new_size)
def test_extend_volume(self):
    # Plain raw image: resized in place.
    self._test_extend_volume()

def test_extend_volume_failed(self):
    # Size check after resizing fails: ExtendVolumeError is expected.
    self._test_extend_volume(extend_failed=True)

def test_extend_vhd_volume(self):
    # Legacy VHD ('vpc'): converted to raw, resized, converted back.
    self._test_extend_volume(image_format='vpc')
def _test_check_extend_support(self, has_snapshots=False,
                               is_eligible=True):
    """Exercise _check_extend_volume_support.

    Extending is refused (InvalidVolume) while the volume has
    snapshots, and fails with ExtendVolumeError when the share
    cannot accommodate the extra space.
    """
    self._smbfs_driver.local_path = mock.Mock(
        return_value=self._FAKE_VOLUME_PATH)
    if has_snapshots:
        # An active image different from the base volume file means
        # the volume has at least one snapshot.
        active_file_path = self._FAKE_SNAPSHOT_PATH
    else:
        active_file_path = self._FAKE_VOLUME_PATH
    self._smbfs_driver.get_active_image_from_info = mock.Mock(
        return_value=active_file_path)
    self._smbfs_driver._is_share_eligible = mock.Mock(
        return_value=is_eligible)

    if has_snapshots:
        self.assertRaises(exception.InvalidVolume,
                          self._smbfs_driver._check_extend_volume_support,
                          self._FAKE_VOLUME, 2)
    elif not is_eligible:
        self.assertRaises(exception.ExtendVolumeError,
                          self._smbfs_driver._check_extend_volume_support,
                          self._FAKE_VOLUME, 2)
    else:
        self._smbfs_driver._check_extend_volume_support(
            self._FAKE_VOLUME, 2)
        # Only 1 GB is checked against the share — presumably the
        # delta between the new size (2) and the current volume
        # size; confirm against _FAKE_VOLUME's 'size'.
        self._smbfs_driver._is_share_eligible.assert_called_once_with(
            self._FAKE_SHARE, 1)
def test_check_extend_support(self):
    # Snapshot-less volume on an eligible share: extend is allowed.
    self._test_check_extend_support()

def test_check_extend_volume_with_snapshots(self):
    # Volumes with snapshots cannot be extended.
    self._test_check_extend_support(has_snapshots=True)

def test_check_extend_volume_uneligible_share(self):
    # The share must have room for the additional space.
    self._test_check_extend_support(is_eligible=False)
def test_create_volume_from_in_use_snapshot(self):
    """Cloning from an attached (in-use) snapshot must be rejected."""
    snapshot_in_use = {'status': 'in-use'}
    self.assertRaises(exception.InvalidSnapshot,
                      self._smbfs_driver.create_volume_from_snapshot,
                      self._FAKE_VOLUME,
                      snapshot_in_use)
def test_copy_volume_from_snapshot(self):
    """Snapshot data is converted into the new volume and extended."""
    drv = self._smbfs_driver

    # Maps the snapshot id to its backing file name, as stored in
    # the volume's .info file.
    fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'}
    fake_img_info = mock.MagicMock()
    fake_img_info.backing_file = self._FAKE_VOLUME_NAME

    drv.get_volume_format = mock.Mock(
        return_value='raw')
    drv._local_path_volume_info = mock.Mock(
        return_value=self._FAKE_VOLUME_PATH + '.info')
    drv._local_volume_dir = mock.Mock(
        return_value=self._FAKE_MNT_POINT)
    drv._read_info_file = mock.Mock(
        return_value=fake_volume_info)
    drv._qemu_img_info = mock.Mock(
        return_value=fake_img_info)
    # Destination path made distinct from the source backing file
    # path so the convert_image arguments can be told apart.
    drv.local_path = mock.Mock(
        return_value=self._FAKE_VOLUME_PATH[:-1])
    drv._extend_volume = mock.Mock()
    drv._set_rw_permissions_for_all = mock.Mock()

    with mock.patch.object(image_utils, 'convert_image') as (
            fake_convert_image):
        drv._copy_volume_from_snapshot(
            self._FAKE_SNAPSHOT, self._FAKE_VOLUME,
            self._FAKE_VOLUME['size'])
        drv._extend_volume.assert_called_once_with(
            self._FAKE_VOLUME, self._FAKE_VOLUME['size'])
        # The snapshot's backing file is converted into the new
        # volume, in raw format.
        fake_convert_image.assert_called_once_with(
            self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw')
def test_ensure_mounted(self):
    """_ensure_share_mounted passes the share's flags to the mounter."""
    drv = self._smbfs_driver
    drv.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}

    drv._ensure_share_mounted(self._FAKE_SHARE)

    drv._remotefsclient.mount.assert_called_once_with(
        self._FAKE_SHARE, self._FAKE_SHARE_OPTS.split())
def _test_copy_image_to_volume(self, unsupported_qemu_version=False,
                               wrong_size_after_fetch=False):
    """Exercise copy_image_to_volume for a VHDX destination volume.

    The image must be fetched in the volume's format and the volume
    extended afterwards.  Writing VHDX requires a recent enough
    qemu-img (InvalidVolume otherwise), and a size mismatch after
    fetching the image raises ImageUnacceptable.
    """
    drv = self._smbfs_driver

    vol_size_bytes = self._FAKE_VOLUME['size'] << 30
    fake_image_service = mock.MagicMock()
    fake_image_service.show.return_value = (
        {'id': 'fake_image_id', 'disk_format': 'raw'})

    fake_img_info = mock.MagicMock()

    if wrong_size_after_fetch:
        # Fetched image reports twice the volume's size.
        fake_img_info.virtual_size = 2 * vol_size_bytes
    else:
        fake_img_info.virtual_size = vol_size_bytes

    if unsupported_qemu_version:
        # presumably qemu-img < 1.7 cannot write VHDX — confirm
        # against the driver's version check.
        qemu_version = [1, 5]
    else:
        qemu_version = [1, 7]

    drv.get_volume_format = mock.Mock(
        return_value=drv._DISK_FORMAT_VHDX)
    drv.local_path = mock.Mock(
        return_value=self._FAKE_VOLUME_PATH)
    drv.get_qemu_version = mock.Mock(
        return_value=qemu_version)
    drv._do_extend_volume = mock.Mock()
    drv.configuration = mock.MagicMock()
    drv.configuration.volume_dd_blocksize = (
        mock.sentinel.block_size)

    exc = None
    with contextlib.nested(
        mock.patch.object(image_utils,
                          'fetch_to_volume_format'),
        mock.patch.object(image_utils,
                          'qemu_img_info')) as (
            fake_fetch,
            fake_qemu_img_info):

        if wrong_size_after_fetch:
            exc = exception.ImageUnacceptable
        elif unsupported_qemu_version:
            exc = exception.InvalidVolume

        fake_qemu_img_info.return_value = fake_img_info

        if exc:
            self.assertRaises(
                exc, drv.copy_image_to_volume,
                mock.sentinel.context, self._FAKE_VOLUME,
                fake_image_service,
                mock.sentinel.image_id)
        else:
            drv.copy_image_to_volume(
                mock.sentinel.context, self._FAKE_VOLUME,
                fake_image_service,
                mock.sentinel.image_id)
            # The image is fetched directly in the volume's format.
            fake_fetch.assert_called_once_with(
                mock.sentinel.context, fake_image_service,
                mock.sentinel.image_id, self._FAKE_VOLUME_PATH,
                drv._DISK_FORMAT_VHDX,
                mock.sentinel.block_size)
            # The volume is grown to its nominal size afterwards.
            drv._do_extend_volume.assert_called_once_with(
                self._FAKE_VOLUME_PATH, self._FAKE_VOLUME['size'])
def test_copy_image_to_volume(self):
    # Happy path: image fetched in VHDX format, volume extended.
    self._test_copy_image_to_volume()

def test_copy_image_to_volume_wrong_size_after_fetch(self):
    # Fetched image larger than the volume: ImageUnacceptable.
    self._test_copy_image_to_volume(wrong_size_after_fetch=True)

def test_copy_image_to_volume_unsupported_qemu_version(self):
    # qemu-img too old to write VHDX images: InvalidVolume.
    self._test_copy_image_to_volume(unsupported_qemu_version=True)
def test_get_capacity_info(self):
    """Total, free and allocated sizes are derived from df/du output."""
    block_size = 4096.0
    total_blocks = 1024
    avail_blocks = 512
    total_allocated = total_blocks * block_size

    # The first _execute call emulates 'df', the second 'du'.
    df_output = ('%s %s %s' % (block_size, total_blocks,
                               avail_blocks), None)
    du_output = (str(total_allocated), None)

    drv = self._smbfs_driver
    drv._get_mount_point_for_share = mock.Mock(
        return_value=self._FAKE_MNT_POINT)
    drv._execute = mock.Mock(side_effect=(df_output, du_output))

    self.assertEqual((block_size * total_blocks,
                      block_size * avail_blocks,
                      total_allocated),
                     drv._get_capacity_info(self._FAKE_SHARE))

View File

@ -163,56 +163,10 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
hashed)
return path
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""
info_file = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_file, empty_if_missing=True)
if snap_info == {}:
# No info file = no snapshots exist
vol_path = os.path.basename(self._local_path_volume(volume))
return vol_path
return snap_info['active']
@utils.synchronized('glusterfs', external=False)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.info(_('Cloning volume %(src)s to volume %(dst)s') %
{'src': src_vref['id'],
'dst': volume['id']})
if src_vref['status'] != 'available':
msg = _("Volume status must be 'available'.")
raise exception.InvalidVolume(msg)
volume_name = CONF.volume_name_template % volume['id']
volume_info = {'provider_location': src_vref['provider_location'],
'size': src_vref['size'],
'id': volume['id'],
'name': volume_name,
'status': src_vref['status']}
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % src_vref['id'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
self._create_snapshot(temp_snapshot)
try:
self._copy_volume_from_snapshot(temp_snapshot,
volume_info,
src_vref['size'])
finally:
self._delete_snapshot(temp_snapshot)
return {'provider_location': src_vref['provider_location']}
self._create_cloned_volume(volume, src_vref)
@utils.synchronized('glusterfs', external=False)
def create_volume(self, volume):
@ -230,26 +184,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
@utils.synchronized('glusterfs', external=False)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Snapshot must not be the active snapshot. (offline)
"""
if snapshot['status'] != 'available':
msg = _('Snapshot status must be "available" to clone.')
raise exception.InvalidSnapshot(msg)
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
self._do_create_volume(volume)
self._copy_volume_from_snapshot(snapshot,
volume,
snapshot['volume_size'])
return {'provider_location': volume['provider_location']}
self._create_volume_from_snapshot(volume, snapshot)
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume.
@ -321,229 +256,6 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
return self._create_snapshot(snapshot)
def _create_snapshot(self, snapshot):
"""Create a snapshot.
If volume is attached, call to Nova to create snapshot,
providing a qcow2 file.
Otherwise, create locally with qemu-img.
A file named volume-<uuid>.info is stored with the volume
data and is a JSON table which contains a mapping between
Cinder snapshot UUIDs and filenames, as these associations
will change as snapshots are deleted.
Basic snapshot operation:
1. Initial volume file:
volume-1234
2. Snapshot created:
volume-1234 <- volume-1234.aaaa
volume-1234.aaaa becomes the new "active" disk image.
If the volume is not attached, this filename will be used to
attach the volume to a VM at volume-attach time.
If the volume is attached, the VM will switch to this file as
part of the snapshot process.
Note that volume-1234.aaaa represents changes after snapshot
'aaaa' was created. So the data for snapshot 'aaaa' is actually
in the backing file(s) of volume-1234.aaaa.
This file has a qcow2 header recording the fact that volume-1234 is
its backing file. Delta changes since the snapshot was created are
stored in this file, and the backing file (volume-1234) does not
change.
info file: { 'active': 'volume-1234.aaaa',
'aaaa': 'volume-1234.aaaa' }
3. Second snapshot created:
volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
volume-1234.bbbb now becomes the "active" disk image, recording
changes made to the volume.
info file: { 'active': 'volume-1234.bbbb',
'aaaa': 'volume-1234.aaaa',
'bbbb': 'volume-1234.bbbb' }
4. First snapshot deleted:
volume-1234 <- volume-1234.aaaa(* now with bbbb's data)
volume-1234.aaaa is removed (logically) from the snapshot chain.
The data from volume-1234.bbbb is merged into it.
(*) Since bbbb's data was committed into the aaaa file, we have
"removed" aaaa's snapshot point but the .aaaa file now
represents snapshot with id "bbbb".
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.aaaa' (* changed!)
}
5. Second snapshot deleted:
volume-1234
volume-1234.bbbb is removed from the snapshot chain, as above.
The base image, volume-1234, becomes the active image for this
volume again. If in-use, the VM begins using the volume-1234.bbbb
file immediately as part of the snapshot delete process.
info file: { 'active': 'volume-1234' }
For the above operations, Cinder handles manipulation of qcow2 files
when the volume is detached. When attached, Cinder creates and deletes
qcow2 files, but Nova is responsible for transitioning the VM between
them and handling live transfers of data between files as required.
"""
status = snapshot['volume']['status']
if status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use"'
' for snapshot. (is %s)') % status
raise exception.InvalidVolume(msg)
if status == 'in-use':
# Perform online snapshot via Nova
context = snapshot['context']
backing_filename = self.get_active_image_from_info(
snapshot['volume'])
path_to_disk = self._local_path_volume(snapshot['volume'])
new_snap_path = '%s.%s' % (
self._local_path_volume(snapshot['volume']),
snapshot['id'])
self._create_qcow2_snap_file(snapshot,
backing_filename,
new_snap_path)
connection_info = {
'type': 'qcow2',
'new_file': os.path.basename(new_snap_path),
'snapshot_id': snapshot['id']
}
try:
result = self._nova.create_volume_snapshot(
context,
snapshot['volume_id'],
connection_info)
LOG.debug('nova call result: %s' % result)
except Exception as e:
LOG.error(_('Call to Nova to create snapshot failed'))
LOG.exception(e)
raise e
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 600
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'creating':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == 'error':
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.GlusterfsException(msg)
LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
'id': snapshot['id'],
'status': s['status']
})
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for creation of snapshot %s.') % snapshot['id']
raise exception.GlusterfsException(msg)
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path, empty_if_missing=True)
snap_info['active'] = os.path.basename(new_snap_path)
snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
return
LOG.debug('create snapshot: %s' % snapshot)
LOG.debug('volume id: %s' % snapshot['volume_id'])
path_to_disk = self._local_path_volume(snapshot['volume'])
self._create_snapshot_offline(snapshot, path_to_disk)
def _create_qcow2_snap_file(self, snapshot, backing_filename,
new_snap_path):
"""Create a QCOW2 file backed by another file.
:param snapshot: snapshot reference
:param backing_filename: filename of file that will back the
new qcow2 file
:param new_snap_path: filename of new qcow2 file
"""
backing_path_full_path = '%s/%s' % (
self._local_volume_dir(snapshot['volume']),
backing_filename)
command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
'backing_file=%s' % backing_path_full_path, new_snap_path]
self._execute(*command, run_as_root=True)
info = self._qemu_img_info(backing_path_full_path)
backing_fmt = info.file_format
command = ['qemu-img', 'rebase', '-u',
'-b', backing_filename,
'-F', backing_fmt,
new_snap_path]
self._execute(*command, run_as_root=True)
self._set_rw_permissions_for_all(new_snap_path)
def _create_snapshot_offline(self, snapshot, path_to_disk):
"""Create snapshot (offline case)."""
# Requires volume status = 'available'
new_snap_path = '%s.%s' % (path_to_disk, snapshot['id'])
backing_filename = self.get_active_image_from_info(snapshot['volume'])
self._create_qcow2_snap_file(snapshot,
backing_filename,
new_snap_path)
# Update info file
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path,
empty_if_missing=True)
snap_info['active'] = os.path.basename(new_snap_path)
snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
def _get_matching_backing_file(self, backing_chain, snapshot_file):
return next(f for f in backing_chain
if f.get('backing-filename', '') == snapshot_file)
@ -553,198 +265,6 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
"""Apply locking to the delete snapshot operation."""
self._delete_snapshot(snapshot)
def _delete_snapshot(self, snapshot):
"""Delete a snapshot.
If volume status is 'available', delete snapshot here in Cinder
using qemu-img.
If volume status is 'in-use', calculate what qcow2 files need to
merge, and call to Nova to perform this operation.
:raises: InvalidVolume if status not acceptable
:raises: GlusterfsException(msg) if operation fails
:returns: None
"""
LOG.debug('deleting snapshot %s' % snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use".')
raise exception.InvalidVolume(msg)
self._ensure_share_writable(
self._local_volume_dir(snapshot['volume']))
# Determine the true snapshot file for this snapshot
# based on the .info file
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot['id'] not in snap_info:
# If snapshot info file is present, but snapshot record does not
# exist, do not attempt to delete.
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info(_('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.') % snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
LOG.debug('snapshot_file for this snap is %s' % snapshot_file)
snapshot_path = '%s/%s' % (self._local_volume_dir(snapshot['volume']),
snapshot_file)
snapshot_path_img_info = self._qemu_img_info(snapshot_path)
vol_path = self._local_volume_dir(snapshot['volume'])
# Find what file has this as its backing file
active_file = self.get_active_image_from_info(snapshot['volume'])
active_file_path = '%s/%s' % (vol_path, active_file)
if volume_status == 'in-use':
# Online delete
context = snapshot['context']
base_file = snapshot_path_img_info.backing_file
if base_file is None:
# There should always be at least the original volume
# file as base.
msg = _('No backing file found for %s, allowing snapshot '
'to be deleted.') % snapshot_path
LOG.warn(msg)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
return self._delete_stale_snapshot(snapshot)
base_path = os.path.join(
self._local_volume_dir(snapshot['volume']), base_file)
base_file_img_info = self._qemu_img_info(base_path)
new_base_file = base_file_img_info.backing_file
base_id = None
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
for key, value in snap_info.iteritems():
if value == base_file and key != 'active':
base_id = key
break
if base_id is None:
# This means we are deleting the oldest snapshot
msg = 'No %(base_id)s found for %(file)s' % {
'base_id': 'base_id',
'file': snapshot_file}
LOG.debug(msg)
online_delete_info = {
'active_file': active_file,
'snapshot_file': snapshot_file,
'base_file': base_file,
'base_id': base_id,
'new_base_file': new_base_file
}
return self._delete_snapshot_online(context,
snapshot,
online_delete_info)
if snapshot_file == active_file:
# Need to merge snapshot_file into its backing file
# There is no top file
# T0 | T1 |
# base | snapshot_file | None
# (guaranteed to| (being deleted) |
# exist) | |
base_file = snapshot_path_img_info.backing_file
self._qemu_img_commit(snapshot_path)
self._execute('rm', '-f', snapshot_path, run_as_root=True)
# Remove snapshot_file from info
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
del(snap_info[snapshot['id']])
# Active file has changed
snap_info['active'] = base_file
self._write_info_file(info_path, snap_info)
else:
# T0 | T1 | T2 | T3
# base | snapshot_file | higher_file | highest_file
#(guaranteed to | (being deleted)|(guaranteed to | (may exist,
# exist, not | | exist, being |needs ptr update
# used here) | | committed down)| if so)
backing_chain = self._get_backing_chain_for_path(
snapshot['volume'], active_file_path)
# This file is guaranteed to exist since we aren't operating on
# the active file.
higher_file = next((os.path.basename(f['filename'])
for f in backing_chain
if f.get('backing-filename', '') ==
snapshot_file),
None)
if higher_file is None:
msg = _('No file found with %s as backing file.') %\
snapshot_file
raise exception.GlusterfsException(msg)
snap_info = self._read_info_file(info_path)
higher_id = next((i for i in snap_info
if snap_info[i] == higher_file
and i != 'active'),
None)
if higher_id is None:
msg = _('No snap found with %s as backing file.') %\
higher_file
raise exception.GlusterfsException(msg)
# Is there a file depending on higher_file?
highest_file = next((os.path.basename(f['filename'])
for f in backing_chain
if f.get('backing-filename', '') ==
higher_file),
None)
if highest_file is None:
msg = 'No file depends on %s.' % higher_file
LOG.debug(msg)
# Committing higher_file into snapshot_file
# And update pointer in highest_file
higher_file_path = '%s/%s' % (vol_path, higher_file)
self._qemu_img_commit(higher_file_path)
if highest_file is not None:
highest_file_path = '%s/%s' % (vol_path, highest_file)
info = self._qemu_img_info(snapshot_path)
snapshot_file_fmt = info.file_format
backing_fmt = ('-F', snapshot_file_fmt)
self._execute('qemu-img', 'rebase', '-u',
'-b', snapshot_file,
highest_file_path, *backing_fmt,
run_as_root=True)
self._execute('rm', '-f', higher_file_path, run_as_root=True)
# Remove snapshot_file from info
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
del(snap_info[snapshot['id']])
snap_info[higher_id] = snapshot_file
if higher_file == active_file:
if highest_file is not None:
msg = _('Check condition failed: '
'%s expected to be None.') % 'highest_file'
raise exception.GlusterfsException(msg)
# Active file has changed
snap_info['active'] = snapshot_file
self._write_info_file(info_path, snap_info)
def _delete_snapshot_online(self, context, snapshot, info):
# Update info over the course of this method
# active file never changes
@ -839,23 +359,6 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
self._local_volume_dir(snapshot['volume']), file_to_delete)
self._execute('rm', '-f', path_to_delete, run_as_root=True)
def _delete_stale_snapshot(self, snapshot):
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
if snapshot['id'] in snap_info:
snapshot_file = snap_info[snapshot['id']]
active_file = self.get_active_image_from_info(snapshot['volume'])
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']), snapshot_file)
if (snapshot_file == active_file):
return
LOG.info(_('Deleting stale snapshot: %s') % snapshot['id'])
fileutils.delete_if_exists(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
@ -907,40 +410,8 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
@utils.synchronized('glusterfs', external=False)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = '%s/%s' % (self._local_volume_dir(volume),
active_file)
info = self._qemu_img_info(active_file_path)
backing_file = info.backing_file
if backing_file:
snapshots_exist = True
else:
snapshots_exist = False
root_file_fmt = info.file_format
tmp_params = {
'prefix': '%s.temp_image.%s' % (volume['id'], image_meta['id']),
'suffix': '.img'
}
with image_utils.temporary_file(**tmp_params) as temp_path:
if snapshots_exist or (root_file_fmt != 'raw'):
# Convert due to snapshots
# or volume data not being stored in raw format
# (upload_volume assumes raw format input)
image_utils.convert_image(active_file_path, temp_path, 'raw')
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path)
self._copy_volume_to_image(context, volume, image_service,
image_meta)
@utils.synchronized('glusterfs', external=False)
def extend_volume(self, volume, size_gb):
@ -1103,3 +574,68 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
return super(GlusterfsDriver, self).backup_volume(
context, backup, backup_service)
def _create_snapshot_online(self, snapshot, backing_filename,
new_snap_path):
# Perform online snapshot via Nova
context = snapshot['context']
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
connection_info = {
'type': 'qcow2',
'new_file': os.path.basename(new_snap_path),
'snapshot_id': snapshot['id']
}
try:
result = self._nova.create_volume_snapshot(
context,
snapshot['volume_id'],
connection_info)
LOG.debug('nova call result: %s' % result)
except Exception as e:
LOG.error(_('Call to Nova to create snapshot failed'))
LOG.exception(e)
raise e
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 600
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'creating':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == 'error':
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.RemoteFSException(msg)
LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
'id': snapshot['id'],
'status': s['status']
})
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for creation of snapshot %s.') % snapshot['id']
raise exception.RemoteFSException(msg)

View File

@ -170,7 +170,7 @@ class RemoteFSDriver(driver.VolumeDriver):
mounted_path = self.local_path(volume)
self._execute('rm', '-f', mounted_path, run_as_root=True)
self._delete(mounted_path)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
@ -192,6 +192,11 @@ class RemoteFSDriver(driver.VolumeDriver):
"""
pass
def _delete(self, path):
    """Remove the file at *path*, tolerating a missing target (rm -f).

    Note(lpetrut): this method is needed in order to provide
    interoperability with Windows as it will be overridden.
    """
    self._execute('rm', '-f', path, run_as_root=True)
def _create_sparsed_file(self, path, size):
"""Creates file with 0 disk usage."""
self._execute('truncate', '-s', '%sG' % size,
@ -378,17 +383,24 @@ class RemoteFSSnapDriver(RemoteFSDriver):
super(RemoteFSSnapDriver, self).__init__(*args, **kwargs)
def _local_volume_dir(self, volume):
raise NotImplementedError()
share = volume['provider_location']
local_dir = self._get_mount_point_for_share(share)
return local_dir
def _local_path_volume(self, volume):
path_to_disk = '%s/%s' % (
path_to_disk = os.path.join(
self._local_volume_dir(volume),
volume['name'])
return path_to_disk
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot['volume'])
snap_path = '%s.%s' % (vol_path, snapshot['id'])
return snap_path
def _local_path_volume_info(self, volume):
return '%s%s' % (self._local_path_volume(volume), '.info')
return '%s%s' % (self.local_path(volume), '.info')
def _read_file(self, filename):
"""This method is to make it easier to stub out code for testing.
@ -421,8 +433,13 @@ class RemoteFSSnapDriver(RemoteFSDriver):
return info
def _qemu_img_commit(self, path):
    """Commit the qcow2 overlay at *path* into its backing file."""
    return self._execute('qemu-img', 'commit', path, run_as_root=True)
def _img_commit(self, path):
    """Commit the overlay at *path* into its backing file, then remove
    the now-redundant overlay file itself.
    """
    self._execute('qemu-img', 'commit', path, run_as_root=True)
    self._delete(path)
def _rebase_img(self, image, backing_file, volume_format):
    """Repoint *image* at a new backing file.

    '-u' performs an unsafe (metadata-only) rebase: no data is copied,
    so the caller must guarantee the new backing file content matches.
    """
    self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image,
                  '-F', volume_format, run_as_root=True)
def _read_info_file(self, info_path, empty_if_missing=False):
"""Return dict of snapshot information.
@ -526,3 +543,444 @@ class RemoteFSSnapDriver(RemoteFSDriver):
'Cinder volume service. Snapshot operations will not be '
'supported.') % {'dir': path}
raise exception.RemoteFSException(msg)
def _copy_volume_to_image(self, context, volume, image_service,
                          image_meta):
    """Copy the volume to the specified image.

    :param context: request context
    :param volume: volume reference
    :param image_service: Glance service client
    :param image_meta: target image metadata
    """
    # If snapshots exist, flatten to a temporary image, and upload it

    active_file = self.get_active_image_from_info(volume)
    active_file_path = os.path.join(self._local_volume_dir(volume),
                                    active_file)
    info = self._qemu_img_info(active_file_path)
    backing_file = info.backing_file

    root_file_fmt = info.file_format

    tmp_params = {
        'prefix': '%s.temp_image.%s' % (volume['id'], image_meta['id']),
        'suffix': '.img'
    }
    with image_utils.temporary_file(**tmp_params) as temp_path:
        if backing_file or (root_file_fmt != 'raw'):
            # Convert due to snapshots
            # or volume data not being stored in raw format
            # (upload_volume assumes raw format input)
            image_utils.convert_image(active_file_path, temp_path, 'raw')
            upload_path = temp_path
        else:
            # Already a flat raw file; upload it directly.
            upload_path = active_file_path

        image_utils.upload_volume(context,
                                  image_service,
                                  image_meta,
                                  upload_path)
def get_active_image_from_info(self, volume):
    """Return the file name of the volume's active image.

    Looks up the 'active' entry of the snapshot info file; when no
    info file exists (i.e. no snapshots), the base volume file itself
    is the active image.
    """
    snap_info = self._read_info_file(
        self._local_path_volume_info(volume), empty_if_missing=True)
    if snap_info:
        return snap_info['active']
    # No info file = no snapshots exist
    return os.path.basename(self.local_path(volume))
def _create_cloned_volume(self, volume, src_vref):
    """Clone *src_vref* into *volume* via a temporary snapshot.

    A throwaway snapshot of the source volume is created, copied into
    the new volume, and deleted again in a finally block, so the
    source only needs to be 'available' (not attached).

    :returns: model update carrying the source's provider_location
    :raises InvalidVolume: if the source volume is not 'available'
    """
    LOG.info(_('Cloning volume %(src)s to volume %(dst)s') %
             {'src': src_vref['id'],
              'dst': volume['id']})

    if src_vref['status'] != 'available':
        msg = _("Volume status must be 'available'.")
        raise exception.InvalidVolume(msg)

    volume_name = CONF.volume_name_template % volume['id']

    # Lightweight dict stand-ins for the volume/snapshot objects the
    # copy helpers expect.
    volume_info = {'provider_location': src_vref['provider_location'],
                   'size': src_vref['size'],
                   'id': volume['id'],
                   'name': volume_name,
                   'status': src_vref['status']}
    temp_snapshot = {'volume_name': volume_name,
                     'size': src_vref['size'],
                     'volume_size': src_vref['size'],
                     'name': 'clone-snap-%s' % src_vref['id'],
                     'volume_id': src_vref['id'],
                     'id': 'tmp-snap-%s' % src_vref['id'],
                     'volume': src_vref}
    self._create_snapshot(temp_snapshot)
    try:
        self._copy_volume_from_snapshot(temp_snapshot,
                                        volume_info,
                                        volume['size'])
    finally:
        # Always clean up the temporary snapshot, even on copy failure.
        self._delete_snapshot(temp_snapshot)

    return {'provider_location': src_vref['provider_location']}
def _delete_stale_snapshot(self, snapshot):
    """Remove a snapshot overlay that is no longer part of the chain.

    Does nothing when the stale file is still the active image, since
    deleting it would lose data.
    """
    info_path = self._local_path_volume_info(snapshot['volume'])
    snap_info = self._read_info_file(info_path)

    snapshot_file = snap_info[snapshot['id']]
    active_file = self.get_active_image_from_info(snapshot['volume'])
    snapshot_path = os.path.join(
        self._local_volume_dir(snapshot['volume']), snapshot_file)
    if (snapshot_file == active_file):
        return

    LOG.info(_('Deleting stale snapshot: %s') % snapshot['id'])
    self._delete(snapshot_path)
    # Drop the stale entry from the info map and persist it.
    del(snap_info[snapshot['id']])
    self._write_info_file(info_path, snap_info)
def _delete_snapshot(self, snapshot):
    """Delete a snapshot.

    If volume status is 'available', delete snapshot here in Cinder
    using qemu-img.

    If volume status is 'in-use', calculate what qcow2 files need to
    merge, and call to Nova to perform this operation.

    :raises: InvalidVolume if status not acceptable
    :raises: RemotefsException(msg) if operation fails
    :returns: None
    """
    LOG.debug('Deleting snapshot %s:' % snapshot['id'])

    volume_status = snapshot['volume']['status']
    if volume_status not in ['available', 'in-use']:
        msg = _('Volume status must be "available" or "in-use".')
        raise exception.InvalidVolume(msg)

    self._ensure_share_writable(
        self._local_volume_dir(snapshot['volume']))

    # Determine the true snapshot file for this snapshot
    # based on the .info file
    info_path = self._local_path_volume_info(snapshot['volume'])
    snap_info = self._read_info_file(info_path, empty_if_missing=True)

    if snapshot['id'] not in snap_info:
        # If snapshot info file is present, but snapshot record does not
        # exist, do not attempt to delete.
        # (This happens, for example, if snapshot_create failed due to lack
        # of permission to write to the share.)
        LOG.info(_('Snapshot record for %s is not present, allowing '
                   'snapshot_delete to proceed.') % snapshot['id'])
        return

    snapshot_file = snap_info[snapshot['id']]
    LOG.debug('snapshot_file for this snap is: %s' % snapshot_file)
    snapshot_path = os.path.join(
        self._local_volume_dir(snapshot['volume']),
        snapshot_file)

    snapshot_path_img_info = self._qemu_img_info(snapshot_path)

    vol_path = self._local_volume_dir(snapshot['volume'])

    # Find what file has this as its backing file
    active_file = self.get_active_image_from_info(snapshot['volume'])
    active_file_path = os.path.join(vol_path, active_file)

    if volume_status == 'in-use':
        # Online delete
        context = snapshot['context']

        base_file = snapshot_path_img_info.backing_file
        if base_file is None:
            # There should always be at least the original volume
            # file as base.
            msg = _('No backing file found for %s, allowing snapshot '
                    'to be deleted.') % snapshot_path
            LOG.warn(msg)

            # Snapshot may be stale, so just delete it and update the
            # info file instead of blocking
            return self._delete_stale_snapshot(snapshot)

        base_path = os.path.join(
            self._local_volume_dir(snapshot['volume']), base_file)
        base_file_img_info = self._qemu_img_info(base_path)
        new_base_file = base_file_img_info.backing_file

        # Map the backing file back to the snapshot ID that owns it.
        base_id = None
        for key, value in snap_info.iteritems():
            if value == base_file and key != 'active':
                base_id = key
                break
        if base_id is None:
            # This means we are deleting the oldest snapshot
            msg = 'No %(base_id)s found for %(file)s' % {
                'base_id': 'base_id',
                'file': snapshot_file}
            LOG.debug(msg)

        online_delete_info = {
            'active_file': active_file,
            'snapshot_file': snapshot_file,
            'base_file': base_file,
            'base_id': base_id,
            'new_base_file': new_base_file
        }

        # Nova performs the merge while the VM keeps running.
        return self._delete_snapshot_online(context,
                                            snapshot,
                                            online_delete_info)

    if snapshot_file == active_file:
        # Need to merge snapshot_file into its backing file
        # There is no top file
        # T0       |        T1         |
        # base     |   snapshot_file   | None
        # (guaranteed to|  (being deleted) |
        # exist)   |                   |

        base_file = snapshot_path_img_info.backing_file

        self._img_commit(snapshot_path)

        # Remove snapshot_file from info
        del(snap_info[snapshot['id']])
        # Active file has changed
        snap_info['active'] = base_file
        self._write_info_file(info_path, snap_info)
    else:
        # T0       |      T1        |     T2         |      T3
        # base     | snapshot_file  | higher_file    | highest_file
        # (guaranteed to | (being deleted)|(guaranteed to  | (may exist,
        # exist, not     |                | exist, being   | needs ptr
        # used here)     |                | committed down)| update if so)

        backing_chain = self._get_backing_chain_for_path(
            snapshot['volume'], active_file_path)
        # This file is guaranteed to exist since we aren't operating on
        # the active file.
        higher_file = next((os.path.basename(f['filename'])
                            for f in backing_chain
                            if f.get('backing-filename', '') ==
                            snapshot_file),
                           None)
        if higher_file is None:
            msg = _('No file found with %s as backing file.') %\
                snapshot_file
            raise exception.RemoteFSException(msg)

        higher_id = next((i for i in snap_info
                          if snap_info[i] == higher_file
                          and i != 'active'),
                         None)
        if higher_id is None:
            msg = _('No snap found with %s as backing file.') %\
                higher_file
            raise exception.RemoteFSException(msg)

        # Is there a file depending on higher_file?
        highest_file = next((os.path.basename(f['filename'])
                            for f in backing_chain
                            if f.get('backing-filename', '') ==
                            higher_file),
                            None)
        if highest_file is None:
            msg = 'No file depends on %s.' % higher_file
            LOG.debug(msg)

        # Committing higher_file into snapshot_file
        # And update pointer in highest_file
        higher_file_path = os.path.join(vol_path, higher_file)
        self._img_commit(higher_file_path)
        if highest_file is not None:
            highest_file_path = os.path.join(vol_path, highest_file)
            snapshot_file_fmt = snapshot_path_img_info.file_format
            self._rebase_img(highest_file_path, snapshot_file,
                             snapshot_file_fmt)

        # Remove snapshot_file from info
        del(snap_info[snapshot['id']])
        # higher_id's data now lives in snapshot_file (see docstring
        # of _create_snapshot for why the mapping shifts like this).
        snap_info[higher_id] = snapshot_file
        if higher_file == active_file:
            if highest_file is not None:
                msg = _('Check condition failed: '
                        '%s expected to be None.') % 'highest_file'
                raise exception.RemoteFSException(msg)
            # Active file has changed
            snap_info['active'] = snapshot_file

        self._write_info_file(info_path, snap_info)
def _create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot.

    Snapshot must not be the active snapshot. (offline)

    :returns: model update with the new volume's provider_location
    :raises InvalidSnapshot: if the snapshot is not 'available'
    """

    if snapshot['status'] != 'available':
        msg = _('Snapshot status must be "available" to clone.')
        raise exception.InvalidSnapshot(msg)

    self._ensure_shares_mounted()
    # Place the new volume on whichever share can hold it.
    volume['provider_location'] = self._find_share(volume['size'])

    self._do_create_volume(volume)

    self._copy_volume_from_snapshot(snapshot,
                                    volume,
                                    volume['size'])

    return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
    """Copy snapshot data into a destination volume (driver-specific)."""
    raise NotImplementedError()
def _do_create_snapshot(self, snapshot, backing_filename,
                        new_snap_path):
    """Create a QCOW2 file backed by another file.

    :param snapshot: snapshot reference
    :param backing_filename: filename of file that will back the
        new qcow2 file
    :param new_snap_path: filename of new qcow2 file
    """
    backing_path_full_path = os.path.join(
        self._local_volume_dir(snapshot['volume']),
        backing_filename)

    command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
               'backing_file=%s' % backing_path_full_path, new_snap_path]
    self._execute(*command, run_as_root=True)

    info = self._qemu_img_info(backing_path_full_path)
    backing_fmt = info.file_format

    # Metadata-only rebase ('-u') so the overlay records the backing
    # file by its bare name rather than the absolute mount path.
    command = ['qemu-img', 'rebase', '-u',
               '-b', backing_filename,
               '-F', backing_fmt,
               new_snap_path]
    self._execute(*command, run_as_root=True)

    self._set_rw_permissions_for_all(new_snap_path)
def _create_snapshot(self, snapshot):
    """Create a snapshot.

    If volume is attached, call to Nova to create snapshot,
    providing a qcow2 file.
    Otherwise, create locally with qemu-img.

    A file named volume-<uuid>.info is stored with the volume
    data and is a JSON table which contains a mapping between
    Cinder snapshot UUIDs and filenames, as these associations
    will change as snapshots are deleted.

    Basic snapshot operation:

    1. Initial volume file:
        volume-1234

    2. Snapshot created:
        volume-1234  <- volume-1234.aaaa

        volume-1234.aaaa becomes the new "active" disk image.
        If the volume is not attached, this filename will be used to
        attach the volume to a VM at volume-attach time.
        If the volume is attached, the VM will switch to this file as
        part of the snapshot process.

        Note that volume-1234.aaaa represents changes after snapshot
        'aaaa' was created.  So the data for snapshot 'aaaa' is actually
        in the backing file(s) of volume-1234.aaaa.

        This file has a qcow2 header recording the fact that volume-1234 is
        its backing file.  Delta changes since the snapshot was created are
        stored in this file, and the backing file (volume-1234) does not
        change.

        info file: { 'active': 'volume-1234.aaaa',
                     'aaaa':   'volume-1234.aaaa' }

    3. Second snapshot created:
        volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb

        volume-1234.bbbb now becomes the "active" disk image, recording
        changes made to the volume.

        info file: { 'active': 'volume-1234.bbbb',
                     'aaaa':   'volume-1234.aaaa',
                     'bbbb':   'volume-1234.bbbb' }

    4. First snapshot deleted:
        volume-1234 <- volume-1234.aaaa(* now with bbbb's data)

        volume-1234.aaaa is removed (logically) from the snapshot chain.
        The data from volume-1234.bbbb is merged into it.

        (*) Since bbbb's data was committed into the aaaa file, we have
            "removed" aaaa's snapshot point but the .aaaa file now
            represents snapshot with id "bbbb".

        info file: { 'active': 'volume-1234.bbbb',
                     'bbbb':   'volume-1234.aaaa'   (* changed!)
                   }

    5. Second snapshot deleted:
        volume-1234

        volume-1234.bbbb is removed from the snapshot chain, as above.
        The base image, volume-1234, becomes the active image for this
        volume again.  If in-use, the VM begins using the volume-1234.bbbb
        file immediately as part of the snapshot delete process.

        info file: { 'active': 'volume-1234' }

    For the above operations, Cinder handles manipulation of qcow2 files
    when the volume is detached.  When attached, Cinder creates and deletes
    qcow2 files, but Nova is responsible for transitioning the VM between
    them and handling live transfers of data between files as required.
    """
    status = snapshot['volume']['status']
    if status not in ['available', 'in-use']:
        msg = _('Volume status must be "available" or "in-use"'
                ' for snapshot. (is %s)') % status
        raise exception.InvalidVolume(msg)

    info_path = self._local_path_volume_info(snapshot['volume'])
    snap_info = self._read_info_file(info_path, empty_if_missing=True)
    # The current active image becomes the backing file of the new
    # overlay, so snapshots chain naturally.
    backing_filename = self.get_active_image_from_info(
        snapshot['volume'])
    new_snap_path = self._get_new_snap_path(snapshot)

    if status == 'in-use':
        self._create_snapshot_online(snapshot,
                                     backing_filename,
                                     new_snap_path)
    else:
        self._do_create_snapshot(snapshot,
                                 backing_filename,
                                 new_snap_path)

    # Record the new overlay as both this snapshot's file and the
    # volume's active image.
    snap_info['active'] = os.path.basename(new_snap_path)
    snap_info[snapshot['id']] = os.path.basename(new_snap_path)
    self._write_info_file(info_path, snap_info)
def _create_snapshot_online(self, snapshot, backing_filename,
                            new_snap_path):
    """Create a snapshot of an attached volume (driver-specific)."""
    raise NotImplementedError()
def _delete_snapshot_online(self, context, snapshot, info):
    """Delete a snapshot of an attached volume (driver-specific)."""
    raise NotImplementedError()

View File

@ -0,0 +1,590 @@
# Copyright (c) 2014 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from oslo.config import cfg
from cinder.brick.remotefs import remotefs
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers import remotefs as remotefs_drv
VERSION = '1.0.0'
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('smbfs_shares_config',
default='/etc/cinder/smbfs_shares',
help='File with the list of available smbfs shares.'),
cfg.StrOpt('smbfs_default_volume_format',
default='qcow2',
help=('Default format that will be used when creating volumes '
'if no volume format is specified. Can be set to: '
'raw, qcow2, vhd or vhdx.')),
cfg.BoolOpt('smbfs_sparsed_volumes',
default=True,
help=('Create volumes as sparsed files which take no space '
'rather than regular files when using raw format, '
'in which case volume creation takes lot of time.')),
cfg.FloatOpt('smbfs_used_ratio',
default=0.95,
help=('Percent of ACTUAL usage of the underlying volume '
'before no new volumes can be allocated to the volume '
'destination.')),
cfg.FloatOpt('smbfs_oversub_ratio',
default=1.0,
help=('This will compare the allocated to available space on '
'the volume destination. If the ratio exceeds this '
'number, the destination will no longer be valid.')),
cfg.StrOpt('smbfs_mount_point_base',
default='$state_path/mnt',
help=('Base dir containing mount points for smbfs shares.')),
cfg.StrOpt('smbfs_mount_options',
default='noperm,file_mode=0775,dir_mode=0775',
help=('Mount options passed to the smbfs client. See '
'mount.cifs man page for details.')),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
    """SMBFS based cinder volume driver.
    """

    # Identifiers reported to the volume manager / connector.
    driver_volume_type = 'smbfs'
    driver_prefix = 'smbfs'
    volume_backend_name = 'Generic_SMBFS'
    # Shares must look like //host/share.
    SHARE_FORMAT_REGEX = r'//.+/.+'
    VERSION = VERSION

    # Supported disk image formats; qemu-img refers to the legacy VHD
    # format as 'vpc'.
    _DISK_FORMAT_VHD = 'vhd'
    _DISK_FORMAT_VHD_LEGACY = 'vpc'
    _DISK_FORMAT_VHDX = 'vhdx'
    _DISK_FORMAT_RAW = 'raw'
    _DISK_FORMAT_QCOW2 = 'qcow2'
def __init__(self, execute=putils.execute, *args, **kwargs):
    """Initialize the driver and its RemoteFsClient mount helper.

    :param execute: callable used to run external commands
    """
    self._remotefsclient = None
    super(SmbfsDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(volume_opts)
    root_helper = utils.get_root_helper()
    # Mount point base and mount options come from the backend config.
    self.base = getattr(self.configuration,
                        'smbfs_mount_point_base')
    opts = getattr(self.configuration,
                   'smbfs_mount_options')
    # SMB shares are mounted through the generic remotefs client using
    # the cifs mount type.
    self._remotefsclient = remotefs.RemoteFsClient(
        'cifs', root_helper, execute=execute,
        smbfs_mount_point_base=self.base,
        smbfs_mount_options=opts)
    self.img_suffix = None
def initialize_connection(self, volume, connector):
    """Allow connection to connector and return connection info.

    :param volume: volume reference
    :param connector: connector reference
    :returns: dict with the share export, the active image file name
              and format, plus any per-share mount options.
    """
    # Find active image
    active_file = self.get_active_image_from_info(volume)
    active_file_path = os.path.join(self._local_volume_dir(volume),
                                    active_file)
    info = self._qemu_img_info(active_file_path)
    fmt = info.file_format

    data = {'export': volume['provider_location'],
            'format': fmt,
            'name': active_file}
    # Pass through per-share mount options, when configured.
    if volume['provider_location'] in self.shares:
        data['options'] = self.shares[volume['provider_location']]
    return {
        'driver_volume_type': self.driver_volume_type,
        'data': data,
        'mount_point_base': self._get_mount_point_base()
    }
def do_setup(self, context):
    """Validate the driver configuration and mount the shares.

    :param context: request context (unused)
    :raises SmbfsException: when the shares config file is missing,
        the mount point base is not absolute, or the ratio options
        are out of range.
    """
    config = self.configuration.smbfs_shares_config
    if not config:
        msg = (_("SMBFS config file not set (smbfs_shares_config)."))
        LOG.error(msg)
        raise exception.SmbfsException(msg)
    if not os.path.exists(config):
        msg = (_("SMBFS config file at %(config)s doesn't exist.") %
               {'config': config})
        LOG.error(msg)
        raise exception.SmbfsException(msg)
    if not os.path.isabs(self.base):
        msg = _("Invalid mount point base: %s") % self.base
        LOG.error(msg)
        raise exception.SmbfsException(msg)
    if not self.configuration.smbfs_oversub_ratio > 0:
        msg = _(
            "SMBFS config 'smbfs_oversub_ratio' invalid.  Must be > 0: "
            "%s") % self.configuration.smbfs_oversub_ratio

        LOG.error(msg)
        raise exception.SmbfsException(msg)

    # The original condition `(not ratio > 0) and (ratio <= 1)` could
    # never reject values above 1; the used ratio must lie in (0, 1].
    if not 0 < self.configuration.smbfs_used_ratio <= 1:
        msg = _("SMBFS config 'smbfs_used_ratio' invalid.  Must be > 0 "
                "and <= 1.0: %s") % self.configuration.smbfs_used_ratio
        LOG.error(msg)
        raise exception.SmbfsException(msg)

    self.shares = {}  # address : options
    self._ensure_shares_mounted()
def local_path(self, volume):
    """Get volume path (mounted locally fs path) for given volume.

    :param volume: volume reference
    """
    disk_format = self.get_volume_format(volume)
    path = os.path.join(self._local_volume_dir(volume), volume['name'])
    # VHD/VHDX images carry their format as a file name extension.
    if disk_format in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX):
        path = '%s.%s' % (path, disk_format)
    return path
def _local_path_volume_info(self, volume):
return '%s%s' % (self.local_path(volume), '.info')
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot['volume'])
snap_path, ext = os.path.splitext(vol_path)
snap_path += '.' + snapshot['id'] + ext
return snap_path
def get_volume_format(self, volume, qemu_format=False):
    """Return the disk format of the given volume.

    For an existing image the format is read via qemu-img; otherwise
    it comes from the volume type / metadata spec or the configured
    default.

    :param qemu_format: when True, report 'vhd' under qemu's name
                        'vpc' (and map 'vpc' back to 'vhd' otherwise).
    """
    volume_dir = self._local_volume_dir(volume)
    volume_path = os.path.join(volume_dir, volume['name'])

    if os.path.exists(volume_path):
        info = self._qemu_img_info(volume_path)
        volume_format = info.file_format
    else:
        volume_format = (
            self._get_volume_format_spec(volume) or
            self.configuration.smbfs_default_volume_format)

    if qemu_format and volume_format == self._DISK_FORMAT_VHD:
        volume_format = self._DISK_FORMAT_VHD_LEGACY
    elif volume_format == self._DISK_FORMAT_VHD_LEGACY:
        volume_format = self._DISK_FORMAT_VHD
    return volume_format
@utils.synchronized('smbfs', external=False)
def delete_volume(self, volume):
    """Deletes a logical volume.

    Removes the active image file (which may be a snapshot overlay
    rather than the base file) and the snapshot info file.
    """
    if not volume['provider_location']:
        LOG.warn(_('Volume %s does not have provider_location specified, '
                   'skipping.'), volume['name'])
        return

    self._ensure_share_mounted(volume['provider_location'])
    volume_dir = self._local_volume_dir(volume)
    mounted_path = os.path.join(volume_dir,
                                self.get_active_image_from_info(volume))
    if os.path.exists(mounted_path):
        self._delete(mounted_path)
    else:
        LOG.debug("Skipping deletion of volume %s as it does not exist." %
                  mounted_path)

    # Also drop the snapshot info file, if any.
    info_path = self._local_path_volume_info(volume)
    self._delete(info_path)
def get_qemu_version(self):
    """Return the installed qemu-img version.

    :returns: version as a list of ints (e.g. [1, 7, 1]), or None
              when qemu-img is not installed.
    """
    # qemu-img exits non-zero when run without arguments, but still
    # prints its version banner; don't check the exit code.
    # NOTE: the unpack target must not be named `_`, which would
    # shadow the module-level gettext translator used below and make
    # `_("...")` call a string (TypeError) on the warning path.
    out, _info = self._execute('qemu-img', check_exit_code=False)
    pattern = r"qemu-img version ([0-9\.]*)"
    version = re.match(pattern, out)
    if not version:
        LOG.warn(_("qemu-img is not installed."))
        return None
    return [int(x) for x in version.groups()[0].split('.')]
def _create_windows_image(self, volume_path, volume_size, volume_format):
    """Creates a VHD or VHDX file of a given size.

    :param volume_path: destination image path
    :param volume_size: size in GB
    :param volume_format: 'vhd' or 'vhdx'
    """
    # vhd is regarded as vpc by qemu
    if volume_format == self._DISK_FORMAT_VHD:
        volume_format = self._DISK_FORMAT_VHD_LEGACY
    else:
        # vhdx creation needs qemu-img >= 1.7.
        qemu_version = self.get_qemu_version()
        # NOTE(review): get_qemu_version() may return None when
        # qemu-img is missing; `None < [1, 7]` only evaluates on
        # Python 2 — confirm the intended behavior here.
        if qemu_version < [1, 7]:
            err_msg = _("This version of qemu-img does not support vhdx "
                        "images. Please upgrade to 1.7 or greater.")
            raise exception.SmbfsException(err_msg)

    self._execute('qemu-img', 'create', '-f', volume_format,
                  volume_path, str(volume_size * units.Gi),
                  run_as_root=True)
def _do_create_volume(self, volume):
    """Create a volume on given smbfs_share.

    :param volume: volume reference
    :raises InvalidVolume: if the image file already exists
    """
    volume_format = self.get_volume_format(volume)
    volume_path = self.local_path(volume)
    volume_size = volume['size']

    LOG.debug("Creating new volume at %s." % volume_path)

    if os.path.exists(volume_path):
        msg = _('File already exists at %s.') % volume_path
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    if volume_format in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX):
        # Windows formats are created through qemu-img directly.
        self._create_windows_image(volume_path, volume_size,
                                   volume_format)
    else:
        self.img_suffix = None
        if volume_format == self._DISK_FORMAT_QCOW2:
            self._create_qcow2_file(volume_path, volume_size)
        elif self.configuration.smbfs_sparsed_volumes:
            self._create_sparsed_file(volume_path, volume_size)
        else:
            # Fully preallocated raw file.
            self._create_regular_file(volume_path, volume_size)
    self._set_rw_permissions_for_all(volume_path)
def _get_capacity_info(self, smbfs_share):
"""Calculate available space on the SMBFS share.
:param smbfs_share: example //172.18.194.100/share
"""
mount_point = self._get_mount_point_for_share(smbfs_share)
df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point,
run_as_root=True)
block_size, blocks_total, blocks_avail = map(float, df.split())
total_available = block_size * blocks_avail
total_size = block_size * blocks_total
du, _ = self._execute('du', '-sb', '--apparent-size', '--exclude',
'*snapshot*', mount_point, run_as_root=True)
total_allocated = float(du.split()[0])
return total_size, total_available, total_allocated
def _find_share(self, volume_size_in_gib):
    """Choose SMBFS share among available ones for given volume size.

    For instances with more than one share that meets the criteria, the
    share with the least "allocated" space will be selected.

    :param volume_size_in_gib: int size in GB
    """
    if not self._mounted_shares:
        raise exception.SmbfsNoSharesMounted()

    best_share = None
    best_allocated = 0

    for share in self._mounted_shares:
        if not self._is_share_eligible(share, volume_size_in_gib):
            continue
        allocated = self._get_capacity_info(share)[2]
        # Keep the eligible share with the least allocated space;
        # ties go to the first one seen.
        if best_share is None or allocated < best_allocated:
            best_share = share
            best_allocated = allocated

    if best_share is None:
        raise exception.SmbfsNoSuitableShareFound(
            volume_size=volume_size_in_gib)

    LOG.debug('Selected %s as target smbfs share.' % best_share)
    return best_share
def _is_share_eligible(self, smbfs_share, volume_size_in_gib):
    """Verifies SMBFS share is eligible to host volume with given size.

    First validation step: ratio of actual space (used_space / total_space)
    is less than 'smbfs_used_ratio'. Second validation step: apparent space
    allocated (differs from actual space used when using sparse files)
    and compares the apparent available
    space (total_available * smbfs_oversub_ratio) to ensure enough space is
    available for the new volume.

    :param smbfs_share: smbfs share
    :param volume_size_in_gib: int size in GB
    """
    used_ratio = self.configuration.smbfs_used_ratio
    oversub_ratio = self.configuration.smbfs_oversub_ratio
    requested_volume_size = volume_size_in_gib * units.Gi

    total_size, total_available, total_allocated = \
        self._get_capacity_info(smbfs_share)

    # Oversubscription-adjusted view of the share.
    apparent_size = max(0, total_size * oversub_ratio)
    apparent_available = max(0, apparent_size - total_allocated)
    used = (total_size - total_available) / total_size

    # Check 1: actual usage below the configured threshold.
    if used > used_ratio:
        LOG.debug('%s is above smbfs_used_ratio.' % smbfs_share)
        return False
    # Check 2: enough apparent space left for the new volume.
    if apparent_available <= requested_volume_size:
        LOG.debug('%s is above smbfs_oversub_ratio.' % smbfs_share)
        return False
    # Check 3: allocation has not exceeded the oversubscription ratio.
    if total_allocated / total_size >= oversub_ratio:
        LOG.debug('%s reserved space is above smbfs_oversub_ratio.' %
                  smbfs_share)
        return False
    return True
@utils.synchronized('smbfs', external=False)
def create_snapshot(self, snapshot):
    """Apply locking to the create snapshot operation."""
    # Serialized with all other 'smbfs' operations on this process.
    return self._create_snapshot(snapshot)
def _create_snapshot_online(self, snapshot, backing_filename,
                            new_snap_path):
    """Reject online snapshot creation — not supported by this driver."""
    msg = _("This driver does not support snapshotting in-use volumes.")
    raise exception.SmbfsException(msg)
def _delete_snapshot_online(self, context, snapshot, info):
    """Reject online snapshot deletion — not supported by this driver."""
    msg = _("This driver does not support deleting in-use snapshots.")
    raise exception.SmbfsException(msg)
def _do_create_snapshot(self, snapshot, backing_filename, new_snap_path):
    """Create the snapshot overlay after checking format support."""
    self._check_snapshot_support(snapshot)
    super(SmbfsDriver, self)._do_create_snapshot(
        snapshot, backing_filename, new_snap_path)
def _check_snapshot_support(self, snapshot):
    """Reject snapshots of VHD/VHDX volumes.

    :raises InvalidVolume: for vhd/vhdx backed volumes
    """
    volume_format = self.get_volume_format(snapshot['volume'])
    # qemu-img does not yet support differencing vhd/vhdx
    if volume_format in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX):
        err_msg = _("Snapshots are not supported for this volume "
                    "format: %s") % volume_format
        raise exception.InvalidVolume(err_msg)
@utils.synchronized('smbfs', external=False)
def delete_snapshot(self, snapshot):
    """Apply locking to the delete snapshot operation."""
    return self._delete_snapshot(snapshot)
@utils.synchronized('smbfs', external=False)
def extend_volume(self, volume, size_gb):
    """Extend a volume to size_gb, serialized with other operations."""
    LOG.info(_('Extending volume %s.'), volume['id'])
    self._extend_volume(volume, size_gb)
def _extend_volume(self, volume, size_gb):
    """Grow the volume image to size_gb after support checks."""
    volume_path = self.local_path(volume)

    self._check_extend_volume_support(volume, size_gb)
    LOG.info(_('Resizing file to %sG...') % size_gb)

    self._do_extend_volume(volume_path, size_gb)
def _do_extend_volume(self, volume_path, size_gb):
    """Resize the image at volume_path to size_gb.

    :raises ExtendVolumeError: if the resulting size does not match
    """
    info = self._qemu_img_info(volume_path)
    fmt = info.file_format

    # Note(lpetrut): as for version 2.0, qemu-img cannot resize
    # vhd/x images. For the moment, we'll just use an intermediary
    # conversion in order to be able to do the resize.
    if fmt in (self._DISK_FORMAT_VHDX, self._DISK_FORMAT_VHD_LEGACY):
        temp_image = volume_path + '.tmp'
        image_utils.convert_image(volume_path, temp_image,
                                  self._DISK_FORMAT_RAW)
        image_utils.resize_image(temp_image, size_gb)
        image_utils.convert_image(temp_image, volume_path, fmt)
        self._delete(temp_image)
    else:
        image_utils.resize_image(volume_path, size_gb)

    if not self._is_file_size_equal(volume_path, size_gb):
        raise exception.ExtendVolumeError(
            reason='Resizing image file failed.')
def _check_extend_volume_support(self, volume, size_gb):
    """Reject extend when snapshots exist or the share lacks space.

    :raises InvalidVolume: if the volume has snapshots (the active
        image is not the base file)
    :raises ExtendVolumeError: if the share cannot fit the new size
    """
    volume_path = self.local_path(volume)
    active_file = self.get_active_image_from_info(volume)
    active_file_path = os.path.join(self._local_volume_dir(volume),
                                    active_file)

    if active_file_path != volume_path:
        msg = _('Extend volume is only supported for this '
                'driver when no snapshots exist.')
        raise exception.InvalidVolume(msg)

    # Only the delta needs to fit on the share.
    extend_by = int(size_gb) - volume['size']
    if not self._is_share_eligible(volume['provider_location'],
                                   extend_by):
        raise exception.ExtendVolumeError(reason='Insufficient space to '
                                          'extend volume %s to %sG.'
                                          % (volume['id'], size_gb))
@utils.synchronized('smbfs', external=False)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Upload the volume to Glance, serialized with other operations."""
    self._copy_volume_to_image(context, volume, image_service, image_meta)
@utils.synchronized('smbfs', external=False)
def create_volume_from_snapshot(self, volume, snapshot):
    """Create a volume from a snapshot, serialized with other operations.

    :returns: the model update produced by the base implementation;
        without this return the new volume's provider_location (the
        share it was created on) would be silently discarded.
    """
    return self._create_volume_from_snapshot(volume, snapshot)
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
    """Copy data from snapshot to destination volume.

    This is done with a qemu-img convert to raw/qcow2 from the snapshot
    qcow2.

    :param snapshot: snapshot reference
    :param volume: destination volume reference (or dict stand-in)
    :param volume_size: target size in GB
    """

    LOG.debug("Snapshot: %(snap)s, volume: %(vol)s, "
              "volume_size: %(size)s" %
              {'snap': snapshot['id'],
               'vol': volume['id'],
               'size': volume_size})

    info_path = self._local_path_volume_info(snapshot['volume'])
    snap_info = self._read_info_file(info_path)
    vol_dir = self._local_volume_dir(snapshot['volume'])
    out_format = self.get_volume_format(volume, qemu_format=True)

    forward_file = snap_info[snapshot['id']]
    forward_path = os.path.join(vol_dir, forward_file)

    # Find the file which backs this file, which represents the point
    # when this snapshot was created.
    img_info = self._qemu_img_info(forward_path)
    path_to_snap_img = os.path.join(vol_dir, img_info.backing_file)

    LOG.debug("Will copy from snapshot at %s" % path_to_snap_img)

    image_utils.convert_image(path_to_snap_img,
                              self.local_path(volume),
                              out_format)
    # Grow the copy to the requested size and make it shareable.
    self._extend_volume(volume, volume_size)

    self._set_rw_permissions_for_all(self.local_path(volume))
def copy_image_to_volume(self, context, volume, image_service, image_id):
    """Fetch the image from image_service and write it to the volume.

    :raises InvalidVolume: when conversion to vhdx is required but
        qemu-img is older than 1.7
    :raises ImageUnacceptable: when the resulting virtual size does
        not match the volume size
    """

    volume_format = self.get_volume_format(volume, qemu_format=True)
    image_meta = image_service.show(context, image_id)
    qemu_version = self.get_qemu_version()

    # Conversion to vhdx needs qemu-img >= 1.7; fetching an image that
    # is already vhdx does not.
    if (qemu_version < [1, 7] and (
            volume_format == self._DISK_FORMAT_VHDX and
            image_meta['disk_format'] != volume_format)):
        err_msg = _("Unsupported volume format: vhdx. qemu-img 1.7 or "
                    "higher is required in order to properly support this "
                    "format.")
        raise exception.InvalidVolume(err_msg)

    image_utils.fetch_to_volume_format(
        context, image_service, image_id,
        self.local_path(volume), volume_format,
        self.configuration.volume_dd_blocksize)

    self._do_extend_volume(self.local_path(volume), volume['size'])

    data = image_utils.qemu_img_info(self.local_path(volume))
    # NOTE(review): integer division here assumes Python 2 semantics
    # when virtual_size is an int — confirm before porting.
    virt_size = data.virtual_size / units.Gi
    if virt_size != volume['size']:
        raise exception.ImageUnacceptable(
            image_id=image_id,
            reason=(_("Expected volume size was %d") % volume['size'])
            + (_(" but size is now %d.") % virt_size))
# Serialized under the driver-wide 'smbfs' lock (process-local, since
# external=False), so clone operations do not run concurrently.
@utils.synchronized('smbfs', external=False)
def create_cloned_volume(self, volume, src_vref):
    """Creates a clone of the specified volume."""
    # Delegates the actual copy to _create_cloned_volume, which is
    # defined outside this view (presumably in a base driver class).
    self._create_cloned_volume(volume, src_vref)
def _ensure_share_mounted(self, smbfs_share):
    """Mount the given SMB share, applying any configured mount flags."""
    configured_flags = self.shares.get(smbfs_share)
    if configured_flags is not None:
        # The domain prefix must be stripped from the user name before
        # the credentials are handed to Samba.
        mount_flags = self.parse_credentials(configured_flags).split()
    else:
        mount_flags = []
    self._remotefsclient.mount(smbfs_share, mount_flags)
def parse_options(self, option_str):
    """Split a mount options string into flags and key/value options.

    '-o' markers are ignored; whitespace separates option groups and
    commas separate options within a group.

    :param option_str: raw options string (may be None or empty).
    :returns: tuple of (list of bare flags, dict of key=value options).
    """
    flag_options = []
    keyed_options = {}
    if not option_str:
        return flag_options, keyed_options

    tokens = (tok for tok in option_str.split() if tok != '-o')
    for token in tokens:
        for entry in token.split(','):
            pieces = entry.split('=')
            if len(pieces) > 1:
                # Only the first '=' matters; anything past the second
                # piece is dropped, matching the historical behavior.
                keyed_options[pieces[0]] = pieces[1]
            else:
                flag_options.append(pieces[0])
    return flag_options, keyed_options
def parse_credentials(self, mnt_flags):
    """Return a '-o' mount flags string with normalized SMB credentials.

    Samba expects the user name without a domain prefix, so any
    'DOMAIN\\user' value is reduced to 'user'. When no user name is
    present in the options, 'guest' is used.

    :param mnt_flags: raw mount options string (may be None or empty).
    :returns: '-o' flags string that always carries a 'username' option.
    """
    options_list, options_dict = self.parse_options(mnt_flags)
    username = (options_dict.pop('user', None) or
                options_dict.pop('username', None))
    if username:
        # Keep only the user name, dropping any 'DOMAIN\' prefix.
        options_dict['username'] = username.split('\\')[-1]
    else:
        options_dict['username'] = 'guest'
    # BUG FIX: dict.iteritems() exists only on Python 2; items() is
    # equivalent here and works on both Python 2 and 3.
    named_options = ','.join("%s=%s" % (key, val) for (key, val)
                             in options_dict.items())
    options_list = ','.join(options_list)
    # strip(',') removes the trailing comma left when options_list is
    # empty; named_options is never empty since 'username' is always set.
    flags = '-o ' + ','.join([named_options, options_list])
    return flags.strip(',')
def _get_volume_format_spec(self, volume):
extra_specs = []
metadata_specs = volume.get('volume_metadata') or []
extra_specs += metadata_specs
vol_type = volume.get('volume_type')
if vol_type:
volume_type_specs = vol_type.get('extra_specs') or []
extra_specs += volume_type_specs
for spec in extra_specs:
if 'volume_format' in spec.key:
return spec.value
return None
def _is_file_size_equal(self, path, size):
    """Tell whether the image at ``path`` has a virtual size of ``size`` GB."""
    img_info = image_utils.qemu_img_info(path)
    virtual_size_gb = img_info.virtual_size / units.Gi
    return virtual_size_gb == size

View File

@ -1861,6 +1861,42 @@
#scality_sofs_volume_dir=cinder/volumes
#
# Options defined in cinder.volume.drivers.smbfs
#
# File with the list of available smbfs shares. (string value)
#smbfs_shares_config=/etc/cinder/smbfs_shares
# Default format that will be used when creating volumes if no
# volume format is specified. Can be set to: raw, qcow2, vhd
# or vhdx. (string value)
#smbfs_default_volume_format=qcow2
# Create volumes as sparse files, which take no space, rather
# than regular files when using raw format, in which case
# volume creation takes a lot of time. (boolean value)
#smbfs_sparsed_volumes=true
# Percent of ACTUAL usage of the underlying volume before no
# new volumes can be allocated to the volume destination.
# (floating point value)
#smbfs_used_ratio=0.95
# This will compare the allocated to available space on the
# volume destination. If the ratio exceeds this number, the
# destination will no longer be valid. (floating point value)
#smbfs_oversub_ratio=1.0
# Base dir containing mount points for smbfs shares. (string
# value)
#smbfs_mount_point_base=$state_path/mnt
# Mount options passed to the smbfs client. See mount.cifs man
# page for details. (string value)
#smbfs_mount_options=noperm,file_mode=0775,dir_mode=0775
#
# Options defined in cinder.volume.drivers.solidfire
#