Merge "Remove backup-restore methods in driver code"

Jenkins 2017-05-22 16:22:30 +00:00 committed by Gerrit Code Review
commit 33e0c5d132
14 changed files with 0 additions and 1324 deletions

View File

@@ -1737,20 +1737,6 @@ class VSPHORCMFCDriverTest(test.TestCase):
mock_copy_image.assert_called_with(
self.ctxt, TEST_VOLUME[0], image_service, image_id)
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute)
def test_restore_backup(self, execute):
"""Normal case: Restore a backup volume."""
backup = 'fake_backup'
backup_service = 'fake_backup_service'
with mock.patch.object(driver.VolumeDriver, 'restore_backup') \
as mock_restore_backup:
self.driver.restore_backup(
self.ctxt, backup, TEST_VOLUME[0], backup_service)
mock_restore_backup.assert_called_with(
self.ctxt, backup, TEST_VOLUME[0], backup_service)
@mock.patch.object(utils, 'execute', side_effect=_cinder_execute)
def test_update_migrated_volume_success(self, execute):
"""Normal case: 'modify ldev -status discard_zero_page' succeeds."""

View File

@@ -1831,20 +1831,6 @@ class VSPHORCMISCSIDriverTest(test.TestCase):
mock_copy_image.assert_called_with(
self.ctxt, TEST_VOLUME[0], image_service, image_id)
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute)
def test_restore_backup(self, execute):
"""Normal case: Restore a backup volume."""
backup = 'fake_backup'
backup_service = 'fake_backup_service'
with mock.patch.object(driver.VolumeDriver, 'restore_backup') \
as mock_restore_backup:
self.driver.restore_backup(
self.ctxt, backup, TEST_VOLUME[0], backup_service)
mock_restore_backup.assert_called_with(
self.ctxt, backup, TEST_VOLUME[0], backup_service)
@mock.patch.object(utils, 'execute', side_effect=_cinder_execute)
def test_update_migrated_volume_success(self, execute):
"""Normal case: 'modify ldev -status discard_zero_page' succeeds."""

View File

@@ -1401,45 +1401,6 @@ class GPFSDriverTestCase(test.TestCase):
volume = self._fake_volume()
self.driver.copy_volume_to_image('', volume, '', '')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file')
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_create_gpfs_clone')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_backup_volume(self,
mock_local_path,
mock_create_gpfs_clone,
mock_gpfs_redirect,
mock_temp_chown,
mock_file_open,
mock_delete_gpfs_file):
volume = self._fake_volume()
self.driver.db = mock.Mock()
self.driver.db.volume_get = mock.Mock()
self.driver.db.volume_get.return_value = volume
backup = {}
backup['volume_id'] = 'test'
backup['id'] = '123456'
backup_service = mock.Mock()
mock_local_path.return_value = self.volumes_path
self.driver.backup_volume('', backup, backup_service)
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_restore_backup(self,
mock_local_path,
mock_temp_chown,
mock_file_open):
volume = self._fake_volume()
backup = {}
backup['id'] = '123456'
backup_service = mock.Mock()
mock_local_path.return_value = self.volumes_path
self.driver.restore_backup('', backup, volume, backup_service)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_can_migrate_locally')
@@ -2160,27 +2121,3 @@ class GPFSNFSDriverTestCase(test.TestCase):
mock_find_share.return_value = self.TEST_VOLUME_PATH
self.assertEqual({'provider_location': self.TEST_VOLUME_PATH},
self.driver.create_cloned_volume(volume, src_vref))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_delete_gpfs_file')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_do_backup')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_create_backup_source')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
'local_path')
def test_backup_volume(self,
mock_local_path,
mock_create_backup_source,
mock_do_backup,
mock_delete_gpfs_file):
volume = self._fake_volume()
self.driver.db = mock.Mock()
self.driver.db.volume_get = mock.Mock()
self.driver.db.volume_get.return_value = volume
backup = {}
backup['volume_id'] = 'test'
backup['id'] = '123456'
backup_service = mock.Mock()
mock_local_path.return_value = self.TEST_VOLUME_PATH
self.driver.backup_volume('', backup, backup_service)

View File

@@ -15,7 +15,6 @@ import os
import socket
import mock
import os_brick
from oslo_concurrency import processutils
from oslo_config import cfg
@@ -28,7 +27,6 @@ from cinder.tests.unit.brick import fake_lvm
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit.volume import test_driver
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import lvm
import cinder.volume.utils
@@ -135,41 +133,6 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
lvm_driver.check_for_setup_error()
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup_obj = tests_utils.create_backup(self.context,
vol['id'])
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
mock_volume_get.return_value = vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
def test_retype_volume(self):
vol = tests_utils.create_volume(self.context)
new_type = fake.VOLUME_TYPE_ID
@@ -216,52 +179,6 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
'provider_location': fake_provider},
update)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_inuse(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
mock_volume_get.return_value = vol
temp_snapshot = tests_utils.create_snapshot(self.context, vol['id'])
backup_obj = tests_utils.create_backup(self.context,
vol['id'])
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver._create_temp_snapshot = mock.MagicMock()
self.volume.driver._delete_temp_snapshot = mock.MagicMock()
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info
self.volume.driver._create_temp_snapshot.return_value = temp_snapshot
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
self.volume.driver._create_temp_snapshot.assert_called_once_with(
self.context, vol)
self.volume.driver._delete_temp_snapshot.assert_called_once_with(
self.context, temp_snapshot)
def test_create_volume_from_snapshot_none_sparse(self):
with mock.patch.object(self.volume.driver, 'vg'), \

View File

@@ -24,14 +24,11 @@ from oslo_concurrency import processutils
from oslo_utils import importutils
from oslo_utils import units
from cinder.backup import driver as backup_driver
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit.backup import fake_backup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
@@ -50,8 +47,6 @@ class SheepdogDriverTestDataGenerator(object):
self.TEST_CLONED_VOL_DATA)
self.TEST_SNAPSHOT = self._make_fake_snapshot(
self.TEST_SNAPSHOT_DATA, self.TEST_VOLUME)
self.TEST_BACKUP_VOLUME = self._make_fake_backup_volume(
self.TEST_BACKUP_VOL_DATA)
def sheepdog_cmd_error(self, cmd, exit_code, stdout, stderr):
return (('(Command: %(cmd)s) '
@@ -73,10 +68,6 @@ class SheepdogDriverTestDataGenerator(object):
snapshot_obj.volume = src_volume
return snapshot_obj
def _make_fake_backup_volume(self, backup_data):
return fake_backup.fake_backup_obj(context.get_admin_context(),
**backup_data)
def cmd_dog_vdi_create(self, name, size):
return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'create', name,
'%sG' % size, '-a', SHEEP_ADDR, '-p', SHEEP_PORT)
@@ -152,10 +143,6 @@ class SheepdogDriverTestDataGenerator(object):
'id': fake.SNAPSHOT_ID,
}
TEST_BACKUP_VOL_DATA = {
'volume_id': fake.VOLUME_ID,
}
COLLIE_NODE_INFO = """
0 107287605248 3623897354 3%
Total 107287605248 3623897354 3% 54760833024
@@ -1392,100 +1379,3 @@ class SheepdogDriverTestCase(test.TestCase):
self.driver.extend_volume(self.test_data.TEST_VOLUME, 10)
fake_execute.assert_called_once_with(self._vdiname, 10)
self.assertTrue(fake_logger.debug.called)
@mock.patch.object(db, 'volume_get')
@mock.patch.object(sheepdog.SheepdogDriver, '_try_execute')
@mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot')
@mock.patch.object(backup_driver, 'BackupDriver')
@mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot')
def test_backup_volume_success(self, fake_delete_snapshot,
fake_backup_service, fake_create_snapshot,
fake_execute, fake_volume_get):
fake_context = {}
fake_volume = self.test_data.TEST_VOLUME
fake_backup = self.test_data.TEST_BACKUP_VOLUME
fake_backup_service = mock.Mock()
fake_volume_get.return_value = fake_volume
self.driver.backup_volume(fake_context,
fake_backup,
fake_backup_service)
self.assertEqual(1, fake_create_snapshot.call_count)
self.assertEqual(2, fake_delete_snapshot.call_count)
self.assertEqual(fake_create_snapshot.call_args,
fake_delete_snapshot.call_args)
call_args, call_kwargs = fake_backup_service.backup.call_args
call_backup, call_sheepdog_fd = call_args
self.assertEqual(fake_backup, call_backup)
self.assertIsInstance(call_sheepdog_fd, sheepdog.SheepdogIOWrapper)
@mock.patch.object(db, 'volume_get')
@mock.patch.object(sheepdog.SheepdogDriver, '_try_execute')
@mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot')
@mock.patch.object(backup_driver, 'BackupDriver')
@mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot')
def test_backup_volume_fail_to_create_snap(self, fake_delete_snapshot,
fake_backup_service,
fake_create_snapshot,
fake_execute, fake_volume_get):
fake_context = {}
fake_volume = self.test_data.TEST_VOLUME
fake_backup = self.test_data.TEST_BACKUP_VOLUME
fake_volume_get.return_value = fake_volume
fake_create_snapshot.side_effect = exception.SheepdogCmdError(
cmd='dummy', exit_code=1, stdout='dummy', stderr='dummy')
self.assertRaises(exception.SheepdogError,
self.driver.backup_volume,
fake_context,
fake_backup,
fake_backup_service)
self.assertEqual(1, fake_create_snapshot.call_count)
self.assertEqual(1, fake_delete_snapshot.call_count)
self.assertEqual(fake_create_snapshot.call_args,
fake_delete_snapshot.call_args)
@mock.patch.object(db, 'volume_get')
@mock.patch.object(sheepdog.SheepdogDriver, '_try_execute')
@mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot')
@mock.patch.object(backup_driver, 'BackupDriver')
@mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot')
def test_backup_volume_fail_to_backup_vol(self, fake_delete_snapshot,
fake_backup_service,
fake_create_snapshot,
fake_execute, fake_volume_get):
fake_context = {}
fake_volume = self.test_data.TEST_VOLUME
fake_backup = self.test_data.TEST_BACKUP_VOLUME
fake_volume_get.return_value = fake_volume
class BackupError(Exception):
pass
fake_backup_service.backup.side_effect = BackupError()
self.assertRaises(BackupError,
self.driver.backup_volume,
fake_context,
fake_backup,
fake_backup_service)
self.assertEqual(1, fake_create_snapshot.call_count)
self.assertEqual(2, fake_delete_snapshot.call_count)
self.assertEqual(fake_create_snapshot.call_args,
fake_delete_snapshot.call_args)
@mock.patch.object(backup_driver, 'BackupDriver')
def test_restore_backup(self, fake_backup_service):
fake_context = {}
fake_backup = self.test_data.TEST_BACKUP_VOLUME
fake_volume = self.test_data.TEST_VOLUME
self.driver.restore_backup(
fake_context, fake_backup, fake_volume, fake_backup_service)
call_args, call_kwargs = fake_backup_service.restore.call_args
call_backup, call_volume_id, call_sheepdog_fd = call_args
self.assertEqual(fake_backup, call_backup)
self.assertEqual(fake_volume.id, call_volume_id)
self.assertIsInstance(call_sheepdog_fd, sheepdog.SheepdogIOWrapper)

View File

@@ -1269,307 +1269,6 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
vmdk_path,
dc)
@mock.patch.object(image_transfer, 'copy_stream_optimized_disk')
@mock.patch('cinder.volume.drivers.vmware.vmdk.open', create=True)
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, copy_disk):
self._test_backup_volume(session, vops, create_backing, generate_uuid,
temporary_file, file_open, copy_disk)
def _test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, copy_disk):
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
self._db.volume_get.return_value = volume
vops.get_backing.return_value = None
backing = mock.sentinel.backing
create_backing.return_value = backing
uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = uuid
tmp_file_path = mock.sentinel.tmp_file_path
temporary_file_ret = mock.Mock()
temporary_file.return_value = temporary_file_ret
temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
temporary_file_ret.__exit__ = mock.Mock(return_value=None)
vmdk_path = mock.sentinel.vmdk_path
vops.get_vmdk_path.return_value = vmdk_path
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
context = mock.sentinel.context
backup = {'id': 2, 'volume_id': 1}
backup_service = mock.Mock()
self._driver.backup_volume(context, backup, backup_service)
create_backing.assert_called_once_with(volume)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
self.assertEqual(mock.call(tmp_file_path, "wb"),
file_open.call_args_list[0])
copy_disk.assert_called_once_with(
context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
host=self.IP, port=self.PORT, vm=backing, vmdk_file_path=vmdk_path,
vmdk_size=volume['size'] * units.Gi)
self.assertEqual(mock.call(tmp_file_path, "rb"),
file_open.call_args_list[1])
backup_service.backup.assert_called_once_with(backup, tmp_file)
@mock.patch.object(VMDK_DRIVER, 'extend_volume')
@mock.patch.object(VMDK_DRIVER, '_restore_backing')
@mock.patch('cinder.volume.drivers.vmware.vmdk.open', create=True)
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_restore_backup(self, vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume):
self._test_restore_backup(vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume)
def _test_restore_backup(
self, vops, generate_uuid, temporary_file, file_open,
restore_backing, extend_volume):
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
backup = {'id': 2, 'size': 1}
context = mock.sentinel.context
backup_service = mock.Mock()
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
vops.snapshot_exists.return_value = True
self.assertRaises(
cinder_exceptions.InvalidVolume, self._driver.restore_backup,
context, backup, volume, backup_service)
uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = uuid
tmp_file_path = mock.sentinel.tmp_file_path
temporary_file_ret = mock.Mock()
temporary_file.return_value = temporary_file_ret
temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
temporary_file_ret.__exit__ = mock.Mock(return_value=None)
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
vops.snapshot_exists.return_value = False
self._driver.restore_backup(context, backup, volume, backup_service)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
file_open.assert_called_once_with(tmp_file_path, "wb")
backup_service.restore.assert_called_once_with(
backup, volume['id'], tmp_file)
restore_backing.assert_called_once_with(
context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
self.assertFalse(extend_volume.called)
temporary_file.reset_mock()
file_open.reset_mock()
backup_service.reset_mock()
restore_backing.reset_mock()
volume = {'name': 'vol-1', 'id': 1, 'size': 2}
self._driver.restore_backup(context, backup, volume, backup_service)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
file_open.assert_called_once_with(tmp_file_path, "wb")
backup_service.restore.assert_called_once_with(
backup, volume['id'], tmp_file)
restore_backing.assert_called_once_with(
context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
extend_volume.assert_called_once_with(volume, volume['size'])
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER,
'_create_backing_from_stream_optimized_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
def test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
self._test_restore_backing(
generate_uuid, create_backing, select_ds, get_disk_type, vops,
delete_temp_backing)
def _test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
src_uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = src_uuid
src = mock.sentinel.src
create_backing.return_value = src
summary = mock.Mock()
summary.datastore = mock.sentinel.datastore
select_ds.return_value = (mock.sentinel.host, mock.sentinel.rp,
mock.sentinel.folder, summary)
disk_type = vmdk.THIN_VMDK_TYPE
get_disk_type.return_value = disk_type
dest = mock.sentinel.dest
vops.clone_backing.return_value = dest
context = mock.sentinel.context
volume = {'name': 'vol-1',
'id': 'bd45dfe5-d411-435d-85ac-2605fe7d5d8f', 'size': 1}
backing = None
tmp_file_path = mock.sentinel.tmp_file_path
backup_size = units.Gi
self._driver._restore_backing(
context, volume, backing, tmp_file_path, backup_size)
create_backing.assert_called_once_with(
context, src_uuid, volume, tmp_file_path, backup_size)
vops.clone_backing.assert_called_once_with(
volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
summary.datastore, disk_type=disk_type, host=mock.sentinel.host,
resource_pool=mock.sentinel.rp, folder=mock.sentinel.folder)
vops.update_backing_disk_uuid.assert_called_once_with(dest,
volume['id'])
delete_temp_backing.assert_called_once_with(src)
create_backing.reset_mock()
vops.clone_backing.reset_mock()
vops.update_backing_disk_uuid.reset_mock()
delete_temp_backing.reset_mock()
dest_uuid = "de4b0708-f947-4abe-98f8-75e52ce03b7b"
tmp_uuid = "82c2a4f0-9064-4d95-bd88-6567a36018fa"
generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
backing = mock.sentinel.backing
self._driver._restore_backing(
context, volume, backing, tmp_file_path, backup_size)
create_backing.assert_called_once_with(
context, src_uuid, volume, tmp_file_path, backup_size)
vops.clone_backing.assert_called_once_with(
dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
summary.datastore, disk_type=disk_type, host=mock.sentinel.host,
resource_pool=mock.sentinel.rp, folder=mock.sentinel.folder)
vops.update_backing_disk_uuid.assert_called_once_with(dest,
volume['id'])
exp_rename_calls = [mock.call(backing, tmp_uuid),
mock.call(dest, volume['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
exp_delete_temp_backing_calls = [mock.call(backing), mock.call(src)]
self.assertEqual(exp_delete_temp_backing_calls,
delete_temp_backing.call_args_list)
delete_temp_backing.reset_mock()
vops.rename_backing.reset_mock()
def vops_rename(backing, new_name):
if backing == dest and new_name == volume['name']:
raise exceptions.VimException("error")
vops.rename_backing.side_effect = vops_rename
generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
self.assertRaises(
exceptions.VimException, self._driver._restore_backing, context,
volume, backing, tmp_file_path, backup_size)
exp_rename_calls = [mock.call(backing, tmp_uuid),
mock.call(dest, volume['name']),
mock.call(backing, volume['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
exp_delete_temp_backing_calls = [mock.call(dest), mock.call(src)]
self.assertEqual(exp_delete_temp_backing_calls,
delete_temp_backing.call_args_list)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(image_transfer, 'download_stream_optimized_data')
@mock.patch('cinder.volume.drivers.vmware.vmdk.open', create=True)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_create_backing_from_stream_optimized_file(
self, select_ds, session, get_storage_profile_id, get_disk_type,
vops, file_open, download_data, delete_temp_backing):
self._test_create_backing_from_stream_optimized_file(
select_ds, session, get_storage_profile_id, get_disk_type, vops,
file_open, download_data, delete_temp_backing)
def _test_create_backing_from_stream_optimized_file(
self, select_ds, session, get_storage_profile_id, get_disk_type,
vops, file_open, download_data, delete_temp_backing):
rp = mock.sentinel.rp
folder = mock.sentinel.folder
summary = mock.Mock()
summary.name = mock.sentinel.name
select_ds.return_value = (mock.ANY, rp, folder, summary)
import_spec = mock.Mock()
session.vim.client.factory.create.return_value = import_spec
profile_id = 'profile-1'
get_storage_profile_id.return_value = profile_id
disk_type = vmdk.THIN_VMDK_TYPE
get_disk_type.return_value = disk_type
create_spec = mock.Mock()
vops.get_create_spec.return_value = create_spec
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
vm_ref = mock.sentinel.vm_ref
download_data.return_value = vm_ref
context = mock.sentinel.context
name = 'vm-1'
volume = {'name': 'vol-1',
'id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e',
'size': 1}
tmp_file_path = mock.sentinel.tmp_file_path
file_size_bytes = units.Gi
ret = self._driver._create_backing_from_stream_optimized_file(
context, name, volume, tmp_file_path, file_size_bytes)
self.assertEqual(vm_ref, ret)
extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'],
volumeops.BACKING_UUID_KEY: volume['id']}
vops.get_create_spec.assert_called_once_with(
name, 0, disk_type, summary.name, profileId=profile_id,
extra_config=extra_config)
file_open.assert_called_once_with(tmp_file_path, "rb")
download_data.assert_called_once_with(
context, self._config.vmware_image_transfer_timeout_secs, tmp_file,
session=session, host=self._config.vmware_host_ip,
port=self._config.vmware_host_port, resource_pool=rp,
vm_folder=folder, vm_import_spec=import_spec,
image_size=file_size_bytes)
download_data.side_effect = exceptions.VimException("error")
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
self.assertRaises(
exceptions.VimException,
self._driver._create_backing_from_stream_optimized_file,
context, name, volume, tmp_file_path, file_size_bytes)
delete_temp_backing.assert_called_once_with(backing)
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch('oslo_vmware.vim_util.get_vc_version')
def test_get_vc_version(self, get_vc_version, session):
@@ -2355,27 +2054,6 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
def test_create_backing(self):
self._test_create_backing()
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('oslo_utils.fileutils.delete_if_exists')
@mock.patch('tempfile.mkstemp')
@mock.patch('os.close')
def test_temporary_file(
self, close, mkstemp, delete_if_exists, ensure_tree):
fd = mock.sentinel.fd
tmp = mock.sentinel.tmp
mkstemp.return_value = (fd, tmp)
prefix = ".vmdk"
suffix = "test"
with self._driver._temporary_file(prefix=prefix,
suffix=suffix) as tmp_file:
self.assertEqual(tmp, tmp_file)
ensure_tree.assert_called_once_with(self.TMP_DIR)
mkstemp.assert_called_once_with(dir=self.TMP_DIR,
prefix=prefix,
suffix=suffix)
close.assert_called_once_with(fd)
delete_if_exists.assert_called_once_with(tmp)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_get_hosts(self, vops):
host_1 = mock.sentinel.host_1

View File

@@ -189,42 +189,6 @@ class GenericVolumeDriverTestCase(BaseDriverTestCase):
"""Test case for VolumeDriver."""
driver_name = "cinder.tests.fake_driver.FakeLoggingVolumeDriver"
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_available(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup_obj = tests_utils.create_backup(self.context,
vol['id'])
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver.create_snapshot = mock.MagicMock()
self.volume.driver.delete_snapshot = mock.MagicMock()
mock_volume_get.return_value = vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
def test_create_temp_cloned_volume(self):
with mock.patch.object(
self.volume.driver,
@@ -245,82 +209,6 @@ class GenericVolumeDriverTestCase(BaseDriverTestCase):
self.context, vol)
self.assertEqual('available', cloned_vol.status)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_inuse_temp_volume(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
temp_vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup_obj = tests_utils.create_backup(self.context,
vol['id'])
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver._create_temp_snapshot = mock.MagicMock()
self.volume.driver._delete_temp_snapshot = mock.MagicMock()
mock_volume_get.return_value = vol
self.volume.driver._create_temp_snapshot.return_value = temp_vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
self.volume.driver._create_temp_snapshot.assert_called_once_with(
self.context, vol)
self.volume.driver._delete_temp_snapshot.assert_called_once_with(
self.context, temp_vol)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch('six.moves.builtins.open')
def test_restore_backup(self,
mock_open,
mock_get_connector_properties,
mock_temporary_chown):
dev_null = '/dev/null'
vol = tests_utils.create_volume(self.context)
backup = {'volume_id': vol['id'], 'id': 'backup-for-%s' % vol['id']}
properties = {}
attach_info = {'device': {'path': dev_null}}
volume_file = mock.MagicMock()
mock_open.return_value.__enter__.return_value = volume_file
mock_get_connector_properties.return_value = properties
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver.secure_file_operations_enabled = mock.MagicMock()
self.volume.driver.secure_file_operations_enabled.side_effect = (False,
True)
backup_service = mock.MagicMock()
self.volume.driver.restore_backup(self.context, backup, vol,
backup_service)
backup_service.restore.assert_called_with(backup, vol['id'],
volume_file)
self.assertEqual(1, backup_service.restore.call_count)
def test_get_backup_device_available(self):
vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID

View File

@@ -1222,181 +1222,6 @@ class BaseVD(object):
return (device_to_backup, is_snapshot)
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
# NOTE(xyang): _backup_volume_temp_snapshot and
# _backup_volume_temp_volume are split into two
# functions because there were concerns during code
# reviews that it is confusing to put all the logic
# into one function. There's a trade-off between
# reducing code duplication and increasing code
# readability here. Added a note here to explain why
# we've decided to have two separate functions as
# there will always be arguments from both sides.
if self.backup_use_temp_snapshot():
self._backup_volume_temp_snapshot(context, backup,
backup_service)
else:
self._backup_volume_temp_volume(context, backup,
backup_service)
def _backup_volume_temp_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume or snapshot.
To backup a snapshot, create a temp volume from the snapshot and
back it up.
Otherwise to backup an in-use volume, create a temp volume and
back it up.
"""
volume = self.db.volume_get(context, backup.volume_id)
snapshot = None
if backup.snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
LOG.debug('Creating a new backup for volume %s.', volume['name'])
temp_vol_ref = None
device_to_backup = volume
# NOTE(xyang): If it is to backup from snapshot, create a temp
# volume from the source snapshot, backup the temp volume, and
# then clean up the temp volume.
if snapshot:
temp_vol_ref = self._create_temp_volume_from_snapshot(
context, volume, snapshot)
backup.temp_volume_id = temp_vol_ref.id
backup.save()
device_to_backup = temp_vol_ref
else:
# NOTE(xyang): Check volume status if it is not to backup from
# snapshot; if 'in-use', create a temp volume from the source
# volume, backup the temp volume, and then clean up the temp
# volume; if 'available', just backup the volume.
previous_status = volume.get('previous_status')
if previous_status == "in-use":
temp_vol_ref = self._create_temp_cloned_volume(
context, volume)
backup.temp_volume_id = temp_vol_ref.id
backup.save()
device_to_backup = temp_vol_ref
self._backup_device(context, backup, backup_service, device_to_backup)
if temp_vol_ref:
self._delete_temp_volume(context, temp_vol_ref)
backup.temp_volume_id = None
backup.save()
def _backup_volume_temp_snapshot(self, context, backup, backup_service):
"""Create a new backup from an existing volume or snapshot.
If it is to backup from snapshot, back it up directly.
Otherwise for in-use volume, create a temp snapshot and back it up.
"""
volume = self.db.volume_get(context, backup.volume_id)
snapshot = None
if backup.snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
LOG.debug('Creating a new backup for volume %s.', volume['name'])
device_to_backup = volume
is_snapshot = False
temp_snapshot = None
# NOTE(xyang): If it is to backup from snapshot, back it up
# directly. No need to clean it up.
if snapshot:
device_to_backup = snapshot
is_snapshot = True
else:
# NOTE(xyang): If it is not to backup from snapshot, check volume
# status. If the volume status is 'in-use', create a temp snapshot
# from the source volume, backup the temp snapshot, and then clean
# up the temp snapshot; if the volume status is 'available', just
# backup the volume.
previous_status = volume.get('previous_status')
if previous_status == "in-use":
temp_snapshot = self._create_temp_snapshot(context, volume)
backup.temp_snapshot_id = temp_snapshot.id
backup.save()
device_to_backup = temp_snapshot
is_snapshot = True
self._backup_device(context, backup, backup_service, device_to_backup,
is_snapshot)
if temp_snapshot:
self._delete_temp_snapshot(context, temp_snapshot)
backup.temp_snapshot_id = None
backup.save()
def _backup_device(self, context, backup, backup_service, device,
is_snapshot=False):
"""Create a new backup from a volume or snapshot."""
LOG.debug('Creating a new backup for %s.', device['name'])
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
properties = utils.brick_get_connector_properties(use_multipath,
enforce_multipath)
if is_snapshot:
attach_info, device = self._attach_snapshot(context, device,
properties)
else:
attach_info, device = self._attach_volume(context, device,
properties)
try:
device_path = attach_info['device']['path']
# Secure network file systems will not chown files.
if self.secure_file_operations_enabled():
with open(device_path) as device_file:
backup_service.backup(backup, device_file)
else:
with utils.temporary_chown(device_path):
with open(device_path) as device_file:
backup_service.backup(backup, device_file)
finally:
if is_snapshot:
self._detach_snapshot(context, attach_info, device, properties)
else:
self._detach_volume(context, attach_info, device, properties)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
LOG.debug(('Restoring backup %(backup)s to '
'volume %(volume)s.'),
{'backup': backup['id'],
'volume': volume['name']})
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
properties = utils.brick_get_connector_properties(use_multipath,
enforce_multipath)
attach_info, volume = self._attach_volume(context, volume, properties)
try:
volume_path = attach_info['device']['path']
# Secure network file systems will not chown files.
if self.secure_file_operations_enabled():
with open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
else:
with utils.temporary_chown(volume_path):
with open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'],
volume_file)
finally:
self._detach_volume(context, attach_info, volume, properties)
def _create_temp_snapshot(self, context, volume):
kwargs = {
'volume_id': volume['id'],
@@ -1475,18 +1300,6 @@ class BaseVD(object):
temp_vol_ref.save()
return temp_vol_ref
def _delete_temp_snapshot(self, context, snapshot):
self.delete_snapshot(snapshot)
with snapshot.obj_as_admin():
self.db.volume_glance_metadata_delete_by_snapshot(
context, snapshot.id)
snapshot.destroy()
def _delete_temp_volume(self, context, volume):
self.delete_volume(volume)
context = context.elevated()
self.db.volume_destroy(context, volume['id'])
def clear_download(self, context, volume):
"""Clean up after an interrupted image copy."""
pass
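
The NOTE(xyang) comments in the removed BaseVD code above explain how a backup source was chosen: a snapshot is backed up directly, an 'in-use' volume first gets a temporary snapshot (or temporary volume), and an 'available' volume is backed up as-is. Below is a minimal, self-contained sketch of that selection logic, assuming a create_temp_snapshot callable is supplied by the caller; the names mirror the diff, but this is an illustration rather than the removed implementation.

def choose_backup_source(volume, snapshot, create_temp_snapshot):
    """Return (device_to_backup, is_snapshot, temp_snapshot).

    temp_snapshot is non-None only when a temporary snapshot was created
    and must be cleaned up by the caller, mirroring the removed
    _backup_volume_temp_snapshot flow.
    """
    if snapshot:
        # Backing up from an existing snapshot: use it directly.
        return snapshot, True, None
    if volume.get('previous_status') == 'in-use':
        # In-use volume: snapshot it first and back up the snapshot.
        temp_snapshot = create_temp_snapshot(volume)
        return temp_snapshot, True, temp_snapshot
    # Available volume: back it up as-is.
    return volume, False, None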

View File

@@ -526,12 +526,6 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
"""
self.discard_zero_page(dest_vol)
def restore_backup(self, context, backup, volume, backup_service):
self.do_setup_status.wait()
super(HBSDFCDriver, self).restore_backup(context, backup,
volume, backup_service)
self.discard_zero_page(volume)
def manage_existing(self, volume, existing_ref):
return self.common.manage_existing(volume, existing_ref)

View File

@@ -972,44 +972,6 @@ class GPFSDriver(driver.CloneableImageVD,
image_meta,
self.local_path(volume))
def _create_backup_source(self, volume, backup):
src_path = self._get_volume_path(volume)
dest_path = '%s_%s' % (src_path, backup['id'])
self._create_gpfs_clone(src_path, dest_path)
self._gpfs_redirect(src_path)
return dest_path
def _do_backup(self, backup_path, backup, backup_service):
with utils.temporary_chown(backup_path):
with open(backup_path) as backup_file:
backup_service.backup(backup, backup_file)
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_path = self.local_path(volume)
backup_path = '%s_%s' % (volume_path, backup['id'])
# create a snapshot that will be used as the backup source
self._create_backup_source(volume, backup)
try:
LOG.debug('Begin backup of volume %s.', volume['name'])
self._do_backup(backup_path, backup, backup_service)
finally:
# clean up snapshot file. If it is a clone parent, delete
# will fail silently, but be cleaned up when volume is
# eventually removed. This ensures we do not accumulate
# more than gpfs_max_clone_depth snap files.
self._delete_gpfs_file(backup_path)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
LOG.debug('Begin restore of backup %s.', backup['id'])
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def _migrate_volume(self, volume, host):
"""Migrate vol if source and dest are managed by same GPFS cluster."""
LOG.debug('Migrate volume request %(vol)s to %(host)s.',
@@ -1531,21 +1493,3 @@ class GPFSNFSDriver(GPFSDriver, nfs.NfsDriver, san.SanDriver):
volume['provider_location'] = self._find_share(volume['size'])
self._resize_volume_file(volume, volume['size'])
return {'provider_location': volume['provider_location']}
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_path = self.local_path(volume)
backup_path = '%s_%s' % (volume_path, backup['id'])
# create a snapshot that will be used as the backup source
backup_remote_path = self._create_backup_source(volume, backup)
try:
LOG.debug('Begin backup of volume %s.', volume['name'])
self._do_backup(backup_path, backup, backup_service)
finally:
# clean up snapshot file. If it is a clone parent, delete
# will fail silently, but be cleaned up when volume is
# eventually removed. This ensures we do not accumulate
# more than gpfs_max_clone_depth snap files.
backup_mount_path = os.path.dirname(backup_path)
self._delete_gpfs_file(backup_remote_path, backup_mount_path)

View File

@@ -32,7 +32,6 @@ from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import objects
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
@@ -534,49 +533,6 @@ class LVMVolumeDriver(driver.VolumeDriver):
image_service):
return None, False
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup.volume_id)
snapshot = None
if backup.snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
temp_snapshot = None
# NOTE(xyang): If it is to backup from snapshot, back it up
# directly. No need to clean it up.
if snapshot:
volume_path = self.local_path(snapshot)
else:
# NOTE(xyang): If it is not to backup from snapshot, check volume
# status. If the volume status is 'in-use', create a temp snapshot
# from the source volume, backup the temp snapshot, and then clean
# up the temp snapshot; if the volume status is 'available', just
# backup the volume.
previous_status = volume.get('previous_status', None)
if previous_status == "in-use":
temp_snapshot = self._create_temp_snapshot(context, volume)
backup.temp_snapshot_id = temp_snapshot.id
backup.save()
volume_path = self.local_path(temp_snapshot)
else:
volume_path = self.local_path(volume)
try:
with utils.temporary_chown(volume_path):
with open(volume_path) as volume_file:
backup_service.backup(backup, volume_file)
finally:
if temp_snapshot:
self._delete_temp_snapshot(context, temp_snapshot)
backup.temp_snapshot_id = None
backup.save()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def get_volume_stats(self, refresh=False):
"""Get volume status.

View File

@@ -20,7 +20,6 @@ import os
import tempfile
from eventlet import tpool
from os_brick.initiator import linuxrbd
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
@@ -1179,34 +1178,6 @@ class RBDDriver(driver.CloneableImageVD,
image_meta, tmp_file)
os.unlink(tmp_file)
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup.volume_id)
with RBDVolumeProxy(self, volume.name,
self.configuration.rbd_pool) as rbd_image:
rbd_meta = linuxrbd.RBDImageMetadata(
rbd_image, self.configuration.rbd_pool,
self.configuration.rbd_user,
self.configuration.rbd_ceph_conf)
rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta)
backup_service.backup(backup, rbd_fd)
LOG.debug("volume backup complete.")
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
with RBDVolumeProxy(self, volume.name,
self.configuration.rbd_pool) as rbd_image:
rbd_meta = linuxrbd.RBDImageMetadata(
rbd_image, self.configuration.rbd_pool,
self.configuration.rbd_user,
self.configuration.rbd_ceph_conf)
rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta)
backup_service.restore(backup, volume.id, rbd_fd)
LOG.debug("volume restore complete.")
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
old_size = volume.size

View File

@@ -638,42 +638,3 @@ class SheepdogDriver(driver.VolumeDriver):
self.client.resize(volume.name, new_size)
LOG.debug('Extend volume from %(old_size)s GB to %(new_size)s GB.',
{'old_size': volume.size, 'new_size': new_size})
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
src_volume = self.db.volume_get(context, backup.volume_id)
temp_snapshot_name = 'tmp-snap-%s' % src_volume.name
# NOTE(tishizaki): If a previous backup_volume operation failed,
# a temporary snapshot from that operation may still exist,
# so the old snapshot must be deleted before backing up.
# In Sheepdog 0.9 and later, 'delete_snapshot' succeeds even when
# the target snapshot does not exist.
# In Sheepdog 0.8 and earlier, however, 'delete_snapshot' fails and
# raises ProcessExecutionError when the target snapshot
# does not exist.
try:
self.client.delete_snapshot(src_volume.name, temp_snapshot_name)
except (exception.SheepdogCmdError):
pass
try:
self.client.create_snapshot(src_volume.name, temp_snapshot_name)
except (exception.SheepdogCmdError, OSError):
msg = (_('Failed to create a temporary snapshot for volume %s.')
% src_volume.id)
LOG.exception(msg)
raise exception.SheepdogError(reason=msg)
try:
sheepdog_fd = SheepdogIOWrapper(self.client.get_addr(), self.port,
src_volume, temp_snapshot_name)
backup_service.backup(backup, sheepdog_fd)
finally:
self.client.delete_snapshot(src_volume.name, temp_snapshot_name)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
sheepdog_fd = SheepdogIOWrapper(self.client.get_addr(),
self.port, volume)
backup_service.restore(backup, volume['id'], sheepdog_fd)
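
The NOTE(tishizaki) comment above explains the pre-cleanup in the removed Sheepdog backup_volume: a stale temporary snapshot left behind by an earlier failed backup is deleted up front, and the deletion error is swallowed because Sheepdog 0.8 and earlier fail when the snapshot is missing while 0.9 and later succeed. Here is a minimal sketch of that pattern with the client and exception type passed in by the caller; only the delete_snapshot/create_snapshot calls are taken from the diff, everything else is illustrative.

def refresh_temp_snapshot(client, volume_name, snap_name, cmd_error_cls):
    """Delete a possibly stale temp snapshot, then create a fresh one."""
    try:
        client.delete_snapshot(volume_name, snap_name)
    except cmd_error_cls:
        # A leftover snapshot may or may not exist; either outcome is fine,
        # so a failed delete on older Sheepdog releases is ignored here.
        pass
    client.create_snapshot(volume_name, snap_name)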

View File

@@ -22,15 +22,11 @@ driver creates a virtual machine for each of the volumes. This virtual
machine is never powered on and is often referred to as the shadow VM.
"""
import contextlib
import math
import os
import tempfile
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_utils import versionutils
@@ -1484,247 +1480,6 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
"%(size)s GB.",
{'vol': vol_name, 'size': new_size})
@contextlib.contextmanager
def _temporary_file(self, *args, **kwargs):
"""Create a temporary file and return its path."""
tmp_dir = self.configuration.vmware_tmp_dir
fileutils.ensure_tree(tmp_dir)
fd, tmp = tempfile.mkstemp(
dir=self.configuration.vmware_tmp_dir, *args, **kwargs)
try:
os.close(fd)
yield tmp
finally:
fileutils.delete_if_exists(tmp)
def _download_vmdk(self, context, volume, backing, tmp_file_path):
"""Download virtual disk in streamOptimized format."""
timeout = self.configuration.vmware_image_transfer_timeout_secs
host_ip = self.configuration.vmware_host_ip
port = self.configuration.vmware_host_port
vmdk_ds_file_path = self.volumeops.get_vmdk_path(backing)
with open(tmp_file_path, "wb") as tmp_file:
image_transfer.copy_stream_optimized_disk(
context,
timeout,
tmp_file,
session=self.session,
host=host_ip,
port=port,
vm=backing,
vmdk_file_path=vmdk_ds_file_path,
vmdk_size=volume['size'] * units.Gi)
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
LOG.debug("Creating backup: %(backup_id)s for volume: %(name)s.",
{'backup_id': backup['id'],
'name': volume['name']})
backing = self.volumeops.get_backing(volume['name'])
if backing is None:
LOG.debug("Creating backing for volume: %s.", volume['name'])
backing = self._create_backing(volume)
tmp_vmdk_name = uuidutils.generate_uuid()
with self._temporary_file(suffix=".vmdk",
prefix=tmp_vmdk_name) as tmp_file_path:
# TODO(vbala) Clean up vmware_tmp_dir during driver init.
LOG.debug("Using temporary file: %(tmp_path)s for creating backup:"
" %(backup_id)s.",
{'tmp_path': tmp_file_path,
'backup_id': backup['id']})
self._download_vmdk(context, volume, backing, tmp_file_path)
with open(tmp_file_path, "rb") as tmp_file:
LOG.debug("Calling backup service to backup file: %s.",
tmp_file_path)
backup_service.backup(backup, tmp_file)
LOG.debug("Created backup: %(backup_id)s for volume: "
"%(name)s.",
{'backup_id': backup['id'],
'name': volume['name']})
def _create_backing_from_stream_optimized_file(
self, context, name, volume, tmp_file_path, file_size_bytes):
"""Create backing from streamOptimized virtual disk file."""
LOG.debug("Creating backing: %(name)s from virtual disk: %(path)s.",
{'name': name,
'path': tmp_file_path})
(_host, rp, folder, summary) = self._select_ds_for_volume(volume)
LOG.debug("Selected datastore: %(ds)s for backing: %(name)s.",
{'ds': summary.name,
'name': name})
# Prepare import spec for backing.
cf = self.session.vim.client.factory
vm_import_spec = cf.create('ns0:VirtualMachineImportSpec')
profile_id = self._get_storage_profile_id(volume)
disk_type = VMwareVcVmdkDriver._get_disk_type(volume)
extra_config = self._get_extra_config(volume)
# We cannot determine the size of a virtual disk created from
# streamOptimized disk image. Set size to 0 and let vCenter
# figure out the size after virtual disk creation.
vm_create_spec = self.volumeops.get_create_spec(
name, 0, disk_type, summary.name, profileId=profile_id,
extra_config=extra_config)
vm_import_spec.configSpec = vm_create_spec
timeout = self.configuration.vmware_image_transfer_timeout_secs
host_ip = self.configuration.vmware_host_ip
port = self.configuration.vmware_host_port
try:
with open(tmp_file_path, "rb") as tmp_file:
vm_ref = image_transfer.download_stream_optimized_data(
context,
timeout,
tmp_file,
session=self.session,
host=host_ip,
port=port,
resource_pool=rp,
vm_folder=folder,
vm_import_spec=vm_import_spec,
image_size=file_size_bytes)
LOG.debug("Created backing: %(name)s from virtual disk: "
"%(path)s.",
{'name': name,
'path': tmp_file_path})
return vm_ref
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Error occurred while creating temporary "
"backing.")
backing = self.volumeops.get_backing(name)
if backing is not None:
self._delete_temp_backing(backing)
def _restore_backing(
self, context, volume, backing, tmp_file_path, backup_size):
"""Restore backing from backup."""
# Create temporary backing from streamOptimized file.
src_name = uuidutils.generate_uuid()
src = self._create_backing_from_stream_optimized_file(
context, src_name, volume, tmp_file_path, backup_size)
# Copy temporary backing for desired disk type conversion.
new_backing = (backing is None)
if new_backing:
# No backing exists; clone can be used as the volume backing.
dest_name = volume['name']
else:
# Backing exists; clone can be used as the volume backing only
# after deleting the current backing.
dest_name = uuidutils.generate_uuid()
dest = None
tmp_backing_name = None
renamed = False
try:
# Find datastore for clone.
(host, rp, folder, summary) = self._select_ds_for_volume(volume)
datastore = summary.datastore
disk_type = VMwareVcVmdkDriver._get_disk_type(volume)
dest = self.volumeops.clone_backing(dest_name, src, None,
volumeops.FULL_CLONE_TYPE,
datastore, disk_type=disk_type,
host=host, resource_pool=rp,
folder=folder)
self.volumeops.update_backing_disk_uuid(dest, volume['id'])
if new_backing:
LOG.debug("Created new backing: %s for restoring backup.",
dest_name)
return
# Rename current backing.
tmp_backing_name = uuidutils.generate_uuid()
self.volumeops.rename_backing(backing, tmp_backing_name)
renamed = True
# Rename clone in order to treat it as the volume backing.
self.volumeops.rename_backing(dest, volume['name'])
# Now we can delete the old backing.
self._delete_temp_backing(backing)
LOG.debug("Deleted old backing and renamed clone for restoring "
"backup.")
except (exceptions.VimException, exceptions.VMwareDriverException):
with excutils.save_and_reraise_exception():
if dest is not None:
# Copy happened; we need to delete the clone.
self._delete_temp_backing(dest)
if renamed:
# Old backing was renamed; we need to undo that.
try:
self.volumeops.rename_backing(backing,
volume['name'])
except exceptions.VimException:
LOG.warning("Cannot undo volume rename; old "
"name was %(old_name)s and new "
"name is %(new_name)s.",
{'old_name': volume['name'],
'new_name': tmp_backing_name},
exc_info=True)
finally:
# Delete the temporary backing.
self._delete_temp_backing(src)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume.
This method raises InvalidVolume if the existing volume contains
snapshots since it is not possible to restore the virtual disk of
a backing with snapshots.
"""
LOG.debug("Restoring backup: %(backup_id)s to volume: %(name)s.",
{'backup_id': backup['id'],
'name': volume['name']})
backing = self.volumeops.get_backing(volume['name'])
if backing is not None and self.volumeops.snapshot_exists(backing):
msg = _("Volume cannot be restored since it contains snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
tmp_vmdk_name = uuidutils.generate_uuid()
with self._temporary_file(suffix=".vmdk",
prefix=tmp_vmdk_name) as tmp_file_path:
LOG.debug("Using temporary file: %(tmp_path)s for restoring "
"backup: %(backup_id)s.",
{'tmp_path': tmp_file_path,
'backup_id': backup['id']})
with open(tmp_file_path, "wb") as tmp_file:
LOG.debug("Calling backup service to restore backup: "
"%(backup_id)s to file: %(tmp_path)s.",
{'backup_id': backup['id'],
'tmp_path': tmp_file_path})
backup_service.restore(backup, volume['id'], tmp_file)
LOG.debug("Backup: %(backup_id)s restored to file: "
"%(tmp_path)s.",
{'backup_id': backup['id'],
'tmp_path': tmp_file_path})
self._restore_backing(context, volume, backing, tmp_file_path,
backup['size'] * units.Gi)
if backup['size'] < volume['size']:
# Current backing size is backup size.
LOG.debug("Backup size: %(backup_size)d is less than "
"volume size: %(vol_size)d; extending volume.",
{'backup_size': backup['size'],
'vol_size': volume['size']})
self.extend_volume(volume, volume['size'])
LOG.debug("Backup: %(backup_id)s restored to volume: "
"%(name)s.",
{'backup_id': backup['id'],
'name': volume['name']})
def _get_disk_device(self, vmdk_path, vm_inv_path):
# Get the VM that corresponds to the given inventory path.
vm = self.volumeops.get_entity_by_inventory_path(vm_inv_path)
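
Every removed driver method in this commit drives the backup service through the same file-object protocol: backup_volume hands an open, readable file-like object to backup_service.backup(backup, volume_file), and restore_backup hands a writable one to backup_service.restore(backup, volume_id, volume_file). The tiny in-memory stand-in below satisfies that interface and shows the data flow; it is purely illustrative and not part of Cinder.

class FakeBackupService(object):
    """In-memory backup service matching the calls made in the diff."""

    def __init__(self):
        self._chunks = []

    def backup(self, backup, volume_file):
        # Read the attached volume, snapshot, or temporary file.
        self._chunks.append(volume_file.read())

    def restore(self, backup, volume_id, volume_file):
        # Write the previously captured bytes back to the target.
        for chunk in self._chunks:
            volume_file.write(chunk)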