Merge "Add support to incremental backups in cinder"

Jenkins
2015-03-06 19:48:11 +00:00
committed by Gerrit Code Review
16 changed files with 976 additions and 44 deletions

View File

@@ -1,4 +1,6 @@
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -38,6 +40,7 @@ def make_backup(elem):
elem.set('status')
elem.set('size')
elem.set('container')
elem.set('parent_id')
elem.set('volume_id')
elem.set('object_count')
elem.set('availability_zone')
@@ -106,7 +109,8 @@ class CreateDeserializer(wsgi.MetadataXMLDeserializer):
backup_node = self.find_first_child_named(node, 'backup')
attributes = ['container', 'display_name',
'display_description', 'volume_id',
'parent_id']
for attr in attributes:
if backup_node.getAttribute(attr):
@@ -248,6 +252,7 @@ class BackupsController(wsgi.Controller):
container = backup.get('container', None)
name = backup.get('name', None)
description = backup.get('description', None)
incremental = backup.get('incremental', False)
LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
" %(container)s"),
@@ -256,7 +261,8 @@ class BackupsController(wsgi.Controller):
try:
new_backup = self.backup_api.create(context, name, description,
volume_id, container,
incremental)
except exception.InvalidVolume as error:
raise exc.HTTPBadRequest(explanation=error.msg)
except exception.VolumeNotFound as error:
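For context, a minimal client-side sketch of how the new flag is used (the endpoint shape and field names come from this change and its tests; the host and volume id are illustrative):

    import json
    import urllib2  # Python 2, matching the codebase

    body = {"backup": {"volume_id": "1234-5678-1234-8888",
                       "container": "nightlybackups",
                       "incremental": True}}
    req = urllib2.Request("http://cinder-api:8776/v2/fake/backups",
                          json.dumps(body),
                          {"Content-Type": "application/json"})
    # urllib2.urlopen(req) should return 202 with the new backup's id;
    # omitting "incremental" keeps the existing full-backup behavior.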

View File

@@ -1,4 +1,6 @@
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -69,6 +71,13 @@ class API(base.Base):
msg = _('Backup status must be available or error')
raise exception.InvalidBackup(reason=msg)
# Don't allow backup to be deleted if there are incremental
# backups dependent on it.
deltas = self.get_all(context, {'parent_id': backup['id']})
if deltas and len(deltas):
msg = _('Incremental backups exist for this backup.')
raise exception.InvalidBackup(reason=msg)
self.db.backup_update(context, backup_id, {'status': 'deleting'})
self.backup_rpcapi.delete_backup(context,
backup['host'],
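A minimal sketch of the new delete guard, assuming the get_all(context, search_opts) helper of cinder.backup.api.API (can_delete is a name chosen here for illustration):

    def can_delete(api, context, backup):
        # A backup is deletable only if no other backup names it as parent.
        deltas = api.get_all(context, {'parent_id': backup['id']})
        return not deltas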
@@ -112,13 +121,15 @@ class API(base.Base):
return [srv['host'] for srv in services if not srv['disabled']]
def create(self, context, name, description, volume_id,
container, incremental=False, availability_zone=None):
"""Make the RPC call to create a volume backup."""
check_policy(context, 'create')
volume = self.volume_api.get(context, volume_id)
if volume['status'] != "available":
msg = _('Volume to be backed up must be available')
raise exception.InvalidVolume(reason=msg)
volume_host = volume_utils.extract_host(volume['host'], 'host')
if not self._is_backup_service_enabled(volume, volume_host):
raise exception.ServiceNotFound(service_id='cinder-backup')
@@ -160,6 +171,26 @@ class API(base.Base):
raise exception.BackupLimitExceeded(
allowed=quotas[over])
# Find the latest backup of the volume and use it as the parent
# backup to do an incremental backup.
latest_backup = None
if incremental:
backups = self.db.backup_get_all_by_volume(context.elevated(),
volume_id)
if backups:
latest_backup = max(backups, key=lambda x: x['created_at'])
else:
msg = _('No backups available to do an incremental backup.')
raise exception.InvalidBackup(reason=msg)
parent_id = None
if latest_backup:
parent_id = latest_backup['id']
if latest_backup['status'] != "available":
msg = _('The parent backup must be available for '
'incremental backup.')
raise exception.InvalidBackup(reason=msg)
self.db.volume_update(context, volume_id, {'status': 'backing-up'})
options = {'user_id': context.user_id,
'project_id': context.project_id,
@@ -168,6 +199,7 @@ class API(base.Base):
'volume_id': volume_id,
'status': 'creating',
'container': container,
'parent_id': parent_id,
'size': volume['size'],
'host': volume_host, }
try:
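The parent-selection rule above, restated as a self-contained sketch (pick_parent is a hypothetical helper; the max-by-created_at logic mirrors the code):

    def pick_parent(backups):
        # The most recent backup of the volume becomes the delta's parent.
        if not backups:
            raise ValueError('No backups available to do an incremental backup.')
        parent = max(backups, key=lambda b: b['created_at'])
        if parent['status'] != 'available':
            raise ValueError('The parent backup must be available.')
        return parent['id']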

View File

@@ -1,4 +1,6 @@
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -87,6 +89,11 @@ swiftbackup_service_opts = [
cfg.IntOpt('backup_swift_object_size',
default=52428800,
help='The size in bytes of Swift backup objects'),
cfg.IntOpt('backup_swift_block_size',
default=32768,
help='The size in bytes that changes are tracked '
'for incremental backups. backup_swift_object_size '
'has to be a multiple of backup_swift_block_size.'),
cfg.IntOpt('backup_swift_retry_attempts',
default=3,
help='The number of retries to make for Swift operations'),
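With the defaults above, each Swift object spans 1600 hash blocks; the divisibility requirement keeps object boundaries aligned with hash-block boundaries. A quick check of the constraint the driver enforces:

    object_size = 52428800  # backup_swift_object_size default (50 MiB)
    block_size = 32768      # backup_swift_block_size default (32 KiB)
    assert object_size % block_size == 0
    print(object_size // block_size)  # 1600 sha256 entries per object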
@@ -225,6 +232,11 @@ class SwiftBackupDriver(driver.BackupDriver):
filename = '%s_metadata' % swift_object_name
return filename
def _sha256_filename(self, backup):
swift_object_name = backup['service_metadata']
filename = '%s_sha256file' % swift_object_name
return filename
def _write_metadata(self, backup, volume_id, container, object_list,
volume_meta):
filename = self._metadata_filename(backup)
@@ -239,6 +251,7 @@ class SwiftBackupDriver(driver.BackupDriver):
metadata['backup_description'] = backup['display_description']
metadata['created_at'] = str(backup['created_at'])
metadata['objects'] = object_list
metadata['parent_id'] = backup['parent_id']
metadata['volume_meta'] = volume_meta
metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
reader = six.StringIO(metadata_json)
@@ -253,17 +266,55 @@ class SwiftBackupDriver(driver.BackupDriver):
raise exception.InvalidBackup(reason=err)
LOG.debug('_write_metadata finished')
def _write_sha256file(self, backup, volume_id, container, sha256_list):
filename = self._sha256_filename(backup)
LOG.debug('_write_sha256file started, container name: %(container)s,'
' sha256file filename: %(filename)s',
{'container': container, 'filename': filename})
sha256file = {}
sha256file['version'] = self.DRIVER_VERSION
sha256file['backup_id'] = backup['id']
sha256file['volume_id'] = volume_id
sha256file['backup_name'] = backup['display_name']
sha256file['backup_description'] = backup['display_description']
sha256file['created_at'] = six.text_type(backup['created_at'])
sha256file['chunk_size'] = CONF.backup_swift_block_size
sha256file['sha256s'] = sha256_list
sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2)
reader = six.StringIO(sha256file_json)
etag = self.conn.put_object(container, filename, reader,
content_length=reader.len)
md5 = hashlib.md5(sha256file_json).hexdigest()
if etag != md5:
err = (_('Error writing sha256file to swift. MD5 of sha256file'
' in swift [%(etag)s] is not the same as MD5 of '
'sha256file sent to swift [%(md5)s].')
% {'etag': etag, 'md5': md5})
raise exception.InvalidBackup(reason=err)
LOG.debug('_write_sha256file finished')
def _read_metadata(self, backup):
container = backup['container']
filename = self._metadata_filename(backup)
LOG.debug('_read_metadata started, container name: %(container)s, '
'metadata filename: %(filename)s.',
{'container': container, 'filename': filename})
(_resp, body) = self.conn.get_object(container, filename)
metadata = json.loads(body)
LOG.debug('_read_metadata finished (%s).', metadata)
return metadata
def _read_sha256file(self, backup):
container = backup['container']
filename = self._sha256_filename(backup)
LOG.debug('_read_sha256file started, container name: %(container)s, '
'sha256 filename: %(filename)s.',
{'container': container, 'filename': filename})
(resp, body) = self.conn.get_object(container, filename)
sha256file = json.loads(body)
LOG.debug('_read_sha256file finished (%s).', sha256file)
return sha256file
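Since one digest is stored per backup_swift_block_size bytes, the length of the sha256 list is fully determined by the volume size; the tests below rely on this. A sketch of the invariant:

    def expected_sha_count(volume_size_bytes, chunk_size):
        # One sha256 digest per chunk_size block of volume data.
        return volume_size_bytes // chunk_size

    # e.g. the 128 KiB test volume with the default 32 KiB block size:
    assert expected_sha_count(128 * 1024, 32768) == 4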
def _prepare_backup(self, backup):
"""Prepare the backup process and return the backup metadata."""
backup_id = backup['id']
@@ -297,12 +348,16 @@ class SwiftBackupDriver(driver.BackupDriver):
})
object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,
'volume_meta': None}
object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix}
return object_meta, object_sha256, container, volume_size_bytes
def _backup_chunk(self, backup, container, data, data_offset, object_meta):
"""Backup data chunk based on the object metadata and offset."""
object_prefix = object_meta['prefix']
object_list = object_meta['list']
object_id = object_meta['id']
object_name = '%s-%05d' % (object_prefix, object_id)
obj = {}
@@ -350,14 +405,26 @@ class SwiftBackupDriver(driver.BackupDriver):
object_id += 1
object_meta['list'] = object_list
object_meta['id'] = object_id
LOG.debug('Calling eventlet.sleep(0)')
eventlet.sleep(0)
def _finalize_backup(self, backup, container, object_meta, object_sha256):
"""Finalize the backup by updating its metadata on Swift."""
object_list = object_meta['list']
object_id = object_meta['id']
volume_meta = object_meta['volume_meta']
sha256_list = object_sha256['sha256s']
try:
self._write_sha256file(backup,
backup['volume_id'],
container,
sha256_list)
except socket.error as err:
msg = _("Exception: %s") % err
LOG.error(msg)
raise exception.SwiftConnectionFailed(reason=msg)
try:
self._write_metadata(backup,
backup['volume_id'],
@@ -365,7 +432,10 @@ class SwiftBackupDriver(driver.BackupDriver):
object_list,
volume_meta)
except socket.error as err:
msg = _("Exception: %s") % err
LOG.error(msg)
raise exception.SwiftConnectionFailed(reason=msg)
self.db.backup_update(self.context, backup['id'],
{'object_count': object_id})
LOG.debug('backup %s finished.' % backup['id'])
@@ -404,9 +474,43 @@ class SwiftBackupDriver(driver.BackupDriver):
object_meta)
def backup(self, backup, volume_file, backup_metadata=True):
"""Backup the given volume to Swift."""
(object_meta, container,
volume_size_bytes) = self._prepare_backup(backup)
"""Backup the given volume to Swift.
If backup['parent_id'] is given, then an incremental backup
is performed.
"""
if self.data_block_size_bytes % CONF.backup_swift_block_size:
err = _('Swift object size is not a multiple of '
'the block size for creating hashes.')
raise exception.InvalidBackup(reason=err)
# Read the shafile of the parent backup if backup['parent_id']
# is given.
parent_backup_shafile = None
parent_backup = None
if backup['parent_id']:
parent_backup = self.db.backup_get(self.context,
backup['parent_id'])
parent_backup_shafile = self._read_sha256file(parent_backup)
parent_backup_shalist = parent_backup_shafile['sha256s']
if (parent_backup_shafile['chunk_size'] !=
CONF.backup_swift_block_size):
err = (_('Swift block size has changed since the last '
'backup. New block size: %(new)s. Old block '
'size: %(old)s. Do a full backup.')
% {'old': parent_backup_shafile['chunk_size'],
'new': CONF.backup_swift_block_size})
raise exception.InvalidBackup(reason=err)
# If the volume size increased since the last backup, fail
# the incremental backup and ask user to do a full backup.
if backup['size'] > parent_backup['size']:
err = _('Volume size increased since the last '
'backup. Do a full backup.')
raise exception.InvalidBackup(reason=err)
(object_meta, object_sha256, container,
volume_size_bytes) = self._prepare_backup(backup)
counter = 0
total_block_sent_num = 0
@@ -425,13 +529,62 @@ class SwiftBackupDriver(driver.BackupDriver):
if self.enable_progress_timer:
timer.start(interval=self.backup_timer_interval)
sha256_list = object_sha256['sha256s']
shaindex = 0
while True:
data_offset = volume_file.tell()
data = volume_file.read(self.data_block_size_bytes)
if data == '':
break
# Calculate new shas for the data block.
shalist = []
off = 0
datalen = len(data)
while off < datalen:
chunk_start = off
chunk_end = chunk_start + CONF.backup_swift_block_size
if chunk_end > datalen:
chunk_end = datalen
chunk = data[chunk_start:chunk_end]
sha = hashlib.sha256(chunk).hexdigest()
shalist.append(sha)
off += CONF.backup_swift_block_size
sha256_list.extend(shalist)
# If parent_backup is not None, that means an incremental
# backup will be performed.
if parent_backup:
# Find the extents that need to be backed up.
extent_off = -1
for idx, sha in enumerate(shalist):
if sha != parent_backup_shalist[shaindex]:
if extent_off == -1:
# Start of new extent.
extent_off = idx * CONF.backup_swift_block_size
else:
if extent_off != -1:
# We've reached the end of the extent.
extent_end = idx * CONF.backup_swift_block_size
segment = data[extent_off:extent_end]
self._backup_chunk(backup, container, segment,
data_offset + extent_off,
object_meta)
extent_off = -1
shaindex += 1
# The last extent extends to the end of the data buffer.
if extent_off != -1:
extent_end = datalen
segment = data[extent_off:extent_end]
self._backup_chunk(backup, container, segment,
data_offset + extent_off, object_meta)
extent_off = -1
else: # Do a full backup.
self._backup_chunk(backup, container, data,
data_offset, object_meta)
# Notifications
total_block_sent_num += self.data_block_num
counter += 1
if counter == self.data_block_num:
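The extent detection above can be read in isolation: walk the new and parent digest lists in lockstep, open an extent at the first mismatching block, and close it at the next matching one (or at the end of the buffer). A self-contained sketch under those assumptions (changed_extents is a name chosen here; the driver inlines this logic in backup()):

    import hashlib

    def changed_extents(data, parent_shas, block_size):
        """Yield (offset, segment) pairs for runs of blocks whose
        sha256 differs from the parent backup's digest list."""
        extent_off = -1
        num_blocks = (len(data) + block_size - 1) // block_size
        for idx in range(num_blocks):
            chunk = data[idx * block_size:(idx + 1) * block_size]
            if hashlib.sha256(chunk).hexdigest() != parent_shas[idx]:
                if extent_off == -1:
                    extent_off = idx * block_size  # start of a new extent
            elif extent_off != -1:
                yield extent_off, data[extent_off:idx * block_size]
                extent_off = -1
        if extent_off != -1:  # the last extent runs to the end of data
            yield extent_off, data[extent_off:]

Each yielded segment corresponds to one _backup_chunk call at data_offset plus the extent offset, so unchanged regions are never re-uploaded.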
@@ -442,7 +595,7 @@ class SwiftBackupDriver(driver.BackupDriver):
object_meta,
total_block_sent_num,
volume_size_bytes)
# Reset the counter
counter = 0
# Stop the timer.
@@ -450,17 +603,18 @@ class SwiftBackupDriver(driver.BackupDriver):
# All the data has been sent, so the backup_percent reaches 100.
self._send_progress_end(self.context, backup, object_meta)
object_sha256['sha256s'] = sha256_list
if backup_metadata:
try:
self._backup_metadata(backup, object_meta)
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.exception(
_LE("Backup volume metadata to swift failed: %s") %
six.text_type(err))
_LE("Backup volume metadata to swift failed: %s."),
err)
self.delete(backup)
self._finalize_backup(backup, container, object_meta, object_sha256)
def _restore_v1(self, backup, volume_id, metadata, volume_file):
"""Restore a v1 swift volume backup from swift."""
@@ -471,7 +625,8 @@ class SwiftBackupDriver(driver.BackupDriver):
metadata_object_names = sum((obj.keys() for obj in metadata_objects),
[])
LOG.debug('metadata_object_names = %s' % metadata_object_names)
prune_list = [self._metadata_filename(backup),
self._sha256_filename(backup)]
swift_object_names = [swift_object_name for swift_object_name in
self._generate_object_names(backup)
if swift_object_name not in prune_list]
@@ -497,6 +652,7 @@ class SwiftBackupDriver(driver.BackupDriver):
raise exception.SwiftConnectionFailed(reason=err)
compression_algorithm = metadata_object[object_name]['compression']
decompressor = self._get_compressor(compression_algorithm)
volume_file.seek(metadata_object.values()[0]['offset'])
if decompressor is not None:
LOG.debug('decompressing data using %s algorithm' %
compression_algorithm)
@@ -552,18 +708,37 @@ class SwiftBackupDriver(driver.BackupDriver):
err = (_('No support to restore swift backup version %s')
% metadata_version)
raise exception.InvalidBackup(reason=err)
# Build a list of backups based on parent_id. A full backup
# will be the last one in the list.
backup_list = []
backup_list.append(backup)
current_backup = backup
while current_backup['parent_id']:
prev_backup = (self.db.backup_get(
self.context, current_backup['parent_id']))
backup_list.append(prev_backup)
current_backup = prev_backup
# Do a full restore first, then layer the incremental backups
# on top of it in order.
index = len(backup_list) - 1
while index >= 0:
backup1 = backup_list[index]
index = index - 1
metadata = self._read_metadata(backup1)
restore_func(backup1, volume_id, metadata, volume_file)
volume_meta = metadata.get('volume_meta', None)
try:
if volume_meta:
self.put_metadata(volume_id, volume_meta)
else:
LOG.debug("No volume metadata in this backup.")
except exception.BackupMetadataUnsupportedVersion:
msg = _("Metadata restore failed due to incompatible version.")
LOG.error(msg)
raise exception.BackupOperationError(msg)
LOG.debug('restore %(backup_id)s to %(volume_id)s finished.' %
{'backup_id': backup_id, 'volume_id': volume_id})
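The restore ordering can be summarized as: follow parent_id links back to the full backup, then replay oldest-first so each delta patches the regions it covers. A sketch (restore_chain is a hypothetical helper mirroring the loop above):

    def restore_chain(db, context, backup):
        # Walk parent_id links back to the full backup ...
        chain = [backup]
        while chain[-1]['parent_id']:
            chain.append(db.backup_get(context, chain[-1]['parent_id']))
        # ... and restore the full backup first, newest delta last.
        return list(reversed(chain))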

View File

@@ -775,6 +775,12 @@ def backup_get_all_by_project(context, project_id, filters=None):
filters=filters)
def backup_get_all_by_volume(context, volume_id, filters=None):
"""Get all backups belonging to a volume."""
return IMPL.backup_get_all_by_volume(context, volume_id,
filters=filters)
def backup_update(context, backup_id, values):
"""Set the given properties on a backup and update it.

View File

@@ -3008,6 +3008,20 @@ def backup_get_all_by_project(context, project_id, filters=None):
return _backup_get_all(context, filters)
@require_context
def backup_get_all_by_volume(context, volume_id, filters=None):
authorize_project_context(context, volume_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['volume_id'] = volume_id
return _backup_get_all(context, filters)
@require_context
def backup_create(context, values):
backup = models.Backup()

View File

@@ -0,0 +1,51 @@
# Copyright 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, String, Table
from cinder.i18n import _LE
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
backups = Table('backups', meta, autoload=True)
parent_id = Column('parent_id', String(length=36))
try:
backups.create_column(parent_id)
backups.update().values(parent_id=None).execute()
except Exception:
LOG.error(_LE("Adding parent_id column to backups table failed."))
raise
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
backups = Table('backups', meta, autoload=True)
parent_id = backups.columns.parent_id
try:
backups.drop_column(parent_id)
except Exception:
LOG.error(_LE("Dropping parent_id column from backups table failed."))
raise

View File

@@ -496,6 +496,7 @@ class Backup(BASE, CinderBase):
display_name = Column(String(255))
display_description = Column(String(255))
container = Column(String(255))
parent_id = Column(String(36))
status = Column(String(255))
fail_reason = Column(String(255))
service_metadata = Column(String(255))

View File

@@ -58,6 +58,9 @@ class BackupsAPITestCase(test.TestCase):
display_description='this is a test backup',
container='volumebackups',
status='creating',
snapshot=False,
incremental=False,
parent_id=None,
size=0, object_count=0, host='testhost'):
"""Create a backup object."""
backup = {}
@@ -73,6 +76,9 @@ class BackupsAPITestCase(test.TestCase):
backup['fail_reason'] = ''
backup['size'] = size
backup['object_count'] = object_count
backup['snapshot'] = snapshot
backup['incremental'] = incremental
backup['parent_id'] = parent_id
return db.backup_create(context.get_admin_context(), backup)['id']
@staticmethod
@@ -406,6 +412,36 @@ class BackupsAPITestCase(test.TestCase):
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_snapshot_json(self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5,
status='available')['id']
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(res.status_int, 202)
self.assertIn('id', res_dict['backup'])
self.assertTrue(_mock_service_get_all_by_topic.called)
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_xml(self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
@@ -431,6 +467,72 @@ class BackupsAPITestCase(test.TestCase):
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_delta(self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5)['id']
backup_id = self._create_backup(volume_id, status="available")
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
"incremental": True,
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(202, res.status_int)
self.assertIn('id', res_dict['backup'])
self.assertTrue(_mock_service_get_all_by_topic.called)
db.backup_destroy(context.get_admin_context(), backup_id)
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_incremental_backup_invalid_status(
self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5)['id']
backup_id = self._create_backup(volume_id)
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
"incremental": True,
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertEqual('Invalid backup: The parent backup must be '
'available for incremental backup.',
res_dict['badRequest']['message'])
db.backup_destroy(context.get_admin_context(), backup_id)
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_backup_with_no_body(self):
# omit body from the request
req = webob.Request.blank('/v2/fake/backups')
@@ -511,6 +613,30 @@ class BackupsAPITestCase(test.TestCase):
'Invalid volume: Volume to be backed up must'
' be available')
def test_create_backup_with_InvalidVolume2(self):
# need to create the volume referenced below first
volume_id = utils.create_volume(self.context, size=5,
status='in-use')['id']
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid volume: Volume to be backed up must'
' be available')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_backup_WithOUT_enabled_backup_service(
self,
@@ -542,6 +668,39 @@ class BackupsAPITestCase(test.TestCase):
volume = self.volume_api.get(context.get_admin_context(), volume_id)
self.assertEqual(volume['status'], 'available')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_create_incremental_backup_invalid_no_full(
self, _mock_service_get_all_by_topic):
_mock_service_get_all_by_topic.return_value = [
{'availability_zone': "fake_az", 'host': 'test_host',
'disabled': 0, 'updated_at': timeutils.utcnow()}]
volume_id = utils.create_volume(self.context, size=5,
status='available')['id']
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
"incremental": True,
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertEqual('Invalid backup: No backups available to do '
'an incremental backup.',
res_dict['badRequest']['message'])
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.db.service_get_all_by_topic')
def test_is_backup_service_enabled(self, _mock_service_get_all_by_topic):
@@ -623,6 +782,23 @@ class BackupsAPITestCase(test.TestCase):
db.backup_destroy(context.get_admin_context(), backup_id)
def test_delete_delta_backup(self):
backup_id = self._create_backup(status='available')
delta_id = self._create_backup(status='available',
incremental=True)
req = webob.Request.blank('/v2/fake/backups/%s' %
delta_id)
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(202, res.status_int)
self.assertEqual('deleting',
self._get_backup_attrib(delta_id, 'status'))
db.backup_destroy(context.get_admin_context(), delta_id)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_delete_backup_error(self):
backup_id = self._create_backup(status='error')
req = webob.Request.blank('/v2/fake/backups/%s' %
@@ -666,6 +842,28 @@ class BackupsAPITestCase(test.TestCase):
db.backup_destroy(context.get_admin_context(), backup_id)
def test_delete_backup_with_InvalidBackup2(self):
volume_id = utils.create_volume(self.context, size=5)['id']
backup_id = self._create_backup(volume_id, status="available")
delta_backup_id = self._create_backup(status='available',
incremental=True,
parent_id=backup_id)
req = webob.Request.blank('/v2/fake/backups/%s' %
backup_id)
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertEqual('Invalid backup: Incremental backups '
'exist for this backup.',
res_dict['badRequest']['message'])
db.backup_destroy(context.get_admin_context(), delta_backup_id)
db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_volume_id_specified_json(self):
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first

View File

@@ -87,9 +87,12 @@ class FakeSwiftConnection(object):
metadata['backup_description'] = 'fake backup description'
metadata['created_at'] = '2013-02-19 11:20:54,805'
metadata['objects'] = [{
'backup_001': {'compression': 'zlib', 'length': 10,
'offset': 0},
'backup_002': {'compression': 'zlib', 'length': 10,
'offset': 10},
'backup_003': {'compression': 'zlib', 'length': 10,
'offset': 20}
}]
metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
fake_object_body = metadata_json
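The new 'offset' field is what lets restore place each object at its original position in the volume (see the volume_file.seek call added to the restore path above). A runnable sketch of that patching behavior, with illustrative data:

    import io
    import zlib

    volume_file = io.BytesIO(b'\x00' * 30)
    objects = [{'offset': 0, 'body': zlib.compress(b'A' * 10)},
               {'offset': 20, 'body': zlib.compress(b'C' * 10)}]
    for obj in objects:
        volume_file.seek(obj['offset'])  # jump to where this object belongs
        volume_file.write(zlib.decompress(obj['body']))
    assert volume_file.getvalue() == b'A' * 10 + b'\x00' * 10 + b'C' * 10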

View File

@@ -0,0 +1,106 @@
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2014 TrilioData, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import httplib
import os
import socket
import tempfile
from swiftclient import client as swift
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class FakeSwiftClient2(object):
"""Logs calls instead of executing."""
def __init__(self, *args, **kwargs):
pass
@classmethod
def Connection(self, *args, **kargs):
LOG.debug("fake FakeSwiftClient Connection")
return FakeSwiftConnection2()
class FakeSwiftConnection2(object):
"""Logging calls instead of executing."""
def __init__(self, *args, **kwargs):
self.tempdir = tempfile.mkdtemp()
def head_container(self, container):
LOG.debug("fake head_container(%s)", container)
if container == 'missing_container':
raise swift.ClientException('fake exception',
http_status=httplib.NOT_FOUND)
elif container == 'unauthorized_container':
raise swift.ClientException('fake exception',
http_status=httplib.UNAUTHORIZED)
elif container == 'socket_error_on_head':
raise socket.error(111, 'ECONNREFUSED')
def put_container(self, container):
LOG.debug("fake put_container(%s)", container)
def get_container(self, container, **kwargs):
LOG.debug("fake get_container %(container)s.",
{'container': container})
fake_header = None
container_dir = tempfile.gettempdir() + '/' + container
fake_body = []
for f in os.listdir(container_dir):
try:
f.index(kwargs['prefix'])
fake_body.append({'name': f})
except Exception:
pass
return fake_header, fake_body
def head_object(self, container, name):
LOG.debug("fake head_object %(container)s, %(name)s.",
{'container': container,
'name': name})
return {'etag': 'fake-md5-sum'}
def get_object(self, container, name):
LOG.debug("fake get_object %(container)s, %(name)s.",
{'container': container,
'name': name})
if container == 'socket_error_on_get':
raise socket.error(111, 'ECONNREFUSED')
object_path = tempfile.gettempdir() + '/' + container + '/' + name
with fileutils.file_open(object_path, 'rb') as object_file:
return (None, object_file.read())
def put_object(self, container, name, reader, content_length=None,
etag=None, chunk_size=None, content_type=None,
headers=None, query_string=None):
LOG.debug("fake put_object %(container)s, %(name)s.",
{'container': container,
'name': name})
object_path = tempfile.gettempdir() + '/' + container + '/' + name
data = reader.read()
with fileutils.file_open(object_path, 'wb') as object_file:
object_file.write(data)
return hashlib.md5(data).hexdigest()
def delete_object(self, container, name):
LOG.debug("fake delete_object %(container)s, %(name)s.",
{'container': container,
'name': name})

View File

@@ -80,6 +80,8 @@ class BaseBackupTest(test.TestCase):
backup['status'] = status
backup['fail_reason'] = ''
backup['service'] = CONF.backup_driver
backup['snapshot'] = False
backup['parent_id'] = None
backup['size'] = size
backup['object_count'] = object_count
return db.backup_create(self.ctxt, backup)['id']

View File

@@ -18,8 +18,10 @@ Tests for Backup swift code.
"""
import bz2
import filecmp
import hashlib
import os
import shutil
import tempfile
import zlib
@@ -35,6 +37,7 @@ from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.backup import fake_swift_client
from cinder.tests.backup import fake_swift_client2
LOG = logging.getLogger(__name__)
@@ -60,11 +63,13 @@ class BackupSwiftTestCase(test.TestCase):
'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, container='test-container',
backup_id=123, parent_id=None):
backup = {'id': backup_id,
'size': 1,
'container': container,
'volume_id': '1234-5678-1234-8888',
'parent_id': parent_id}
return db.backup_create(self.ctxt, backup)['id']
def setUp(self):
@@ -81,7 +86,10 @@ class BackupSwiftTestCase(test.TestCase):
self._create_volume_db_entry()
self.volume_file = tempfile.NamedTemporaryFile()
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(self.volume_file.close)
# Remove tempdir.
self.addCleanup(shutil.rmtree, self.temp_dir)
for _i in xrange(0, 128):
self.volume_file.write(os.urandom(1024))
@@ -200,6 +208,191 @@ class BackupSwiftTestCase(test.TestCase):
backup = db.backup_get(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
def test_backup_shafile(self):
def _fake_generate_swift_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
LOG.debug('_generate_swift_object_name_prefix: %s', prefix)
return prefix
# Stub out the Swift object name prefix generator.
self.stubs.Set(swift_dr.SwiftBackupDriver,
'_generate_swift_object_name_prefix',
_fake_generate_swift_object_name_prefix)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(container=container_name)
self.stubs.Set(swift, 'Connection',
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Verify sha contents
content1 = service._read_sha256file(backup)
self.assertEqual(128 * 1024 / content1['chunk_size'],
len(content1['sha256s']))
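(The arithmetic behind the assertion: setUp writes 128 chunks of 1 KiB, i.e. 131072 bytes, and with the default 32 KiB backup_swift_block_size that yields 131072 / 32768 = 4 sha256 entries.)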
def test_backup_cmp_shafiles(self):
def _fake_generate_swift_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
LOG.debug('_generate_swift_object_name_prefix: %s', prefix)
return prefix
# Stub out the Swift object name prefix generator.
self.stubs.Set(swift_dr.SwiftBackupDriver,
'_generate_swift_object_name_prefix',
_fake_generate_swift_object_name_prefix)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(container=container_name, backup_id=123)
self.stubs.Set(swift, 'Connection',
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents
self._create_backup_db_entry(container=container_name, backup_id=124,
parent_id=123)
self.stubs.Set(swift, 'Connection',
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124)
service.backup(deltabackup, self.volume_file)
deltabackup = db.backup_get(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name)
# Compare shas from both files
content1 = service._read_sha256file(backup)
content2 = service._read_sha256file(deltabackup)
self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
def test_backup_delta_two_objects_change(self):
def _fake_generate_swift_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
LOG.debug('_generate_swift_object_name_prefix: %s', prefix)
return prefix
# Stub out the Swift object name prefix generator.
self.stubs.Set(swift_dr.SwiftBackupDriver,
'_generate_swift_object_name_prefix',
_fake_generate_swift_object_name_prefix)
self.flags(backup_swift_object_size=8 * 1024)
self.flags(backup_swift_block_size=1024)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(container=container_name, backup_id=123)
self.stubs.Set(swift, 'Connection',
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Change the volume contents, then create an incremental backup.
self.volume_file.seek(2 * 8 * 1024)
self.volume_file.write(os.urandom(1024))
self.volume_file.seek(4 * 8 * 1024)
self.volume_file.write(os.urandom(1024))
self._create_backup_db_entry(container=container_name, backup_id=124,
parent_id=123)
self.stubs.Set(swift, 'Connection',
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124)
service.backup(deltabackup, self.volume_file)
deltabackup = db.backup_get(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name)
content1 = service._read_sha256file(backup)
content2 = service._read_sha256file(deltabackup)
# Verify that the shas changed at indexes 16 and 32.
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32])
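(Why indexes 16 and 32: the writes land at byte offsets 2 * 8 KiB = 16384 and 4 * 8 KiB = 32768, and with the 1 KiB block size set above those are hash-block indexes 16384 / 1024 = 16 and 32768 / 1024 = 32.)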
def test_backup_delta_two_blocks_in_object_change(self):
def _fake_generate_swift_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
LOG.debug('_generate_swift_object_name_prefix: %s', prefix)
return prefix
# Stub out the Swift object name prefix generator.
self.stubs.Set(swift_dr.SwiftBackupDriver,
'_generate_swift_object_name_prefix',
_fake_generate_swift_object_name_prefix)
self.flags(backup_swift_object_size=8 * 1024)
self.flags(backup_swift_block_size=1024)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(container=container_name, backup_id=123)
self.stubs.Set(swift, 'Connection',
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Change the volume contents, then create an incremental backup.
self.volume_file.seek(16 * 1024)
self.volume_file.write(os.urandom(1024))
self.volume_file.seek(20 * 1024)
self.volume_file.write(os.urandom(1024))
self._create_backup_db_entry(container=container_name, backup_id=124,
parent_id=123)
self.stubs.Set(swift, 'Connection',
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124)
service.backup(deltabackup, self.volume_file)
deltabackup = db.backup_get(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name)
# Verify that the shas changed at indexes 16 and 20.
content1 = service._read_sha256file(backup)
content2 = service._read_sha256file(deltabackup)
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
def test_create_backup_put_object_wraps_socket_error(self):
container_name = 'socket_error_on_put'
self._create_backup_db_entry(container=container_name)
@@ -274,6 +467,54 @@ class BackupSwiftTestCase(test.TestCase):
backup = db.backup_get(self.ctxt, 123)
service.restore(backup, '1234-5678-1234-8888', volume_file)
def test_restore_delta(self):
def _fake_generate_swift_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
LOG.debug('_generate_swift_object_name_prefix: %s', prefix)
return prefix
# Stub out the Swift object name prefix generator.
self.stubs.Set(swift_dr.SwiftBackupDriver,
'_generate_swift_object_name_prefix',
_fake_generate_swift_object_name_prefix)
self.flags(backup_swift_object_size=8 * 1024)
self.flags(backup_swift_block_size=1024)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(container=container_name, backup_id=123)
self.stubs.Set(swift, 'Connection',
fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
# Change the volume contents, then create an incremental backup.
self.volume_file.seek(16 * 1024)
self.volume_file.write(os.urandom(1024))
self.volume_file.seek(20 * 1024)
self.volume_file.write(os.urandom(1024))
self._create_backup_db_entry(container=container_name, backup_id=124,
parent_id=123)
self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124)
service.backup(deltabackup, self.volume_file, True)
deltabackup = db.backup_get(self.ctxt, 124)
with tempfile.NamedTemporaryFile() as restored_file:
backup = db.backup_get(self.ctxt, 124)
service.restore(backup, '1234-5678-1234-8888',
restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
def test_restore_wraps_socket_error(self):
container_name = 'socket_error_on_get'
self._create_backup_db_entry(container=container_name)

View File

@@ -1347,12 +1347,15 @@ class DBAPIBackupTestCase(BaseTest):
'fail_reason': 'test',
'service_metadata': 'metadata',
'service': 'service',
'parent_id': "parent_id",
'size': 1000,
'object_count': 100}
if one:
return base_values
def compose(val, step):
if isinstance(val, bool):
return val
if isinstance(val, str):
step = str(step)
return val + step

View File

@@ -763,6 +763,15 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
"driver_initiator_data")
self.assertFalse(has_table)
def _check_039(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.parent_id.type,
sqlalchemy.types.VARCHAR)
def _post_downgrade_039(self, engine):
backups = db_utils.get_table(engine, 'backups')
self.assertNotIn('parent_id', backups.c)
def test_walk_versions(self):
self.walk_versions(True, False)

View File

@@ -158,7 +158,9 @@ class QuotaIntegrationTestCase(test.TestCase):
'name',
'description',
vol_ref['id'],
'container',
False,
None)
db.backup_destroy(self.context, backup_ref['id'])
db.volume_destroy(self.context, vol_ref['id'])
@@ -198,7 +200,8 @@ class QuotaIntegrationTestCase(test.TestCase):
name='name',
description='description',
volume_id=vol_ref['id'],
container='container',
incremental=False)
db.backup_destroy(self.context, backup_ref['id'])
db.volume_destroy(self.context, vol_ref['id'])
@@ -239,7 +242,9 @@ class QuotaIntegrationTestCase(test.TestCase):
'name',
'description',
vol_ref['id'],
'container',
False,
None)
# Make sure the backup volume_size isn't included in usage.
vol_ref2 = volume.API().create(self.context, 10, '', '')

View File

@@ -78,6 +78,10 @@ CONF = cfg.CONF
ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor'
PLATFORM = sys.platform
fake_opt = [
cfg.StrOpt('fake_opt1', default='fake', help='fake opts')
]
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa'
@@ -4649,14 +4653,13 @@ class LVMVolumeDriverTestCase(DriverTestCase):
def test_delete_volume_invalid_parameter(self, _mock_create_export):
self.configuration.volume_clear = 'zero'
self.configuration.volume_clear_size = 0
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
db=db)
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
# Test volume without 'size' field and 'volume_size' field
self.assertRaises(exception.InvalidParameterValue,
lvm_driver._delete_volume,
@@ -4669,7 +4672,8 @@ class LVMVolumeDriverTestCase(DriverTestCase):
self.configuration.volume_type = 'default'
volume = dict(self.FAKE_VOLUME, size=1)
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
db=db)
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(mox.IgnoreArg()).AndReturn(False)
@@ -4685,7 +4689,8 @@ class LVMVolumeDriverTestCase(DriverTestCase):
self.configuration.lvm_type = 'thin'
self.configuration.iscsi_helper = 'tgtadm'
lvm_driver = lvm.LVMISCSIDriver(configuration=self.configuration,
vg_obj=mox.MockAnything(),
db=db)
# Ensures that copy_volume is not called for ThinLVM
self.mox.StubOutWithMock(volutils, 'copy_volume')
@@ -4701,6 +4706,81 @@ class LVMVolumeDriverTestCase(DriverTestCase):
lvm_driver._delete_volume(fake_snapshot, is_snapshot=True)
def test_check_for_setup_error(self):
def get_all_volume_groups(vg):
return [{'name': 'cinder-volumes'}]
self.stubs.Set(volutils, 'get_all_volume_groups',
get_all_volume_groups)
vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
configuration = conf.Configuration(fake_opt, 'fake_group')
lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
vg_obj=vg_obj, db=db)
lvm_driver.delete_snapshot = mock.Mock()
self.stubs.Set(volutils, 'get_all_volume_groups',
get_all_volume_groups)
volume = tests_utils.create_volume(self.context,
host=socket.gethostname())
volume_id = volume['id']
backup = {}
backup['volume_id'] = volume_id
backup['user_id'] = 'fake'
backup['project_id'] = 'fake'
backup['host'] = socket.gethostname()
backup['availability_zone'] = '1'
backup['display_name'] = 'test_check_for_setup_error'
backup['display_description'] = 'test_check_for_setup_error'
backup['container'] = 'fake'
backup['status'] = 'creating'
backup['fail_reason'] = ''
backup['service'] = 'fake'
backup['parent_id'] = None
backup['size'] = 5 * 1024 * 1024
backup['object_count'] = 22
db.backup_create(self.context, backup)['id']
lvm_driver.check_for_setup_error()
@mock.patch.object(utils, 'temporary_chown')
@mock.patch.object(fileutils, 'file_open')
@mock.patch.object(cinder.brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db, 'volume_get')
def test_backup_volume(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
vol = tests_utils.create_volume(self.context)
backup = {'volume_id': vol['id']}
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
mock_volume_get.return_value = vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = file('/dev/null')
backup_service.backup(backup, f, None)
self.volume.driver._attach_volume.return_value = attach_info
self.volume.driver.backup_volume(self.context, backup,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""