Fix restore failures for earlier backup records

We upgraded the backup service attribute from
module name to class name, so backups created
before that change can no longer be restored
with the new code. This patch migrates the older
backup records online and adds a compatibility
check in the code.

Change-Id: I0374ab9f2d37ecf8d8bce191c7535601266d432e
Closes-Bug: #1727912
Author: TommyLike
Date: 2017-10-27 09:28:04 +08:00
Commit: 7bae5759c8 (parent: 04fe6f10a6)
8 changed files with 99 additions and 12 deletions
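
For context, a short editorial sketch (not part of the commit; the two service
strings mirror the swift entry in the data migration added below): backup rows
written before the upgrade store the driver's module path, while the new code
configures the driver by class path, so the old strict comparison rejects every
pre-upgrade record. The substring check introduced in the backup manager
accepts both namings until the records have been migrated.

    # Illustrative values only, mirroring the migration's swift mapping.
    old_record_service = 'cinder.backup.drivers.swift'
    configured_service = 'cinder.backup.drivers.swift.SwiftBackupDriver'

    # Pre-patch check: every pre-upgrade record is rejected, aborting restore.
    assert old_record_service != configured_service

    # Patched check: the module path is a prefix of the class path, so both
    # namings are accepted until the online migration rewrites the records.
    assert old_record_service in configured_service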

View File

@@ -490,7 +490,11 @@ class BackupManager(manager.ThreadPoolManager):
         backup_service = self._map_service_to_driver(backup['service'])
         configured_service = self.driver_name
-        if backup_service != configured_service:
+        # TODO(tommylikehu): We upgraded the 'driver_name' from module
+        # to class name, so we use 'in' here to match two namings,
+        # this can be replaced with equal sign during next
+        # release (Rocky).
+        if backup_service not in configured_service:
             err = _('Restore backup aborted, the backup service currently'
                     ' configured [%(configured_service)s] is not the'
                     ' backup service that was used to create this'
@@ -574,7 +578,11 @@ class BackupManager(manager.ThreadPoolManager):
         backup_service = self._map_service_to_driver(backup['service'])
         if backup_service is not None:
             configured_service = self.driver_name
-            if backup_service != configured_service:
+            # TODO(tommylikehu): We upgraded the 'driver_name' from module
+            # to class name, so we use 'in' here to match two namings,
+            # this can be replaced with equal sign during next
+            # release (Rocky).
+            if backup_service not in configured_service:
                 err = _('Delete backup aborted, the backup service currently'
                         ' configured [%(configured_service)s] is not the'
                         ' backup service that was used to create this'
@@ -658,7 +666,11 @@ class BackupManager(manager.ThreadPoolManager):
         backup_record = {'backup_service': backup.service}
         backup_service = self._map_service_to_driver(backup.service)
         configured_service = self.driver_name
-        if backup_service != configured_service:
+        # TODO(tommylikehu): We upgraded the 'driver_name' from module
+        # to class name, so we use 'in' here to match two namings,
+        # this can be replaced with equal sign during next
+        # release (Rocky).
+        if backup_service not in configured_service:
             err = (_('Export record aborted, the backup service currently '
                      'configured [%(configured_service)s] is not the '
                      'backup service that was used to create this '
@@ -815,7 +827,11 @@ class BackupManager(manager.ThreadPoolManager):
         LOG.info('Backup service: %s.', backup_service_name)
         if backup_service_name is not None:
             configured_service = self.driver_name
-            if backup_service_name != configured_service:
+            # TODO(tommylikehu): We upgraded the 'driver_name' from module
+            # to class name, so we use 'in' here to match two namings,
+            # this can be replaced with equal sign during next
+            # release (Rocky).
+            if backup_service_name not in configured_service:
                 err = _('Reset backup status aborted, the backup service'
                         ' currently configured [%(configured_service)s] '
                         'is not the backup service that was used to create'

View File

@@ -208,6 +208,7 @@ class DbCommands(object):
     online_migrations = (
         # Added in Queens
         db.service_uuids_online_data_migration,
+        db.backup_service_online_migration,
     )

     def __init__(self):

View File

@@ -97,6 +97,10 @@ def service_uuids_online_data_migration(context, max_count):
     return IMPL.service_uuids_online_data_migration(context, max_count)


+def backup_service_online_migration(context, max_count):
+    return IMPL.backup_service_online_migration(context, max_count)
+
+
 def service_destroy(context, service_id):
     """Destroy the service or raise if it does not exist."""
     return IMPL.service_destroy(context, service_id)

View File

@@ -598,6 +598,40 @@ def service_uuids_online_data_migration(context, max_count):
     return total, updated


+@require_admin_context
+def backup_service_online_migration(context, max_count):
+    name_rules = {'cinder.backup.drivers.swift':
+                  'cinder.backup.drivers.swift.SwiftBackupDriver',
+                  'cinder.backup.drivers.ceph':
+                  'cinder.backup.drivers.ceph.CephBackupDriver',
+                  'cinder.backup.drivers.glusterfs':
+                  'cinder.backup.drivers.glusterfs.GlusterfsBackupDriver',
+                  'cinder.backup.drivers.google':
+                  'cinder.backup.drivers.google.GoogleBackupDriver',
+                  'cinder.backup.drivers.nfs':
+                  'cinder.backup.drivers.nfs.NFSBackupDriver',
+                  'cinder.backup.drivers.tsm':
+                  'cinder.backup.drivers.tsm.TSMBackupDriver',
+                  'cinder.backup.drivers.posix':
+                  'cinder.backup.drivers.posix.PosixBackupDriver'}
+    total = 0
+    updated = 0
+    session = get_session()
+    with session.begin():
+        total = model_query(
+            context, models.Backup, session=session).filter(
+            models.Backup.service.in_(name_rules.keys())).count()
+        backups = (model_query(
+            context, models.Backup, session=session).filter(
+            models.Backup.service.in_(
+                name_rules.keys())).limit(max_count)).all()
+        if len(backups):
+            for backup in backups:
+                updated += 1
+                backup.service = name_rules[backup.service]
+    return total, updated
+
+
 ###################
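
The function above returns a (total, updated) pair so that it can run in
bounded batches while the services stay online; registering it in
DbCommands.online_migrations (earlier in this commit) is what exposes it to
the cinder-manage db online_data_migrations command. As a rough sketch of that
batching behaviour, and only a sketch (the loop and the helper name below are
assumptions, not part of the commit), repeated calls with a max_count
eventually rewrite every record that still uses an old module-path name:

    # Hypothetical driver loop, for illustration only: keep migrating in
    # batches until the migration reports nothing left to update.
    from cinder import db


    def migrate_backup_service_names(context, batch_size=50):
        migrated = 0
        while True:
            # 'remaining' counts rows still holding an old module-path name
            # at the start of this batch; 'updated' is capped by batch_size.
            remaining, updated = db.backup_service_online_migration(
                context, batch_size)
            migrated += updated
            if remaining == 0 or updated == 0:
                return migrated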

View File

@@ -17,8 +17,8 @@ from cinder.backup import driver


 class FakeBackupService(driver.BackupDriver):
-    def __init__(self, context, db_driver=None):
-        super(FakeBackupService, self).__init__(context, db_driver)
+    def __init__(self, context, db=None):
+        super(FakeBackupService, self).__init__(context, db)

     def backup(self, backup, volume_file):
         pass

View File

@@ -1122,11 +1122,14 @@ class BackupTestCase(BaseBackupTest):
         backup.save()
         self.backup_mgr.delete_backup(self.ctxt, backup)

-    def test_delete_backup(self):
+    @ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService',
+              'cinder.tests.unit.backup.fake_service')
+    def test_delete_backup(self, service):
         """Test normal backup deletion."""
         vol_id = self._create_volume_db_entry(size=1)
         backup = self._create_backup_db_entry(
-            status=fields.BackupStatus.DELETING, volume_id=vol_id)
+            status=fields.BackupStatus.DELETING, volume_id=vol_id,
+            service=service)
         self.backup_mgr.delete_backup(self.ctxt, backup)
         self.assertRaises(exception.BackupNotFound,
                           db.backup_get,
@@ -1242,16 +1245,19 @@ class BackupTestCase(BaseBackupTest):
                           self.ctxt,
                           backup)

-    def test_export_record(self):
+    @ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService',
+              'cinder.tests.unit.backup.fake_service')
+    def test_export_record(self, service):
         """Test normal backup record export."""
         vol_size = 1
         vol_id = self._create_volume_db_entry(status='available',
                                               size=vol_size)
         backup = self._create_backup_db_entry(
-            status=fields.BackupStatus.AVAILABLE, volume_id=vol_id)
+            status=fields.BackupStatus.AVAILABLE, volume_id=vol_id,
+            service=service)

         export = self.backup_mgr.export_record(self.ctxt, backup)
-        self.assertEqual(CONF.backup_driver, export['backup_service'])
+        self.assertEqual(service, export['backup_service'])
         self.assertIn('backup_url', export)

     def test_import_record_with_verify_not_implemented(self):
def test_import_record_with_verify_not_implemented(self):

View File

@@ -43,7 +43,8 @@ def set_defaults(conf):
     conf.set_default('sqlite_synchronous', False, group='database')
     conf.set_default('policy_file', 'cinder.tests.unit/policy.json',
                      group='oslo_policy')
-    conf.set_default('backup_driver', 'cinder.tests.unit.backup.fake_service')
+    conf.set_default('backup_driver',
+                     'cinder.tests.unit.backup.fake_service.FakeBackupService')
     conf.set_default('backend',
                      'castellan.tests.unit.key_manager.mock_key_manager.'
                      'MockKeyManager',

View File

@@ -210,6 +210,31 @@ class DBAPIServiceTestCase(BaseTest):
         self.assertEqual(1, total)
         self.assertEqual(1, updated)

+    @ddt.data({'count': 5, 'total': 3, 'updated': 3},
+              {'count': 2, 'total': 3, 'updated': 2})
+    @ddt.unpack
+    def test_backup_service_online_migration(self, count, total, updated):
+        volume = utils.create_volume(self.ctxt)
+        sqlalchemy_api.backup_create(self.ctxt, {
+            'service': 'cinder.backup.drivers.swift',
+            'volume_id': volume.id
+        })
+        sqlalchemy_api.backup_create(self.ctxt, {
+            'service': 'cinder.backup.drivers.ceph',
+            'volume_id': volume.id
+        })
+        sqlalchemy_api.backup_create(self.ctxt, {
+            'service': 'cinder.backup.drivers.glusterfs',
+            'volume_id': volume.id
+        })
+        sqlalchemy_api.backup_create(self.ctxt, {
+            'service': 'cinder.backup.drivers.fake_backup_service',
+            'volume_id': volume.id
+        })
+        t, u = db.backup_service_online_migration(self.ctxt, count)
+        self.assertEqual(total, t)
+        self.assertEqual(updated, u)
+
     def test_service_create(self):
         # Add a cluster value to the service
         values = {'cluster_name': 'cluster'}