Merge "Enhance deletion efficiency when backup init host"
This commit is contained in:
commit
963d3a9d48
cinder/backup/manager.py

@@ -57,7 +57,11 @@ LOG = logging.getLogger(__name__)
 backup_manager_opts = [
     cfg.StrOpt('backup_driver',
                default='cinder.backup.drivers.swift',
-               help='Driver to use for backups.',)
+               help='Driver to use for backups.',),
+    cfg.BoolOpt('backup_service_inithost_offload',
+                default=False,
+                help='Offload pending backup delete during '
+                     'backup service startup.',),
 ]
 
 # This map doesn't need to be extended in the future since it's only
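For context, here is a minimal standalone sketch of how an option declared
this way is registered and read with oslo.config; the setup and print lines
are illustrative only and are not part of the patch:

from oslo_config import cfg

CONF = cfg.CONF
CONF.register_opts([
    cfg.BoolOpt('backup_service_inithost_offload',
                default=False,
                help='Offload pending backup delete during '
                     'backup service startup.'),
])

# Operators would enable the behaviour in cinder.conf under [DEFAULT]:
#   backup_service_inithost_offload = True
if CONF.backup_service_inithost_offload:
    print('pending deletes will be offloaded to the thread pool')
else:
    print('pending deletes run sequentially (the default)')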
@@ -233,7 +237,14 @@ class BackupManager(manager.SchedulerDependentManager):
             backup.save()
             if backup['status'] == 'deleting':
                 LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
-                self.delete_backup(ctxt, backup)
+                if CONF.backup_service_inithost_offload:
+                    # Offload all the pending backup delete operations to the
+                    # threadpool to prevent the main backup service thread
+                    # from being blocked.
+                    self._add_to_threadpool(self.delete_backup, ctxt, backup)
+                else:
+                    # By default, delete backups sequentially
+                    self.delete_backup(ctxt, backup)
 
         self._cleanup_temp_volumes_snapshots(backups)
 
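The control flow above reduces to a simple branch. A self-contained sketch
with stand-in callables and dict backups ('offload' plays the role of
CONF.backup_service_inithost_offload; none of these names are Cinder's):

def resume_pending_deletes(backups, offload, delete_backup, add_to_threadpool,
                           ctxt=None):
    for backup in backups:
        if backup['status'] != 'deleting':
            continue
        if offload:
            # queue the delete so startup is not blocked by a slow backend
            add_to_threadpool(delete_backup, ctxt, backup)
        else:
            # default: delete one backup at a time, as before this change
            delete_backup(ctxt, backup)

# usage with trivial stand-ins:
resume_pending_deletes(
    backups=[{'id': 'b1', 'status': 'deleting'}],
    offload=False,
    delete_backup=lambda ctxt, b: print('deleted', b['id']),
    add_to_threadpool=lambda f, *a: f(*a))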
cinder/manager.py

@@ -61,6 +61,8 @@ from cinder.db import base
 from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder import version
 
+from eventlet import greenpool
+
 
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -140,6 +142,7 @@ class SchedulerDependentManager(Manager):
         self.last_capabilities = None
         self.service_name = service_name
         self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+        self._tp = greenpool.GreenPool()
         super(SchedulerDependentManager, self).__init__(host, db_driver)
 
     def update_service_capabilities(self, capabilities):
@@ -156,3 +159,6 @@ class SchedulerDependentManager(Manager):
                 self.service_name,
                 self.host,
                 self.last_capabilities)
+
+    def _add_to_threadpool(self, func, *args, **kwargs):
+        self._tp.spawn_n(func, *args, **kwargs)
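The new helper wraps eventlet's GreenPool. A minimal sketch of the semantics
it relies on (the delete function and backup IDs below are stand-ins, not
Cinder code):

import eventlet
from eventlet import greenpool

pool = greenpool.GreenPool()  # defaults to up to 1000 green threads

def delete(backup_id):
    eventlet.sleep(0.1)  # stand-in for a slow delete against a backend
    print('deleted %s' % backup_id)

for backup_id in ('b1', 'b2', 'b3'):
    # spawn_n schedules the call and returns immediately; unlike spawn(),
    # it returns nothing, making it a cheap fire-and-forget primitive
    pool.spawn_n(delete, backup_id)

pool.waitall()  # only needed here so the example waits for its output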
cinder/tests/unit/test_backup.py

@@ -240,6 +240,28 @@ class BackupTestCase(BaseBackupTest):
         self.assertTrue(mock_delete_volume.called)
         self.assertTrue(mock_delete_snapshot.called)
 
+    @mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
+    @mock.patch('cinder.manager.SchedulerDependentManager._add_to_threadpool')
+    def test_init_host_with_service_inithost_offload(self,
+                                                     mock_add_threadpool,
+                                                     mock_get_all_by_host):
+        self.override_config('backup_service_inithost_offload', True)
+        vol1_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
+        backup1 = self._create_backup_db_entry(status='deleting',
+                                               volume_id=vol1_id)
+
+        vol2_id = self._create_volume_db_entry()
+        db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
+        backup2 = self._create_backup_db_entry(status='deleting',
+                                               volume_id=vol2_id)
+        mock_get_all_by_host.return_value = [backup1, backup2]
+        self.backup_mgr.init_host()
+        calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
+                 mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
+        mock_add_threadpool.assert_has_calls(calls, any_order=True)
+        self.assertEqual(2, mock_add_threadpool.call_count)
+
     @mock.patch.object(db, 'volume_get')
     @ddt.data(KeyError, exception.VolumeNotFound)
     def test_cleanup_temp_volumes_snapshots_volume_not_found(
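The test asserts with any_order=True because once deletes are handed to a
pool their dispatch order is not guaranteed. A small standalone sketch of
that mock idiom (not part of the patch):

from unittest import mock

tp = mock.Mock()
tp('delete', 'backup2')  # offloaded calls may be recorded in any order
tp('delete', 'backup1')

calls = [mock.call('delete', 'backup1'),
         mock.call('delete', 'backup2')]
tp.assert_has_calls(calls, any_order=True)  # passes despite reversed order
assert tp.call_count == 2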
cinder/volume/manager.py

@@ -70,8 +70,6 @@ from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import utils as vol_utils
 from cinder.volume import volume_types
 
-from eventlet import greenpool
-
 LOG = logging.getLogger(__name__)
 
 QUOTAS = quota.QUOTAS
@@ -202,7 +200,6 @@ class VolumeManager(manager.SchedulerDependentManager):
                                             *args, **kwargs)
         self.configuration = config.Configuration(volume_manager_opts,
                                                   config_group=service_name)
-        self._tp = greenpool.GreenPool()
         self.stats = {}
 
         if not volume_driver:
@@ -236,9 +233,6 @@ class VolumeManager(manager.SchedulerDependentManager):
             LOG.error(_LE("Invalid JSON: %s"),
                       self.driver.configuration.extra_capabilities)
 
-    def _add_to_threadpool(self, func, *args, **kwargs):
-        self._tp.spawn_n(func, *args, **kwargs)
-
     def _count_allocated_capacity(self, ctxt, volume):
         pool = vol_utils.extract_host(volume['host'], 'pool')
         if pool is None:
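Net effect of the volume manager hunks: the duplicated pool and helper are
deleted, and VolumeManager inherits them from the base class instead. A
simplified sketch of the resulting shape (stand-in class bodies, not the
actual Cinder classes):

from eventlet import greenpool

class SchedulerDependentManager(object):
    def __init__(self):
        self._tp = greenpool.GreenPool()

    def _add_to_threadpool(self, func, *args, **kwargs):
        self._tp.spawn_n(func, *args, **kwargs)

class VolumeManager(SchedulerDependentManager):
    pass  # no longer defines its own _tp or _add_to_threadpool

class BackupManager(SchedulerDependentManager):
    pass  # gains the same helper, used by init_host offloading

mgr = BackupManager()
mgr._add_to_threadpool(print, 'queued via the inherited helper')
mgr._tp.waitall()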