diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py index 1946784e6f7..0c43bcfa6eb 100644 --- a/cinder/backup/manager.py +++ b/cinder/backup/manager.py @@ -66,6 +66,18 @@ backup_manager_opts = [ cfg.StrOpt('backup_driver', default='cinder.backup.drivers.swift.SwiftBackupDriver', help='Driver to use for backups.',), + cfg.IntOpt('backup_driver_init_check_interval', + default=60, + min=5, + help='Time in seconds between checks to see if the backup ' + 'driver has been successfully initialized, any time ' + 'the driver is restarted.'), + cfg.IntOpt('backup_driver_status_check_interval', + default=60, + min=10, + help='Time in seconds between checks of the backup driver ' + 'status. If it does not report as working, it is ' + 'restarted.'), cfg.BoolOpt('backup_service_inithost_offload', default=True, help='Offload pending backup delete during ' @@ -157,7 +169,7 @@ class BackupManager(manager.ThreadPoolManager): try: init_loop = loopingcall.FixedIntervalLoopingCall( self._setup_backup_driver, ctxt) - init_loop.start(interval=CONF.periodic_interval) + init_loop.start(interval=CONF.backup_driver_init_check_interval) except loopingcall.LoopingCallDone: LOG.info("Backup driver was successfully initialized.") except Exception: @@ -1133,7 +1145,8 @@ class BackupManager(manager.ThreadPoolManager): def is_working(self): return self.is_initialized - @periodic_task.periodic_task(spacing=CONF.periodic_interval) + @periodic_task.periodic_task( + spacing=CONF.backup_driver_status_check_interval) def _report_driver_status(self, context): if not self.is_working(): self.setup_backup_backend(context) diff --git a/cinder/cmd/status.py b/cinder/cmd/status.py index f6a88caac57..958fa358631 100644 --- a/cinder/cmd/status.py +++ b/cinder/cmd/status.py @@ -109,9 +109,34 @@ class Checks(uc.UpgradeCommands): return uc.Result(SUCCESS) + def _check_periodic_interval(self): + """Checks for non-default use of periodic_interval. 
+ + Some new configuration options have been introduced to supplement + periodic_interval, which was being used for multiple, possibly + conflicting purposes. If a non-default value for periodic_interval + is configured, warn the operator to review whether one of the new + options is better suited for the periodic task(s) being tuned. + """ + periodic_interval = CONF.periodic_interval + + if periodic_interval != 60: + return uc.Result( + WARNING, + "Detected non-default value for the 'periodic_interval' " + "option. New configuration options have been introduced to " + "replace the use of 'periodic_interval' for some purposes. " + "Please consult the 'Upgrade' section of the Train release " + "notes for more information.") + + return uc.Result(SUCCESS) + _upgrade_checks = ( + # added in Stein ('Backup Driver Path', _check_backup_module), ('Use of Policy File', _check_policy_file), + # added in Train + ('Periodic Interval Use', _check_periodic_interval), ) diff --git a/cinder/opts.py b/cinder/opts.py index 6432097b660..9fd7e502264 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -239,7 +239,7 @@ def list_opts(): cinder_quota.quota_opts, cinder_scheduler_driver.scheduler_driver_opts, cinder_scheduler_hostmanager.host_manager_opts, - [cinder_scheduler_manager.scheduler_driver_opt], + cinder_scheduler_manager.scheduler_manager_opts, [cinder_scheduler_scheduleroptions. scheduler_json_config_location_opt], cinder_scheduler_weights_capacity.capacity_weight_opts, diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py index 193cdda3630..39c145929e1 100644 --- a/cinder/scheduler/manager.py +++ b/cinder/scheduler/manager.py @@ -50,13 +50,20 @@ from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.volume import rpcapi as volume_rpcapi -scheduler_driver_opt = cfg.StrOpt('scheduler_driver', - default='cinder.scheduler.filter_scheduler.' 
- 'FilterScheduler', - help='Default scheduler driver to use') +scheduler_manager_opts = [ + cfg.StrOpt('scheduler_driver', + default='cinder.scheduler.filter_scheduler.' + 'FilterScheduler', + help='Default scheduler driver to use'), + cfg.IntOpt('scheduler_driver_init_wait_time', + default=60, + min=1, + help='Maximum time in seconds to wait for the driver to ' + 'report as ready'), +] CONF = cfg.CONF -CONF.register_opt(scheduler_driver_opt) +CONF.register_opts(scheduler_manager_opts) QUOTAS = quota.QUOTAS @@ -104,7 +111,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager): ctxt = context.get_admin_context() self.request_service_capabilities(ctxt) - for __ in range(CONF.periodic_interval): + for __ in range(CONF.scheduler_driver_init_wait_time): if self.driver.is_first_receive(): break eventlet.sleep(1) @@ -163,7 +170,8 @@ class SchedulerManager(manager.CleanableManager, manager.Manager): def _wait_for_scheduler(self): # NOTE(dulek): We're waiting for scheduler to announce that it's ready - # or CONF.periodic_interval seconds from service startup has passed. + # or CONF.scheduler_driver_init_wait_time seconds from service startup + # has passed. 
while self._startup_delay and not self.driver.is_ready(): eventlet.sleep(1) diff --git a/cinder/tests/unit/backup/test_backup.py b/cinder/tests/unit/backup/test_backup.py index 288db592b50..295b6ce8f99 100644 --- a/cinder/tests/unit/backup/test_backup.py +++ b/cinder/tests/unit/backup/test_backup.py @@ -24,6 +24,7 @@ import mock from os_brick.initiator.connectors import fake as fake_connectors from oslo_config import cfg from oslo_db import exception as db_exc +from oslo_service import loopingcall from oslo_utils import importutils from oslo_utils import timeutils @@ -330,6 +331,21 @@ class BackupTestCase(BaseBackupTest): mock_migrate_fixed_key, backups=mock_get_all_by_host()) + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall') + @ddt.data(123456, 654321) + def test_setup_backup_backend_uses_new_config( + self, new_cfg_value, mock_FILC): + # previously used CONF.periodic_interval; see Bug #1828748 + new_cfg_name = 'backup_driver_init_check_interval' + + self.addCleanup(CONF.clear_override, new_cfg_name) + CONF.set_override(new_cfg_name, new_cfg_value) + mock_init_loop = mock.MagicMock() + mock_init_loop.start.side_effect = loopingcall.LoopingCallDone() + mock_FILC.return_value = mock_init_loop + self.backup_mgr.setup_backup_backend(self.ctxt) + mock_init_loop.start.assert_called_once_with(interval=new_cfg_value) + @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') @mock.patch('cinder.objects.service.Service.get_minimum_obj_version') @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3', diff --git a/cinder/tests/unit/cmd/test_status.py b/cinder/tests/unit/cmd/test_status.py index 647abff7a65..5fa24e575bb 100644 --- a/cinder/tests/unit/cmd/test_status.py +++ b/cinder/tests/unit/cmd/test_status.py @@ -95,3 +95,17 @@ class TestCinderStatus(testtools.TestCase): self.assertEqual(uc.Code.WARNING, result.code) self.assertIn(policy_path, result.details) + + def test_check_periodic_interval_default(self): + # default value is 
60 + self._set_config('periodic_interval', 60) + result = self.checks._check_periodic_interval() + self.assertEqual(uc.Code.SUCCESS, result.code) + + def test_check_periodic_interval_not_default(self): + # default value is 60 + self._set_config('periodic_interval', 22) + result = self.checks._check_periodic_interval() + self.assertEqual(uc.Code.WARNING, result.code) + self.assertIn('New configuration options have been introduced', + result.details) diff --git a/cinder/tests/unit/scheduler/test_scheduler.py b/cinder/tests/unit/scheduler/test_scheduler.py index 3427c4c6ffc..5bc18e48463 100644 --- a/cinder/tests/unit/scheduler/test_scheduler.py +++ b/cinder/tests/unit/scheduler/test_scheduler.py @@ -82,6 +82,22 @@ class SchedulerManagerTestCase(test.TestCase): self.assertEqual(2, sleep_mock.call_count) self.assertFalse(self.manager._startup_delay) + @mock.patch('cinder.scheduler.driver.Scheduler.is_first_receive') + @mock.patch('eventlet.sleep') + @mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities') + @ddt.data(71, 17) + def test_init_host_with_rpc_delay_uses_new_config( + self, new_cfg_value, publish_capabilities_mock, sleep_mock, + is_first_receive_mock): + # previously used CONF.periodic_interval; see Bug #1828748 + new_cfg_name = 'scheduler_driver_init_wait_time' + + self.addCleanup(CONF.clear_override, new_cfg_name) + CONF.set_override(new_cfg_name, new_cfg_value) + is_first_receive_mock.return_value = False + self.manager.init_host_with_rpc() + self.assertEqual(new_cfg_value, sleep_mock.call_count) + @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters') @mock.patch( 'cinder.scheduler.host_manager.BackendState.consume_from_volume') diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index aa2db6325ac..758db9ce818 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -137,6 +137,13 @@ volume_manager_opts = [ 'Query results will be obtained in batches from the ' 'database and not in one shot to 
avoid extreme memory ' 'usage. Set 0 to turn off this functionality.'), + cfg.IntOpt('backend_stats_polling_interval', + default=60, + min=3, + help='Time in seconds between requests for usage statistics ' + 'from the backend. Be aware that generating usage ' + 'statistics is expensive for some backends, so setting ' + 'this value too low may adversely affect performance.'), ] volume_backend_opts = [ @@ -2627,7 +2634,7 @@ class VolumeManager(manager.CleanableManager, return volume_stats - @periodic_task.periodic_task(spacing=CONF.periodic_interval) + @periodic_task.periodic_task(spacing=CONF.backend_stats_polling_interval) def publish_service_capabilities(self, context): """Collect driver status and then publish.""" self._report_driver_status(context) diff --git a/doc/source/cli/cinder-status.rst b/doc/source/cli/cinder-status.rst index fb7d835a284..9a040ba7307 100644 --- a/doc/source/cli/cinder-status.rst +++ b/doc/source/cli/cinder-status.rst @@ -90,6 +90,12 @@ Upgrade * Checks for the presence of a **policy.json** file have been added to warn if policy changes should be present in a **policy.yaml** file. + **15.0.0 (Train)** + + * Check added to make operators aware of new finer-grained configuration + options affecting the periodicity of various Cinder tasks. Triggered + when the ``periodic_interval`` option is not set to its default value. + See Also ======== diff --git a/releasenotes/notes/bug-1695018-a2c01fb9e638a105.yaml b/releasenotes/notes/bug-1695018-a2c01fb9e638a105.yaml deleted file mode 100644 index 39cc4d5f005..00000000000 --- a/releasenotes/notes/bug-1695018-a2c01fb9e638a105.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Fixed issue where Cinder periodic tasks could not be configured to run - more often than every 60 seconds. Setting ``periodic_interval`` in - cinder.conf to less than 60 will now work as expected. 
diff --git a/releasenotes/notes/new-config-opts-for-periodic_interval-d0cb17a2d72e0cd0.yaml b/releasenotes/notes/new-config-opts-for-periodic_interval-d0cb17a2d72e0cd0.yaml new file mode 100644 index 00000000000..627255b6173 --- /dev/null +++ b/releasenotes/notes/new-config-opts-for-periodic_interval-d0cb17a2d72e0cd0.yaml @@ -0,0 +1,35 @@ +--- +features: + - | + Added new configuration options to allow more specific control over + some periodic processes. See the 'Upgrade' section for details. +upgrade: + - | + The ``periodic_interval`` configuration option was being used in too + many places, and as a result, it had become difficult to tune specific + periodic tasks without affecting other functionality. The following + configuration options should now be used in place of ``periodic_interval``: + + * ``backup_driver_init_check_interval`` + * ``backup_driver_status_check_interval`` + * ``scheduler_driver_init_wait_time`` + * ``backend_stats_polling_interval`` + + See the help text for these options for more information. The default + value of each option is 60, which has been the default value of + ``periodic_interval``. + + * If you *have not* modified ``periodic_interval``, you should see no + differences from current behavior. + * If you *have* modified ``periodic_interval``, please review the new + options to determine which one(s) should be adjusted. Also, you should + consider setting ``periodic_interval`` back to its default value of 60. + + A warning has been added to the ``cinder-status upgrade check`` CLI + to detect whether the ``periodic_interval`` option has been modified + from its default value to remind you which of the above situations + currently applies to you. + + The ``periodic_interval`` configuration option still exists but its + use is now restricted to providing a default periodicity for objects + created from the ``cinder.service.Service`` class.