Add SIGHUP handlers to reset RPC version pins
Adding SIGHUP handlers (by implementing reset() from oslo.service) to cinder-scheduler, cinder-backup and cinder-volume that reset cached RPC version pins. This avoids the need to restart all the services when an upgrade of the deployment is completed. Some changes go a little deeper into the stack, because to reload all the pins we need to recreate the <service>.rpcapi.<service>API objects that are stored in memory. Please note that the SIGHUP signal is handled by oslo.service only when the service runs in daemon mode (without a tty attached). To test this commit in DevStack you need to add "&" to the end of the command that starts the service. The situation is more complicated with the API service, so we're leaving it with a restart required for now. In deployments with HA, cinder-api is typically behind a load balancer, so restarting individual nodes one-by-one should be easy. DocImpact: Add information on rolling upgrade procedures to the docs. Implements: blueprint rpc-object-compatibility Change-Id: I03ed74e17dc9a4b9aa2ddcfbeb36a106a0f035f8
This commit is contained in:
parent
3631fd2576
commit
c9a55d852e
@ -123,6 +123,11 @@ class BackupManager(manager.SchedulerDependentManager):
|
||||
LOG.exception(_LE("Problem cleaning incomplete backup "
|
||||
"operations."))
|
||||
|
||||
def reset(self):
    """Re-create RPC API clients when SIGHUP is caught.

    Clears the version caches via the base class first, then rebuilds
    the backup and volume RPC API objects so they pick up new minimum
    RPC/object versions after a rolling upgrade, without a restart.
    """
    super(BackupManager, self).reset()
    self.backup_rpcapi = backup_rpcapi.BackupAPI()
    self.volume_rpcapi = volume_rpcapi.VolumeAPI()
|
||||
|
||||
def _cleanup_incomplete_backup_operations(self, ctxt):
|
||||
LOG.info(_LI("Cleaning up incomplete backup operations."))
|
||||
|
||||
|
@ -58,6 +58,8 @@ import oslo_messaging as messaging
|
||||
from oslo_service import periodic_task
|
||||
|
||||
from cinder.db import base
|
||||
from cinder.i18n import _LI
|
||||
from cinder import rpc
|
||||
from cinder.scheduler import rpcapi as scheduler_rpcapi
|
||||
from cinder import version
|
||||
|
||||
@ -127,6 +129,16 @@ class Manager(base.Base, PeriodicTasks):
|
||||
"""
|
||||
return True
|
||||
|
||||
def reset(self):
    """Method executed when SIGHUP is caught by the process.

    We're utilizing it to reset RPC API version pins to avoid restart of
    the service when rolling upgrade is completed.
    """
    LOG.info(_LI('Resetting cached RPC version pins.'))
    # Emptying these module-level caches means freshly created RPC API
    # client objects will no longer be pinned to the old versions.
    rpc.LAST_OBJ_VERSIONS = {}
    rpc.LAST_RPC_VERSIONS = {}
|
||||
|
||||
|
||||
class SchedulerDependentManager(Manager):
|
||||
"""Periodically send capability updates to the Scheduler services.
|
||||
@ -162,3 +174,7 @@ class SchedulerDependentManager(Manager):
|
||||
|
||||
def _add_to_threadpool(self, func, *args, **kwargs):
    # Fire-and-forget: schedule func on this manager's threadpool.
    self._tp.spawn_n(func, *args, **kwargs)
|
||||
|
||||
def reset(self):
    """Re-create the scheduler RPC API client when SIGHUP is caught.

    The base-class reset clears cached RPC version pins; rebuilding the
    client afterwards makes it pick up the new versions.
    """
    super(SchedulerDependentManager, self).reset()
    self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
|
||||
|
@ -74,6 +74,10 @@ class Scheduler(object):
|
||||
CONF.scheduler_host_manager)
|
||||
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
|
||||
|
||||
def reset(self):
    """Reset volume RPC API object to load new version pins."""
    self.volume_rpcapi = volume_rpcapi.VolumeAPI()
|
||||
|
||||
def is_ready(self):
|
||||
"""Returns True if Scheduler is ready to accept requests.
|
||||
|
||||
|
@ -75,6 +75,10 @@ class SchedulerManager(manager.Manager):
|
||||
eventlet.sleep(CONF.periodic_interval)
|
||||
self._startup_delay = False
|
||||
|
||||
def reset(self):
    """Propagate SIGHUP reset to the scheduler driver.

    The base-class reset clears cached RPC version pins; the driver
    reset then rebuilds its volume RPC API client.
    """
    super(SchedulerManager, self).reset()
    self.driver.reset()
|
||||
|
||||
def update_service_capabilities(self, context, service_name=None,
|
||||
host=None, capabilities=None, **kwargs):
|
||||
"""Process a capability update from a service node."""
|
||||
|
@ -359,6 +359,10 @@ class Service(service.Service):
|
||||
self.model_disconnected = True
|
||||
LOG.exception(_LE('Exception encountered: '))
|
||||
|
||||
def reset(self):
    """Handle SIGHUP: reset the manager, then the oslo.service base.

    The manager reset drops cached RPC version pins and re-creates RPC
    API client objects, so a rolling upgrade can complete without a
    full service restart.
    """
    self.manager.reset()
    super(Service, self).reset()
|
||||
|
||||
|
||||
class WSGIService(service.ServiceBase):
|
||||
"""Provides ability to launch API from a 'paste' configuration."""
|
||||
|
@ -69,6 +69,21 @@ class SchedulerManagerTestCase(test.TestCase):
|
||||
sleep_mock.assert_called_once_with(CONF.periodic_interval)
|
||||
self.assertFalse(self.manager._startup_delay)
|
||||
|
||||
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-volume': '1.3'})
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-volume': '1.5'})
def test_reset(self):
    """reset() rebuilds the driver's volume rpcapi with pins cleared."""
    mgr = self.manager_cls()

    # With the version caches mocked, a fresh client is pinned.
    rpcapi = mgr.driver.volume_rpcapi
    self.assertEqual('1.3', rpcapi.client.version_cap)
    self.assertEqual('1.5', rpcapi.client.serializer._base.version_cap)

    mgr.reset()

    # After reset the re-created client carries no pins.
    rpcapi = mgr.driver.volume_rpcapi
    self.assertIsNone(rpcapi.client.version_cap)
    self.assertIsNone(rpcapi.client.serializer._base.version_cap)
|
||||
|
||||
@mock.patch('cinder.scheduler.driver.Scheduler.'
|
||||
'update_service_capabilities')
|
||||
def test_update_service_capabilities_empty_dict(self, _mock_update_cap):
|
||||
|
@ -290,6 +290,30 @@ class BackupTestCase(BaseBackupTest):
|
||||
mock_add_threadpool.assert_has_calls(calls, any_order=True)
|
||||
self.assertEqual(2, mock_add_threadpool.call_count)
|
||||
|
||||
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3',
                                             'cinder-volume': '1.7'})
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-backup': '1.5',
                                             'cinder-volume': '1.4'})
def test_reset(self):
    """reset() rebuilds both RPC API clients with pins cleared."""
    mgr = manager.BackupManager()

    # With the version caches mocked, fresh clients come out pinned.
    self.assertEqual('1.3', mgr.backup_rpcapi.client.version_cap)
    self.assertEqual(
        '1.5', mgr.backup_rpcapi.client.serializer._base.version_cap)
    self.assertEqual('1.7', mgr.volume_rpcapi.client.version_cap)
    self.assertEqual(
        '1.4', mgr.volume_rpcapi.client.serializer._base.version_cap)

    mgr.reset()

    # After reset the re-created clients carry no pins.
    for rpcapi in (mgr.backup_rpcapi, mgr.volume_rpcapi):
        self.assertIsNone(rpcapi.client.version_cap)
        self.assertIsNone(rpcapi.client.serializer._base.version_cap)
|
||||
|
||||
def test_is_working(self):
|
||||
self.assertTrue(self.backup_mgr.is_working())
|
||||
|
||||
|
@ -83,6 +83,18 @@ class ServiceManagerTestCase(test.TestCase):
|
||||
serv.start()
|
||||
self.assertEqual('service', serv.test_method())
|
||||
|
||||
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'test': '1.5'})
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'test': '1.3'})
def test_reset(self):
    """Service.reset() empties the cached RPC/object version pins."""
    svc = service.Service(
        'test', 'test', 'test',
        'cinder.tests.unit.test_service.FakeManager')
    svc.start()
    svc.reset()
    self.assertEqual({}, rpc.LAST_OBJ_VERSIONS)
    self.assertEqual({}, rpc.LAST_RPC_VERSIONS)
|
||||
|
||||
|
||||
class ServiceFlagsTestCase(test.TestCase):
|
||||
def test_service_enabled_on_create_based_on_flag(self):
|
||||
|
@ -374,6 +374,21 @@ class VolumeTestCase(BaseVolumeTestCase):
|
||||
manager.init_host()
|
||||
self.assertEqual(0, mock_add_p_task.call_count)
|
||||
|
||||
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.3'})
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-scheduler': '1.5'})
def test_reset(self):
    """reset() rebuilds the scheduler RPC API client with pins cleared."""
    mgr = vol_manager.VolumeManager()

    # With the version caches mocked, a fresh client is pinned.
    rpcapi = mgr.scheduler_rpcapi
    self.assertEqual('1.3', rpcapi.client.version_cap)
    self.assertEqual('1.5', rpcapi.client.serializer._base.version_cap)

    mgr.reset()

    # After reset the re-created client carries no pins.
    rpcapi = mgr.scheduler_rpcapi
    self.assertIsNone(rpcapi.client.version_cap)
    self.assertIsNone(rpcapi.client.serializer._base.version_cap)
|
||||
|
||||
@mock.patch.object(vol_manager.VolumeManager,
|
||||
'update_service_capabilities')
|
||||
def test_report_filter_goodness_function(self, mock_update):
|
||||
|
16
releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml
Normal file
16
releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml
Normal file
@ -0,0 +1,16 @@
|
||||
---
features:
  - Added an RPC backward compatibility layer similar to the
    one implemented in Nova. This means that Cinder services
    can be upgraded one-by-one without breakage. After all the
    services are upgraded, SIGHUP signals should be issued to
    all the services to signal them to reload cached minimum
    RPC versions. The alternative is, of course, to restart
    them. Please note that the cinder-api service doesn't
    support SIGHUP yet. Please also take into account that all
    the rolling upgrade capabilities are considered tech
    preview, as we don't have CI testing them yet.
upgrade:
  - Starting from the Mitaka release, Cinder has a tech
    preview of rolling upgrades support.
|
Loading…
Reference in New Issue
Block a user