Move replication_status update to init_host_with_rpc

We were using init_host to read replication_status from the
driver and update the service entry in the DB.  It turns out
that on a fresh install this doesn't actually work: although
the backend has multiple init methods, the Service entry isn't
created in a fresh deploy until AFTER init_host runs.  The
result was that in some cases we were trying to update a column
on a non-existent Service row in the database.
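
For context, the startup ordering that bites here (roughly what the
volume service does on start, sketched below with stand-in names, not
the actual cinder/service.py code) is: init_host runs first, the
Service DB entry is created afterwards, and init_host_with_rpc only
fires once the RPC server is up.  A minimal, self-contained sketch:

    # Stand-in sketch of the volume service startup ordering; hypothetical
    # classes and names, not the real cinder implementation.

    SERVICE_TABLE = {}  # stands in for the Service table in the database


    class FakeVolumeManager(object):
        def init_host(self):
            # Driver setup happens here.  On a fresh deploy there is no
            # Service row yet, so updating replication_status here can fail.
            print("init_host: service row exists?",
                  "cinder-volume" in SERVICE_TABLE)

        def init_host_with_rpc(self):
            # By the time this hook runs the Service row exists, so the
            # update is safe.
            SERVICE_TABLE["cinder-volume"]["replication_status"] = "disabled"


    def start_service(manager):
        manager.init_host()                            # 1. backend/driver init
        SERVICE_TABLE.setdefault("cinder-volume", {})  # 2. Service row created
        # 3. the RPC server starts here in the real service code
        manager.init_host_with_rpc()                   # 4. RPC-dependent init


    start_service(FakeVolumeManager())
    print(SERVICE_TABLE)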

This patch moves the replication_status update for the
service into the init_host_with_rpc method.  That method was
just a no-op stub in the parent manager class, so we implement
it in cinder.volume.manager and do the replication status
update there.
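
As a rough illustration of the stub-override pattern described above
(illustrative names only; the real classes are
cinder.manager.SchedulerDependentManager and
cinder.volume.manager.VolumeManager, and the wiring shown here is
hypothetical):

    # Sketch of overriding a no-op parent hook; not the actual cinder code.

    class BaseManager(object):
        def init_host_with_rpc(self):
            """No-op hook in the parent manager; subclasses override it."""
            pass


    class VolumeManagerSketch(BaseManager):
        def __init__(self, service_row):
            # Assume the Service row is reachable by the time the hook runs.
            self.service_row = service_row

        def init_host_with_rpc(self):
            # The replication_status bookkeeping lives here, where the
            # Service entry is guaranteed to exist.
            self.service_row["replication_status"] = "disabled"


    row = {"host": "node1"}
    VolumeManagerSketch(row).init_host_with_rpc()
    print(row)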

Change-Id: I18b2658e2f93959f74377ccb86ce8b01b6970c60
Closes-Bug: #1555370
John Griffith 2016-03-09 16:43:14 -07:00
parent b043410f39
commit af941066b3
1 changed file with 23 additions and 22 deletions


@@ -489,6 +489,8 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.driver.set_throttle()

         # at this point the driver is considered initialized.
+        # NOTE(jdg): Careful though because that doesn't mean
+        # that an entry exists in the service table
         self.driver.set_initialized()

         for volume in volumes:
@@ -507,39 +509,38 @@
         # collect and publish service capabilities
         self.publish_service_capabilities(ctxt)
+        LOG.info(_LI("Driver initialization completed successfully."),
+                 resource={'type': 'driver',
+                           'id': self.driver.__class__.__name__})
+
+    def init_host_with_rpc(self):
+        LOG.info(_LI("Initializing RPC dependent components of volume "
+                     "driver %(driver_name)s (%(version)s)"),
+                 {'driver_name': self.driver.__class__.__name__,
+                  'version': self.driver.get_version()})

         stats = self.driver.get_volume_stats(refresh=True)
-        if stats and stats.get('replication', False):
-
-            @periodic_task.periodic_task
-            def run_replication_task(self, ctxt):
-                self._update_replication_relationship_status(ctxt)
-
-            self.add_periodic_task(run_replication_task)
-
         svc_host = vol_utils.extract_host(self.host, 'backend')
         try:
-            # NOTE(jdg): may be some things to think about here in failover
-            # scenarios
-            service = objects.Service.get_by_args(
-                context.get_admin_context(),
-                svc_host,
-                'cinder-volume')
+            service = objects.Service.get_by_host_and_topic(
+                context.get_admin_context(), svc_host,
+                CONF.volume_topic)
         except exception.ServiceNotFound:
-            # FIXME(jdg): no idea what we'd do if we hit this case
-            LOG.info(_LI("Service not found for updating "
-                         "replication_status."))
-        else:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE("Service not found for updating "
+                              "replication_status."))
+
+        if stats and stats.get('replication', False):
             if service.replication_status == (
                     fields.ReplicationStatus.FAILED_OVER):
                 pass
             elif stats and stats.get('replication_enabled', False):
                 service.replication_status = fields.ReplicationStatus.ENABLED
             else:
                 service.replication_status = fields.ReplicationStatus.DISABLED
-            service.save()
+        else:
+            service.replication_status = fields.ReplicationStatus.DISABLED

-        LOG.info(_LI("Driver initialization completed successfully."),
+        service.save()
+        LOG.info(_LI("Driver post RPC initialization completed successfully."),
                  resource={'type': 'driver',
                            'id': self.driver.__class__.__name__})