Fix status after a VolumeDriverException

If a driver raised VolumeDriverException during failover_host then the
service would remain in "failing-over" replication status and we would
see an AttributeError exception in the logs.

That is because we were trying to set the "status" field in a service,
and that field doesn't exist.

This patch fixes this by setting the replication_status field to error
instead and by disabling the service.

We had no failover_host manager tests. This patch doesn't try to add all
the tests that we should have; it only adds the tests pertinent to this
patch, so that we test this specific functionality and avoid regressions.
Missing tests should be added in another patch.

Closes-Bug: #1641716
Change-Id: I374a8a187b93da5f946f0243c841ba0b54273401
This commit is contained in:
Gorka Eguileor 2016-11-07 12:16:01 +01:00
parent 7a66835d4a
commit 4fb420c136
2 changed files with 18 additions and 1 deletions

View File

@@ -62,3 +62,18 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
db_svc = objects.Service.get_by_id(self.context, svc.id)
self.assertEqual(expected, db_svc.replication_status)
@mock.patch('cinder.volume.driver.BaseVD.failover_host',
            mock.Mock(side_effect=exception.VolumeDriverException('')))
def test_failover_host_driver_exception(self):
    """Driver error during failover must leave the service in FAILOVER_ERROR.

    The driver's failover_host is patched to raise VolumeDriverException;
    the manager must catch it and persist the error replication status
    instead of leaving the service stuck in "failing-over".
    """
    # Start from a service that is mid-failover.
    service = utils.create_service(
        self.context,
        host=self.host,
        active_backend_id=None,
        replication_status=fields.ReplicationStatus.FAILING_OVER)

    self.manager.failover_host(self.context, mock.sentinel.backend_id)

    # Re-read from the DB to confirm the status change was saved.
    stored = objects.Service.get_by_id(self.context, service.id)
    self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR,
                     stored.replication_status)

View File

@@ -4052,7 +4052,9 @@ class VolumeManager(manager.CleanableManager,
# backend is still set as primary as per driver memory
LOG.error(_LE("Driver reported error during "
"replication failover."))
service.status = 'error'
service.replication_status = (
fields.ReplicationStatus.FAILOVER_ERROR)
service.disabled = True
service.save()
exception_encountered = True
if exception_encountered: