Use get_by_args instead of get_by_host_and_topic
The replication v2.1 code (cheesecake) requires a number of service queries. The default get_by_host_and_topic, however, doesn't include an interface to get disabled services, which is required at times. Rather than adding a new version of get_by_host_and_topic and introducing the arg, we probably shouldn't have been using this particular query at all. The only service we're interested in is the cinder-volume service; granted, in most cases volume_topic is set to this as a default, but we shouldn't be relying on it. This patch changes the get_by_host_and_topic calls to use the more appropriate (in this case) get_by_args and specifically requests the cinder-volume binary. Note that for manage_existing, we check the returned service and ensure that its disabled flag is False; otherwise we raise an exception. Change-Id: I2e8c5142c589af6c1ddadeb661f86e7194faac7a Closes-Bug: #1552006
This commit is contained in:
parent
6fa468270c
commit
cd8e3c3978
|
@ -68,7 +68,7 @@ class SnapshotManageTest(test.TestCase):
|
||||||
|
|
||||||
@mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
|
@mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
|
||||||
@mock.patch('cinder.volume.api.API.create_snapshot_in_db')
|
@mock.patch('cinder.volume.api.API.create_snapshot_in_db')
|
||||||
@mock.patch('cinder.db.service_get_by_host_and_topic')
|
@mock.patch('cinder.db.service_get_by_args')
|
||||||
def test_manage_snapshot_ok(self, mock_db,
|
def test_manage_snapshot_ok(self, mock_db,
|
||||||
mock_create_snapshot, mock_rpcapi):
|
mock_create_snapshot, mock_rpcapi):
|
||||||
"""Test successful manage volume execution.
|
"""Test successful manage volume execution.
|
||||||
|
@ -79,7 +79,9 @@ class SnapshotManageTest(test.TestCase):
|
||||||
code to the caller.
|
code to the caller.
|
||||||
"""
|
"""
|
||||||
ctxt = context.RequestContext('admin', 'fake', True)
|
ctxt = context.RequestContext('admin', 'fake', True)
|
||||||
mock_db.return_value = fake_service.fake_service_obj(ctxt)
|
mock_db.return_value = fake_service.fake_service_obj(
|
||||||
|
ctxt,
|
||||||
|
binary='cinder-volume')
|
||||||
|
|
||||||
body = {'snapshot': {'volume_id': 'fake_volume_id', 'ref': 'fake_ref'}}
|
body = {'snapshot': {'volume_id': 'fake_volume_id', 'ref': 'fake_ref'}}
|
||||||
res = self._get_resp(body)
|
res = self._get_resp(body)
|
||||||
|
|
|
@ -1524,13 +1524,18 @@ class API(base.Base):
|
||||||
elevated = context.elevated()
|
elevated = context.elevated()
|
||||||
try:
|
try:
|
||||||
svc_host = volume_utils.extract_host(host, 'backend')
|
svc_host = volume_utils.extract_host(host, 'backend')
|
||||||
service = objects.Service.get_by_host_and_topic(
|
service = objects.Service.get_by_args(
|
||||||
elevated, svc_host, CONF.volume_topic)
|
elevated, svc_host, 'cinder-volume')
|
||||||
except exception.ServiceNotFound:
|
except exception.ServiceNotFound:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Unable to find service: %(service)s for '
|
LOG.error(_LE('Unable to find service: %(service)s for '
|
||||||
'given host: %(host)s.'),
|
'given host: %(host)s.'),
|
||||||
{'service': CONF.volume_topic, 'host': host})
|
{'service': CONF.volume_topic, 'host': host})
|
||||||
|
if service.disabled:
|
||||||
|
LOG.error(_LE('Unable to manage_existing volume on a disabled '
|
||||||
|
'service.'))
|
||||||
|
raise exception.ServiceUnavailable()
|
||||||
|
|
||||||
availability_zone = service.get('availability_zone')
|
availability_zone = service.get('availability_zone')
|
||||||
|
|
||||||
manage_what = {
|
manage_what = {
|
||||||
|
@ -1569,13 +1574,19 @@ class API(base.Base):
|
||||||
metadata=None):
|
metadata=None):
|
||||||
host = volume_utils.extract_host(volume['host'])
|
host = volume_utils.extract_host(volume['host'])
|
||||||
try:
|
try:
|
||||||
objects.Service.get_by_host_and_topic(context.elevated(), host,
|
# NOTE(jdg): We don't use this, we just make sure it's valid
|
||||||
CONF.volume_topic)
|
# and exists before sending off the call
|
||||||
|
service = objects.Service.get_by_args(
|
||||||
|
context.elevated(), host, 'cinder-volume')
|
||||||
except exception.ServiceNotFound:
|
except exception.ServiceNotFound:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Unable to find service: %(service)s for '
|
LOG.error(_LE('Unable to find service: %(service)s for '
|
||||||
'given host: %(host)s.'),
|
'given host: %(host)s.'),
|
||||||
{'service': CONF.volume_topic, 'host': host})
|
{'service': CONF.volume_topic, 'host': host})
|
||||||
|
if service.disabled:
|
||||||
|
LOG.error(_LE('Unable to manage_existing snapshot on a disabled '
|
||||||
|
'service.'))
|
||||||
|
raise exception.ServiceUnavailable()
|
||||||
|
|
||||||
snapshot_object = self.create_snapshot_in_db(context, volume, name,
|
snapshot_object = self.create_snapshot_in_db(context, volume, name,
|
||||||
description, False,
|
description, False,
|
||||||
|
@ -1595,7 +1606,7 @@ class API(base.Base):
|
||||||
svc_host = volume_utils.extract_host(host, 'backend')
|
svc_host = volume_utils.extract_host(host, 'backend')
|
||||||
|
|
||||||
service = objects.Service.get_by_args(
|
service = objects.Service.get_by_args(
|
||||||
ctxt, svc_host, CONF.volume_topic)
|
ctxt, svc_host, 'cinder-volume')
|
||||||
expected = {'replication_status': [fields.ReplicationStatus.ENABLED,
|
expected = {'replication_status': [fields.ReplicationStatus.ENABLED,
|
||||||
fields.ReplicationStatus.FAILED_OVER]}
|
fields.ReplicationStatus.FAILED_OVER]}
|
||||||
result = service.conditional_update(
|
result = service.conditional_update(
|
||||||
|
@ -1618,9 +1629,8 @@ class API(base.Base):
|
||||||
ctxt = context.get_admin_context()
|
ctxt = context.get_admin_context()
|
||||||
svc_host = volume_utils.extract_host(host, 'backend')
|
svc_host = volume_utils.extract_host(host, 'backend')
|
||||||
|
|
||||||
# NOTE(jdg): get_by_host_and_topic filters out disabled
|
|
||||||
service = objects.Service.get_by_args(
|
service = objects.Service.get_by_args(
|
||||||
ctxt, svc_host, CONF.volume_topic)
|
ctxt, svc_host, 'cinder-volume')
|
||||||
expected = {'frozen': False}
|
expected = {'frozen': False}
|
||||||
result = service.conditional_update(
|
result = service.conditional_update(
|
||||||
{'frozen': True}, expected)
|
{'frozen': True}, expected)
|
||||||
|
@ -1639,9 +1649,8 @@ class API(base.Base):
|
||||||
ctxt = context.get_admin_context()
|
ctxt = context.get_admin_context()
|
||||||
svc_host = volume_utils.extract_host(host, 'backend')
|
svc_host = volume_utils.extract_host(host, 'backend')
|
||||||
|
|
||||||
# NOTE(jdg): get_by_host_and_topic filters out disabled
|
|
||||||
service = objects.Service.get_by_args(
|
service = objects.Service.get_by_args(
|
||||||
ctxt, svc_host, CONF.volume_topic)
|
ctxt, svc_host, 'cinder-volume')
|
||||||
expected = {'frozen': True}
|
expected = {'frozen': True}
|
||||||
result = service.conditional_update(
|
result = service.conditional_update(
|
||||||
{'frozen': False}, expected)
|
{'frozen': False}, expected)
|
||||||
|
|
|
@ -245,9 +245,10 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
curr_active_backend_id = None
|
curr_active_backend_id = None
|
||||||
svc_host = vol_utils.extract_host(self.host, 'backend')
|
svc_host = vol_utils.extract_host(self.host, 'backend')
|
||||||
try:
|
try:
|
||||||
service = objects.Service.get_by_host_and_topic(
|
service = objects.Service.get_by_args(
|
||||||
context.get_admin_context(), svc_host,
|
context.get_admin_context(),
|
||||||
CONF.volume_topic)
|
svc_host,
|
||||||
|
'cinder-volume')
|
||||||
except exception.ServiceNotFound:
|
except exception.ServiceNotFound:
|
||||||
# NOTE(jdg): This is to solve problems with unit tests
|
# NOTE(jdg): This is to solve problems with unit tests
|
||||||
LOG.info(_LI("Service not found for updating "
|
LOG.info(_LI("Service not found for updating "
|
||||||
|
@ -503,9 +504,10 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
try:
|
try:
|
||||||
# NOTE(jdg): may be some things to think about here in failover
|
# NOTE(jdg): may be some things to think about here in failover
|
||||||
# scenarios
|
# scenarios
|
||||||
service = objects.Service.get_by_host_and_topic(
|
service = objects.Service.get_by_args(
|
||||||
context.get_admin_context(), svc_host,
|
context.get_admin_context(),
|
||||||
CONF.volume_topic)
|
svc_host,
|
||||||
|
'cinder-volume')
|
||||||
except exception.ServiceNotFound:
|
except exception.ServiceNotFound:
|
||||||
# FIXME(jdg): no idea what we'd do if we hit this case
|
# FIXME(jdg): no idea what we'd do if we hit this case
|
||||||
LOG.info(_LI("Service not found for updating "
|
LOG.info(_LI("Service not found for updating "
|
||||||
|
@ -3258,10 +3260,10 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
"""
|
"""
|
||||||
svc_host = vol_utils.extract_host(self.host, 'backend')
|
svc_host = vol_utils.extract_host(self.host, 'backend')
|
||||||
|
|
||||||
# NOTE(jdg): get_by_host_and_topic filters out disabled
|
|
||||||
service = objects.Service.get_by_args(
|
service = objects.Service.get_by_args(
|
||||||
context, svc_host,
|
context,
|
||||||
CONF.volume_topic)
|
svc_host,
|
||||||
|
'cinder-volume')
|
||||||
volumes = objects.VolumeList.get_all_by_host(context, self.host)
|
volumes = objects.VolumeList.get_all_by_host(context, self.host)
|
||||||
|
|
||||||
exception_encountered = False
|
exception_encountered = False
|
||||||
|
@ -3357,10 +3359,10 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
'notification to driver has failed.'))
|
'notification to driver has failed.'))
|
||||||
svc_host = vol_utils.extract_host(self.host, 'backend')
|
svc_host = vol_utils.extract_host(self.host, 'backend')
|
||||||
|
|
||||||
# NOTE(jdg): get_by_host_and_topic filters out disabled
|
|
||||||
service = objects.Service.get_by_args(
|
service = objects.Service.get_by_args(
|
||||||
context, svc_host,
|
context,
|
||||||
CONF.volume_topic)
|
svc_host,
|
||||||
|
'cinder-volume')
|
||||||
service.disabled = True
|
service.disabled = True
|
||||||
service.disabled_reason = "frozen"
|
service.disabled_reason = "frozen"
|
||||||
service.save()
|
service.save()
|
||||||
|
@ -3390,10 +3392,10 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
return False
|
return False
|
||||||
svc_host = vol_utils.extract_host(self.host, 'backend')
|
svc_host = vol_utils.extract_host(self.host, 'backend')
|
||||||
|
|
||||||
# NOTE(jdg): get_by_host_and_topic filters out disabled
|
|
||||||
service = objects.Service.get_by_args(
|
service = objects.Service.get_by_args(
|
||||||
context, svc_host,
|
context,
|
||||||
CONF.volume_topic)
|
svc_host,
|
||||||
|
'cinder-volume')
|
||||||
service.disabled = False
|
service.disabled = False
|
||||||
service.disabled_reason = ""
|
service.disabled_reason = ""
|
||||||
service.save()
|
service.save()
|
||||||
|
|
Loading…
Reference in New Issue