Fix bug caused by token-less admin context
When `reclaim_instance_interval` is set greater than 0 and an instance booted from a volume with `delete_on_termination` set to true is deleted, then once `reclaim_instance_interval` has elapsed the boot volumes are left in the attached/in-use state even though the instance they were attached to has been deleted. The cause is that the admin context used by `nova.compute.manager._reclaim_queued_deletes` carries no token information, so the resulting Cinder API calls fail. Add user/project configuration options with an admin role to the `[cinder]` config group, and when the context is_admin but has no token, authenticate with that user/project information when calling the Cinder API.

Change-Id: I3c35bba43fee81baebe8261f546c1424ce3a3383
Closes-Bug: #1733736
Closes-Bug: #1734025
Partial-Bug: #1736773
parent bb2d0e5f85
commit ca6daf148d
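To make the new options concrete, here is a minimal sketch of the `[cinder]` section this change enables in nova.conf, assuming the keystoneauth1 `password` auth type; every value below is a placeholder, and the option names come from the keystoneauth1 auth and session loaders registered in the diff that follows:

    [cinder]
    auth_type = password
    auth_url = http://controller/identity
    username = nova
    password = secret
    project_name = service
    user_domain_name = Default
    project_domain_name = Default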
@@ -102,11 +102,16 @@ def register_opts(conf):
    conf.register_opts(cinder_opts, group=cinder_group)
    ks_loading.register_session_conf_options(conf,
                                             cinder_group.name)
    ks_loading.register_auth_conf_options(conf, cinder_group.name)


def list_opts():
    return {
        cinder_group.name: (
            cinder_opts +
            ks_loading.get_session_conf_options())
            ks_loading.get_session_conf_options() +
            ks_loading.get_auth_common_conf_options() +
            ks_loading.get_auth_plugin_conf_options('password') +
            ks_loading.get_auth_plugin_conf_options('v2password') +
            ks_loading.get_auth_plugin_conf_options('v3password'))
    }
@@ -127,7 +127,7 @@ class CinderV1TestCase(test.NoDBTestCase):
        get_endpoint = mock.Mock(
            return_value='http://localhost:8776/v1/%(project_id)s')
        fake_session = mock.Mock(get_endpoint=get_endpoint)
        ctxt = context.get_admin_context()
        ctxt = context.get_context()
        with mock.patch.object(cinder, '_SESSION', fake_session):
            self.assertRaises(exception.UnsupportedCinderAPIVersion,
                              cinder.cinderclient, ctxt)
@@ -147,7 +147,7 @@ class CinderV2TestCase(test.NoDBTestCase):
        get_endpoint = mock.Mock(
            return_value='http://localhost:8776/v2/%(project_id)s')
        fake_session = mock.Mock(get_endpoint=get_endpoint)
        ctxt = context.get_admin_context()
        ctxt = context.get_context()
        with mock.patch.object(cinder, '_SESSION', fake_session):
            self.assertRaises(exception.UnsupportedCinderAPIVersion,
                              cinder.cinderclient, ctxt)
@@ -15,6 +15,7 @@

from cinderclient import api_versions as cinder_api_versions
from cinderclient import exceptions as cinder_exception
from keystoneauth1 import loading as ks_loading
from keystoneclient import exceptions as keystone_exception
import mock
from oslo_utils import timeutils
@@ -934,3 +935,18 @@ class CinderClientTestCase(test.NoDBTestCase):
                         client.api_version)
        get_volume_api.assert_called_once_with(
            self.mock_session.get_endpoint.return_value)

    @mock.patch.object(ks_loading, 'load_auth_from_conf_options')
    def test_load_auth_plugin_failed(self, mock_load_from_conf):
        mock_load_from_conf.return_value = None
        self.assertRaises(cinder_exception.Unauthorized,
                          cinder._load_auth_plugin, CONF)

    @mock.patch('nova.volume.cinder._ADMIN_AUTH')
    def test_admin_context_without_token(self,
                                          mock_admin_auth):

        mock_admin_auth.return_value = '_FAKE_ADMIN_AUTH'
        admin_ctx = context.get_admin_context()
        params = cinder._get_cinderclient_parameters(admin_ctx)
        self.assertEqual(params[0], mock_admin_auth)
@@ -47,16 +47,31 @@ CONF = nova.conf.CONF

LOG = logging.getLogger(__name__)

_ADMIN_AUTH = None
_SESSION = None


def reset_globals():
    """Testing method to reset globals.
    """
    global _ADMIN_AUTH
    global _SESSION

    _ADMIN_AUTH = None
    _SESSION = None


def _load_auth_plugin(conf):
    auth_plugin = ks_loading.load_auth_from_conf_options(conf,
                                    nova.conf.cinder.cinder_group.name)

    if auth_plugin:
        return auth_plugin

    err_msg = _('Unknown auth type: %s') % conf.cinder.auth_type
    raise cinder_exception.Unauthorized(401, message=err_msg)


def _check_microversion(url, microversion):
    """Checks to see if the requested microversion is supported by the current
    version of python-cinderclient and the volume API endpoint.
@@ -80,15 +95,27 @@ def _check_microversion(url, microversion):


def _get_cinderclient_parameters(context):
    global _ADMIN_AUTH
    global _SESSION

    if not _SESSION:
        _SESSION = ks_loading.load_session_from_conf_options(
            CONF, nova.conf.cinder.cinder_group.name)

    # NOTE(lixipeng): Auth token is none when call
    # cinder API from compute periodic tasks, context
    # from them generated from 'context.get_admin_context'
    # which only set is_admin=True but is without token.
    # So add load_auth_plugin when this condition appear.
    if context.is_admin and not context.auth_token:
        if not _ADMIN_AUTH:
            _ADMIN_AUTH = _load_auth_plugin(CONF)
        auth = _ADMIN_AUTH
    else:
        auth = service_auth.get_auth_plugin(context)

    url = None

    auth = service_auth.get_auth_plugin(context)
    service_type, service_name, interface = CONF.cinder.catalog_info.split(':')

    service_parameters = {'service_type': service_type,
@@ -0,0 +1,19 @@
---
fixes:
  - |
    It is now possible to configure the ``[cinder]`` section of nova.conf to
    allow setting admin-role credentials for scenarios where a user token is
    not available to perform actions on a volume. For example, when
    ``reclaim_instance_interval`` is a positive integer, instances are
    soft deleted until the nova-compute service periodic task removes them.
    If a soft deleted instance has volumes attached, the compute service needs
    to be able to detach and possibly delete the associated volumes, otherwise
    they will be orphaned in the block storage service. Similarly, if
    ``running_deleted_instance_poll_interval`` is set and
    ``running_deleted_instance_action = reap``, then the compute service will
    need to be able to detach and possibly delete volumes attached to
    instances that are reaped. See `bug 1733736`_ and `bug 1734025`_ for more
    details.

    .. _bug 1733736: https://bugs.launchpad.net/nova/+bug/1733736
    .. _bug 1734025: https://bugs.launchpad.net/nova/+bug/1734025
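As a rough, hypothetical illustration of when this fallback applies (not part of the change; it assumes a configured deployment with the `[cinder]` credentials shown earlier):

    from nova import context
    from nova.volume import cinder

    # Periodic tasks such as _reclaim_queued_deletes build their context this
    # way: is_admin is True, but no Keystone token is attached.
    ctxt = context.get_admin_context()
    assert ctxt.is_admin and ctxt.auth_token is None

    # With no user token on the context, cinderclient() now authenticates with
    # the credentials loaded from the [cinder] section instead of issuing a
    # request that Cinder would reject as unauthorized.
    client = cinder.cinderclient(ctxt)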