Add periodic task to clean expired console tokens
Console auth tokens and related connection information will be stored in the database. Each compute manager will be responsible for cleaning up expired tokens related to instances it manages as well as cleaning up when instances are deleted or moved. This patch adds a periodic task to the compute manager to clean up expired tokens. It also adds token cleanup for instances when they are deleted or live migrated. TODOs have been added to add token cleanup for other move operations such as resize, evacuate, and shelve offload. They are not changed as part of this patch series because for now, we are keeping parity with existing behavior. Changing the behavior will be a separate future patch. Later patches in this series will add the code to put console auth tokens in the database. The consoleauth server will be deprecated. During the deprecation period tokens will be stored in both the consoleauth server and the database, but console proxies will choose which to use for authorization. partially-implements: blueprint convert-consoles-to-objects Change-Id: I66762703709340a2f5c68dcd6802993c9a68c263
This commit is contained in:
parent
21eec0c313
commit
1853c74e4b
|
@ -1869,6 +1869,11 @@ class API(base.Base):
|
|||
# NOTE(dtp): cells.enable = False means "use cells v2".
|
||||
# Run everywhere except v1 compute cells.
|
||||
if not CONF.cells.enable or self.cell_type == 'api':
|
||||
# TODO(melwitt): In Rocky, we store console authorizations
|
||||
# in both the consoleauth service and the database while
|
||||
# we convert to using the database. Remove the consoleauth
|
||||
# line below when authorizations are no longer being
|
||||
# stored in consoleauth, in Stein.
|
||||
self.consoleauth_rpcapi.delete_tokens_for_instance(
|
||||
context, instance.uuid)
|
||||
|
||||
|
@ -4204,6 +4209,11 @@ class API(base.Base):
|
|||
self._record_action_start(context, instance,
|
||||
instance_actions.LIVE_MIGRATION)
|
||||
|
||||
# TODO(melwitt): In Rocky, we store console authorizations
|
||||
# in both the consoleauth service and the database while
|
||||
# we convert to using the database. Remove the consoleauth
|
||||
# line below when authorizations are no longer being
|
||||
# stored in consoleauth, in Stein.
|
||||
self.consoleauth_rpcapi.delete_tokens_for_instance(
|
||||
context, instance.uuid)
|
||||
|
||||
|
|
|
@ -81,6 +81,7 @@ from nova.network import model as network_model
|
|||
from nova.network.security_group import openstack_driver
|
||||
from nova import objects
|
||||
from nova.objects import base as obj_base
|
||||
from nova.objects import console_auth_token as obj_console_auth_token
|
||||
from nova.objects import fields
|
||||
from nova.objects import instance as obj_instance
|
||||
from nova.objects import migrate_data as migrate_data_obj
|
||||
|
@ -753,6 +754,7 @@ class ComputeManager(manager.Manager):
|
|||
compute_utils.notify_about_instance_action(context, instance,
|
||||
self.host, action=fields.NotificationAction.DELETE,
|
||||
phase=fields.NotificationPhase.END, bdms=bdms)
|
||||
self._clean_instance_console_tokens(context, instance)
|
||||
self._delete_scheduler_instance_info(context, instance.uuid)
|
||||
|
||||
def _init_instance(self, context, instance):
|
||||
|
@ -3132,6 +3134,9 @@ class ComputeManager(manager.Manager):
|
|||
instance.progress = 0
|
||||
instance.save()
|
||||
self.stop_instance(context, instance, False)
|
||||
# TODO(melwitt): We should clean up instance console tokens here in the
|
||||
# case of evacuate. The instance is on a new host and will need to
|
||||
# establish a new console connection.
|
||||
self._update_scheduler_instance_info(context, instance)
|
||||
self._notify_about_instance_usage(
|
||||
context, instance, "rebuild.end",
|
||||
|
@ -4503,6 +4508,9 @@ class ComputeManager(manager.Manager):
|
|||
network_info = self._finish_resize(context, instance, migration,
|
||||
disk_info, image_meta, bdms)
|
||||
|
||||
# TODO(melwitt): We should clean up instance console tokens here. The
|
||||
# instance is on a new host and will need to establish a new console
|
||||
# connection.
|
||||
self._update_scheduler_instance_info(context, instance)
|
||||
self._notify_about_instance_usage(
|
||||
context, instance, "finish_resize.end",
|
||||
|
@ -4869,6 +4877,9 @@ class ComputeManager(manager.Manager):
|
|||
self._nil_out_instance_obj_host_and_node(instance)
|
||||
instance.save(expected_task_state=None)
|
||||
|
||||
# TODO(melwitt): We should clean up instance console tokens here. The
|
||||
# instance has no host at this point and will need to establish a new
|
||||
# console connection in the future after it is unshelved.
|
||||
self._delete_scheduler_instance_info(context, instance.uuid)
|
||||
self._notify_about_instance_usage(context, instance,
|
||||
'shelve_offload.end')
|
||||
|
@ -6321,6 +6332,7 @@ class ComputeManager(manager.Manager):
|
|||
LOG.info('Migrating instance to %s finished successfully.',
|
||||
dest, instance=instance)
|
||||
|
||||
self._clean_instance_console_tokens(ctxt, instance)
|
||||
if migrate_data and migrate_data.obj_attr_is_set('migration'):
|
||||
migrate_data.migration.status = 'completed'
|
||||
migrate_data.migration.save()
|
||||
|
@ -6360,6 +6372,14 @@ class ComputeManager(manager.Manager):
|
|||
CONF.rdp.enabled or CONF.serial_console.enabled or
|
||||
CONF.mks.enabled)
|
||||
|
||||
def _clean_instance_console_tokens(self, ctxt, instance):
    """Remove any console auth tokens stored for *instance*.

    Tokens are kept in the database only when the database backend is
    in use; cells v1 does not support that backend, and if no console
    type is enabled no tokens were ever stored, so in either case there
    is nothing to clean.
    """
    if CONF.cells.enable:
        # Cells v1: the database token backend is not supported.
        return
    if not self._consoles_enabled():
        # No console types enabled, so no tokens to clean up.
        return
    obj_console_auth_token.ConsoleAuthToken.\
        clean_console_auths_for_instance(ctxt, instance.uuid)
|
||||
|
||||
@wrap_exception()
|
||||
@wrap_instance_event(prefix='compute')
|
||||
@wrap_instance_fault
|
||||
|
@ -7814,3 +7834,18 @@ class ComputeManager(manager.Manager):
|
|||
error, instance=instance)
|
||||
image_meta = objects.ImageMeta.from_instance(instance)
|
||||
self.driver.unquiesce(context, instance, image_meta)
|
||||
|
||||
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _cleanup_expired_console_auth_tokens(self, context):
    """Periodic task: purge this host's expired console auth tokens.

    Console authorization tokens and their connection data are written
    to the database whenever a user asks for a console connection to an
    instance, and each token eventually expires. This task periodically
    sweeps the expired ones out of the database.
    """
    if CONF.cells.enable:
        # The database token backend is not supported for cells v1, so
        # there are no stored tokens to expire — skip the lookup.
        return
    obj_console_auth_token.ConsoleAuthToken.\
        clean_expired_console_auths_for_host(context, self.host)
|
||||
|
|
|
@ -794,6 +794,10 @@ Possible values:
|
|||
* Any positive integer in seconds: The instance will exist for
|
||||
the specified number of seconds before being offloaded.
|
||||
"""),
|
||||
# NOTE(melwitt): We're also using this option as the interval for cleaning
|
||||
# up expired console authorizations from the database. It's related to the
|
||||
# delete_instance_interval in that it's another task for cleaning up
|
||||
# resources related to an instance.
|
||||
cfg.IntOpt('instance_delete_interval',
|
||||
default=300,
|
||||
help="""
|
||||
|
|
|
@ -19,6 +19,7 @@ import time
|
|||
|
||||
from cinderclient import exceptions as cinder_exception
|
||||
from cursive import exception as cursive_exception
|
||||
import ddt
|
||||
from eventlet import event as eventlet_event
|
||||
import mock
|
||||
import netaddr
|
||||
|
@ -77,6 +78,7 @@ CONF = nova.conf.CONF
|
|||
fake_host_list = [mock.sentinel.host1]
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class ComputeManagerUnitTestCase(test.NoDBTestCase):
|
||||
def setUp(self):
|
||||
super(ComputeManagerUnitTestCase, self).setUp()
|
||||
|
@ -296,11 +298,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
|
|||
self.assertTrue(mock_log.warning.called)
|
||||
self.assertFalse(mock_log.error.called)
|
||||
|
||||
@mock.patch('nova.objects.ConsoleAuthToken.'
|
||||
'clean_console_auths_for_instance')
|
||||
@mock.patch('nova.compute.utils.notify_about_instance_action')
|
||||
@mock.patch('nova.compute.manager.ComputeManager.'
|
||||
'_detach_volume')
|
||||
def test_delete_instance_without_info_cache(self, mock_detach,
|
||||
mock_notify):
|
||||
mock_notify, mock_clean):
|
||||
instance = fake_instance.fake_instance_obj(
|
||||
self.context,
|
||||
uuid=uuids.instance,
|
||||
|
@ -326,6 +330,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
|
|||
action='delete', phase='start', bdms=[]),
|
||||
mock.call(self.context, instance, 'fake-mini',
|
||||
action='delete', phase='end', bdms=[])])
|
||||
mock_clean.assert_called_once_with(self.context, instance.uuid)
|
||||
|
||||
def test_check_device_tagging_no_tagging(self):
|
||||
bdms = objects.BlockDeviceMappingList(objects=[
|
||||
|
@ -4264,6 +4269,60 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
|
|||
self.assertEqual(args[1], self.compute.host)
|
||||
self.assertEqual(args[2], mock.sentinel.inst_uuid)
|
||||
|
||||
@ddt.data(('vnc', 'spice', 'rdp', 'serial_console', 'mks'),
          ('spice', 'vnc', 'rdp', 'serial_console', 'mks'),
          ('rdp', 'vnc', 'spice', 'serial_console', 'mks'),
          ('serial_console', 'vnc', 'spice', 'rdp', 'mks'),
          ('mks', 'vnc', 'spice', 'rdp', 'serial_console'))
@ddt.unpack
@mock.patch('nova.objects.ConsoleAuthToken.'
            'clean_console_auths_for_instance')
def test_clean_instance_console_tokens(self, g1, g2, g3, g4, g5,
                                       mock_clean):
    # Tokens are only cleaned when cells v1 is disabled.
    self.flags(enable=False, group='cells')
    # Enable exactly one console type (g1); every other type stays off.
    for group in (g1, g2, g3, g4, g5):
        self.flags(enabled=(group == g1), group=group)
    instance = objects.Instance(uuid=uuids.instance)
    self.compute._clean_instance_console_tokens(self.context, instance)
    # With a console enabled and cells v1 off, cleanup must happen.
    mock_clean.assert_called_once_with(self.context, instance.uuid)
|
||||
|
||||
@mock.patch('nova.objects.ConsoleAuthToken.'
            'clean_console_auths_for_instance')
def test_clean_instance_console_tokens_no_consoles_enabled(self,
                                                           mock_clean):
    # Disable every console type: with no consoles enabled, no tokens
    # were ever stored, so cleanup should be skipped entirely.
    for console_group in ('vnc', 'spice', 'rdp', 'serial_console', 'mks'):
        self.flags(enabled=False, group=console_group)
    instance = objects.Instance(uuid=uuids.instance)
    self.compute._clean_instance_console_tokens(self.context, instance)
    mock_clean.assert_not_called()
|
||||
|
||||
@mock.patch('nova.objects.ConsoleAuthToken.'
            'clean_console_auths_for_instance')
def test_clean_instance_console_tokens_cells_v1_enabled(self, mock_clean):
    # Even with a console type enabled, cells v1 does not support the
    # database token backend, so no cleanup call should be made.
    self.flags(enabled=True, group='vnc')
    self.flags(enable=True, group='cells')
    instance = objects.Instance(uuid=uuids.instance)
    self.compute._clean_instance_console_tokens(self.context, instance)
    mock_clean.assert_not_called()
|
||||
|
||||
@mock.patch('nova.objects.ConsoleAuthToken.'
            'clean_expired_console_auths_for_host')
def test_cleanup_expired_console_auth_tokens(self, mock_clean):
    # With cells v1 disabled, the periodic task purges expired tokens
    # for this compute host.
    self.flags(enable=False, group='cells')
    self.compute._cleanup_expired_console_auth_tokens(self.context)
    mock_clean.assert_called_once_with(self.context, self.compute.host)

    # With cells v1 enabled the database backend is unsupported, so the
    # task must not touch the database.
    mock_clean.reset_mock()
    self.flags(enable=True, group='cells')
    self.compute._cleanup_expired_console_auth_tokens(self.context)
    mock_clean.assert_not_called()
|
||||
|
||||
@mock.patch.object(nova.context.RequestContext, 'elevated')
|
||||
@mock.patch.object(nova.objects.InstanceList, 'get_by_host')
|
||||
@mock.patch.object(nova.scheduler.client.SchedulerClient,
|
||||
|
@ -7046,7 +7105,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
|
|||
|
||||
_do_test()
|
||||
|
||||
def _call_post_live_migration(self, *args, **kwargs):
|
||||
@mock.patch('nova.objects.ConsoleAuthToken.'
|
||||
'clean_console_auths_for_instance')
|
||||
def _call_post_live_migration(self, mock_clean, *args, **kwargs):
|
||||
@mock.patch.object(self.compute, 'update_available_resource')
|
||||
@mock.patch.object(self.compute, 'compute_rpcapi')
|
||||
@mock.patch.object(self.compute, '_notify_about_instance_usage')
|
||||
|
@ -7057,7 +7118,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
|
|||
self.instance,
|
||||
'foo',
|
||||
*args, **kwargs)
|
||||
return _do_call()
|
||||
result = _do_call()
|
||||
mock_clean.assert_called_once_with(self.context, self.instance.uuid)
|
||||
return result
|
||||
|
||||
def test_post_live_migration_new_allocations(self):
|
||||
# We have a migrate_data with a migration...
|
||||
|
@ -7168,6 +7231,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
|
|||
migrate_data.old_vol_attachment_ids = {volume_id: orig_attachment_id}
|
||||
image_bdm.attachment_id = uuids.attachment3
|
||||
|
||||
@mock.patch('nova.objects.ConsoleAuthToken.'
|
||||
'clean_console_auths_for_instance')
|
||||
@mock.patch.object(migrate_data.migration, 'save',
|
||||
new=lambda: None)
|
||||
@mock.patch.object(compute.reportclient,
|
||||
|
@ -7185,7 +7250,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
|
|||
'get_by_instance_uuid')
|
||||
def _test(mock_get_bdms, mock_net_api, mock_notify, mock_driver,
|
||||
mock_rpc, mock_get_bdm_info, mock_attach_delete,
|
||||
mock_update_resource, mock_bdm_save, mock_rt, mock_ga):
|
||||
mock_update_resource, mock_bdm_save, mock_rt, mock_ga,
|
||||
mock_clean):
|
||||
mock_rt.return_value = mock.Mock()
|
||||
mock_get_bdms.return_value = [vol_bdm, image_bdm]
|
||||
|
||||
|
@ -7194,6 +7260,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
|
|||
|
||||
mock_attach_delete.assert_called_once_with(
|
||||
self.context, orig_attachment_id)
|
||||
mock_clean.assert_called_once_with(self.context, instance.uuid)
|
||||
|
||||
_test()
|
||||
|
||||
|
|
Loading…
Reference in New Issue