Merge "Add periodic task to clean expired console tokens"

commit f77751f373
Author: Zuul
Committed by: Gerrit Code Review
Date: 2018-04-24 17:20:20 +00:00
4 changed files with 120 additions and 4 deletions

nova/compute/api.py View File

@@ -1860,6 +1860,11 @@ class API(base.Base):
# NOTE(dtp): cells.enable = False means "use cells v2".
# Run everywhere except v1 compute cells.
if not CONF.cells.enable or self.cell_type == 'api':
# TODO(melwitt): In Rocky, we store console authorizations
# in both the consoleauth service and the database while
# we convert to using the database. Remove the consoleauth
# line below when authorizations are no longer being
# stored in consoleauth, in Stein.
self.consoleauth_rpcapi.delete_tokens_for_instance(
context, instance.uuid)
@@ -4179,6 +4184,11 @@ class API(base.Base):
self._record_action_start(context, instance,
instance_actions.LIVE_MIGRATION)
# TODO(melwitt): In Rocky, we store console authorizations
# in both the consoleauth service and the database while
# we convert to using the database. Remove the consoleauth
# line below when authorizations are no longer being
# stored in consoleauth, in Stein.
self.consoleauth_rpcapi.delete_tokens_for_instance(
context, instance.uuid)

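The TODOs above describe a transition period: during Rocky, console authorizations live in both the legacy consoleauth service and the database, so each backend needs its own cleanup call. Below is an illustrative sketch only, not nova's actual call graph; apart from delete_tokens_for_instance and clean_console_auths_for_instance, which appear in this change, the names are hypothetical. In nova, the RPC call stays in nova/compute/api.py (above), while the database cleanup happens in the compute manager (next file).

# Hypothetical sketch of the dual cleanup during the Rocky transition.
class ConsoleTokenCleanup(object):
    def __init__(self, consoleauth_rpcapi, console_auth_token_cls):
        self.consoleauth_rpcapi = consoleauth_rpcapi          # legacy service
        self.console_auth_token_cls = console_auth_token_cls  # DB-backed object

    def delete_for_instance(self, context, instance_uuid):
        # Legacy consoleauth backend; per the TODO, this goes away in Stein.
        self.consoleauth_rpcapi.delete_tokens_for_instance(
            context, instance_uuid)
        # Database backend introduced in Rocky.
        self.console_auth_token_cls.clean_console_auths_for_instance(
            context, instance_uuid)
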
nova/compute/manager.py View File

@@ -81,6 +81,7 @@ from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as obj_base
from nova.objects import console_auth_token as obj_console_auth_token
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.objects import migrate_data as migrate_data_obj
@@ -747,6 +748,7 @@ class ComputeManager(manager.Manager):
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.DELETE,
phase=fields.NotificationPhase.END, bdms=bdms)
self._clean_instance_console_tokens(context, instance)
self._delete_scheduler_instance_info(context, instance.uuid)
def _init_instance(self, context, instance):
@@ -3117,6 +3119,9 @@
instance.progress = 0
instance.save()
self.stop_instance(context, instance, False)
# TODO(melwitt): We should clean up instance console tokens here in the
# case of evacuate. The instance is on a new host and will need to
# establish a new console connection.
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "rebuild.end",
@@ -4488,6 +4493,9 @@
network_info = self._finish_resize(context, instance, migration,
disk_info, image_meta, bdms)
# TODO(melwitt): We should clean up instance console tokens here. The
# instance is on a new host and will need to establish a new console
# connection.
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
@@ -4854,6 +4862,9 @@
self._nil_out_instance_obj_host_and_node(instance)
instance.save(expected_task_state=None)
# TODO(melwitt): We should clean up instance console tokens here. The
# instance has no host at this point and will need to establish a new
# console connection in the future after it is unshelved.
self._delete_scheduler_instance_info(context, instance.uuid)
self._notify_about_instance_usage(context, instance,
'shelve_offload.end')
@@ -6302,6 +6313,7 @@
LOG.info('Migrating instance to %s finished successfully.',
dest, instance=instance)
self._clean_instance_console_tokens(ctxt, instance)
if migrate_data and migrate_data.obj_attr_is_set('migration'):
migrate_data.migration.status = 'completed'
migrate_data.migration.save()
@@ -6341,6 +6353,14 @@
CONF.rdp.enabled or CONF.serial_console.enabled or
CONF.mks.enabled)
def _clean_instance_console_tokens(self, ctxt, instance):
"""Clean console tokens stored for an instance."""
# If the database backend isn't in use, don't bother trying to clean
# tokens. The database backend is not supported for cells v1.
if not CONF.cells.enable and self._consoles_enabled():
obj_console_auth_token.ConsoleAuthToken.\
clean_console_auths_for_instance(ctxt, instance.uuid)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
@@ -7813,3 +7833,18 @@ class ComputeManager(manager.Manager):
error, instance=instance)
image_meta = objects.ImageMeta.from_instance(instance)
self.driver.unquiesce(context, instance, image_meta)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _cleanup_expired_console_auth_tokens(self, context):
"""Remove expired console auth tokens for this host.
Console authorization tokens and their connection data are stored
in the database when a user asks for a console connection to an
instance. After a time they expire. We periodically remove any expired
tokens from the database.
"""
# If the database backend isn't in use, don't bother looking for
# expired tokens. The database backend is not supported for cells v1.
if not CONF.cells.enable:
obj_console_auth_token.ConsoleAuthToken.\
clean_expired_console_auths_for_host(context, self.host)

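For context, here is a minimal standalone sketch of how an oslo.service periodic task like _cleanup_expired_console_auth_tokens is declared and driven. Assumptions: oslo.config and oslo.service are installed; this is example code, not nova's ComputeManager.

from oslo_config import cfg
from oslo_service import periodic_task

CONF = cfg.CONF


class ExampleManager(periodic_task.PeriodicTasks):
    """Toy manager with one spacing-driven periodic task."""

    def __init__(self):
        super(ExampleManager, self).__init__(CONF)

    # spacing is in seconds; nova reuses CONF.instance_delete_interval here.
    @periodic_task.periodic_task(spacing=300)
    def _cleanup_expired_tokens(self, context):
        # In nova this is where
        # ConsoleAuthToken.clean_expired_console_auths_for_host(context, host)
        # is called for the manager's host.
        pass


# A real service (e.g. nova-compute) calls run_periodic_tasks on a timer
# loop; each decorated task then runs once per its spacing interval.
manager = ExampleManager()
manager.run_periodic_tasks(context=None)
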
nova/conf/compute.py View File

@@ -792,6 +792,10 @@ Possible values:
* Any positive integer in seconds: The instance will exist for
the specified number of seconds before being offloaded.
"""),
# NOTE(melwitt): We're also using this option as the interval for cleaning
# up expired console authorizations from the database. It's related to this
# option's existing instance delete cleanup in that it's another task for
# cleaning up resources related to an instance.
cfg.IntOpt('instance_delete_interval',
default=300,
help="""

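As a usage note (hedged: the help text above is truncated here, and the [DEFAULT] group is assumed, matching how nova registers its compute options), an operator who wants this cleanup, along with the other instance-cleanup work tied to this option, to run less frequently could set, for example:

[DEFAULT]
# Seconds between runs of the instance-cleanup periodic tasks, now also
# used as the interval for removing expired console authorizations.
instance_delete_interval = 600
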
nova/tests/unit/compute/test_compute_mgr.py View File

@@ -18,6 +18,7 @@ import time
from cinderclient import exceptions as cinder_exception
from cursive import exception as cursive_exception
import ddt
from eventlet import event as eventlet_event
import mock
import netaddr
@@ -76,6 +77,7 @@ CONF = nova.conf.CONF
fake_host_list = [mock.sentinel.host1]
@ddt.ddt
class ComputeManagerUnitTestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
@@ -295,11 +297,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.assertTrue(mock_log.warning.called)
self.assertFalse(mock_log.error.called)
@mock.patch('nova.objects.ConsoleAuthToken.'
'clean_console_auths_for_instance')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch('nova.compute.manager.ComputeManager.'
'_detach_volume')
def test_delete_instance_without_info_cache(self, mock_detach,
mock_notify):
mock_notify, mock_clean):
instance = fake_instance.fake_instance_obj(
self.context,
uuid=uuids.instance,
@@ -325,6 +329,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
action='delete', phase='start', bdms=[]),
mock.call(self.context, instance, 'fake-mini',
action='delete', phase='end', bdms=[])])
mock_clean.assert_called_once_with(self.context, instance.uuid)
def test_check_device_tagging_no_tagging(self):
bdms = objects.BlockDeviceMappingList(objects=[
@@ -4300,6 +4305,60 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.assertEqual(args[1], self.compute.host)
self.assertEqual(args[2], mock.sentinel.inst_uuid)
@ddt.data(('vnc', 'spice', 'rdp', 'serial_console', 'mks'),
('spice', 'vnc', 'rdp', 'serial_console', 'mks'),
('rdp', 'vnc', 'spice', 'serial_console', 'mks'),
('serial_console', 'vnc', 'spice', 'rdp', 'mks'),
('mks', 'vnc', 'spice', 'rdp', 'serial_console'))
@ddt.unpack
@mock.patch('nova.objects.ConsoleAuthToken.'
'clean_console_auths_for_instance')
def test_clean_instance_console_tokens(self, g1, g2, g3, g4, g5,
mock_clean):
# Make sure cells v1 is disabled
self.flags(enable=False, group='cells')
# Enable one of each of the console types and disable the rest
self.flags(enabled=True, group=g1)
for g in [g2, g3, g4, g5]:
self.flags(enabled=False, group=g)
instance = objects.Instance(uuid=uuids.instance)
self.compute._clean_instance_console_tokens(self.context, instance)
mock_clean.assert_called_once_with(self.context, instance.uuid)
@mock.patch('nova.objects.ConsoleAuthToken.'
'clean_console_auths_for_instance')
def test_clean_instance_console_tokens_no_consoles_enabled(self,
mock_clean):
for g in ['vnc', 'spice', 'rdp', 'serial_console', 'mks']:
self.flags(enabled=False, group=g)
instance = objects.Instance(uuid=uuids.instance)
self.compute._clean_instance_console_tokens(self.context, instance)
mock_clean.assert_not_called()
@mock.patch('nova.objects.ConsoleAuthToken.'
'clean_console_auths_for_instance')
def test_clean_instance_console_tokens_cells_v1_enabled(self, mock_clean):
# Enable cells v1
self.flags(enable=True, group='cells')
self.flags(enabled=True, group='vnc')
instance = objects.Instance(uuid=uuids.instance)
self.compute._clean_instance_console_tokens(self.context, instance)
mock_clean.assert_not_called()
@mock.patch('nova.objects.ConsoleAuthToken.'
'clean_expired_console_auths_for_host')
def test_cleanup_expired_console_auth_tokens(self, mock_clean):
# Make sure cells v1 is disabled
self.flags(enable=False, group='cells')
self.compute._cleanup_expired_console_auth_tokens(self.context)
mock_clean.assert_called_once_with(self.context, self.compute.host)
# Enable cells v1
mock_clean.reset_mock()
self.flags(enable=True, group='cells')
self.compute._cleanup_expired_console_auth_tokens(self.context)
mock_clean.assert_not_called()
@mock.patch.object(nova.context.RequestContext, 'elevated')
@mock.patch.object(nova.objects.InstanceList, 'get_by_host')
@mock.patch.object(nova.scheduler.client.SchedulerClient,
@@ -7085,7 +7144,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
_do_test()
def _call_post_live_migration(self, *args, **kwargs):
@mock.patch('nova.objects.ConsoleAuthToken.'
'clean_console_auths_for_instance')
def _call_post_live_migration(self, mock_clean, *args, **kwargs):
@mock.patch.object(self.compute, 'update_available_resource')
@mock.patch.object(self.compute, 'compute_rpcapi')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@@ -7096,7 +7157,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
self.instance,
'foo',
*args, **kwargs)
return _do_call()
result = _do_call()
mock_clean.assert_called_once_with(self.context, self.instance.uuid)
return result
def test_post_live_migration_new_allocations(self):
# We have a migrate_data with a migration...
@@ -7207,6 +7270,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
migrate_data.old_vol_attachment_ids = {volume_id: orig_attachment_id}
image_bdm.attachment_id = uuids.attachment3
@mock.patch('nova.objects.ConsoleAuthToken.'
'clean_console_auths_for_instance')
@mock.patch.object(migrate_data.migration, 'save',
new=lambda: None)
@mock.patch.object(compute.reportclient,
@@ -7224,7 +7289,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
'get_by_instance_uuid')
def _test(mock_get_bdms, mock_net_api, mock_notify, mock_driver,
mock_rpc, mock_get_bdm_info, mock_attach_delete,
mock_update_resource, mock_bdm_save, mock_rt, mock_ga):
mock_update_resource, mock_bdm_save, mock_rt, mock_ga,
mock_clean):
mock_rt.return_value = mock.Mock()
mock_get_bdms.return_value = [vol_bdm, image_bdm]
@@ -7233,6 +7299,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
mock_attach_delete.assert_called_once_with(
self.context, orig_attachment_id)
mock_clean.assert_called_once_with(self.context, instance.uuid)
_test()