Move get_stashed_volume_connector to compute.utils

The utility method will be needed in the compute manager in
an upcoming change, so this moves it from a private method
in compute.api to compute.utils, makes it public, and adjusts
the tests accordingly.

As a result, may_have_ports_or_volumes also has to move.

Change-Id: Iedd6be5ef473dcb4f2c465709f3e070ff529f456
Author: Matt Riedemann  2018-03-29 14:00:06 -04:00
parent 00b19c73cf
commit 00cfb0b454
4 changed files with 90 additions and 67 deletions
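In short, existing call sites switch from the private API method to the public
utility function. A minimal before/after sketch (compute_utils is the existing
"from nova.compute import utils as compute_utils" alias already used in
nova/compute/api.py); the full changes are in the diffs below:

    # Before: private helper on the compute API object
    connector = self._get_stashed_volume_connector(bdm, instance)

    # After: public helper in nova.compute.utils
    connector = compute_utils.get_stashed_volume_connector(bdm, instance)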

nova/compute/api.py

@@ -29,7 +29,6 @@ from castellan import key_manager
from oslo_log import log as logging
from oslo_messaging import exceptions as oslo_exceptions
from oslo_serialization import base64 as base64utils
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
@@ -1798,7 +1797,8 @@ class API(base.Base):
        # in error state), the instance has been scheduled and sent to a
        # cell/compute which means it was pulled from the cell db.
        # Normal delete should be attempted.
        may_have_ports_or_volumes = self._may_have_ports_or_volumes(instance)
        may_have_ports_or_volumes = compute_utils.may_have_ports_or_volumes(
            instance)
        if not instance.host and not may_have_ports_or_volumes:
            try:
                if self._delete_while_booting(context, instance):
@@ -1951,16 +1951,6 @@
            # NOTE(comstud): Race condition. Instance already gone.
            pass

    def _may_have_ports_or_volumes(self, instance):
        # NOTE(melwitt): When an instance build fails in the compute manager,
        # the instance host and node are set to None and the vm_state is set
        # to ERROR. In this case, the instance with host = None has actually
        # been scheduled and may have ports and/or volumes allocated on the
        # compute node.
        if instance.vm_state in (vm_states.SHELVED_OFFLOADED, vm_states.ERROR):
            return True
        return False

    def _confirm_resize_on_deleting(self, context, instance):
        # If in the middle of a resize, use confirm_resize to
        # ensure the original instance is cleaned up too
@@ -1991,40 +1981,6 @@
            self.compute_rpcapi.confirm_resize(context,
                    instance, migration, src_host, cast=False)

    def _get_stashed_volume_connector(self, bdm, instance):
        """Lookup a connector dict from the bdm.connection_info if set

        Gets the stashed connector dict out of the bdm.connection_info if set
        and the connector host matches the instance host.

        :param bdm: nova.objects.block_device.BlockDeviceMapping
        :param instance: nova.objects.instance.Instance
        :returns: volume connector dict or None
        """
        if 'connection_info' in bdm and bdm.connection_info is not None:
            # NOTE(mriedem): We didn't start stashing the connector in the
            # bdm.connection_info until Mitaka so it might not be there on old
            # attachments. Also, if the volume was attached when the instance
            # was in shelved_offloaded state and it hasn't been unshelved yet
            # we don't have the attachment/connection information either.
            connector = jsonutils.loads(bdm.connection_info).get('connector')
            if connector:
                if connector.get('host') == instance.host:
                    return connector
                LOG.debug('Found stashed volume connector for instance but '
                          'connector host %(connector_host)s does not match '
                          'the instance host %(instance_host)s.',
                          {'connector_host': connector.get('host'),
                           'instance_host': instance.host}, instance=instance)
                if (instance.host is None and
                        self._may_have_ports_or_volumes(instance)):
                    LOG.debug('Allowing use of stashed volume connector with '
                              'instance host None because instance with '
                              'vm_state %(vm_state)s has been scheduled in '
                              'the past.', {'vm_state': instance.vm_state},
                              instance=instance)
                    return connector

    def _local_cleanup_bdm_volumes(self, bdms, instance, context):
        """The method deletes the bdm records and, if a bdm is a volume, call
        the terminate connection and the detach volume via the Volume API.
@@ -2037,7 +1993,7 @@ class API(base.Base):
                        self.volume_api.attachment_delete(context,
                                                          bdm.attachment_id)
                    else:
                        connector = self._get_stashed_volume_connector(
                        connector = compute_utils.get_stashed_volume_connector(
                            bdm, instance)
                        if connector:
                            self.volume_api.terminate_connection(context,

nova/compute/utils.py

@@ -24,11 +24,13 @@ import traceback
import netifaces
from oslo_log import log
from oslo_serialization import jsonutils
import six

from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova import notifications
@@ -941,6 +943,59 @@ def remove_shelved_keys_from_system_metadata(instance):
            del (instance.system_metadata[key])


def may_have_ports_or_volumes(instance):
    """Checks to see if an instance may have ports or volumes based on vm_state

    This is primarily only useful when instance.host is None.

    :param instance: The nova.objects.Instance in question.
    :returns: True if the instance may have ports or volumes, False otherwise
    """
    # NOTE(melwitt): When an instance build fails in the compute manager,
    # the instance host and node are set to None and the vm_state is set
    # to ERROR. In this case, the instance with host = None has actually
    # been scheduled and may have ports and/or volumes allocated on the
    # compute node.
    if instance.vm_state in (vm_states.SHELVED_OFFLOADED, vm_states.ERROR):
        return True
    return False


def get_stashed_volume_connector(bdm, instance):
    """Lookup a connector dict from the bdm.connection_info if set

    Gets the stashed connector dict out of the bdm.connection_info if set
    and the connector host matches the instance host.

    :param bdm: nova.objects.block_device.BlockDeviceMapping
    :param instance: nova.objects.instance.Instance
    :returns: volume connector dict or None
    """
    if 'connection_info' in bdm and bdm.connection_info is not None:
        # NOTE(mriedem): We didn't start stashing the connector in the
        # bdm.connection_info until Mitaka so it might not be there on old
        # attachments. Also, if the volume was attached when the instance
        # was in shelved_offloaded state and it hasn't been unshelved yet
        # we don't have the attachment/connection information either.
        connector = jsonutils.loads(bdm.connection_info).get('connector')
        if connector:
            if connector.get('host') == instance.host:
                return connector
            LOG.debug('Found stashed volume connector for instance but '
                      'connector host %(connector_host)s does not match '
                      'the instance host %(instance_host)s.',
                      {'connector_host': connector.get('host'),
                       'instance_host': instance.host}, instance=instance)
            if (instance.host is None and
                    may_have_ports_or_volumes(instance)):
                LOG.debug('Allowing use of stashed volume connector with '
                          'instance host None because instance with '
                          'vm_state %(vm_state)s has been scheduled in '
                          'the past.', {'vm_state': instance.vm_state},
                          instance=instance)
                return connector


class EventReporter(object):
    """Context manager to report instance action events."""

nova/tests/unit/compute/test_compute_api.py

@@ -1473,26 +1473,6 @@ class _ComputeAPIUnitTestMixIn(object):
            mock_destroy.assert_called_once_with()

        do_test(self)

    def test_get_stashed_volume_connector_none(self):
        inst = self._create_instance_obj()
        # connection_info isn't set
        bdm = objects.BlockDeviceMapping(self.context)
        self.assertIsNone(
            self.compute_api._get_stashed_volume_connector(bdm, inst))
        # connection_info is None
        bdm.connection_info = None
        self.assertIsNone(
            self.compute_api._get_stashed_volume_connector(bdm, inst))
        # connector is not set in connection_info
        bdm.connection_info = jsonutils.dumps({})
        self.assertIsNone(
            self.compute_api._get_stashed_volume_connector(bdm, inst))
        # connector is set but different host
        conn_info = {'connector': {'host': 'other_host'}}
        bdm.connection_info = jsonutils.dumps(conn_info)
        self.assertIsNone(
            self.compute_api._get_stashed_volume_connector(bdm, inst))

    @mock.patch.object(objects.BlockDeviceMapping, 'destroy')
    def test_local_cleanup_bdm_volumes_stashed_connector_host_none(
            self, mock_destroy):

nova/tests/unit/compute/test_compute_utils.py

@@ -20,6 +20,7 @@ import copy
import string

import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import six
@@ -28,6 +29,7 @@ from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.image import glance
@@ -1069,6 +1071,36 @@ class ComputeUtilsTestCase(test.NoDBTestCase):
        ]
        mock_notify_usage.assert_has_calls(expected_notify_calls)

    def test_get_stashed_volume_connector_none(self):
        inst = fake_instance.fake_instance_obj(self.context)
        # connection_info isn't set
        bdm = objects.BlockDeviceMapping(self.context)
        self.assertIsNone(
            compute_utils.get_stashed_volume_connector(bdm, inst))
        # connection_info is None
        bdm.connection_info = None
        self.assertIsNone(
            compute_utils.get_stashed_volume_connector(bdm, inst))
        # connector is not set in connection_info
        bdm.connection_info = jsonutils.dumps({})
        self.assertIsNone(
            compute_utils.get_stashed_volume_connector(bdm, inst))
        # connector is set but different host
        conn_info = {'connector': {'host': 'other_host'}}
        bdm.connection_info = jsonutils.dumps(conn_info)
        self.assertIsNone(
            compute_utils.get_stashed_volume_connector(bdm, inst))

    def test_may_have_ports_or_volumes(self):
        inst = objects.Instance()
        for vm_state, expected_result in ((vm_states.ERROR, True),
                                          (vm_states.SHELVED_OFFLOADED, True),
                                          (vm_states.BUILDING, False)):
            inst.vm_state = vm_state
            self.assertEqual(
                expected_result, compute_utils.may_have_ports_or_volumes(inst),
                vm_state)


class ServerGroupTestCase(test.TestCase):
    def setUp(self):