Merge "Move allocation manipulation out of drop_move_claim()"
@@ -89,6 +89,7 @@ from nova.pci import whitelist
 from nova import rpc
 from nova import safe_utils
 from nova.scheduler import client as scheduler_client
+from nova.scheduler import utils as scheduler_utils
 from nova import utils
 from nova.virt import block_device as driver_block_device
 from nova.virt import configdrive
@@ -505,6 +506,7 @@ class ComputeManager(manager.Manager):
             openstack_driver.is_neutron_security_groups())
         self.cells_rpcapi = cells_rpcapi.CellsAPI()
         self.scheduler_client = scheduler_client.SchedulerClient()
+        self.reportclient = self.scheduler_client.reportclient
         self._resource_tracker = None
         self.instance_events = InstanceEvents()
         self._sync_power_pool = eventlet.GreenPool(
@@ -2753,8 +2755,8 @@ class ComputeManager(manager.Manager):
         #
         # For scenarios #2 and #3, we must do rebuild claim as server is
         # being evacuated to a different node.
+        rt = self._get_resource_tracker()
         if recreate or scheduled_node is not None:
-            rt = self._get_resource_tracker()
             rebuild_claim = rt.rebuild_claim
         else:
             rebuild_claim = claims.NopClaim
@@ -2813,6 +2815,9 @@ class ComputeManager(manager.Manager):
             self._notify_instance_rebuild_error(context, instance, e)
         except Exception as e:
             self._set_migration_status(migration, 'failed')
+            if recreate or scheduled_node is not None:
+                rt.delete_allocation_for_evacuated_instance(
+                    instance, scheduled_node, node_type='destination')
             self._notify_instance_rebuild_error(context, instance, e)
             raise
         else:
@@ -3567,6 +3572,9 @@ class ComputeManager(manager.Manager):
             rt = self._get_resource_tracker()
             rt.drop_move_claim(context, instance, migration.source_node,
                                old_instance_type, prefix='old_')
+            self._delete_allocation_after_move(instance, migration,
+                                               old_instance_type,
+                                               migration.source_node)
             instance.drop_migration_context()
 
             # NOTE(mriedem): The old_vm_state could be STOPPED but the user
@@ -3593,6 +3601,28 @@ class ComputeManager(manager.Manager):
             context, instance, "resize.confirm.end",
             network_info=network_info)
 
+    def _delete_allocation_after_move(self, instance, migration, flavor,
+                                      nodename):
+        # NOTE(jaypipes): This sucks, but due to the fact that confirm_resize()
+        # only runs on the source host and revert_resize() runs on the
+        # destination host, we need to do this here. Basically, what we're
+        # doing here is grabbing the existing allocations for this instance
+        # from the placement API, dropping the resources in the doubled-up
+        # allocation set that refer to the source host UUID and calling PUT
+        # /allocations back to the placement API. The allocation that gets
+        # PUT'd back to placement will only include the destination host and
+        # any shared providers in the case of a confirm_resize operation and
+        # the source host and shared providers for a revert_resize operation..
+        my_resources = scheduler_utils.resources_from_flavor(instance, flavor)
+        rt = self._get_resource_tracker()
+        cn_uuid = rt.get_node_uuid(nodename)
+        res = self.reportclient.remove_provider_from_instance_allocation(
+            instance.uuid, cn_uuid, instance.user_id,
+            instance.project_id, my_resources)
+        if not res:
+            LOG.error("Failed to save manipulated allocation",
+                      instance=instance)
+
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_event(prefix='compute')
@@ -3650,6 +3680,9 @@ class ComputeManager(manager.Manager):
 
             rt = self._get_resource_tracker()
             rt.drop_move_claim(context, instance, instance.node)
+            self._delete_allocation_after_move(instance, migration,
+                                               instance.flavor,
+                                               instance.node)
 
             # RPC cast back to the source host to finish the revert there.
             self.compute_rpcapi.finish_revert_resize(context, instance,
@@ -472,36 +472,6 @@ class ResourceTracker(object):
             ctxt = context.elevated()
             self._update(ctxt, self.compute_nodes[nodename])
 
-        # NOTE(jaypipes): This sucks, but due to the fact that confirm_resize()
-        # only runs on the source host and revert_resize() runs on the
-        # destination host, we need to do this here. Basically, what we're
-        # doing here is grabbing the existing allocations for this instance
-        # from the placement API, dropping the resources in the doubled-up
-        # allocation set that refer to the source host UUID and calling PUT
-        # /allocations back to the placement API. The allocation that gets
-        # PUT'd back to placement will only include the destination host and
-        # any shared providers in the case of a confirm_resize operation and
-        # the source host and shared providers for a revert_resize operation..
-        my_resources = scheduler_utils.resources_from_flavor(instance,
-            instance_type or instance.flavor)
-        cn_uuid = self.compute_nodes[nodename].uuid
-        operation = 'Confirming'
-        source_or_dest = 'source'
-        if prefix == 'new_':
-            operation = 'Reverting'
-            source_or_dest = 'destination'
-        LOG.debug("%s resize on %s host. Removing resources claimed on "
-                  "provider %s from allocation",
-                  operation, source_or_dest, cn_uuid, instance=instance)
-        res = self.reportclient.remove_provider_from_instance_allocation(
-            instance.uuid, cn_uuid, instance.user_id,
-            instance.project_id, my_resources)
-        if not res:
-            LOG.error("Failed to save manipulated allocation when "
-                      "%s resize on %s host %s.",
-                      operation.lower(), source_or_dest, cn_uuid,
-                      instance=instance)
-
     @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
     def update_usage(self, context, instance, nodename):
         """Update the resource usage and stats after a change in an
@@ -1839,8 +1839,9 @@ class ServerMovingTests(ProviderUsageBaseTestCase):
 
     def test_evacuate_rebuild_on_dest_fails(self):
         """Tests that the allocations on the destination node are cleaned up
-        by the drop_move_claim in the ResourceTracker automatically when
-        the claim is made but the actual rebuild via the driver fails.
+        automatically when the claim is made but the actual rebuild
+        via the driver fails.
+
         """
         source_hostname = self.compute1.host
         dest_hostname = self.compute2.host
@@ -3578,13 +3578,16 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                        '_do_rebuild_instance_with_claim')
     @mock.patch('nova.compute.utils.notify_about_instance_action')
     @mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
-    def _test_rebuild_ex(self, instance, exc, mock_notify_about_instance_usage,
-                         mock_notify, mock_rebuild, mock_set):
+    def _test_rebuild_ex(self, instance, exc,
+                         mock_notify_about_instance_usage,
+                         mock_notify, mock_rebuild, mock_set,
+                         recreate=False, scheduled_node=None):
 
         mock_rebuild.side_effect = exc
 
         self.compute.rebuild_instance(self.context, instance, None, None, None,
-                                      None, None, None, None)
+                                      None, None, None, recreate,
+                                      scheduled_node=scheduled_node)
         mock_set.assert_called_once_with(None, 'failed')
         mock_notify_about_instance_usage.assert_called_once_with(
             mock.ANY, instance, 'rebuild.error', fault=mock_rebuild.side_effect
@@ -3604,6 +3607,34 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         ex = exception.InstanceNotFound(instance_id=instance.uuid)
         self._test_rebuild_ex(instance, ex)
 
+    @mock.patch('nova.compute.utils.add_instance_fault_from_exc')
+    @mock.patch.object(manager.ComputeManager,
+                       '_error_out_instance_on_exception')
+    def test_rebuild_driver_error_same_host(self, mock_error, mock_aiffe):
+        instance = fake_instance.fake_instance_obj(self.context)
+        ex = test.TestingException('foo')
+        with mock.patch.object(self.compute, '_get_resource_tracker') as mrt:
+            self.assertRaises(test.TestingException,
+                              self._test_rebuild_ex, instance, ex)
+            rt = mrt.return_value
+            self.assertFalse(
+                rt.delete_allocation_for_evacuated_instance.called)
+
+    @mock.patch('nova.compute.utils.add_instance_fault_from_exc')
+    @mock.patch.object(manager.ComputeManager,
+                       '_error_out_instance_on_exception')
+    def test_rebuild_driver_error_evacuate(self, mock_error, mock_aiffe):
+        instance = fake_instance.fake_instance_obj(self.context)
+        ex = test.TestingException('foo')
+        with mock.patch.object(self.compute, '_get_resource_tracker') as mrt:
+            self.assertRaises(test.TestingException,
+                              self._test_rebuild_ex, instance, ex,
+                              recreate=True, scheduled_node='foo')
+            rt = mrt.return_value
+            delete_alloc = rt.delete_allocation_for_evacuated_instance
+            delete_alloc.assert_called_once_with(instance, 'foo',
+                                                 node_type='destination')
+
     def test_rebuild_node_not_updated_if_not_recreate(self):
         node = uuidutils.generate_uuid()  # ironic node uuid
         instance = fake_instance.fake_instance_obj(self.context, node=node)
@@ -3612,7 +3643,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             mock.patch.object(self.compute, '_get_compute_info'),
             mock.patch.object(self.compute, '_do_rebuild_instance_with_claim'),
             mock.patch.object(objects.Instance, 'save'),
-            mock.patch.object(self.compute, '_set_migration_status')
+            mock.patch.object(self.compute, '_set_migration_status'),
         ) as (mock_get, mock_rebuild, mock_save, mock_set):
             self.compute.rebuild_instance(self.context, instance, None, None,
                                           None, None, None, None, False)
@@ -3630,7 +3661,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             mock.patch.object(self.compute, '_get_compute_info'),
             mock.patch.object(self.compute, '_do_rebuild_instance_with_claim'),
             mock.patch.object(objects.Instance, 'save'),
-            mock.patch.object(self.compute, '_set_migration_status')
+            mock.patch.object(self.compute, '_set_migration_status'),
         ) as (mock_rt, mock_get, mock_rebuild, mock_save, mock_set):
             mock_get.return_value.hypervisor_hostname = 'new-node'
             self.compute.rebuild_instance(self.context, instance, None, None,
@@ -5688,6 +5719,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
 
         @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                     'drop_move_claim')
+        @mock.patch.object(self.compute, '_delete_allocation_after_move')
         @mock.patch('nova.compute.rpcapi.ComputeAPI.finish_revert_resize')
         @mock.patch.object(self.instance, 'revert_migration_context')
         @mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
@@ -5716,6 +5748,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                              mock_get_instance_nw_info,
                              mock_revert_migration_context,
                              mock_finish_revert,
+                             mock_delete_allocation,
                              mock_drop_move_claim):
 
             self.instance.migration_context = objects.MigrationContext()
@@ -5728,6 +5761,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                 reservations=None)
             mock_drop_move_claim.assert_called_once_with(self.context,
                 self.instance, self.instance.node)
+            mock_delete_allocation.assert_called_once_with(
+                self.instance, self.migration, self.instance.flavor,
+                self.instance.node)
             self.assertIsNotNone(self.instance.migration_context)
 
         @mock.patch('nova.objects.Service.get_minimum_version',
@@ -5765,6 +5801,50 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
         do_revert_resize()
         do_finish_revert_resize()
 
+    def test_confirm_resize_deletes_allocations(self):
+        @mock.patch.object(self.migration, 'save')
+        @mock.patch.object(self.compute, '_notify_about_instance_usage')
+        @mock.patch.object(self.compute, 'network_api')
+        @mock.patch.object(self.compute.driver, 'confirm_migration')
+        @mock.patch.object(self.compute, '_get_resource_tracker')
+        @mock.patch.object(self.compute, '_delete_allocation_after_move')
+        @mock.patch.object(self.instance, 'drop_migration_context')
+        @mock.patch.object(self.instance, 'save')
+        def do_confirm_resize(mock_save, mock_drop, mock_delete, mock_get_rt,
+                              mock_confirm, mock_nwapi, mock_notify,
+                              mock_mig_save):
+            self.instance.migration_context = objects.MigrationContext()
+            self.migration.source_compute = self.instance['host']
+            self.migration.source_node = self.instance['node']
+            self.compute._confirm_resize(self.context, self.instance,
+                                         self.migration)
+            mock_delete.assert_called_once_with(self.instance, self.migration,
+                                                self.instance.old_flavor,
+                                                self.migration.source_node)
+
+        do_confirm_resize()
+
+    @mock.patch('nova.scheduler.utils.resources_from_flavor')
+    def test_delete_allocation_after_move(self, mock_resources):
+        @mock.patch.object(self.compute, '_get_resource_tracker')
+        @mock.patch.object(self.compute, 'reportclient')
+        def do_it(mock_rc, mock_grt):
+            instance = mock.MagicMock()
+            self.compute._delete_allocation_after_move(instance,
+                                                       mock.sentinel.migration,
+                                                       mock.sentinel.flavor,
+                                                       mock.sentinel.node)
+            mock_resources.assert_called_once_with(instance,
+                                                   mock.sentinel.flavor)
+            rt = mock_grt.return_value
+            rt.get_node_uuid.assert_called_once_with(mock.sentinel.node)
+            remove = mock_rc.remove_provider_from_instance_allocation
+            remove.assert_called_once_with(
+                instance.uuid, rt.get_node_uuid.return_value,
+                instance.user_id, instance.project_id,
+                mock_resources.return_value)
+        do_it()
+
     def test_consoles_enabled(self):
         self.flags(enabled=False, group='vnc')
         self.flags(enabled=False, group='spice')
@@ -31,7 +31,6 @@ from nova.objects import base as obj_base
 from nova.objects import fields as obj_fields
 from nova.objects import pci_device
 from nova.pci import manager as pci_manager
-from nova.scheduler import utils as sched_utils
 from nova import test
 from nova.tests.unit import fake_notifier
 from nova.tests.unit.objects import test_pci_device as fake_pci_device
@@ -1915,15 +1914,6 @@ class TestResize(BaseTestCase):
             ignore=['stats']
         ))
 
-        cn = self.rt.compute_nodes[_NODENAME]
-        cn_uuid = cn.uuid
-        rc = self.sched_client_mock.reportclient
-        remove_method = rc.remove_provider_from_instance_allocation
-        expected_resources = sched_utils.resources_from_flavor(instance,
-            flavor)
-        remove_method.assert_called_once_with(instance.uuid, cn_uuid,
-            instance.user_id, instance.project_id, expected_resources)
-
     def test_instance_build_resize_revert(self):
         self._test_instance_build_resize(revert=True)
 
@@ -2036,8 +2026,7 @@ class TestResize(BaseTestCase):
         self.assertEqual(request, pci_req_mock.return_value.requests[0])
         alloc_mock.assert_called_once_with(instance)
 
-    @mock.patch('nova.scheduler.utils.resources_from_flavor')
-    def test_drop_move_claim_on_revert(self, mock_resources):
+    def test_drop_move_claim_on_revert(self):
         self._setup_rt()
         cn = _COMPUTE_NODE_FIXTURES[0].obj_clone()
         self.rt.compute_nodes[_NODENAME] = cn
@@ -2071,9 +2060,6 @@ class TestResize(BaseTestCase):
         self.rt.drop_move_claim(ctx, instance, _NODENAME)
         mock_pci_free_device.assert_called_once_with(
             pci_dev, mock.ANY)
-        # Check that we grabbed resourced for the right flavor...
-        mock_resources.assert_called_once_with(instance,
-                                               instance.flavor)
 
     @mock.patch('nova.objects.Service.get_minimum_version',
                 return_value=22)