Merge "[placement] Add sending global request ID in put (3)" into stable/queens

Authored by Zuul on 2018-03-06 15:58:17 +00:00; committed by Gerrit Code Review
commit b2ae8fa2f5
9 changed files with 150 additions and 94 deletions
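
This merge is the third part of a series that threads the request context from the compute manager, resource tracker, conductor task and scheduler utilities down into SchedulerReportClient, so that every PUT sent to the placement API carries the caller's global request ID. As the unit and functional tests below assert, that ID is forwarded as the X-Openstack-Request-Id header. The snippet that follows is a minimal sketch of the header-building pattern only; build_put_headers is a hypothetical helper written for illustration, not Nova's actual put() implementation, and only the X-Openstack-Request-Id header is confirmed by the tests in this diff.

    def build_put_headers(version=None, global_request_id=None):
        """Sketch of the headers a placement PUT would carry after this change."""
        headers = {}
        if version is not None:
            # Placement microversions are negotiated via this header.
            headers['OpenStack-API-Version'] = 'placement %s' % version
        if global_request_id is not None:
            # Forward the caller's global request ID so the operation can be
            # traced across nova and placement logs.
            headers['X-Openstack-Request-Id'] = global_request_id
        return headers

    # e.g. what set_traits_for_provider()'s PUT would send after this change:
    # build_put_headers(version='1.6', global_request_id='req-abc123')
    # -> {'OpenStack-API-Version': 'placement 1.6',
    #     'X-Openstack-Request-Id': 'req-abc123'}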

View File

@@ -697,7 +697,7 @@ class ComputeManager(manager.Manager):
cn_uuid = compute_nodes[migration.source_node]
if not scheduler_utils.remove_allocation_from_compute(
instance, cn_uuid, self.reportclient):
context, instance, cn_uuid, self.reportclient):
LOG.error("Failed to clean allocation of evacuated instance "
"on the source node %s",
cn_uuid, instance=instance)
@@ -2902,7 +2902,7 @@ class ComputeManager(manager.Manager):
# on the same host (not evacuate) uses the NopClaim which will
# not raise ComputeResourcesUnavailable.
rt.delete_allocation_for_evacuated_instance(
instance, scheduled_node, node_type='destination')
context, instance, scheduled_node, node_type='destination')
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=e.format_message())
@@ -2916,7 +2916,8 @@ class ComputeManager(manager.Manager):
self._set_migration_status(migration, 'failed')
if recreate or scheduled_node is not None:
rt.delete_allocation_for_evacuated_instance(
instance, scheduled_node, node_type='destination')
context, instance, scheduled_node,
node_type='destination')
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise
else:
@@ -3832,7 +3833,7 @@ class ComputeManager(manager.Manager):
# any shared providers in the case of a confirm_resize operation and
# the source host and shared providers for a revert_resize operation.
if not scheduler_utils.remove_allocation_from_compute(
instance, cn_uuid, self.reportclient, flavor):
context, instance, cn_uuid, self.reportclient, flavor):
LOG.error("Failed to save manipulated allocation",
instance=instance)
@@ -6320,7 +6321,7 @@ class ComputeManager(manager.Manager):
# attempt to clean up any doubled per-instance allocation
rt = self._get_resource_tracker()
rt.delete_allocation_for_migrated_instance(
instance, source_node)
ctxt, instance, source_node)
def _consoles_enabled(self):
"""Returns whether a console is enable."""

View File

@@ -894,7 +894,7 @@ class ResourceTracker(object):
# that the resource provider exists in the tree and has had its
# cached traits refreshed.
self.reportclient.set_traits_for_provider(
compute_node.uuid, traits)
context, compute_node.uuid, traits)
if self.pci_tracker:
self.pci_tracker.save(context)
@@ -1316,27 +1316,30 @@ class ResourceTracker(object):
"host that might need to be removed: %s.",
instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, instance, node,
def delete_allocation_for_evacuated_instance(self, context, instance, node,
node_type='source'):
self._delete_allocation_for_moved_instance(
instance, node, 'evacuated', node_type)
context, instance, node, 'evacuated', node_type)
def delete_allocation_for_migrated_instance(self, instance, node):
self._delete_allocation_for_moved_instance(instance, node, 'migrated')
def delete_allocation_for_migrated_instance(self, context, instance, node):
self._delete_allocation_for_moved_instance(context, instance, node,
'migrated')
def _delete_allocation_for_moved_instance(
self, instance, node, move_type, node_type='source'):
self, context, instance, node, move_type, node_type='source'):
# Clean up the instance allocation from this node in placement
cn_uuid = self.compute_nodes[node].uuid
if not scheduler_utils.remove_allocation_from_compute(
instance, cn_uuid, self.reportclient):
context, instance, cn_uuid, self.reportclient):
LOG.error("Failed to clean allocation of %s "
"instance on the %s node %s",
move_type, node_type, cn_uuid, instance=instance)
def delete_allocation_for_failed_resize(self, instance, node, flavor):
def delete_allocation_for_failed_resize(self, context, instance, node,
flavor):
"""Delete instance allocations for the node during a failed resize
:param context: The request context.
:param instance: The instance being resized/migrated.
:param node: The node provider on which the instance should have
allocations to remove. If this is a resize to the same host, then
@@ -1345,7 +1348,7 @@ class ResourceTracker(object):
"""
cn = self.compute_nodes[node]
if not scheduler_utils.remove_allocation_from_compute(
instance, cn.uuid, self.reportclient, flavor):
context, instance, cn.uuid, self.reportclient, flavor):
if instance.instance_type_id == flavor.id:
operation = 'migration'
else:
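
The net effect on the ResourceTracker API is that each allocation-cleanup helper now takes the request context as its first argument. A purely illustrative call-shape example using unittest.mock stand-ins (rt, ctxt and the node/flavor values are placeholders, not Nova objects):

    from unittest import mock

    rt = mock.Mock()        # stand-in for a ResourceTracker instance
    ctxt = mock.Mock()      # stand-in for the RequestContext

    # New signatures introduced above: the context always leads.
    rt.delete_allocation_for_evacuated_instance(
        ctxt, mock.sentinel.instance, 'node1', node_type='destination')
    rt.delete_allocation_for_migrated_instance(
        ctxt, mock.sentinel.instance, 'node1')
    rt.delete_allocation_for_failed_resize(
        ctxt, mock.sentinel.instance, 'node1', mock.sentinel.flavor)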

View File

@@ -376,8 +376,8 @@ class LiveMigrationTask(base.TaskBase):
# allocated for the given (destination) node.
self.scheduler_client.reportclient.\
remove_provider_from_instance_allocation(
self.instance.uuid, compute_node.uuid, self.instance.user_id,
self.instance.project_id, resources)
self.context, self.instance.uuid, compute_node.uuid,
self.instance.user_id, self.instance.project_id, resources)
def _check_not_over_max_retries(self, attempted_hosts):
if CONF.migrate_max_retries == -1:

View File

@@ -1090,9 +1090,10 @@ class SchedulerReportClient(object):
self._delete_inventory(context, rp_uuid)
@safe_connect
def _ensure_traits(self, traits):
def _ensure_traits(self, context, traits):
"""Make sure all specified traits exist in the placement service.
:param context: The security context
:param traits: Iterable of trait strings to ensure exist.
:raises: TraitCreationFailed if traits contains a trait that did not
exist in placement, and couldn't be created. When this
@@ -1118,7 +1119,8 @@ class SchedulerReportClient(object):
# Might be neat to have a batch create. But creating multiple
# traits will generally happen once, at initial startup, if at all.
for trait in traits_to_create:
resp = self.put('/traits/' + trait, None, version='1.6')
resp = self.put('/traits/' + trait, None, version='1.6',
global_request_id=context.global_id)
if not resp:
raise exception.TraitCreationFailed(name=trait,
error=resp.text)
@@ -1136,11 +1138,12 @@ class SchedulerReportClient(object):
raise exception.TraitRetrievalFailed(error=resp.text)
@safe_connect
def set_traits_for_provider(self, rp_uuid, traits):
def set_traits_for_provider(self, context, rp_uuid, traits):
"""Replace a provider's traits with those specified.
The provider must exist - this method does not attempt to create it.
:param context: The security context
:param rp_uuid: The UUID of the provider whose traits are to be updated
:param traits: Iterable of traits to set on the provider
:raises: ResourceProviderUpdateConflict if the provider's generation
@@ -1158,7 +1161,7 @@ class SchedulerReportClient(object):
if not self._provider_tree.have_traits_changed(rp_uuid, traits):
return
self._ensure_traits(traits)
self._ensure_traits(context, traits)
url = '/resource_providers/%s/traits' % rp_uuid
# NOTE(efried): Don't use the DELETE API when traits is empty, because
@@ -1170,7 +1173,8 @@ class SchedulerReportClient(object):
'resource_provider_generation': generation,
'traits': traits,
}
resp = self.put(url, payload, version='1.6')
resp = self.put(url, payload, version='1.6',
global_request_id=context.global_id)
if resp.status_code == 200:
json = resp.json()
@@ -1201,11 +1205,12 @@ class SchedulerReportClient(object):
raise exception.ResourceProviderUpdateFailed(url=url, error=resp.text)
@safe_connect
def set_aggregates_for_provider(self, rp_uuid, aggregates):
def set_aggregates_for_provider(self, context, rp_uuid, aggregates):
"""Replace a provider's aggregates with those specified.
The provider must exist - this method does not attempt to create it.
:param context: The security context
:param rp_uuid: The UUID of the provider whose aggregates are to be
updated.
:param aggregates: Iterable of aggregates to set on the provider.
@@ -1214,7 +1219,8 @@ class SchedulerReportClient(object):
# TODO(efried): Handle generation conflicts when supported by placement
url = '/resource_providers/%s/aggregates' % rp_uuid
aggregates = list(aggregates) if aggregates else []
resp = self.put(url, aggregates, version='1.1')
resp = self.put(url, aggregates, version='1.1',
global_request_id=context.global_id)
if resp.status_code == 200:
placement_aggs = resp.json()['aggregates']
@@ -1376,7 +1382,7 @@ class SchedulerReportClient(object):
return allocations.get(
rp_uuid, {}).get('resources', {})
def _allocate_for_instance(self, rp_uuid, instance):
def _allocate_for_instance(self, context, rp_uuid, instance):
my_allocations = _instance_to_allocations_dict(instance)
current_allocations = self.get_allocations_for_consumer_by_provider(
rp_uuid, instance.uuid)
@@ -1390,8 +1396,9 @@ class SchedulerReportClient(object):
LOG.debug('Sending allocation for instance %s',
my_allocations,
instance=instance)
res = self.put_allocations(rp_uuid, instance.uuid, my_allocations,
instance.project_id, instance.user_id)
res = self.put_allocations(context, rp_uuid, instance.uuid,
my_allocations, instance.project_id,
instance.user_id)
if res:
LOG.info('Submitted allocation for instance', instance=instance)
@@ -1491,8 +1498,8 @@ class SchedulerReportClient(object):
return r.status_code == 204
@safe_connect
def remove_provider_from_instance_allocation(self, consumer_uuid, rp_uuid,
user_id, project_id,
def remove_provider_from_instance_allocation(self, context, consumer_uuid,
rp_uuid, user_id, project_id,
resources):
"""Grabs an allocation for a particular consumer UUID, strips parts of
the allocation that refer to a supplied resource provider UUID, and
@@ -1508,6 +1515,7 @@ class SchedulerReportClient(object):
subtract resources from the single allocation to ensure we do not
exceed the reserved or max_unit amounts for the resource on the host.
:param context: The security context
:param consumer_uuid: The instance/consumer UUID
:param rp_uuid: The UUID of the provider whose resources we wish to
remove from the consumer's allocation
@@ -1580,7 +1588,8 @@ class SchedulerReportClient(object):
LOG.debug("Sending updated allocation %s for instance %s after "
"removing resources for %s.",
new_allocs, consumer_uuid, rp_uuid)
r = self.put(url, payload, version='1.10')
r = self.put(url, payload, version='1.10',
global_request_id=context.global_id)
if r.status_code != 204:
LOG.warning("Failed to save allocation for %s. Got HTTP %s: %s",
consumer_uuid, r.status_code, r.text)
@@ -1656,8 +1665,8 @@ class SchedulerReportClient(object):
@safe_connect
@retries
def put_allocations(self, rp_uuid, consumer_uuid, alloc_data, project_id,
user_id):
def put_allocations(self, context, rp_uuid, consumer_uuid, alloc_data,
project_id, user_id):
"""Creates allocation records for the supplied instance UUID against
the supplied resource provider.
@@ -1665,6 +1674,7 @@ class SchedulerReportClient(object):
Once shared storage and things like NUMA allocations are a
reality, this will change to allocate against multiple providers.
:param context: The security context
:param rp_uuid: The UUID of the resource provider to allocate against.
:param consumer_uuid: The instance's UUID.
:param alloc_data: Dict, keyed by resource class, of amounts to
@@ -1688,7 +1698,8 @@ class SchedulerReportClient(object):
'user_id': user_id,
}
url = '/allocations/%s' % consumer_uuid
r = self.put(url, payload, version='1.8')
r = self.put(url, payload, version='1.8',
global_request_id=context.global_id)
if r.status_code == 406:
# microversion 1.8 not available so try the earlier way
# TODO(melwitt): Remove this when we can be sure all placement
@@ -1734,7 +1745,7 @@ class SchedulerReportClient(object):
def update_instance_allocation(self, context, compute_node, instance,
sign):
if sign > 0:
self._allocate_for_instance(compute_node.uuid, instance)
self._allocate_for_instance(context, compute_node.uuid, instance)
else:
self.delete_allocation_for_instance(context, instance.uuid)

View File

@@ -797,10 +797,11 @@ def claim_resources(ctx, client, spec_obj, instance_uuid, alloc_req,
user_id, allocation_request_version=allocation_request_version)
def remove_allocation_from_compute(instance, compute_node_uuid, reportclient,
flavor=None):
def remove_allocation_from_compute(context, instance, compute_node_uuid,
reportclient, flavor=None):
"""Removes the instance allocation from the compute host.
:param context: The request context
:param instance: the instance object owning the allocation
:param compute_node_uuid: the UUID of the compute node where the allocation
needs to be removed
@@ -817,5 +818,5 @@ def remove_allocation_from_compute(instance, compute_node_uuid, reportclient,
my_resources = resources_from_flavor(instance, flavor)
return reportclient.remove_provider_from_instance_allocation(
instance.uuid, compute_node_uuid, instance.user_id,
context, instance.uuid, compute_node_uuid, instance.user_id,
instance.project_id, my_resources)

View File

@@ -331,7 +331,8 @@ class SchedulerReportClientTests(test.TestCase):
self.client.update_compute_node(self.context, self.compute_node)
# The compute node is associated with two of the shared storages
self.client.set_aggregates_for_provider(
self.compute_uuid, set([uuids.agg_disk_1, uuids.agg_disk_2]))
self.context, self.compute_uuid,
set([uuids.agg_disk_1, uuids.agg_disk_2]))
# Register two SR-IOV PFs with VF and bandwidth inventory
for x in (1, 2):
@@ -357,10 +358,11 @@ class SchedulerReportClientTests(test.TestCase):
},
}, parent_provider_uuid=self.compute_uuid)
# They're associated with an IP address aggregate
self.client.set_aggregates_for_provider(uuid, [uuids.agg_ip])
self.client.set_aggregates_for_provider(self.context, uuid,
[uuids.agg_ip])
# Set some traits on 'em
self.client.set_traits_for_provider(
uuid, ['CUSTOM_PHYSNET_%d' % x])
self.context, uuid, ['CUSTOM_PHYSNET_%d' % x])
# Register three shared storage pools with disk inventory
for x in (1, 2, 3):
@@ -379,11 +381,12 @@ class SchedulerReportClientTests(test.TestCase):
})
# Mark as a sharing provider
self.client.set_traits_for_provider(
uuid, ['MISC_SHARES_VIA_AGGREGATE'])
self.context, uuid, ['MISC_SHARES_VIA_AGGREGATE'])
# Associate each with its own aggregate. The compute node is
# associated with the first two (agg_disk_1 and agg_disk_2).
agg = getattr(uuids, 'agg_disk_%d' % x)
self.client.set_aggregates_for_provider(uuid, [agg])
self.client.set_aggregates_for_provider(self.context, uuid,
[agg])
# Register a shared IP address provider with IP address inventory
self.client.set_inventory_for_provider(
@@ -399,9 +402,11 @@ class SchedulerReportClientTests(test.TestCase):
})
# Mark as a sharing provider, and add another trait
self.client.set_traits_for_provider(
uuids.sip, set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO']))
self.context, uuids.sip,
set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO']))
# It's associated with the same aggregate as both PFs
self.client.set_aggregates_for_provider(uuids.sip, [uuids.agg_ip])
self.client.set_aggregates_for_provider(self.context, uuids.sip,
[uuids.agg_ip])
# Register a shared network bandwidth provider
self.client.set_inventory_for_provider(
@@ -417,9 +422,10 @@ class SchedulerReportClientTests(test.TestCase):
})
# Mark as a sharing provider
self.client.set_traits_for_provider(
uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE'])
self.context, uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE'])
# It's associated with some other aggregate.
self.client.set_aggregates_for_provider(uuids.sbw, [uuids.agg_bw])
self.client.set_aggregates_for_provider(self.context, uuids.sbw,
[uuids.agg_bw])
# Setup is done. Grab the ProviderTree
prov_tree = self.client.get_provider_tree_and_ensure_root(

View File

@@ -725,7 +725,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.compute.init_host()
mock_remove_allocation.assert_called_once_with(
deleted_instance.uuid, uuids.our_node_uuid,
self.context, deleted_instance.uuid, uuids.our_node_uuid,
deleted_instance.user_id, deleted_instance.project_id,
mock.sentinel.my_resources)
@@ -3595,8 +3595,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
get_node.assert_called_once_with(
self.context, our_host, migration.source_node)
remove_allocation.assert_called_once_with(
instance_2.uuid, uuids.our_node_uuid, uuids.user_id,
uuids.project_id, mock.sentinel.resources)
self.context, instance_2.uuid, uuids.our_node_uuid,
uuids.user_id, uuids.project_id, mock.sentinel.resources)
def test_destroy_evacuated_instances_node_deleted(self):
our_host = self.compute.host
@@ -3672,8 +3672,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
# but only instance_2 is deallocated as the compute node for
# instance_1 is already deleted
remove_allocation.assert_called_once_with(
instance_2.uuid, uuids.our_node_uuid, uuids.user_id,
uuids.project_id, mock.sentinel.resources)
self.context, instance_2.uuid, uuids.our_node_uuid,
uuids.user_id, uuids.project_id, mock.sentinel.resources)
self.assertEqual(2, get_node.call_count)
@@ -3923,10 +3923,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.assertFalse(
rt.delete_allocation_for_evacuated_instance.called)
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.compute.utils.add_instance_fault_from_exc')
@mock.patch.object(manager.ComputeManager,
'_error_out_instance_on_exception')
def test_rebuild_driver_error_evacuate(self, mock_error, mock_aiffe):
def test_rebuild_driver_error_evacuate(self, mock_error, mock_aiffe,
mock_elevated):
mock_elevated.return_value = self.context
instance = fake_instance.fake_instance_obj(self.context)
ex = test.TestingException('foo')
with mock.patch.object(self.compute, '_get_resource_tracker') as mrt:
@@ -3935,7 +3938,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
recreate=True, scheduled_node='foo')
rt = mrt.return_value
delete_alloc = rt.delete_allocation_for_evacuated_instance
delete_alloc.assert_called_once_with(instance, 'foo',
delete_alloc.assert_called_once_with(self.context, instance, 'foo',
node_type='destination')
@mock.patch('nova.context.RequestContext.elevated')
@@ -4018,7 +4021,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
mock_delete_allocation.assert_called_once_with(
instance, 'fake-node', node_type='destination')
elevated_context, instance, 'fake-node', node_type='destination')
mock_notify.assert_called_once_with(
elevated_context, instance, 'fake-mini', action='rebuild',
bdms=None, exception=exc, phase='error')
@@ -6394,7 +6397,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
rt.get_node_uuid.assert_called_once_with(mock.sentinel.node)
remove = mock_rc.remove_provider_from_instance_allocation
remove.assert_called_once_with(
instance.uuid, rt.get_node_uuid.return_value,
self.context, instance.uuid, rt.get_node_uuid.return_value,
instance.user_id, instance.project_id,
mock_resources.return_value)
do_it()
@@ -7023,7 +7026,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
# ...so we should have called the old style delete
mock_delete.assert_not_called()
fn = mock_rt.return_value.delete_allocation_for_migrated_instance
fn.assert_called_once_with(self.instance, self.instance.node)
fn.assert_called_once_with(self.context, self.instance,
self.instance.node)
def test_post_live_migration_legacy(self):
# We have no migrate_data...
@@ -7045,7 +7049,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
# ...so we should have called the old style delete
mock_delete.assert_not_called()
fn = mock_rt.return_value.delete_allocation_for_migrated_instance
fn.assert_called_once_with(self.instance, self.instance.node)
fn.assert_called_once_with(self.context, self.instance,
self.instance.node)
def test_post_live_migration_cinder_v3_api(self):
# Because live migration has succeeded, _post_live_migration

View File

@@ -1315,6 +1315,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.rt._update(mock.sentinel.ctx, new_compute)
rc.set_traits_for_provider.assert_called_once_with(
mock.sentinel.ctx,
new_compute.uuid,
mock.sentinel.traits,
)
@@ -2842,13 +2843,15 @@ class TestUpdateUsageFromInstance(BaseTestCase):
mock_resource_from_flavor.return_value = mock_resource
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.uuid = uuids.inst0
ctxt = context.get_admin_context()
self.rt.delete_allocation_for_evacuated_instance(instance, _NODENAME)
self.rt.delete_allocation_for_evacuated_instance(
ctxt, instance, _NODENAME)
rc = self.rt.reportclient
mock_remove_allocation = rc.remove_provider_from_instance_allocation
mock_remove_allocation.assert_called_once_with(
instance.uuid, self.rt.compute_nodes[_NODENAME].uuid,
ctxt, instance.uuid, self.rt.compute_nodes[_NODENAME].uuid,
instance.user_id, instance.project_id, mock_resource)

View File

@@ -266,11 +266,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
resp = self.client.put_allocations(self.context, rp_uuid,
consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertTrue(resp)
mock_put.assert_called_once_with(expected_url, mock.ANY, version='1.8')
mock_put.assert_called_once_with(
expected_url, mock.ANY, version='1.8',
global_request_id=self.context.global_id)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
def test_put_allocations_fail_fallback_succeeds(self, mock_put):
@@ -285,12 +288,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
resp = self.client.put_allocations(self.context, rp_uuid,
consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertTrue(resp)
# Should fall back to earlier way if 1.8 fails.
call1 = mock.call(expected_url, mock.ANY, version='1.8')
call1 = mock.call(expected_url, mock.ANY, version='1.8',
global_request_id=self.context.global_id)
call2 = mock.call(expected_url, mock.ANY)
self.assertEqual(2, mock_put.call_count)
mock_put.assert_has_calls([call1, call2])
@@ -304,11 +309,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
resp = self.client.put_allocations(self.context, rp_uuid,
consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertFalse(resp)
mock_put.assert_called_once_with(expected_url, mock.ANY, version='1.8')
mock_put.assert_called_once_with(
expected_url, mock.ANY, version='1.8',
global_request_id=self.context.global_id)
log_msg = mock_warn.call_args[0][0]
self.assertIn("Unable to submit allocation for instance", log_msg)
@@ -328,13 +336,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
resp = self.client.put_allocations(self.context, rp_uuid,
consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertTrue(resp)
mock_put.assert_has_calls([
mock.call(expected_url, mock.ANY, version='1.8'),
mock.call(expected_url, mock.ANY, version='1.8')])
mock.call(expected_url, mock.ANY, version='1.8',
global_request_id=self.context.global_id)] * 2)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
def test_put_allocations_retry_gives_up(self, mock_put):
@@ -349,14 +358,14 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
resp = self.client.put_allocations(self.context, rp_uuid,
consumer_uuid, data,
mock.sentinel.project_id,
mock.sentinel.user_id)
self.assertFalse(resp)
mock_put.assert_has_calls([
mock.call(expected_url, mock.ANY, version='1.8'),
mock.call(expected_url, mock.ANY, version='1.8'),
mock.call(expected_url, mock.ANY, version='1.8')])
mock.call(expected_url, mock.ANY, version='1.8',
global_request_id=self.context.global_id)] * 3)
def test_claim_resources_success_with_old_version(self):
get_resp_mock = mock.Mock(status_code=200)
@@ -898,7 +907,8 @@ class TestPutAllocations(SchedulerReportClientTestCase):
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
self.context, consumer_uuid, uuids.source, user_id, project_id,
mock.Mock())
expected_url = "/allocations/%s" % consumer_uuid
# New allocations should only include the destination...
@@ -928,7 +938,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
self.assertEqual(expected_allocations, actual_allocations)
self.ks_adap_mock.put.assert_called_once_with(
expected_url, microversion='1.10', json=mock.ANY, raise_exc=False,
headers={})
headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertTrue(res)
@@ -971,7 +981,8 @@ class TestPutAllocations(SchedulerReportClientTestCase):
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
self.context, consumer_uuid, uuids.source, user_id, project_id,
mock.Mock())
expected_url = "/allocations/%s" % consumer_uuid
# New allocations should only include the destination...
@@ -1009,7 +1020,7 @@ class TestPutAllocations(SchedulerReportClientTestCase):
self.assertEqual(expected_allocations, actual_allocations)
self.ks_adap_mock.put.assert_called_once_with(
expected_url, microversion='1.10', json=mock.ANY, raise_exc=False,
headers={})
headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertTrue(res)
@@ -1043,7 +1054,8 @@ class TestPutAllocations(SchedulerReportClientTestCase):
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
self.context, consumer_uuid, uuids.source, user_id, project_id,
mock.Mock())
self.ks_adap_mock.get.assert_called()
self.ks_adap_mock.put.assert_not_called()
@@ -1061,7 +1073,8 @@ class TestPutAllocations(SchedulerReportClientTestCase):
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.remove_provider_from_instance_allocation(
consumer_uuid, uuids.source, user_id, project_id, mock.Mock())
self.context, consumer_uuid, uuids.source, user_id, project_id,
mock.Mock())
self.ks_adap_mock.get.assert_called()
self.ks_adap_mock.put.assert_not_called()
@@ -1989,11 +2002,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.assertEqual(set(),
self.client._provider_tree.data(uuids.rp).aggregates)
self.client.set_aggregates_for_provider(uuids.rp, aggs)
self.client.set_aggregates_for_provider(self.context, uuids.rp, aggs)
self.ks_adap_mock.put.assert_called_once_with(
'/resource_providers/%s/aggregates' % uuids.rp, json=aggs,
raise_exc=False, microversion='1.1', headers={})
raise_exc=False, microversion='1.1',
headers={'X-Openstack-Request-Id': self.context.global_id})
# Cache was updated
self.assertEqual(set(aggs),
self.client._provider_tree.data(uuids.rp).aggregates)
@@ -2002,7 +2016,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
self.ks_adap_mock.put.return_value = mock.Mock(status_code=503)
self.assertRaises(
exception.ResourceProviderUpdateFailed,
self.client.set_aggregates_for_provider, uuids.rp, [])
self.client.set_aggregates_for_provider,
self.context, uuids.rp, [])
class TestAggregates(SchedulerReportClientTestCase):
@@ -2107,18 +2122,20 @@ class TestTraits(SchedulerReportClientTestCase):
# Request all traits; custom traits need to be created
get_mock.json.return_value = {'traits': standard_traits}
self.client._ensure_traits(all_traits)
self.client._ensure_traits(self.context, all_traits)
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:' + ','.join(all_traits), **self.trait_api_kwargs)
self.ks_adap_mock.put.assert_has_calls(
[mock.call('/traits/' + trait, headers={}, **self.trait_api_kwargs)
[mock.call('/traits/' + trait,
headers={'X-Openstack-Request-Id': self.context.global_id},
**self.trait_api_kwargs)
for trait in custom_traits], any_order=True)
self.ks_adap_mock.reset_mock()
# Request standard traits; no traits need to be created
get_mock.json.return_value = {'traits': standard_traits}
self.client._ensure_traits(standard_traits)
self.client._ensure_traits(self.context, standard_traits)
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:' + ','.join(standard_traits),
**self.trait_api_kwargs)
@@ -2127,8 +2144,8 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.reset_mock()
# Request no traits - short circuit
self.client._ensure_traits(None)
self.client._ensure_traits([])
self.client._ensure_traits(self.context, None)
self.client._ensure_traits(self.context, [])
self.ks_adap_mock.get.assert_not_called()
self.ks_adap_mock.put.assert_not_called()
@@ -2136,7 +2153,8 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.get.return_value = mock.Mock(status_code=400)
self.assertRaises(exception.TraitRetrievalFailed,
self.client._ensure_traits, ['FOO'])
self.client._ensure_traits,
self.context, ['FOO'])
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:FOO', **self.trait_api_kwargs)
@@ -2151,12 +2169,15 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.put.return_value = put_mock
self.assertRaises(exception.TraitCreationFailed,
self.client._ensure_traits, ['FOO'])
self.client._ensure_traits,
self.context, ['FOO'])
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:FOO', **self.trait_api_kwargs)
self.ks_adap_mock.put.assert_called_once_with(
'/traits/FOO', headers={}, **self.trait_api_kwargs)
'/traits/FOO',
headers={'X-Openstack-Request-Id': self.context.global_id},
**self.trait_api_kwargs)
def test_set_traits_for_provider(self):
traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA']
@@ -2176,7 +2197,7 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.put.return_value = put_mock
# Invoke
self.client.set_traits_for_provider(uuids.rp, traits)
self.client.set_traits_for_provider(self.context, uuids.rp, traits)
# Verify API calls
self.ks_adap_mock.get.assert_called_once_with(
@@ -2184,7 +2205,8 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.put.assert_called_once_with(
'/resource_providers/%s/traits' % uuids.rp,
json={'traits': traits, 'resource_provider_generation': 0},
headers={}, **self.trait_api_kwargs)
headers={'X-Openstack-Request-Id': self.context.global_id},
**self.trait_api_kwargs)
# And ensure the provider tree cache was updated appropriately
self.assertFalse(
@@ -2205,7 +2227,8 @@ class TestTraits(SchedulerReportClientTestCase):
get_mock.status_code = 400
self.assertRaises(
exception.TraitRetrievalFailed,
self.client.set_traits_for_provider, uuids.rp, traits)
self.client.set_traits_for_provider,
self.context, uuids.rp, traits)
self.ks_adap_mock.put.assert_not_called()
get_mock.status_code = 200
@@ -2215,13 +2238,15 @@ class TestTraits(SchedulerReportClientTestCase):
self.ks_adap_mock.put.return_value = mock.Mock(status_code=409)
self.assertRaises(
exception.ResourceProviderUpdateConflict,
self.client.set_traits_for_provider, uuids.rp, traits)
self.client.set_traits_for_provider,
self.context, uuids.rp, traits)
# Other error
self.ks_adap_mock.put.return_value = mock.Mock(status_code=503)
self.assertRaises(
exception.ResourceProviderUpdateFailed,
self.client.set_traits_for_provider, uuids.rp, traits)
self.client.set_traits_for_provider,
self.context, uuids.rp, traits)
class TestAssociations(SchedulerReportClientTestCase):
@@ -3512,7 +3537,8 @@ class TestAllocations(SchedulerReportClientTestCase):
self.client.update_instance_allocation(self.context, cn, inst, 1)
mock_put.assert_called_once_with(
'/allocations/%s' % inst.uuid,
expected, version='1.8')
expected, version='1.8',
global_request_id=self.context.global_id)
self.assertTrue(mock_get.called)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'