[placement] Add sending global request ID in get
Send the 'X-Openstack-Request-Id' header on GET requests made by
SchedulerReportClient.

Change-Id: I306ac6f5c6b67d77d91a7ba24d4d863ab3e1bf5c
Closes-Bug: #1734625
parent f66bd7369a
commit ab4efbba61
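The mechanical shape of the change, before the diff itself: every
SchedulerReportClient helper that issues a GET (and every caller, out through
the compute manager, resource tracker, and scheduler) now threads the security
context through, and the low-level get() wrapper turns that context's global
request ID into an X-Openstack-Request-Id header. A minimal sketch of the
header rule (illustrative only; build_headers is a hypothetical name, not Nova
code — the real logic lives inline in SchedulerReportClient.get() below):

    # Sketch of the rule the whole diff enforces (hypothetical helper name).
    INBOUND_HEADER = 'X-Openstack-Request-Id'


    def build_headers(global_request_id=None):
        # Mirrors the new SchedulerReportClient.get(): send the header only
        # when the caller supplies a global request ID.
        return ({INBOUND_HEADER: global_request_id}
                if global_request_id else {})


    assert build_headers('req-abc123') == {
        'X-Openstack-Request-Id': 'req-abc123'}
    assert build_headers() == {}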
@@ -2223,7 +2223,8 @@ class ComputeManager(manager.Manager):
         try:
             resources['allocations'] = (
-                self.reportclient.get_allocations_for_consumer(instance.uuid))
+                self.reportclient.get_allocations_for_consumer(context,
+                                                               instance.uuid))
         except Exception:
             LOG.exception('Failure retrieving placement allocations',
                           instance=instance)
@@ -3035,7 +3036,7 @@ class ComputeManager(manager.Manager):
                 context, instance, self.host, migration)

         allocations = self.reportclient.get_allocations_for_consumer(
-            instance.uuid)
+            context, instance.uuid)

         network_info = instance.get_network_info()
         if bdms is None:
@@ -3786,7 +3787,7 @@ class ComputeManager(manager.Manager):
             # so, avoid doing the legacy behavior below.
             mig_allocs = (
                 self.reportclient.get_allocations_for_consumer_by_provider(
-                    cn_uuid, migration.uuid))
+                    context, cn_uuid, migration.uuid))
             if mig_allocs:
                 LOG.info(_('Source node %(node)s reverted migration '
                            '%(mig)s; not deleting migration-based '
@@ -3801,7 +3802,7 @@ class ComputeManager(manager.Manager):
             # accounting
             allocs = (
                 self.reportclient.get_allocations_for_consumer_by_provider(
-                    cn_uuid, migration.uuid))
+                    context, cn_uuid, migration.uuid))
             if allocs:
                 # NOTE(danms): The source did migration-based allocation
                 # accounting, so we should let the source node rejigger
@@ -4000,7 +4001,7 @@ class ComputeManager(manager.Manager):
         # Fetch the original allocation that the instance had on the source
         # node, which are now held by the migration
         orig_alloc = self.reportclient.get_allocations_for_consumer(
-            migration.uuid)
+            context, migration.uuid)
         if not orig_alloc:
             # NOTE(danms): This migration did not do per-migration allocation
             # accounting, so nothing to do here.
@@ -4879,7 +4880,7 @@ class ComputeManager(manager.Manager):
         limits = filter_properties.get('limits', {})

         allocations = self.reportclient.get_allocations_for_consumer(
-            instance.uuid)
+            context, instance.uuid)

         shelved_image_ref = instance.image_ref
         if image:
@@ -6264,7 +6265,7 @@ class ComputeManager(manager.Manager):
             migration = migrate_data.migration
             rc = self.scheduler_client.reportclient
             # Check to see if our migration has its own allocations
-            allocs = rc.get_allocations_for_consumer(migration.uuid)
+            allocs = rc.get_allocations_for_consumer(ctxt, migration.uuid)
         else:
             # We didn't have data on a migration, which means we can't
             # look up to see if we had new-style migration-based
@@ -1232,7 +1232,7 @@ class ResourceTracker(object):
        # always creates allocations for an instance
        known_instances = set(self.tracked_instances.keys())
        allocations = self.reportclient.get_allocations_for_resource_provider(
-            cn.uuid) or {}
+            context, cn.uuid) or {}
        read_deleted_context = context.elevated(read_deleted='yes')
        for consumer_uuid, alloc in allocations.items():
            if consumer_uuid in known_instances:
@@ -45,7 +45,7 @@ def replace_allocation_with_migration(context, instance, migration):
    reportclient = schedclient.reportclient

    orig_alloc = reportclient.get_allocations_for_consumer_by_provider(
-        source_cn.uuid, instance.uuid)
+        context, source_cn.uuid, instance.uuid)
    if not orig_alloc:
        LOG.debug('Unable to find existing allocations for instance on '
                  'source compute node: %s. This is normal if you are not '
@@ -273,8 +273,11 @@ class SchedulerReportClient(object):
        client.additional_headers = {'accept': 'application/json'}
        return client

-    def get(self, url, version=None):
-        return self._client.get(url, raise_exc=False, microversion=version)
+    def get(self, url, version=None, global_request_id=None):
+        headers = ({request_id.INBOUND_HEADER: global_request_id}
+                   if global_request_id else {})
+        return self._client.get(url, raise_exc=False, microversion=version,
+                                headers=headers)

    def post(self, url, data, version=None, global_request_id=None):
        headers = ({request_id.INBOUND_HEADER: global_request_id}
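A note on the value the call sites below forward: they pass context.global_id
rather than the raw global_request_id attribute. My understanding of
oslo.context (an assumption worth verifying against the version in use) is
that global_id falls back to the service-local request_id when no inbound
global ID exists, so the header stays useful for log correlation either way.
Re-implemented for illustration:

    # Hedged sketch of the assumed oslo.context fallback (not Nova code).
    class ContextSketch(object):
        def __init__(self, request_id, global_request_id=None):
            self.request_id = request_id
            self.global_request_id = global_request_id

        @property
        def global_id(self):
            # Prefer the inbound global request ID; otherwise fall back to
            # this service's own request ID.
            return self.global_request_id or self.request_id


    assert ContextSketch('req-local').global_id == 'req-local'
    assert ContextSketch('req-local', 'req-global').global_id == 'req-global'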
@@ -305,7 +308,7 @@ class SchedulerReportClient(object):
                                headers=headers)

    @safe_connect
-    def get_allocation_candidates(self, resources):
+    def get_allocation_candidates(self, context, resources):
        """Returns a tuple of (allocation_requests, provider_summaries,
        allocation_request_version).

@@ -323,6 +326,7 @@ class SchedulerReportClient(object):
                 this data from placement, or (None, None, None) if the
                 request failed

+        :param context: The security context
        :param nova.scheduler.utils.ResourceRequest resources:
            A ResourceRequest object representing the requested resources and
            traits from the request spec.
@@ -346,7 +350,8 @@ class SchedulerReportClient(object):

        version = '1.17'
        url = "/allocation_candidates?%s" % parse.urlencode(qs_params)
-        resp = self.get(url, version=version)
+        resp = self.get(url, version=version,
+                        global_request_id=context.global_id)
        if resp.status_code == 200:
            data = resp.json()
            return (data['allocation_requests'], data['provider_summaries'],
@@ -370,7 +375,7 @@ class SchedulerReportClient(object):
        return None, None, None

    @safe_connect
-    def _get_provider_aggregates(self, rp_uuid):
+    def _get_provider_aggregates(self, context, rp_uuid):
        """Queries the placement API for a resource provider's aggregates.

        :param rp_uuid: UUID of the resource provider to grab aggregates for.
@@ -382,7 +387,7 @@ class SchedulerReportClient(object):
                 does not exist.
        """
        resp = self.get("/resource_providers/%s/aggregates" % rp_uuid,
-                        version='1.1')
+                        version='1.1', global_request_id=context.global_id)
        if resp.status_code == 200:
            data = resp.json()
            return set(data['aggregates'])
@@ -401,9 +406,10 @@ class SchedulerReportClient(object):
        raise exception.ResourceProviderAggregateRetrievalFailed(uuid=rp_uuid)

    @safe_connect
-    def _get_provider_traits(self, rp_uuid):
+    def _get_provider_traits(self, context, rp_uuid):
        """Queries the placement API for a resource provider's traits.

+        :param context: The security context
        :param rp_uuid: UUID of the resource provider to grab traits for.
        :return: A set() of string trait names, which may be empty if the
                 specified provider has no traits.
@@ -412,7 +418,7 @@ class SchedulerReportClient(object):
                 empty set()) if the specified resource provider does not exist.
        """
        resp = self.get("/resource_providers/%s/traits" % rp_uuid,
-                        version='1.6')
+                        version='1.6', global_request_id=context.global_id)

        if resp.status_code == 200:
            return set(resp.json()['traits'])
@@ -427,17 +433,19 @@ class SchedulerReportClient(object):
        raise exception.ResourceProviderTraitRetrievalFailed(uuid=rp_uuid)

    @safe_connect
-    def _get_resource_provider(self, uuid):
+    def _get_resource_provider(self, context, uuid):
        """Queries the placement API for a resource provider record with the
        supplied UUID.

+        :param context: The security context
        :param uuid: UUID identifier for the resource provider to look up
        :return: A dict of resource provider information if found or None if no
                 such resource provider could be found.
        :raise: ResourceProviderRetrievalFailed on error.
        """
        resp = self.get("/resource_providers/%s" % uuid,
-                        version=NESTED_PROVIDER_API_VERSION)
+                        version=NESTED_PROVIDER_API_VERSION,
+                        global_request_id=context.global_id)
        if resp.status_code == 200:
            data = resp.json()
            return data
@@ -458,10 +466,11 @@ class SchedulerReportClient(object):
        raise exception.ResourceProviderRetrievalFailed(uuid=uuid)

    @safe_connect
-    def _get_providers_in_aggregates(self, agg_uuids):
+    def _get_providers_in_aggregates(self, context, agg_uuids):
        """Queries the placement API for a list of the resource providers
        associated with any of the specified aggregates.

+        :param context: The security context
        :param agg_uuids: Iterable of string UUIDs of aggregates to filter on.
        :return: A list of dicts of resource provider information, which may be
                 empty if no provider exists with the specified UUID.
@@ -472,7 +481,7 @@ class SchedulerReportClient(object):

        qpval = ','.join(agg_uuids)
        resp = self.get("/resource_providers?member_of=in:" + qpval,
-                        version='1.3')
+                        version='1.3', global_request_id=context.global_id)
        if resp.status_code == 200:
            return resp.json()['resource_providers']

@@ -491,17 +500,19 @@ class SchedulerReportClient(object):
        raise exception.ResourceProviderRetrievalFailed(message=msg % args)

    @safe_connect
-    def _get_providers_in_tree(self, uuid):
+    def _get_providers_in_tree(self, context, uuid):
        """Queries the placement API for a list of the resource providers in
        the tree associated with the specified UUID.

+        :param context: The security context
        :param uuid: UUID identifier for the resource provider to look up
        :return: A list of dicts of resource provider information, which may be
                 empty if no provider exists with the specified UUID.
        :raise: ResourceProviderRetrievalFailed on error.
        """
        resp = self.get("/resource_providers?in_tree=%s" % uuid,
-                        version=NESTED_PROVIDER_API_VERSION)
+                        version=NESTED_PROVIDER_API_VERSION,
+                        global_request_id=context.global_id)

        if resp.status_code == 200:
            return resp.json()['resource_providers']
@@ -576,7 +587,7 @@ class SchedulerReportClient(object):
                'placement_req_id': placement_req_id,
            }
            LOG.info(msg, args)
-            return self._get_resource_provider(uuid)
+            return self._get_resource_provider(context, uuid)

        # A provider with the same *name* already exists, or some other error.
        msg = ("[%(placement_req_id)s] Failed to create resource provider "
@@ -633,12 +644,12 @@ class SchedulerReportClient(object):
            # If we had the requested provider locally, refresh it and its
            # descendants, but only if stale.
            for u in self._provider_tree.get_provider_uuids(uuid):
-                self._refresh_associations(u, force=False)
+                self._refresh_associations(context, u, force=False)
            return uuid

        # We don't have it locally; check placement or create it.
        created_rp = None
-        rps_to_refresh = self._get_providers_in_tree(uuid)
+        rps_to_refresh = self._get_providers_in_tree(context, uuid)
        if not rps_to_refresh:
            created_rp = self._create_resource_provider(
                context, uuid, name or uuid,
@@ -653,7 +664,7 @@ class SchedulerReportClient(object):

        for rp_to_refresh in rps_to_refresh:
            self._refresh_associations(
-                rp_to_refresh['uuid'],
+                context, rp_to_refresh['uuid'],
                generation=rp_to_refresh.get('generation'), force=True)

        return uuid
@@ -692,14 +703,14 @@ class SchedulerReportClient(object):
                raise exception.ResourceProviderInUse()
            raise exception.ResourceProviderDeletionFailed(uuid=rp_uuid)

-    def _get_inventory(self, rp_uuid):
+    def _get_inventory(self, context, rp_uuid):
        url = '/resource_providers/%s/inventories' % rp_uuid
-        result = self.get(url)
+        result = self.get(url, global_request_id=context.global_id)
        if not result:
            return None
        return result.json()

-    def _refresh_and_get_inventory(self, rp_uuid):
+    def _refresh_and_get_inventory(self, context, rp_uuid):
        """Helper method that retrieves the current inventory for the supplied
        resource provider according to the placement API.

@@ -708,7 +719,7 @@ class SchedulerReportClient(object):
        generation and attempt to update inventory if any exists, otherwise
        return empty inventories.
        """
-        curr = self._get_inventory(rp_uuid)
+        curr = self._get_inventory(context, rp_uuid)
        if curr is None:
            return None

@@ -718,8 +729,8 @@ class SchedulerReportClient(object):
        self._provider_tree.update_inventory(rp_uuid, curr_inv, cur_gen)
        return curr

-    def _refresh_associations(self, rp_uuid, generation=None, force=False,
-                              refresh_sharing=True):
+    def _refresh_associations(self, context, rp_uuid, generation=None,
+                              force=False, refresh_sharing=True):
        """Refresh aggregates, traits, and (optionally) aggregate-associated
        sharing providers for the specified resource provider uuid.

@@ -731,6 +742,7 @@ class SchedulerReportClient(object):
        historical: all code paths that get us here are doing inventory refresh
        themselves.

+        :param context: The security context
        :param rp_uuid: UUID of the resource provider to check for fresh
            aggregates and traits
        :param generation: The resource provider generation to set. If None,
@@ -747,7 +759,7 @@ class SchedulerReportClient(object):
        """
        if force or self._associations_stale(rp_uuid):
            # Refresh aggregates
-            aggs = self._get_provider_aggregates(rp_uuid)
+            aggs = self._get_provider_aggregates(context, rp_uuid)
            msg = ("Refreshing aggregate associations for resource provider "
                   "%s, aggregates: %s")
            LOG.debug(msg, rp_uuid, ','.join(aggs or ['None']))
@@ -758,7 +770,7 @@ class SchedulerReportClient(object):
                rp_uuid, aggs, generation=generation)

            # Refresh traits
-            traits = self._get_provider_traits(rp_uuid)
+            traits = self._get_provider_traits(context, rp_uuid)
            msg = ("Refreshing trait associations for resource provider %s, "
                   "traits: %s")
            LOG.debug(msg, rp_uuid, ','.join(traits or ['None']))
@@ -769,7 +781,7 @@ class SchedulerReportClient(object):

            if refresh_sharing:
                # Refresh providers associated by aggregate
-                for rp in self._get_providers_in_aggregates(aggs):
+                for rp in self._get_providers_in_aggregates(context, aggs):
                    if not self._provider_tree.exists(rp['uuid']):
                        # NOTE(efried): Right now sharing providers are always
                        # treated as roots. This is deliberate. From the
@@ -782,7 +794,8 @@ class SchedulerReportClient(object):
                        # providers). No need to override force=True for newly-
                        # added providers - the missing timestamp will always
                        # trigger them to refresh.
-                        self._refresh_associations(rp['uuid'], force=force,
+                        self._refresh_associations(context, rp['uuid'],
+                                                   force=force,
                                                   refresh_sharing=False)
            self.association_refresh_time[rp_uuid] = time.time()

@@ -808,7 +821,7 @@ class SchedulerReportClient(object):
        # TODO(jaypipes): Should we really be calling the placement API to get
        # the current inventory for every resource provider each and every time
        # update_resource_stats() is called? :(
-        curr = self._refresh_and_get_inventory(rp_uuid)
+        curr = self._refresh_and_get_inventory(context, rp_uuid)
        if curr is None:
            return False

@@ -934,7 +947,7 @@ class SchedulerReportClient(object):
        if not self._provider_tree.has_inventory(rp_uuid):
            return None

-        curr = self._refresh_and_get_inventory(rp_uuid)
+        curr = self._refresh_and_get_inventory(context, rp_uuid)

        # Check to see if we need to update placement's view
        if not curr.get('inventories', {}):
@@ -1016,7 +1029,7 @@ class SchedulerReportClient(object):
                parent_provider_uuid=parent_provider_uuid)
        # Ensure inventories are up to date (for *all* cached RPs)
        for uuid in self._provider_tree.get_provider_uuids():
-            self._refresh_and_get_inventory(uuid)
+            self._refresh_and_get_inventory(context, uuid)
        # Return a *copy* of the tree.
        return copy.deepcopy(self._provider_tree)

@@ -1077,7 +1090,8 @@ class SchedulerReportClient(object):
        # service knows. If the caller tries to ensure a nonexistent
        # "standard" trait, they deserve the TraitCreationFailed exception
        # they'll get.
-        resp = self.get('/traits?name=in:' + ','.join(traits), version='1.6')
+        resp = self.get('/traits?name=in:' + ','.join(traits), version='1.6',
+                        global_request_id=context.global_id)
        if resp.status_code == 200:
            traits_to_create = set(traits) - set(resp.json()['traits'])
            # Might be neat to have a batch create. But creating multiple
@@ -1255,19 +1269,20 @@ class SchedulerReportClient(object):
            self._delete_inventory(context, compute_node.uuid)

    @safe_connect
-    def get_allocations_for_consumer(self, consumer):
+    def get_allocations_for_consumer(self, context, consumer):
        url = '/allocations/%s' % consumer
-        resp = self.get(url)
+        resp = self.get(url, global_request_id=context.global_id)
        if not resp:
            return {}
        else:
            return resp.json()['allocations']

-    def get_allocations_for_consumer_by_provider(self, rp_uuid, consumer):
+    def get_allocations_for_consumer_by_provider(self, context, rp_uuid,
+                                                 consumer):
        # NOTE(cdent): This trims to just the allocations being
        # used on this resource provider. In the future when there
        # are shared resources there might be other providers.
-        allocations = self.get_allocations_for_consumer(consumer)
+        allocations = self.get_allocations_for_consumer(context, consumer)
        if allocations is None:
            # safe_connect can return None on 404
            allocations = {}
@@ -1277,7 +1292,7 @@ class SchedulerReportClient(object):
    def _allocate_for_instance(self, context, rp_uuid, instance):
        my_allocations = _instance_to_allocations_dict(instance)
        current_allocations = self.get_allocations_for_consumer_by_provider(
-            rp_uuid, instance.uuid)
+            context, rp_uuid, instance.uuid)
        if current_allocations == my_allocations:
            allocstr = ','.join(['%s=%s' % (k, v)
                                 for k, v in my_allocations.items()])
@@ -1362,7 +1377,7 @@ class SchedulerReportClient(object):
        # We first need to determine if this is a move operation and if so
        # create the "doubled-up" allocation that exists for the duration of
        # the move operation against both the source and destination hosts
-        r = self.get(url)
+        r = self.get(url, global_request_id=context.global_id)
        if r.status_code == 200:
            current_allocs = r.json()['allocations']
            if current_allocs:
@@ -1418,7 +1433,7 @@ class SchedulerReportClient(object):
        url = '/allocations/%s' % consumer_uuid

        # Grab the "doubled-up" allocation that we will manipulate
-        r = self.get(url)
+        r = self.get(url, global_request_id=context.global_id)
        if r.status_code != 200:
            LOG.warning("Failed to retrieve allocations for %s. Got HTTP %s",
                        consumer_uuid, r.status_code)
@@ -1635,9 +1650,9 @@ class SchedulerReportClient(object):
            self.delete_allocation_for_instance(context, instance.uuid)

    @safe_connect
-    def get_allocations_for_resource_provider(self, rp_uuid):
+    def get_allocations_for_resource_provider(self, context, rp_uuid):
        url = '/resource_providers/%s/allocations' % rp_uuid
-        resp = self.get(url)
+        resp = self.get(url, global_request_id=context.global_id)
        if not resp:
            return {}
        else:
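That closes out the report-client hunks: every GET path above now runs through
the header-setting wrapper. A condensed recap of the two-layer pattern they
implement (a sketch under the same assumptions as the one at the top of this
page — MiniReportClient is hypothetical, not the actual class):

    INBOUND_HEADER = 'X-Openstack-Request-Id'


    class MiniReportClient(object):
        def __init__(self, session):
            self._client = session

        # Layer 1: the HTTP wrapper turns the global ID into a header.
        def get(self, url, version=None, global_request_id=None):
            headers = ({INBOUND_HEADER: global_request_id}
                       if global_request_id else {})
            return self._client.get(url, raise_exc=False,
                                    microversion=version, headers=headers)

        # Layer 2: public methods accept the full security context but
        # forward only context.global_id; no other context data is sent.
        def get_allocations_for_consumer(self, context, consumer):
            resp = self.get('/allocations/%s' % consumer,
                            global_request_id=context.global_id)
            return resp.json()['allocations'] if resp else {}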
@@ -119,7 +119,8 @@ class SchedulerManager(manager.Manager):
        alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
            = None, None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
-            res = self.placement_client.get_allocation_candidates(resources)
+            res = self.placement_client.get_allocation_candidates(ctxt,
+                                                                  resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on
@@ -366,7 +366,7 @@ def claim_resources_on_destination(
    if not source_node_allocations:
        source_node_allocations = (
            reportclient.get_allocations_for_consumer_by_provider(
-                source_node.uuid, instance.uuid))
+                context, source_node.uuid, instance.uuid))
    if source_node_allocations:
        # Generate an allocation request for the destination node.
        alloc_request = {
@@ -106,9 +106,11 @@ class SchedulerReportClientTests(test.TestCase):
        res_class = fields.ResourceClass.VCPU
        with self._interceptor():
            # When we start out there are no resource providers.
-            rp = self.client._get_resource_provider(self.compute_uuid)
+            rp = self.client._get_resource_provider(self.context,
+                                                    self.compute_uuid)
            self.assertIsNone(rp)
-            rps = self.client._get_providers_in_tree(self.compute_uuid)
+            rps = self.client._get_providers_in_tree(self.context,
+                                                     self.compute_uuid)
            self.assertEqual([], rps)
            # But get_provider_tree_and_ensure_root creates one (via
            # _ensure_resource_provider)
@@ -120,15 +122,18 @@ class SchedulerReportClientTests(test.TestCase):
            self.client.update_compute_node(self.context, self.compute_node)

            # So now we have a resource provider
-            rp = self.client._get_resource_provider(self.compute_uuid)
+            rp = self.client._get_resource_provider(self.context,
+                                                    self.compute_uuid)
            self.assertIsNotNone(rp)
-            rps = self.client._get_providers_in_tree(self.compute_uuid)
+            rps = self.client._get_providers_in_tree(self.context,
+                                                     self.compute_uuid)
            self.assertEqual(1, len(rps))

            # We should also have empty sets of aggregate and trait
            # associations
            self.assertEqual(
-                [], self.client._get_providers_in_aggregates([uuids.agg]))
+                [], self.client._get_providers_in_aggregates(self.context,
+                                                             [uuids.agg]))
            self.assertFalse(
                self.client._provider_tree.have_aggregates_changed(
                    self.compute_uuid, []))
@@ -312,6 +317,8 @@ class SchedulerReportClientTests(test.TestCase):
            self.client.put('/resource_providers/%s' % self.compute_uuid,
                            payload,
                            global_request_id=global_request_id)
+            self.client.get('/resource_providers/%s' % self.compute_uuid,
+                            global_request_id=global_request_id)

    def test_get_provider_tree_with_nested_and_aggregates(self):
        """A more in-depth test of get_provider_tree_and_ensure_root with
@@ -5601,7 +5601,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
        except Exception as e:
            self.assertIsInstance(e, exception.BuildAbortException)

-        self.mock_get_allocs.assert_called_once_with(self.instance.uuid)
+        self.mock_get_allocs.assert_called_once_with(self.context,
+                                                     self.instance.uuid)
        mock_net_wait.assert_called_once_with(do_raise=False)

    @mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@@ -6448,7 +6449,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                          'src')
        self.assertFalse(mock_report.delete_allocation_for_instance.called)
        ga.assert_called_once_with(
-            mock_rt().get_node_uuid.return_value, self.migration.uuid)
+            self.context, mock_rt().get_node_uuid.return_value,
+            self.migration.uuid)

        old = mock_report.remove_provider_from_instance_allocation
        if new_rules:
@@ -6482,7 +6484,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                          'dst')
        self.assertFalse(mock_report.delete_allocation_for_instance.called)
        cn_uuid = mock_rt().get_node_uuid.return_value
-        ga.assert_called_once_with(cn_uuid, self.migration.uuid)
+        ga.assert_called_once_with(self.context, cn_uuid,
+                                   self.migration.uuid)

        old = mock_report.remove_provider_from_instance_allocation
        if new_rules:
@@ -350,7 +350,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
            test.MatchType(objects.ImageMeta), injected_files=[],
            admin_password=None, allocations={}, network_info=[],
            block_device_info='fake_bdm')
-        self.mock_get_allocs.assert_called_once_with(instance.uuid)
+        self.mock_get_allocs.assert_called_once_with(self.context,
+                                                     instance.uuid)
        mock_get_power_state.assert_called_once_with(self.context, instance)

        self.assertNotIn('shelved_at', instance.system_metadata)
@@ -451,7 +452,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
            test.MatchType(objects.ImageMeta),
            injected_files=[], admin_password=None,
            allocations={}, network_info=[], block_device_info='fake_bdm')
-        self.mock_get_allocs.assert_called_once_with(instance.uuid)
+        self.mock_get_allocs.assert_called_once_with(self.context,
+                                                     instance.uuid)
        mock_get_power_state.assert_called_once_with(self.context, instance)

    @mock.patch.object(objects.InstanceList, 'get_by_filters')
@@ -48,12 +48,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):
        A missing endpoint entry should not explode.
        """
        req.side_effect = ks_exc.EndpointNotFound()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -65,7 +65,7 @@ class SafeConnectedTestCase(test.NoDBTestCase):
        A missing endpoint should cause _create_client to be called.
        """
        req.side_effect = ks_exc.EndpointNotFound()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")

        # This is the second time _create_client is called, but the first since
        # the mock was created.
@@ -79,12 +79,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):

        """
        req.side_effect = ks_exc.MissingAuthPlugin()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('keystoneauth1.session.Session.request')
@@ -95,12 +95,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):

        """
        req.side_effect = ks_exc.Unauthorized()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('keystoneauth1.session.Session.request')
@@ -112,12 +112,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):

        """
        req.side_effect = ks_exc.ConnectFailure()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls do
        # work
        req.reset_mock()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch.object(report, 'LOG')
@@ -138,12 +138,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):
        Failed discovery should not blow up.
        """
        req.side_effect = ks_exc.DiscoveryFailure()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
-        self.client._get_resource_provider("fake")
+        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

@@ -1220,12 +1220,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):
            },
        ]
        self.client._ensure_resource_provider(self.context, cn.uuid)
-        get_pia_mock.assert_called_once_with(set([uuids.agg1, uuids.agg2]))
+        get_pia_mock.assert_called_once_with(
+            self.context, set([uuids.agg1, uuids.agg2]))
        self.assertTrue(self.client._provider_tree.exists(uuids.shr1))
        self.assertTrue(self.client._provider_tree.exists(uuids.shr2))
        # _get_provider_aggregates and _traits were called thrice: one for the
        # compute RP and once for each of the sharing RPs.
-        expected_calls = [mock.call(uuid)
+        expected_calls = [mock.call(self.context, uuid)
                          for uuid in (cn.uuid, uuids.shr1, uuids.shr2)]
        get_agg_mock.assert_has_calls(expected_calls)
        get_trait_mock.assert_has_calls(expected_calls)
@@ -1278,23 +1279,24 @@ class TestProviderOperations(SchedulerReportClientTestCase):

        self.client._ensure_resource_provider(self.context, uuids.compute_node)

-        get_rpt_mock.assert_called_once_with(uuids.compute_node)
+        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
-        get_agg_mock.assert_called_once_with(uuids.compute_node)
+        get_agg_mock.assert_called_once_with(self.context, uuids.compute_node)
        self.assertTrue(
            self.client._provider_tree.in_aggregates(uuids.compute_node,
                                                     [uuids.agg1]))
        self.assertFalse(
            self.client._provider_tree.in_aggregates(uuids.compute_node,
                                                     [uuids.agg2]))
-        get_trait_mock.assert_called_once_with(uuids.compute_node)
+        get_trait_mock.assert_called_once_with(self.context,
+                                               uuids.compute_node)
        self.assertTrue(
            self.client._provider_tree.has_traits(uuids.compute_node,
                                                  ['CUSTOM_GOLD']))
        self.assertFalse(
            self.client._provider_tree.has_traits(uuids.compute_node,
                                                  ['CUSTOM_SILVER']))
-        get_pia_mock.assert_called_once_with(set([uuids.agg1]))
+        get_pia_mock.assert_called_once_with(self.context, set([uuids.agg1]))
        self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
        self.assertFalse(create_rp_mock.called)

@@ -1318,7 +1320,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
                          self.client._ensure_resource_provider, self.context,
                          uuids.compute_node)

-        get_rpt_mock.assert_called_once_with(uuids.compute_node)
+        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context, uuids.compute_node, uuids.compute_node,
            parent_provider_uuid=None)
@@ -1358,7 +1360,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):

        # We don't refresh for a just-created provider
        refresh_mock.assert_not_called()
-        get_rpt_mock.assert_called_once_with(uuids.compute_node)
+        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context,
            uuids.compute_node,
@@ -1458,7 +1460,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        mock_exists.assert_called_once_with(uuids.root)
        mock_gpu.assert_called_once_with(uuids.root)
        mock_refresh.assert_has_calls(
-            [mock.call(uuid, force=False) for uuid in tree_uuids])
+            [mock.call(self.context, uuid, force=False)
+             for uuid in tree_uuids])

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
@@ -1475,9 +1478,9 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
-        mock_gpit.assert_called_once_with(uuids.root)
+        mock_gpit.assert_called_once_with(self.context, uuids.root)
        mock_refresh.assert_has_calls(
-            [mock.call(uuid, generation=42, force=True)
+            [mock.call(self.context, uuid, generation=42, force=True)
             for uuid in tree_uuids])
        self.assertEqual(tree_uuids,
                         set(self.client._provider_tree.get_provider_uuids()))
@@ -1497,7 +1500,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
-        mock_gpit.assert_called_once_with(uuids.root)
+        mock_gpit.assert_called_once_with(self.context, uuids.root)
        mock_create.assert_called_once_with(self.context, uuids.root,
                                            uuids.root,
                                            parent_provider_uuid=None)
@@ -1527,10 +1530,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        self.ks_adap_mock.get.return_value = resp_mock

        alloc_reqs, p_sums, allocation_request_version = \
-            self.client.get_allocation_candidates(resources)
+            self.client.get_allocation_candidates(self.context, resources)

        self.ks_adap_mock.get.assert_called_once_with(
-            mock.ANY, raise_exc=False, microversion='1.17')
+            mock.ANY, raise_exc=False, microversion='1.17',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qs(split_url.query)
@@ -1558,10 +1562,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        self.ks_adap_mock.get.return_value = resp_mock

        alloc_reqs, p_sums, allocation_request_version = \
-            self.client.get_allocation_candidates(resources)
+            self.client.get_allocation_candidates(self.context, resources)

        self.ks_adap_mock.get.assert_called_once_with(
-            mock.ANY, raise_exc=False, microversion='1.17')
+            mock.ANY, raise_exc=False, microversion='1.17',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qs(split_url.query)
@@ -1584,10 +1589,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        resources = scheduler_utils.ResourceRequest.from_extra_specs(
            {'resources:MEMORY_MB': '1024'})

-        res = self.client.get_allocation_candidates(resources)
+        res = self.client.get_allocation_candidates(self.context, resources)

        self.ks_adap_mock.get.assert_called_once_with(
-            mock.ANY, raise_exc=False, microversion='1.17')
+            mock.ANY, raise_exc=False, microversion='1.17',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qs(split_url.query)
@@ -1609,7 +1615,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        resp_mock.json.return_value = json_data
        self.ks_adap_mock.get.return_value = resp_mock

-        result = self.client._get_resource_provider(uuid)
+        result = self.client._get_resource_provider(self.context, uuid)

        expected_provider_dict = dict(
            uuid=uuid,
@@ -1619,7 +1625,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        )
        expected_url = '/resource_providers/' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.14')
+            expected_url, raise_exc=False, microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(expected_provider_dict, result)

    def test_get_resource_provider_not_found(self):
@@ -1629,11 +1636,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        self.ks_adap_mock.get.return_value = resp_mock

        uuid = uuids.compute_node
-        result = self.client._get_resource_provider(uuid)
+        result = self.client._get_resource_provider(self.context, uuid)

        expected_url = '/resource_providers/' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.14')
+            expected_url, raise_exc=False, microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertIsNone(result)

    @mock.patch.object(report.LOG, 'error')
@@ -1649,11 +1657,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        uuid = uuids.compute_node
        self.assertRaises(
            exception.ResourceProviderRetrievalFailed,
-            self.client._get_resource_provider, uuid)
+            self.client._get_resource_provider, self.context, uuid)

        expected_url = '/resource_providers/' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.14')
+            expected_url, raise_exc=False, microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        # A 503 Service Unavailable should trigger an error log that
        # includes the placement request id and return None
        # from _get_resource_provider()
@@ -1687,17 +1696,20 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        resp_mock.json.return_value = {'resource_providers': rpjson}
        self.ks_adap_mock.get.return_value = resp_mock

-        result = self.client._get_providers_in_aggregates([uuids.agg1,
+        result = self.client._get_providers_in_aggregates(self.context,
+                                                          [uuids.agg1,
                                                           uuids.agg2])

        expected_url = ('/resource_providers?member_of=in:' +
                        ','.join((uuids.agg1, uuids.agg2)))
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.3')
+            expected_url, raise_exc=False, microversion='1.3',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(rpjson, result)

    def test_get_providers_in_aggregates_emptylist(self):
-        self.assertEqual([], self.client._get_providers_in_aggregates([]))
+        self.assertEqual(
+            [], self.client._get_providers_in_aggregates(self.context, []))
        self.ks_adap_mock.get.assert_not_called()

    @mock.patch.object(report.LOG, 'error')
@@ -1711,11 +1723,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):

        uuid = uuids.agg
        self.assertRaises(exception.ResourceProviderRetrievalFailed,
-                          self.client._get_providers_in_aggregates, [uuid])
+                          self.client._get_providers_in_aggregates,
+                          self.context, [uuid])

        expected_url = '/resource_providers?member_of=in:' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.3')
+            expected_url, raise_exc=False, microversion='1.3',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        # A 503 Service Unavailable should trigger an error log that
        # includes the placement request id
        self.assertTrue(logging_mock.called)
@@ -1745,11 +1759,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        resp_mock.json.return_value = {'resource_providers': rpjson}
        self.ks_adap_mock.get.return_value = resp_mock

-        result = self.client._get_providers_in_tree(root)
+        result = self.client._get_providers_in_tree(self.context, root)

        expected_url = '/resource_providers?in_tree=' + root
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.14')
+            expected_url, raise_exc=False, microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(rpjson, result)

    @mock.patch.object(report.LOG, 'error')
@@ -1763,11 +1778,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):

        uuid = uuids.compute_node
        self.assertRaises(exception.ResourceProviderRetrievalFailed,
-                          self.client._get_providers_in_tree, uuid)
+                          self.client._get_providers_in_tree, self.context,
+                          uuid)

        expected_url = '/resource_providers?in_tree=' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.14')
+            expected_url, raise_exc=False, microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        # A 503 Service Unavailable should trigger an error log that includes
        # the placement request id
        self.assertTrue(logging_mock.called)
@@ -2000,11 +2017,12 @@ class TestAggregates(SchedulerReportClientTestCase):
        resp_mock.json.return_value = {'aggregates': aggs}
        self.ks_adap_mock.get.return_value = resp_mock

-        result = self.client._get_provider_aggregates(uuid)
+        result = self.client._get_provider_aggregates(self.context, uuid)

        expected_url = '/resource_providers/' + uuid + '/aggregates'
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.1')
+            expected_url, raise_exc=False, microversion='1.1',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(set(aggs), result)

    @mock.patch.object(report.LOG, 'error')
@@ -2021,11 +2039,12 @@ class TestAggregates(SchedulerReportClientTestCase):
        resp_mock.status_code = status_code
        self.assertRaises(
            exception.ResourceProviderAggregateRetrievalFailed,
-            self.client._get_provider_aggregates, uuid)
+            self.client._get_provider_aggregates, self.context, uuid)

        expected_url = '/resource_providers/' + uuid + '/aggregates'
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.1')
+            expected_url, raise_exc=False, microversion='1.1',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertTrue(log_mock.called)
        self.assertEqual(uuids.request_id,
                         log_mock.call_args[0][1]['placement_req_id'])
@@ -2046,11 +2065,13 @@ class TestTraits(SchedulerReportClientTestCase):
        resp_mock.json.return_value = {'traits': traits}
        self.ks_adap_mock.get.return_value = resp_mock

-        result = self.client._get_provider_traits(uuid)
+        result = self.client._get_provider_traits(self.context, uuid)

        expected_url = '/resource_providers/' + uuid + '/traits'
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, **self.trait_api_kwargs)
+            expected_url,
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
        self.assertEqual(set(traits), result)

    @mock.patch.object(report.LOG, 'error')
@@ -2067,11 +2088,13 @@ class TestTraits(SchedulerReportClientTestCase):
        resp_mock.status_code = status_code
        self.assertRaises(
            exception.ResourceProviderTraitRetrievalFailed,
-            self.client._get_provider_traits, uuid)
+            self.client._get_provider_traits, self.context, uuid)

        expected_url = '/resource_providers/' + uuid + '/traits'
        self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, **self.trait_api_kwargs)
+            expected_url,
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
        self.assertTrue(log_mock.called)
        self.assertEqual(uuids.request_id,
                         log_mock.call_args[0][1]['placement_req_id'])
@@ -2093,7 +2116,9 @@ class TestTraits(SchedulerReportClientTestCase):
        get_mock.json.return_value = {'traits': standard_traits}
        self.client._ensure_traits(self.context, all_traits)
        self.ks_adap_mock.get.assert_called_once_with(
-            '/traits?name=in:' + ','.join(all_traits), **self.trait_api_kwargs)
+            '/traits?name=in:' + ','.join(all_traits),
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
        self.ks_adap_mock.put.assert_has_calls(
            [mock.call('/traits/' + trait,
                       headers={'X-Openstack-Request-Id': self.context.global_id},
@@ -2107,6 +2132,7 @@ class TestTraits(SchedulerReportClientTestCase):
        self.client._ensure_traits(self.context, standard_traits)
        self.ks_adap_mock.get.assert_called_once_with(
            '/traits?name=in:' + ','.join(standard_traits),
+            headers={'X-Openstack-Request-Id': self.context.global_id},
            **self.trait_api_kwargs)
        self.ks_adap_mock.put.assert_not_called()

@@ -2126,7 +2152,9 @@ class TestTraits(SchedulerReportClientTestCase):
                          self.context, ['FOO'])

        self.ks_adap_mock.get.assert_called_once_with(
-            '/traits?name=in:FOO', **self.trait_api_kwargs)
+            '/traits?name=in:FOO',
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
        self.ks_adap_mock.put.assert_not_called()

    def test_ensure_traits_fail_creation(self):
@@ -2142,7 +2170,9 @@ class TestTraits(SchedulerReportClientTestCase):
                          self.context, ['FOO'])

        self.ks_adap_mock.get.assert_called_once_with(
-            '/traits?name=in:FOO', **self.trait_api_kwargs)
+            '/traits?name=in:FOO',
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
        self.ks_adap_mock.put.assert_called_once_with(
            '/traits/FOO',
            headers={'X-Openstack-Request-Id': self.context.global_id},
@@ -2170,7 +2200,9 @@ class TestTraits(SchedulerReportClientTestCase):

        # Verify API calls
        self.ks_adap_mock.get.assert_called_once_with(
-            '/traits?name=in:' + ','.join(traits), **self.trait_api_kwargs)
+            '/traits?name=in:' + ','.join(traits),
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
        self.ks_adap_mock.put.assert_called_once_with(
            '/resource_providers/%s/traits' % uuids.rp,
            json={'traits': traits, 'resource_provider_generation': 0},
@@ -2233,10 +2265,11 @@ class TestAssociations(SchedulerReportClientTestCase):
        self.client._provider_tree.new_root('compute', uuid, 1)
        mock_agg_get.return_value = set([uuids.agg1])
        mock_trait_get.return_value = set(['CUSTOM_GOLD'])
-        self.client._refresh_associations(uuid)
-        mock_agg_get.assert_called_once_with(uuid)
-        mock_trait_get.assert_called_once_with(uuid)
-        mock_pia_get.assert_called_once_with(mock_agg_get.return_value)
+        self.client._refresh_associations(self.context, uuid)
+        mock_agg_get.assert_called_once_with(self.context, uuid)
+        mock_trait_get.assert_called_once_with(self.context, uuid)
+        mock_pia_get.assert_called_once_with(
+            self.context, mock_agg_get.return_value)
        self.assertIn(uuid, self.client.association_refresh_time)
        self.assertTrue(
            self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
@@ -2262,9 +2295,10 @@ class TestAssociations(SchedulerReportClientTestCase):
        self.client._provider_tree.new_root('compute', uuid, 1)
        mock_agg_get.return_value = set([uuids.agg1])
        mock_trait_get.return_value = set(['CUSTOM_GOLD'])
-        self.client._refresh_associations(uuid, refresh_sharing=False)
-        mock_agg_get.assert_called_once_with(uuid)
-        mock_trait_get.assert_called_once_with(uuid)
+        self.client._refresh_associations(self.context, uuid,
+                                          refresh_sharing=False)
+        mock_agg_get.assert_called_once_with(self.context, uuid)
+        mock_trait_get.assert_called_once_with(self.context, uuid)
        mock_pia_get.assert_not_called()
        self.assertIn(uuid, self.client.association_refresh_time)
        self.assertTrue(
@@ -2291,7 +2325,7 @@ class TestAssociations(SchedulerReportClientTestCase):
        """
        mock_stale.return_value = False
        uuid = uuids.compute_node
-        self.client._refresh_associations(uuid)
+        self.client._refresh_associations(self.context, uuid)
        mock_agg_get.assert_not_called()
        mock_trait_get.assert_not_called()
        mock_pia_get.assert_not_called()
@@ -2316,10 +2350,10 @@ class TestAssociations(SchedulerReportClientTestCase):

        # Called a first time because association_refresh_time is empty.
        now = time.time()
-        self.client._refresh_associations(uuid)
-        mock_agg_get.assert_called_once_with(uuid)
-        mock_trait_get.assert_called_once_with(uuid)
-        mock_pia_get.assert_called_once_with(set())
+        self.client._refresh_associations(self.context, uuid)
+        mock_agg_get.assert_called_once_with(self.context, uuid)
+        mock_trait_get.assert_called_once_with(self.context, uuid)
+        mock_pia_get.assert_called_once_with(self.context, set())
        log_mock.assert_has_calls([
            mock.call('Refreshing aggregate associations for resource '
                      'provider %s, aggregates: %s', uuid, 'None'),
@@ -2336,17 +2370,17 @@ class TestAssociations(SchedulerReportClientTestCase):
        with mock.patch('time.time') as mock_future:
            # Not called a second time because not enough time has passed.
            mock_future.return_value = now + report.ASSOCIATION_REFRESH / 2
-            self.client._refresh_associations(uuid)
+            self.client._refresh_associations(self.context, uuid)
            mock_agg_get.assert_not_called()
            mock_trait_get.assert_not_called()
            mock_pia_get.assert_not_called()

            # Called because time has passed.
            mock_future.return_value = now + report.ASSOCIATION_REFRESH + 1
-            self.client._refresh_associations(uuid)
-            mock_agg_get.assert_called_once_with(uuid)
-            mock_trait_get.assert_called_once_with(uuid)
-            mock_pia_get.assert_called_once_with(set())
+            self.client._refresh_associations(self.context, uuid)
+            mock_agg_get.assert_called_once_with(self.context, uuid)
+            mock_trait_get.assert_called_once_with(self.context, uuid)
+            mock_pia_get.assert_called_once_with(self.context, set())


class TestComputeNodeToInventoryDict(test.NoDBTestCase):
@@ -2686,7 +2720,8 @@ There was a conflict when trying to complete your request.
        self.assertTrue(result)

        exp_url = '/resource_providers/%s/inventories' % uuid
-        mock_get.assert_called_once_with(exp_url)
+        mock_get.assert_called_once_with(
+            exp_url, global_request_id=self.context.global_id)
        # Updated with the new inventory from the PUT call
        self._validate_provider(uuid, generation=44)
        expected = {
@@ -2763,7 +2798,8 @@ There was a conflict when trying to complete your request.
        self.assertTrue(result)

        exp_url = '/resource_providers/%s/inventories' % uuid
-        mock_get.assert_called_once_with(exp_url)
+        mock_get.assert_called_once_with(
+            exp_url, global_request_id=self.context.global_id)
        # Updated with the new inventory from the PUT call
        self._validate_provider(uuid, generation=44)
        expected = {
@@ -2849,7 +2885,8 @@ There was a conflict when trying to complete your request.
        )
        self.assertTrue(result)
        exp_url = '/resource_providers/%s/inventories' % uuid
-        mock_get.assert_called_once_with(exp_url)
+        mock_get.assert_called_once_with(
+            exp_url, global_request_id=self.context.global_id)
        # No update so put should not be called
        self.assertFalse(mock_put.called)
        # Make sure we updated the generation from the inventory records
@@ -3395,7 +3432,8 @@ class TestAllocations(SchedulerReportClientTestCase):
        self.client.update_instance_allocation(self.context, cn, inst, 1)
        self.assertFalse(mock_put.called)
        mock_get.assert_called_once_with(
-            '/allocations/%s' % inst.uuid)
+            '/allocations/%s' % inst.uuid,
+            global_request_id=self.context.global_id)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get')
@@ -119,12 +119,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
        }
        with mock.patch.object(self.manager.driver, 'select_destinations'
                               ) as select_destinations:
-            self.manager.select_destinations(None, spec_obj=fake_spec,
+            self.manager.select_destinations(self.context, spec_obj=fake_spec,
                    instance_uuids=[fake_spec.instance_uuid])
-            select_destinations.assert_called_once_with(None, fake_spec,
+            select_destinations.assert_called_once_with(
+                self.context, fake_spec,
                    [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
                    mock.sentinel.p_sums, fake_version, False)
-        mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+        mock_get_ac.assert_called_once_with(
+            self.context, mock_rfrs.return_value)

        # Now call select_destinations() with True values for the params
        # introduced in RPC version 4.5
@@ -196,10 +198,12 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
        with mock.patch.object(self.manager.driver, 'select_destinations'
                               ) as select_destinations:
            self.assertRaises(messaging.rpc.dispatcher.ExpectedException,
-                    self.manager.select_destinations, None, spec_obj=fake_spec,
+                    self.manager.select_destinations, self.context,
+                    spec_obj=fake_spec,
                    instance_uuids=[fake_spec.instance_uuid])
            select_destinations.assert_not_called()
-            mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+            mock_get_ac.assert_called_once_with(
+                self.context, mock_rfrs.return_value)

    def test_select_destination_old_placement(self):
        """Tests that we will raise NoValidhost when the scheduler
@@ -240,11 +244,12 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
        }
        with mock.patch.object(self.manager.driver, 'select_destinations'
                               ) as select_destinations:
-            self.manager.select_destinations(None, spec_obj=fake_spec)
-            select_destinations.assert_called_once_with(None, fake_spec, None,
-                expected_alloc_reqs_by_rp_uuid, mock.sentinel.p_sums, "42.0",
-                False)
-        mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+            self.manager.select_destinations(self.context, spec_obj=fake_spec)
+            select_destinations.assert_called_once_with(self.context,
+                fake_spec, None, expected_alloc_reqs_by_rp_uuid,
+                mock.sentinel.p_sums, "42.0", False)
+        mock_get_ac.assert_called_once_with(
+            self.context, mock_rfrs.return_value)

    # TODO(sbauza): Remove that test once the API v4 is removed
    @mock.patch('nova.scheduler.utils.resources_from_request_spec')
@@ -264,13 +269,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
        }
        with mock.patch.object(self.manager.driver, 'select_destinations'
                               ) as select_destinations:
-            self.manager.select_destinations(None, request_spec='fake_spec',
-                    filter_properties='fake_props',
-                    instance_uuids=[fake_spec.instance_uuid])
-            select_destinations.assert_called_once_with(None, fake_spec,
-                    [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
-                    mock.sentinel.p_sums, "42.0", False)
-        mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+            self.manager.select_destinations(
+                self.context, request_spec='fake_spec',
+                filter_properties='fake_props',
+                instance_uuids=[fake_spec.instance_uuid])
+            select_destinations.assert_called_once_with(
+                self.context, fake_spec,
+                [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
+                mock.sentinel.p_sums, "42.0", False)
+        mock_get_ac.assert_called_once_with(
+            self.context, mock_rfrs.return_value)

    def test_update_aggregates(self):
        with mock.patch.object(self.manager.driver.host_manager,
@@ -465,7 +465,7 @@ class TestUtils(test.NoDBTestCase):
            utils.claim_resources_on_destination(
                self.context, reportclient, instance, source_node, dest_node)
            mock_get_allocs.assert_called_once_with(
-                uuids.source_node, instance.uuid)
+                self.context, uuids.source_node, instance.uuid)

        test()

@@ -505,7 +505,7 @@ class TestUtils(test.NoDBTestCase):
                self.context, reportclient, instance,
                source_node, dest_node)
            mock_get_allocs.assert_called_once_with(
-                uuids.source_node, instance.uuid)
+                self.context, uuids.source_node, instance.uuid)
            mock_claim.assert_called_once_with(
                self.context, instance.uuid, dest_alloc_request,
                instance.project_id, instance.user_id,
releasenotes/notes/bug-1734625-419fd0e21bd332f6.yaml (new file, +9 lines)
@@ -0,0 +1,9 @@
+---
+fixes:
+  - |
+    The SchedulerReportClient
+    (``nova.scheduler.client.report.SchedulerReportClient``) sends requests
+    with the global request ID in the ``X-Openstack-Request-Id`` header
+    to the placement service. `Bug 1734625`_
+
+    .. _Bug 1734625: https://bugs.launchpad.net/nova/+bug/1734625
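For completeness, the forwarding behavior the unit tests above assert can be
checked in isolation with the mock library the tests already use (sketch only;
MiniReportClient restates the simplified wrapper shape from earlier on this
page, not the real class):

    import mock

    INBOUND_HEADER = 'X-Openstack-Request-Id'


    class MiniReportClient(object):
        """Condensed stand-in for SchedulerReportClient (illustration)."""
        def __init__(self, session):
            self._client = session

        def get(self, url, version=None, global_request_id=None):
            headers = ({INBOUND_HEADER: global_request_id}
                       if global_request_id else {})
            return self._client.get(url, raise_exc=False,
                                    microversion=version, headers=headers)


    session = mock.Mock()
    MiniReportClient(session).get('/allocation_candidates', version='1.17',
                                  global_request_id='req-abc123')
    # The header reaches the session exactly as the unit tests expect.
    session.get.assert_called_once_with(
        '/allocation_candidates', raise_exc=False, microversion='1.17',
        headers={'X-Openstack-Request-Id': 'req-abc123'})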