Merge "[placement] Add sending global request ID in get" into stable/queens
This commit is contained in:
commit
57b3d59fae
|
@@ -2223,7 +2223,8 @@ class ComputeManager(manager.Manager):
         try:
             resources['allocations'] = (
-                self.reportclient.get_allocations_for_consumer(instance.uuid))
+                self.reportclient.get_allocations_for_consumer(context,
+                                                               instance.uuid))
         except Exception:
             LOG.exception('Failure retrieving placement allocations',
                           instance=instance)

@@ -3046,7 +3047,7 @@ class ComputeManager(manager.Manager):
                 context, instance, self.host, migration)

         allocations = self.reportclient.get_allocations_for_consumer(
-            instance.uuid)
+            context, instance.uuid)

         network_info = instance.get_network_info()
         if bdms is None:

@@ -3799,7 +3800,7 @@ class ComputeManager(manager.Manager):
                 # so, avoid doing the legacy behavior below.
                 mig_allocs = (
                     self.reportclient.get_allocations_for_consumer_by_provider(
-                        cn_uuid, migration.uuid))
+                        context, cn_uuid, migration.uuid))
                 if mig_allocs:
                     LOG.info(_('Source node %(node)s reverted migration '
                                '%(mig)s; not deleting migration-based '

@@ -3814,7 +3815,7 @@ class ComputeManager(manager.Manager):
                 # accounting
                 allocs = (
                     self.reportclient.get_allocations_for_consumer_by_provider(
-                        cn_uuid, migration.uuid))
+                        context, cn_uuid, migration.uuid))
                 if allocs:
                     # NOTE(danms): The source did migration-based allocation
                     # accounting, so we should let the source node rejigger

@@ -4013,7 +4014,7 @@ class ComputeManager(manager.Manager):
        # Fetch the original allocation that the instance had on the source
        # node, which are now held by the migration
        orig_alloc = self.reportclient.get_allocations_for_consumer(
-           migration.uuid)
+           context, migration.uuid)
        if not orig_alloc:
            # NOTE(danms): This migration did not do per-migration allocation
            # accounting, so nothing to do here.

@@ -4915,7 +4916,7 @@ class ComputeManager(manager.Manager):
         limits = filter_properties.get('limits', {})

         allocations = self.reportclient.get_allocations_for_consumer(
-            instance.uuid)
+            context, instance.uuid)

         shelved_image_ref = instance.image_ref
         if image:

@@ -6306,7 +6307,7 @@ class ComputeManager(manager.Manager):
             migration = migrate_data.migration
             rc = self.scheduler_client.reportclient
             # Check to see if our migration has its own allocations
-            allocs = rc.get_allocations_for_consumer(migration.uuid)
+            allocs = rc.get_allocations_for_consumer(ctxt, migration.uuid)
         else:
             # We didn't have data on a migration, which means we can't
             # look up to see if we had new-style migration-based
@@ -1232,7 +1232,7 @@ class ResourceTracker(object):
         # always creates allocations for an instance
        known_instances = set(self.tracked_instances.keys())
        allocations = self.reportclient.get_allocations_for_resource_provider(
-           cn.uuid) or {}
+           context, cn.uuid) or {}
        read_deleted_context = context.elevated(read_deleted='yes')
        for consumer_uuid, alloc in allocations.items():
            if consumer_uuid in known_instances:
@@ -45,7 +45,7 @@ def replace_allocation_with_migration(context, instance, migration):
    reportclient = schedclient.reportclient

    orig_alloc = reportclient.get_allocations_for_consumer_by_provider(
-       source_cn.uuid, instance.uuid)
+       context, source_cn.uuid, instance.uuid)
    if not orig_alloc:
        LOG.debug('Unable to find existing allocations for instance on '
                  'source compute node: %s. This is normal if you are not '
@@ -274,8 +274,11 @@ class SchedulerReportClient(object):
        client.additional_headers = {'accept': 'application/json'}
        return client

-   def get(self, url, version=None):
-       return self._client.get(url, raise_exc=False, microversion=version)
+   def get(self, url, version=None, global_request_id=None):
+       headers = ({request_id.INBOUND_HEADER: global_request_id}
+                  if global_request_id else {})
+       return self._client.get(url, raise_exc=False, microversion=version,
+                               headers=headers)

    def post(self, url, data, version=None, global_request_id=None):
        headers = ({request_id.INBOUND_HEADER: global_request_id}
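The hunk above is the heart of the change. As the unchanged context lines
show, post() already accepted global_request_id; this change brings get()
in line with it. The following minimal sketch illustrates the pattern in
isolation (it is not Nova's actual module: the header name is hard-coded
where the real code imports oslo_middleware's request_id.INBOUND_HEADER,
and FakeSession stands in for the keystoneauth1 adapter the real client
wraps):

    # Minimal sketch of the header-threading pattern introduced above.
    # Assumption: INBOUND_HEADER is hard-coded here instead of importing
    # oslo_middleware.request_id; FakeSession is a stand-in HTTP session.

    INBOUND_HEADER = 'X-Openstack-Request-Id'


    class FakeSession(object):
        """Stand-in for the keystoneauth1 adapter; just records the call."""
        def get(self, url, raise_exc=False, microversion=None, headers=None):
            return {'url': url, 'microversion': microversion,
                    'headers': headers or {}}


    class ReportClientSketch(object):
        def __init__(self):
            self._client = FakeSession()

        def get(self, url, version=None, global_request_id=None):
            # Only attach the header when the caller actually has a global
            # request ID; otherwise send no extra headers at all.
            headers = ({INBOUND_HEADER: global_request_id}
                       if global_request_id else {})
            return self._client.get(url, raise_exc=False,
                                    microversion=version, headers=headers)


    client = ReportClientSketch()
    resp = client.get('/resource_providers', version='1.14',
                      global_request_id='req-deadbeef')
    assert resp['headers'][INBOUND_HEADER] == 'req-deadbeef'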
@@ -306,7 +309,7 @@ class SchedulerReportClient(object):
                                headers=headers)

    @safe_connect
-   def get_allocation_candidates(self, resources):
+   def get_allocation_candidates(self, context, resources):
        """Returns a tuple of (allocation_requests, provider_summaries,
        allocation_request_version).

@@ -324,6 +327,7 @@ class SchedulerReportClient(object):
            this data from placement, or (None, None, None) if the
            request failed

+       :param context: The security context
        :param nova.scheduler.utils.ResourceRequest resources:
            A ResourceRequest object representing the requested resources and
            traits from the request spec.

@@ -347,7 +351,8 @@ class SchedulerReportClient(object):

        version = '1.17'
        url = "/allocation_candidates?%s" % parse.urlencode(qs_params)
-       resp = self.get(url, version=version)
+       resp = self.get(url, version=version,
+                       global_request_id=context.global_id)
        if resp.status_code == 200:
            data = resp.json()
            return (data['allocation_requests'], data['provider_summaries'],

@@ -371,7 +376,7 @@ class SchedulerReportClient(object):
        return None, None, None

    @safe_connect
-   def _get_provider_aggregates(self, rp_uuid):
+   def _get_provider_aggregates(self, context, rp_uuid):
        """Queries the placement API for a resource provider's aggregates.

        :param rp_uuid: UUID of the resource provider to grab aggregates for.

@@ -383,7 +388,7 @@ class SchedulerReportClient(object):
            does not exist.
        """
        resp = self.get("/resource_providers/%s/aggregates" % rp_uuid,
-                       version='1.1')
+                       version='1.1', global_request_id=context.global_id)
        if resp.status_code == 200:
            data = resp.json()
            return set(data['aggregates'])

@@ -402,9 +407,10 @@ class SchedulerReportClient(object):
        raise exception.ResourceProviderAggregateRetrievalFailed(uuid=rp_uuid)

    @safe_connect
-   def _get_provider_traits(self, rp_uuid):
+   def _get_provider_traits(self, context, rp_uuid):
        """Queries the placement API for a resource provider's traits.

+       :param context: The security context
        :param rp_uuid: UUID of the resource provider to grab traits for.
        :return: A set() of string trait names, which may be empty if the
            specified provider has no traits.

@@ -413,7 +419,7 @@ class SchedulerReportClient(object):
            empty set()) if the specified resource provider does not exist.
        """
        resp = self.get("/resource_providers/%s/traits" % rp_uuid,
-                       version='1.6')
+                       version='1.6', global_request_id=context.global_id)

        if resp.status_code == 200:
            return set(resp.json()['traits'])

@@ -428,17 +434,19 @@ class SchedulerReportClient(object):
        raise exception.ResourceProviderTraitRetrievalFailed(uuid=rp_uuid)

    @safe_connect
-   def _get_resource_provider(self, uuid):
+   def _get_resource_provider(self, context, uuid):
        """Queries the placement API for a resource provider record with the
        supplied UUID.

+       :param context: The security context
        :param uuid: UUID identifier for the resource provider to look up
        :return: A dict of resource provider information if found or None if no
            such resource provider could be found.
        :raise: ResourceProviderRetrievalFailed on error.
        """
        resp = self.get("/resource_providers/%s" % uuid,
-                       version=NESTED_PROVIDER_API_VERSION)
+                       version=NESTED_PROVIDER_API_VERSION,
+                       global_request_id=context.global_id)
        if resp.status_code == 200:
            data = resp.json()
            return data

@@ -459,11 +467,12 @@ class SchedulerReportClient(object):
        raise exception.ResourceProviderRetrievalFailed(uuid=uuid)

    @safe_connect
-   def _get_sharing_providers(self, agg_uuids):
+   def _get_sharing_providers(self, context, agg_uuids):
        """Queries the placement API for a list of the resource providers
        associated with any of the specified aggregates and possessing the
        MISC_SHARES_VIA_AGGREGATE trait.

+       :param context: The security context
        :param agg_uuids: Iterable of string UUIDs of aggregates to filter on.
        :return: A list of dicts of resource provider information, which may be
            empty if no provider exists with the specified UUID.

@@ -475,11 +484,11 @@ class SchedulerReportClient(object):
        qpval = ','.join(agg_uuids)
        # TODO(efried): Need a ?having_traits=[...] on this API!
        resp = self.get("/resource_providers?member_of=in:" + qpval,
-                       version='1.3')
+                       version='1.3', global_request_id=context.global_id)
        if resp.status_code == 200:
            rps = []
            for rp in resp.json()['resource_providers']:
-               traits = self._get_provider_traits(rp['uuid'])
+               traits = self._get_provider_traits(context, rp['uuid'])
                if os_traits.MISC_SHARES_VIA_AGGREGATE in traits:
                    rps.append(rp)
            return rps

@@ -499,17 +508,19 @@ class SchedulerReportClient(object):
        raise exception.ResourceProviderRetrievalFailed(message=msg % args)

    @safe_connect
-   def _get_providers_in_tree(self, uuid):
+   def _get_providers_in_tree(self, context, uuid):
        """Queries the placement API for a list of the resource providers in
        the tree associated with the specified UUID.

+       :param context: The security context
        :param uuid: UUID identifier for the resource provider to look up
        :return: A list of dicts of resource provider information, which may be
            empty if no provider exists with the specified UUID.
        :raise: ResourceProviderRetrievalFailed on error.
        """
        resp = self.get("/resource_providers?in_tree=%s" % uuid,
-                       version=NESTED_PROVIDER_API_VERSION)
+                       version=NESTED_PROVIDER_API_VERSION,
+                       global_request_id=context.global_id)

        if resp.status_code == 200:
            return resp.json()['resource_providers']

@@ -584,7 +595,7 @@ class SchedulerReportClient(object):
                'placement_req_id': placement_req_id,
            }
            LOG.info(msg, args)
-           return self._get_resource_provider(uuid)
+           return self._get_resource_provider(context, uuid)

        # A provider with the same *name* already exists, or some other error.
        msg = ("[%(placement_req_id)s] Failed to create resource provider "

@@ -641,12 +652,12 @@ class SchedulerReportClient(object):
            # If we had the requested provider locally, refresh it and its
            # descendants, but only if stale.
            for u in self._provider_tree.get_provider_uuids(uuid):
-               self._refresh_associations(u, force=False)
+               self._refresh_associations(context, u, force=False)
            return uuid

        # We don't have it locally; check placement or create it.
        created_rp = None
-       rps_to_refresh = self._get_providers_in_tree(uuid)
+       rps_to_refresh = self._get_providers_in_tree(context, uuid)
        if not rps_to_refresh:
            created_rp = self._create_resource_provider(
                context, uuid, name or uuid,

@@ -661,7 +672,7 @@ class SchedulerReportClient(object):

        for rp_to_refresh in rps_to_refresh:
            self._refresh_associations(
-               rp_to_refresh['uuid'],
+               context, rp_to_refresh['uuid'],
                generation=rp_to_refresh.get('generation'), force=True)

        return uuid

@@ -700,14 +711,14 @@ class SchedulerReportClient(object):
                raise exception.ResourceProviderInUse()
            raise exception.ResourceProviderDeletionFailed(uuid=rp_uuid)

-   def _get_inventory(self, rp_uuid):
+   def _get_inventory(self, context, rp_uuid):
        url = '/resource_providers/%s/inventories' % rp_uuid
-       result = self.get(url)
+       result = self.get(url, global_request_id=context.global_id)
        if not result:
            return None
        return result.json()

-   def _refresh_and_get_inventory(self, rp_uuid):
+   def _refresh_and_get_inventory(self, context, rp_uuid):
        """Helper method that retrieves the current inventory for the supplied
        resource provider according to the placement API.

@@ -716,7 +727,7 @@ class SchedulerReportClient(object):
        generation and attempt to update inventory if any exists, otherwise
        return empty inventories.
        """
-       curr = self._get_inventory(rp_uuid)
+       curr = self._get_inventory(context, rp_uuid)
        if curr is None:
            return None

@@ -726,8 +737,8 @@ class SchedulerReportClient(object):
        self._provider_tree.update_inventory(rp_uuid, curr_inv, cur_gen)
        return curr

-   def _refresh_associations(self, rp_uuid, generation=None, force=False,
-                             refresh_sharing=True):
+   def _refresh_associations(self, context, rp_uuid, generation=None,
+                             force=False, refresh_sharing=True):
        """Refresh aggregates, traits, and (optionally) aggregate-associated
        sharing providers for the specified resource provider uuid.

@@ -739,6 +750,7 @@ class SchedulerReportClient(object):
        historical: all code paths that get us here are doing inventory refresh
        themselves.

+       :param context: The security context
        :param rp_uuid: UUID of the resource provider to check for fresh
            aggregates and traits
        :param generation: The resource provider generation to set. If None,

@@ -755,7 +767,7 @@ class SchedulerReportClient(object):
        """
        if force or self._associations_stale(rp_uuid):
            # Refresh aggregates
-           aggs = self._get_provider_aggregates(rp_uuid)
+           aggs = self._get_provider_aggregates(context, rp_uuid)
            msg = ("Refreshing aggregate associations for resource provider "
                   "%s, aggregates: %s")
            LOG.debug(msg, rp_uuid, ','.join(aggs or ['None']))

@@ -766,7 +778,7 @@ class SchedulerReportClient(object):
                rp_uuid, aggs, generation=generation)

            # Refresh traits
-           traits = self._get_provider_traits(rp_uuid)
+           traits = self._get_provider_traits(context, rp_uuid)
            msg = ("Refreshing trait associations for resource provider %s, "
                   "traits: %s")
            LOG.debug(msg, rp_uuid, ','.join(traits or ['None']))

@@ -777,7 +789,7 @@ class SchedulerReportClient(object):

            if refresh_sharing:
                # Refresh providers associated by aggregate
-               for rp in self._get_sharing_providers(aggs):
+               for rp in self._get_sharing_providers(context, aggs):
                    if not self._provider_tree.exists(rp['uuid']):
                        # NOTE(efried): Right now sharing providers are always
                        # treated as roots. This is deliberate. From the

@@ -790,7 +802,8 @@ class SchedulerReportClient(object):
                        # providers). No need to override force=True for newly-
                        # added providers - the missing timestamp will always
                        # trigger them to refresh.
-                       self._refresh_associations(rp['uuid'], force=force,
+                       self._refresh_associations(context, rp['uuid'],
+                                                  force=force,
                                                   refresh_sharing=False)
            self.association_refresh_time[rp_uuid] = time.time()

@@ -816,7 +829,7 @@ class SchedulerReportClient(object):
        # TODO(jaypipes): Should we really be calling the placement API to get
        # the current inventory for every resource provider each and every time
        # update_resource_stats() is called? :(
-       curr = self._refresh_and_get_inventory(rp_uuid)
+       curr = self._refresh_and_get_inventory(context, rp_uuid)
        if curr is None:
            return False

@@ -945,7 +958,7 @@ class SchedulerReportClient(object):
        if not self._provider_tree.has_inventory(rp_uuid):
            return None

-       curr = self._refresh_and_get_inventory(rp_uuid)
+       curr = self._refresh_and_get_inventory(context, rp_uuid)

        # Check to see if we need to update placement's view
        if not curr.get('inventories', {}):

@@ -1052,7 +1065,7 @@ class SchedulerReportClient(object):
            parent_provider_uuid=parent_provider_uuid)
        # Ensure inventories are up to date (for *all* cached RPs)
        for uuid in self._provider_tree.get_provider_uuids():
-           self._refresh_and_get_inventory(uuid)
+           self._refresh_and_get_inventory(context, uuid)
        # Return a *copy* of the tree.
        return copy.deepcopy(self._provider_tree)

@@ -1113,7 +1126,8 @@ class SchedulerReportClient(object):
        # service knows. If the caller tries to ensure a nonexistent
        # "standard" trait, they deserve the TraitCreationFailed exception
        # they'll get.
-       resp = self.get('/traits?name=in:' + ','.join(traits), version='1.6')
+       resp = self.get('/traits?name=in:' + ','.join(traits), version='1.6',
+                       global_request_id=context.global_id)
        if resp.status_code == 200:
            traits_to_create = set(traits) - set(resp.json()['traits'])
            # Might be neat to have a batch create. But creating multiple

@@ -1363,19 +1377,20 @@ class SchedulerReportClient(object):
            self._delete_inventory(context, compute_node.uuid)

    @safe_connect
-   def get_allocations_for_consumer(self, consumer):
+   def get_allocations_for_consumer(self, context, consumer):
        url = '/allocations/%s' % consumer
-       resp = self.get(url)
+       resp = self.get(url, global_request_id=context.global_id)
        if not resp:
            return {}
        else:
            return resp.json()['allocations']

-   def get_allocations_for_consumer_by_provider(self, rp_uuid, consumer):
+   def get_allocations_for_consumer_by_provider(self, context, rp_uuid,
+                                                consumer):
        # NOTE(cdent): This trims to just the allocations being
        # used on this resource provider. In the future when there
        # are shared resources there might be other providers.
-       allocations = self.get_allocations_for_consumer(consumer)
+       allocations = self.get_allocations_for_consumer(context, consumer)
        if allocations is None:
            # safe_connect can return None on 404
            allocations = {}
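Every GET-path call site now passes its request context first, and the
report client forwards context.global_id down into get(). A hedged usage
sketch of the new calling convention (FakeContext and FakeReportClient
are illustrative stand-ins, not Nova classes):

    # Illustrative sketch of the new calling convention.

    class FakeContext(object):
        def __init__(self, global_id):
            self.global_id = global_id


    class FakeReportClient(object):
        def get(self, url, version=None, global_request_id=None):
            # In the real client this issues the HTTP GET with the
            # X-Openstack-Request-Id header when global_request_id is set.
            print('GET %s [request-id: %s]' % (url, global_request_id))
            return None  # pretend the consumer has no allocations

        def get_allocations_for_consumer(self, context, consumer):
            # The context rides along so its global ID reaches placement.
            return self.get('/allocations/%s' % consumer,
                            global_request_id=context.global_id) or {}


    ctxt = FakeContext('req-0123')
    client = FakeReportClient()
    allocs = client.get_allocations_for_consumer(ctxt, 'instance-uuid')
    assert allocs == {}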
@@ -1385,7 +1400,7 @@ class SchedulerReportClient(object):
    def _allocate_for_instance(self, context, rp_uuid, instance):
        my_allocations = _instance_to_allocations_dict(instance)
        current_allocations = self.get_allocations_for_consumer_by_provider(
-           rp_uuid, instance.uuid)
+           context, rp_uuid, instance.uuid)
        if current_allocations == my_allocations:
            allocstr = ','.join(['%s=%s' % (k, v)
                                 for k, v in my_allocations.items()])

@@ -1470,7 +1485,7 @@ class SchedulerReportClient(object):
        # We first need to determine if this is a move operation and if so
        # create the "doubled-up" allocation that exists for the duration of
        # the move operation against both the source and destination hosts
-       r = self.get(url)
+       r = self.get(url, global_request_id=context.global_id)
        if r.status_code == 200:
            current_allocs = r.json()['allocations']
            if current_allocs:

@@ -1526,7 +1541,7 @@ class SchedulerReportClient(object):
        url = '/allocations/%s' % consumer_uuid

        # Grab the "doubled-up" allocation that we will manipulate
-       r = self.get(url)
+       r = self.get(url, global_request_id=context.global_id)
        if r.status_code != 200:
            LOG.warning("Failed to retrieve allocations for %s. Got HTTP %s",
                        consumer_uuid, r.status_code)

@@ -1750,9 +1765,9 @@ class SchedulerReportClient(object):
            self.delete_allocation_for_instance(context, instance.uuid)

    @safe_connect
-   def get_allocations_for_resource_provider(self, rp_uuid):
+   def get_allocations_for_resource_provider(self, context, rp_uuid):
        url = '/resource_providers/%s/allocations' % rp_uuid
-       resp = self.get(url)
+       resp = self.get(url, global_request_id=context.global_id)
        if not resp:
            return {}
        else:
@@ -119,7 +119,8 @@ class SchedulerManager(manager.Manager):
        alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \
            = None, None, None
        if self.driver.USES_ALLOCATION_CANDIDATES:
-           res = self.placement_client.get_allocation_candidates(resources)
+           res = self.placement_client.get_allocation_candidates(ctxt,
+                                                                 resources)
            if res is None:
                # We have to handle the case that we failed to connect to the
                # Placement service and the safe_connect decorator on

@@ -366,7 +366,7 @@ def claim_resources_on_destination(
    if not source_node_allocations:
        source_node_allocations = (
            reportclient.get_allocations_for_consumer_by_provider(
-               source_node.uuid, instance.uuid))
+               context, source_node.uuid, instance.uuid))
    if source_node_allocations:
        # Generate an allocation request for the destination node.
        alloc_request = {
@@ -106,9 +106,11 @@ class SchedulerReportClientTests(test.TestCase):
        res_class = fields.ResourceClass.VCPU
        with self._interceptor():
            # When we start out there are no resource providers.
-           rp = self.client._get_resource_provider(self.compute_uuid)
+           rp = self.client._get_resource_provider(self.context,
+                                                   self.compute_uuid)
            self.assertIsNone(rp)
-           rps = self.client._get_providers_in_tree(self.compute_uuid)
+           rps = self.client._get_providers_in_tree(self.context,
+                                                    self.compute_uuid)
            self.assertEqual([], rps)
            # But get_provider_tree_and_ensure_root creates one (via
            # _ensure_resource_provider)

@@ -120,15 +122,18 @@ class SchedulerReportClientTests(test.TestCase):
            self.client.update_compute_node(self.context, self.compute_node)

            # So now we have a resource provider
-           rp = self.client._get_resource_provider(self.compute_uuid)
+           rp = self.client._get_resource_provider(self.context,
+                                                   self.compute_uuid)
            self.assertIsNotNone(rp)
-           rps = self.client._get_providers_in_tree(self.compute_uuid)
+           rps = self.client._get_providers_in_tree(self.context,
+                                                    self.compute_uuid)
            self.assertEqual(1, len(rps))

            # We should also have empty sets of aggregate and trait
            # associations
            self.assertEqual(
-               [], self.client._get_sharing_providers([uuids.agg]))
+               [], self.client._get_sharing_providers(self.context,
+                                                      [uuids.agg]))
            self.assertFalse(
                self.client._provider_tree.have_aggregates_changed(
                    self.compute_uuid, []))

@@ -312,6 +317,8 @@ class SchedulerReportClientTests(test.TestCase):
            self.client.put('/resource_providers/%s' % self.compute_uuid,
                            payload,
                            global_request_id=global_request_id)
+           self.client.get('/resource_providers/%s' % self.compute_uuid,
+                           global_request_id=global_request_id)

    def test_get_provider_tree_with_nested_and_aggregates(self):
        """A more in-depth test of get_provider_tree_and_ensure_root with
@@ -5642,7 +5642,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
        except Exception as e:
            self.assertIsInstance(e, exception.BuildAbortException)

-       self.mock_get_allocs.assert_called_once_with(self.instance.uuid)
+       self.mock_get_allocs.assert_called_once_with(self.context,
+                                                    self.instance.uuid)
        mock_net_wait.assert_called_once_with(do_raise=False)

    @mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')

@@ -6490,7 +6491,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                'src')
            self.assertFalse(mock_report.delete_allocation_for_instance.called)
            ga.assert_called_once_with(
-               mock_rt().get_node_uuid.return_value, self.migration.uuid)
+               self.context, mock_rt().get_node_uuid.return_value,
+               self.migration.uuid)

        old = mock_report.remove_provider_from_instance_allocation
        if new_rules:

@@ -6524,7 +6526,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
                'dst')
            self.assertFalse(mock_report.delete_allocation_for_instance.called)
            cn_uuid = mock_rt().get_node_uuid.return_value
-           ga.assert_called_once_with(cn_uuid, self.migration.uuid)
+           ga.assert_called_once_with(self.context, cn_uuid,
+                                      self.migration.uuid)

        old = mock_report.remove_provider_from_instance_allocation
        if new_rules:
@@ -350,7 +350,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
            test.MatchType(objects.ImageMeta), injected_files=[],
            admin_password=None, allocations={}, network_info=[],
            block_device_info='fake_bdm')
-       self.mock_get_allocs.assert_called_once_with(instance.uuid)
+       self.mock_get_allocs.assert_called_once_with(self.context,
+                                                    instance.uuid)
        mock_get_power_state.assert_called_once_with(self.context, instance)

        self.assertNotIn('shelved_at', instance.system_metadata)

@@ -451,7 +452,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
            test.MatchType(objects.ImageMeta),
            injected_files=[], admin_password=None,
            allocations={}, network_info=[], block_device_info='fake_bdm')
-       self.mock_get_allocs.assert_called_once_with(instance.uuid)
+       self.mock_get_allocs.assert_called_once_with(self.context,
+                                                    instance.uuid)
        mock_get_power_state.assert_called_once_with(self.context, instance)

    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@@ -48,12 +48,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):
        A missing endpoint entry should not explode.
        """
        req.side_effect = ks_exc.EndpointNotFound()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'

@@ -65,7 +65,7 @@ class SafeConnectedTestCase(test.NoDBTestCase):
        A missing endpoint should cause _create_client to be called.
        """
        req.side_effect = ks_exc.EndpointNotFound()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")

        # This is the second time _create_client is called, but the first since
        # the mock was created.

@@ -79,12 +79,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):

        """
        req.side_effect = ks_exc.MissingAuthPlugin()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('keystoneauth1.session.Session.request')

@@ -95,12 +95,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):

        """
        req.side_effect = ks_exc.Unauthorized()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('keystoneauth1.session.Session.request')

@@ -112,12 +112,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):

        """
        req.side_effect = ks_exc.ConnectFailure()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls do
        # work
        req.reset_mock()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch.object(report, 'LOG')

@@ -138,12 +138,12 @@ class SafeConnectedTestCase(test.NoDBTestCase):
        Failed discovery should not blow up.
        """
        req.side_effect = ks_exc.DiscoveryFailure()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
-       self.client._get_resource_provider("fake")
+       self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

@@ -1245,12 +1245,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):
            },
        ]
        self.client._ensure_resource_provider(self.context, cn.uuid)
-       get_shr_mock.assert_called_once_with(set([uuids.agg1, uuids.agg2]))
+       get_shr_mock.assert_called_once_with(
+           self.context, set([uuids.agg1, uuids.agg2]))
        self.assertTrue(self.client._provider_tree.exists(uuids.shr1))
        self.assertTrue(self.client._provider_tree.exists(uuids.shr2))
        # _get_provider_aggregates and _traits were called thrice: one for the
        # compute RP and once for each of the sharing RPs.
-       expected_calls = [mock.call(uuid)
+       expected_calls = [mock.call(self.context, uuid)
                          for uuid in (cn.uuid, uuids.shr1, uuids.shr2)]
        get_agg_mock.assert_has_calls(expected_calls)
        get_trait_mock.assert_has_calls(expected_calls)

@@ -1303,23 +1304,24 @@ class TestProviderOperations(SchedulerReportClientTestCase):

        self.client._ensure_resource_provider(self.context, uuids.compute_node)

-       get_rpt_mock.assert_called_once_with(uuids.compute_node)
+       get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
-       get_agg_mock.assert_called_once_with(uuids.compute_node)
+       get_agg_mock.assert_called_once_with(self.context, uuids.compute_node)
        self.assertTrue(
            self.client._provider_tree.in_aggregates(uuids.compute_node,
                                                     [uuids.agg1]))
        self.assertFalse(
            self.client._provider_tree.in_aggregates(uuids.compute_node,
                                                     [uuids.agg2]))
-       get_trait_mock.assert_called_once_with(uuids.compute_node)
+       get_trait_mock.assert_called_once_with(self.context,
+                                              uuids.compute_node)
        self.assertTrue(
            self.client._provider_tree.has_traits(uuids.compute_node,
                                                  ['CUSTOM_GOLD']))
        self.assertFalse(
            self.client._provider_tree.has_traits(uuids.compute_node,
                                                  ['CUSTOM_SILVER']))
-       get_shr_mock.assert_called_once_with(set([uuids.agg1]))
+       get_shr_mock.assert_called_once_with(self.context, set([uuids.agg1]))
        self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
        self.assertFalse(create_rp_mock.called)

@@ -1343,7 +1345,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
            self.client._ensure_resource_provider, self.context,
            uuids.compute_node)

-       get_rpt_mock.assert_called_once_with(uuids.compute_node)
+       get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context, uuids.compute_node, uuids.compute_node,
            parent_provider_uuid=None)

@@ -1383,7 +1385,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):

        # We don't refresh for a just-created provider
        refresh_mock.assert_not_called()
-       get_rpt_mock.assert_called_once_with(uuids.compute_node)
+       get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context,
            uuids.compute_node,

@@ -1483,7 +1485,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        mock_exists.assert_called_once_with(uuids.root)
        mock_gpu.assert_called_once_with(uuids.root)
        mock_refresh.assert_has_calls(
-           [mock.call(uuid, force=False) for uuid in tree_uuids])
+           [mock.call(self.context, uuid, force=False)
+            for uuid in tree_uuids])

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')

@@ -1500,9 +1503,9 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
-       mock_gpit.assert_called_once_with(uuids.root)
+       mock_gpit.assert_called_once_with(self.context, uuids.root)
        mock_refresh.assert_has_calls(
-           [mock.call(uuid, generation=42, force=True)
+           [mock.call(self.context, uuid, generation=42, force=True)
            for uuid in tree_uuids])
        self.assertEqual(tree_uuids,
                         set(self.client._provider_tree.get_provider_uuids()))

@@ -1522,7 +1525,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
-       mock_gpit.assert_called_once_with(uuids.root)
+       mock_gpit.assert_called_once_with(self.context, uuids.root)
        mock_create.assert_called_once_with(self.context, uuids.root,
                                            uuids.root,
                                            parent_provider_uuid=None)

@@ -1552,10 +1555,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
        self.ks_adap_mock.get.return_value = resp_mock

        alloc_reqs, p_sums, allocation_request_version = \
-           self.client.get_allocation_candidates(resources)
+           self.client.get_allocation_candidates(self.context, resources)

        self.ks_adap_mock.get.assert_called_once_with(
-           mock.ANY, raise_exc=False, microversion='1.17')
+           mock.ANY, raise_exc=False, microversion='1.17',
+           headers={'X-Openstack-Request-Id': self.context.global_id})
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qs(split_url.query)
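The header assertions in these tests follow one pattern throughout. A
self-contained sketch of that pattern, using only the standard mock
library with stand-in objects in place of the real test fixtures:

    # Standalone sketch of the header-assertion pattern; FakeContext and
    # the bare mock adapter are stand-ins, not the real fixtures.
    from unittest import mock


    class FakeContext(object):
        global_id = 'req-abc123'


    adapter = mock.Mock()
    context = FakeContext()

    # Simulate what the report client does for a versioned GET.
    adapter.get('/allocation_candidates?resources=VCPU:1',
                raise_exc=False, microversion='1.17',
                headers={'X-Openstack-Request-Id': context.global_id})

    # The tests then assert the header was attached to the outgoing call.
    adapter.get.assert_called_once_with(
        mock.ANY, raise_exc=False, microversion='1.17',
        headers={'X-Openstack-Request-Id': context.global_id})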
@ -1583,10 +1587,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
|
||||||
self.ks_adap_mock.get.return_value = resp_mock
|
self.ks_adap_mock.get.return_value = resp_mock
|
||||||
|
|
||||||
alloc_reqs, p_sums, allocation_request_version = \
|
alloc_reqs, p_sums, allocation_request_version = \
|
||||||
self.client.get_allocation_candidates(resources)
|
self.client.get_allocation_candidates(self.context, resources)
|
||||||
|
|
||||||
self.ks_adap_mock.get.assert_called_once_with(
|
self.ks_adap_mock.get.assert_called_once_with(
|
||||||
mock.ANY, raise_exc=False, microversion='1.17')
|
mock.ANY, raise_exc=False, microversion='1.17',
|
||||||
|
headers={'X-Openstack-Request-Id': self.context.global_id})
|
||||||
url = self.ks_adap_mock.get.call_args[0][0]
|
url = self.ks_adap_mock.get.call_args[0][0]
|
||||||
split_url = parse.urlsplit(url)
|
split_url = parse.urlsplit(url)
|
||||||
query = parse.parse_qs(split_url.query)
|
query = parse.parse_qs(split_url.query)
|
||||||
|
@ -1609,10 +1614,11 @@ class TestProviderOperations(SchedulerReportClientTestCase):
|
||||||
resources = scheduler_utils.ResourceRequest.from_extra_specs(
|
resources = scheduler_utils.ResourceRequest.from_extra_specs(
|
||||||
{'resources:MEMORY_MB': '1024'})
|
{'resources:MEMORY_MB': '1024'})
|
||||||
|
|
||||||
res = self.client.get_allocation_candidates(resources)
|
res = self.client.get_allocation_candidates(self.context, resources)
|
||||||
|
|
||||||
self.ks_adap_mock.get.assert_called_once_with(
|
self.ks_adap_mock.get.assert_called_once_with(
|
||||||
mock.ANY, raise_exc=False, microversion='1.17')
|
mock.ANY, raise_exc=False, microversion='1.17',
|
||||||
|
headers={'X-Openstack-Request-Id': self.context.global_id})
|
||||||
url = self.ks_adap_mock.get.call_args[0][0]
|
url = self.ks_adap_mock.get.call_args[0][0]
|
||||||
split_url = parse.urlsplit(url)
|
split_url = parse.urlsplit(url)
|
||||||
query = parse.parse_qs(split_url.query)
|
query = parse.parse_qs(split_url.query)
|
||||||
|
@ -1634,7 +1640,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
|
||||||
resp_mock.json.return_value = json_data
|
resp_mock.json.return_value = json_data
|
||||||
self.ks_adap_mock.get.return_value = resp_mock
|
self.ks_adap_mock.get.return_value = resp_mock
|
||||||
|
|
||||||
result = self.client._get_resource_provider(uuid)
|
result = self.client._get_resource_provider(self.context, uuid)
|
||||||
|
|
||||||
expected_provider_dict = dict(
|
expected_provider_dict = dict(
|
||||||
uuid=uuid,
|
uuid=uuid,
|
||||||
|
@ -1644,7 +1650,8 @@ class TestProviderOperations(SchedulerReportClientTestCase):
|
||||||
)
|
)
|
||||||
expected_url = '/resource_providers/' + uuid
|
expected_url = '/resource_providers/' + uuid
|
||||||
self.ks_adap_mock.get.assert_called_once_with(
|
self.ks_adap_mock.get.assert_called_once_with(
|
||||||
expected_url, raise_exc=False, microversion='1.14')
|
expected_url, raise_exc=False, microversion='1.14',
|
||||||
|
headers={'X-Openstack-Request-Id': self.context.global_id})
|
||||||
self.assertEqual(expected_provider_dict, result)
|
self.assertEqual(expected_provider_dict, result)
|
||||||
|
|
||||||
def test_get_resource_provider_not_found(self):
|
def test_get_resource_provider_not_found(self):
|
||||||
|
@@ -1654,11 +1661,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         self.ks_adap_mock.get.return_value = resp_mock

         uuid = uuids.compute_node
-        result = self.client._get_resource_provider(uuid)
+        result = self.client._get_resource_provider(self.context, uuid)

         expected_url = '/resource_providers/' + uuid
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.14')
+            expected_url, raise_exc=False, microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         self.assertIsNone(result)

     @mock.patch.object(report.LOG, 'error')
@@ -1674,11 +1682,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         uuid = uuids.compute_node
         self.assertRaises(
             exception.ResourceProviderRetrievalFailed,
-            self.client._get_resource_provider, uuid)
+            self.client._get_resource_provider, self.context, uuid)

         expected_url = '/resource_providers/' + uuid
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.14')
+            expected_url, raise_exc=False, microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         # A 503 Service Unavailable should trigger an error log that
         # includes the placement request id and return None
         # from _get_resource_provider()
@@ -1718,17 +1727,19 @@ class TestProviderOperations(SchedulerReportClientTestCase):
             set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO']),
             set(['CUSTOM_BAR']),
         ]
-        result = self.client._get_sharing_providers([uuids.agg1, uuids.agg2])
+        result = self.client._get_sharing_providers(
+            self.context, [uuids.agg1, uuids.agg2])

         expected_url = ('/resource_providers?member_of=in:' +
                         ','.join((uuids.agg1, uuids.agg2)))
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.3')
+            expected_url, raise_exc=False, microversion='1.3',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         self.assertEqual(rpjson[:1], result)

     def test_get_sharing_providers_emptylist(self):
         self.assertEqual(
             [], self.client._get_sharing_providers(self.context, []))
         self.ks_adap_mock.get.assert_not_called()

     @mock.patch.object(report.LOG, 'error')
@@ -1742,11 +1753,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):

         uuid = uuids.agg
         self.assertRaises(exception.ResourceProviderRetrievalFailed,
-                          self.client._get_sharing_providers, [uuid])
+                          self.client._get_sharing_providers,
+                          self.context, [uuid])

         expected_url = '/resource_providers?member_of=in:' + uuid
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.3')
+            expected_url, raise_exc=False, microversion='1.3',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         # A 503 Service Unavailable should trigger an error log that
         # includes the placement request id
         self.assertTrue(logging_mock.called)
@@ -1776,11 +1789,12 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         resp_mock.json.return_value = {'resource_providers': rpjson}
         self.ks_adap_mock.get.return_value = resp_mock

-        result = self.client._get_providers_in_tree(root)
+        result = self.client._get_providers_in_tree(self.context, root)

         expected_url = '/resource_providers?in_tree=' + root
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.14')
+            expected_url, raise_exc=False, microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         self.assertEqual(rpjson, result)

     @mock.patch.object(report.LOG, 'error')
@@ -1794,11 +1808,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):

         uuid = uuids.compute_node
         self.assertRaises(exception.ResourceProviderRetrievalFailed,
-                          self.client._get_providers_in_tree, uuid)
+                          self.client._get_providers_in_tree, self.context,
+                          uuid)

         expected_url = '/resource_providers?in_tree=' + uuid
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.14')
+            expected_url, raise_exc=False, microversion='1.14',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         # A 503 Service Unavailable should trigger an error log that includes
         # the placement request id
         self.assertTrue(logging_mock.called)
@@ -2031,11 +2047,12 @@ class TestAggregates(SchedulerReportClientTestCase):
         resp_mock.json.return_value = {'aggregates': aggs}
         self.ks_adap_mock.get.return_value = resp_mock

-        result = self.client._get_provider_aggregates(uuid)
+        result = self.client._get_provider_aggregates(self.context, uuid)

         expected_url = '/resource_providers/' + uuid + '/aggregates'
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.1')
+            expected_url, raise_exc=False, microversion='1.1',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         self.assertEqual(set(aggs), result)

     @mock.patch.object(report.LOG, 'error')
@@ -2052,11 +2069,12 @@ class TestAggregates(SchedulerReportClientTestCase):
         resp_mock.status_code = status_code
         self.assertRaises(
             exception.ResourceProviderAggregateRetrievalFailed,
-            self.client._get_provider_aggregates, uuid)
+            self.client._get_provider_aggregates, self.context, uuid)

         expected_url = '/resource_providers/' + uuid + '/aggregates'
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, raise_exc=False, microversion='1.1')
+            expected_url, raise_exc=False, microversion='1.1',
+            headers={'X-Openstack-Request-Id': self.context.global_id})
         self.assertTrue(log_mock.called)
         self.assertEqual(uuids.request_id,
                          log_mock.call_args[0][1]['placement_req_id'])
@@ -2077,11 +2095,13 @@ class TestTraits(SchedulerReportClientTestCase):
         resp_mock.json.return_value = {'traits': traits}
         self.ks_adap_mock.get.return_value = resp_mock

-        result = self.client._get_provider_traits(uuid)
+        result = self.client._get_provider_traits(self.context, uuid)

         expected_url = '/resource_providers/' + uuid + '/traits'
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, **self.trait_api_kwargs)
+            expected_url,
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
         self.assertEqual(set(traits), result)

     @mock.patch.object(report.LOG, 'error')
@@ -2098,11 +2118,13 @@ class TestTraits(SchedulerReportClientTestCase):
         resp_mock.status_code = status_code
         self.assertRaises(
             exception.ResourceProviderTraitRetrievalFailed,
-            self.client._get_provider_traits, uuid)
+            self.client._get_provider_traits, self.context, uuid)

         expected_url = '/resource_providers/' + uuid + '/traits'
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, **self.trait_api_kwargs)
+            expected_url,
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
         self.assertTrue(log_mock.called)
         self.assertEqual(uuids.request_id,
                          log_mock.call_args[0][1]['placement_req_id'])
@@ -2124,7 +2146,9 @@ class TestTraits(SchedulerReportClientTestCase):
         get_mock.json.return_value = {'traits': standard_traits}
         self.client._ensure_traits(self.context, all_traits)
         self.ks_adap_mock.get.assert_called_once_with(
-            '/traits?name=in:' + ','.join(all_traits), **self.trait_api_kwargs)
+            '/traits?name=in:' + ','.join(all_traits),
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
         self.ks_adap_mock.put.assert_has_calls(
             [mock.call('/traits/' + trait,
                        headers={'X-Openstack-Request-Id': self.context.global_id},
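Review note: the trait tests differ slightly from the earlier ones: shared keyword arguments arrive via **self.trait_api_kwargs, and the new headers argument is asserted alongside them. A self-contained sketch of that assertion style; the dict contents here are assumed, the real fixture may differ:

from unittest import mock

trait_api_kwargs = {'raise_exc': False, 'microversion': '1.6'}  # assumed values
global_id = 'req-8c5c23ae-0f64-4e92-b0d8-4fd0cd1a6b05'  # illustrative ID
ks_adap_mock = mock.Mock()

# Make the call, then assert both the explicit header and the splatted kwargs.
ks_adap_mock.get('/traits?name=in:CUSTOM_GOLD',
                 headers={'X-Openstack-Request-Id': global_id},
                 **trait_api_kwargs)
ks_adap_mock.get.assert_called_once_with(
    '/traits?name=in:CUSTOM_GOLD',
    headers={'X-Openstack-Request-Id': global_id},
    **trait_api_kwargs)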
@@ -2138,6 +2162,7 @@ class TestTraits(SchedulerReportClientTestCase):
         self.client._ensure_traits(self.context, standard_traits)
         self.ks_adap_mock.get.assert_called_once_with(
             '/traits?name=in:' + ','.join(standard_traits),
+            headers={'X-Openstack-Request-Id': self.context.global_id},
             **self.trait_api_kwargs)
         self.ks_adap_mock.put.assert_not_called()

@@ -2157,7 +2182,9 @@ class TestTraits(SchedulerReportClientTestCase):
                           self.context, ['FOO'])

         self.ks_adap_mock.get.assert_called_once_with(
-            '/traits?name=in:FOO', **self.trait_api_kwargs)
+            '/traits?name=in:FOO',
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
         self.ks_adap_mock.put.assert_not_called()

     def test_ensure_traits_fail_creation(self):
@@ -2173,7 +2200,9 @@ class TestTraits(SchedulerReportClientTestCase):
                           self.context, ['FOO'])

         self.ks_adap_mock.get.assert_called_once_with(
-            '/traits?name=in:FOO', **self.trait_api_kwargs)
+            '/traits?name=in:FOO',
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
         self.ks_adap_mock.put.assert_called_once_with(
             '/traits/FOO',
             headers={'X-Openstack-Request-Id': self.context.global_id},
@@ -2201,7 +2230,9 @@ class TestTraits(SchedulerReportClientTestCase):

         # Verify API calls
         self.ks_adap_mock.get.assert_called_once_with(
-            '/traits?name=in:' + ','.join(traits), **self.trait_api_kwargs)
+            '/traits?name=in:' + ','.join(traits),
+            headers={'X-Openstack-Request-Id': self.context.global_id},
+            **self.trait_api_kwargs)
         self.ks_adap_mock.put.assert_called_once_with(
             '/resource_providers/%s/traits' % uuids.rp,
             json={'traits': traits, 'resource_provider_generation': 0},
@@ -2264,10 +2295,11 @@ class TestAssociations(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root('compute', uuid, 1)
         mock_agg_get.return_value = set([uuids.agg1])
         mock_trait_get.return_value = set(['CUSTOM_GOLD'])
-        self.client._refresh_associations(uuid)
-        mock_agg_get.assert_called_once_with(uuid)
-        mock_trait_get.assert_called_once_with(uuid)
-        mock_shr_get.assert_called_once_with(mock_agg_get.return_value)
+        self.client._refresh_associations(self.context, uuid)
+        mock_agg_get.assert_called_once_with(self.context, uuid)
+        mock_trait_get.assert_called_once_with(self.context, uuid)
+        mock_shr_get.assert_called_once_with(
+            self.context, mock_agg_get.return_value)
         self.assertIn(uuid, self.client.association_refresh_time)
         self.assertTrue(
             self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
@@ -2293,9 +2325,10 @@ class TestAssociations(SchedulerReportClientTestCase):
         self.client._provider_tree.new_root('compute', uuid, 1)
         mock_agg_get.return_value = set([uuids.agg1])
         mock_trait_get.return_value = set(['CUSTOM_GOLD'])
-        self.client._refresh_associations(uuid, refresh_sharing=False)
-        mock_agg_get.assert_called_once_with(uuid)
-        mock_trait_get.assert_called_once_with(uuid)
+        self.client._refresh_associations(self.context, uuid,
+                                          refresh_sharing=False)
+        mock_agg_get.assert_called_once_with(self.context, uuid)
+        mock_trait_get.assert_called_once_with(self.context, uuid)
         mock_shr_get.assert_not_called()
         self.assertIn(uuid, self.client.association_refresh_time)
         self.assertTrue(
@@ -2322,7 +2355,7 @@ class TestAssociations(SchedulerReportClientTestCase):
         """
         mock_stale.return_value = False
         uuid = uuids.compute_node
-        self.client._refresh_associations(uuid)
+        self.client._refresh_associations(self.context, uuid)
         mock_agg_get.assert_not_called()
         mock_trait_get.assert_not_called()
         mock_shr_get.assert_not_called()
@@ -2347,10 +2380,10 @@ class TestAssociations(SchedulerReportClientTestCase):

         # Called a first time because association_refresh_time is empty.
         now = time.time()
-        self.client._refresh_associations(uuid)
-        mock_agg_get.assert_called_once_with(uuid)
-        mock_trait_get.assert_called_once_with(uuid)
-        mock_shr_get.assert_called_once_with(set())
+        self.client._refresh_associations(self.context, uuid)
+        mock_agg_get.assert_called_once_with(self.context, uuid)
+        mock_trait_get.assert_called_once_with(self.context, uuid)
+        mock_shr_get.assert_called_once_with(self.context, set())
         log_mock.assert_has_calls([
             mock.call('Refreshing aggregate associations for resource '
                       'provider %s, aggregates: %s', uuid, 'None'),
@@ -2367,17 +2400,17 @@ class TestAssociations(SchedulerReportClientTestCase):
         with mock.patch('time.time') as mock_future:
             # Not called a second time because not enough time has passed.
             mock_future.return_value = now + report.ASSOCIATION_REFRESH / 2
-            self.client._refresh_associations(uuid)
+            self.client._refresh_associations(self.context, uuid)
             mock_agg_get.assert_not_called()
             mock_trait_get.assert_not_called()
             mock_shr_get.assert_not_called()

             # Called because time has passed.
             mock_future.return_value = now + report.ASSOCIATION_REFRESH + 1
-            self.client._refresh_associations(uuid)
-            mock_agg_get.assert_called_once_with(uuid)
-            mock_trait_get.assert_called_once_with(uuid)
-            mock_shr_get.assert_called_once_with(set())
+            self.client._refresh_associations(self.context, uuid)
+            mock_agg_get.assert_called_once_with(self.context, uuid)
+            mock_trait_get.assert_called_once_with(self.context, uuid)
+            mock_shr_get.assert_called_once_with(self.context, set())


 class TestComputeNodeToInventoryDict(test.NoDBTestCase):
@@ -2836,7 +2869,8 @@ There was a conflict when trying to complete your request.
         self.assertTrue(result)

         exp_url = '/resource_providers/%s/inventories' % uuid
-        mock_get.assert_called_once_with(exp_url)
+        mock_get.assert_called_once_with(
+            exp_url, global_request_id=self.context.global_id)
         # Updated with the new inventory from the PUT call
         self._validate_provider(uuid, generation=44)
         expected = {
@@ -2913,7 +2947,8 @@ There was a conflict when trying to complete your request.
         self.assertTrue(result)

         exp_url = '/resource_providers/%s/inventories' % uuid
-        mock_get.assert_called_once_with(exp_url)
+        mock_get.assert_called_once_with(
+            exp_url, global_request_id=self.context.global_id)
         # Updated with the new inventory from the PUT call
         self._validate_provider(uuid, generation=44)
         expected = {
@@ -2999,7 +3034,8 @@ There was a conflict when trying to complete your request.
         )
         self.assertTrue(result)
         exp_url = '/resource_providers/%s/inventories' % uuid
-        mock_get.assert_called_once_with(exp_url)
+        mock_get.assert_called_once_with(
+            exp_url, global_request_id=self.context.global_id)
         # No update so put should not be called
         self.assertFalse(mock_put.called)
         # Make sure we updated the generation from the inventory records
@@ -3567,7 +3603,8 @@ class TestAllocations(SchedulerReportClientTestCase):
         self.client.update_instance_allocation(self.context, cn, inst, 1)
         self.assertFalse(mock_put.called)
         mock_get.assert_called_once_with(
-            '/allocations/%s' % inst.uuid)
+            '/allocations/%s' % inst.uuid,
+            global_request_id=self.context.global_id)

     @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                 'get')
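Review note: the inventory and allocation tests above pin down a second convention: callers go through the report client's own get wrapper, passing global_request_id as a keyword argument, and the wrapper builds the header itself. A hedged reconstruction of such a wrapper; the real method's signature and defaults may differ:

def get(self, url, version=None, global_request_id=None):
    # Translate the bare ID into the header placement expects; omit the
    # header entirely when no global request ID is available.
    headers = ({'X-Openstack-Request-Id': global_request_id}
               if global_request_id else {})
    return self._client.get(url, raise_exc=False, microversion=version,
                            headers=headers)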
@@ -119,12 +119,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
         }
         with mock.patch.object(self.manager.driver, 'select_destinations'
                 ) as select_destinations:
-            self.manager.select_destinations(None, spec_obj=fake_spec,
+            self.manager.select_destinations(self.context, spec_obj=fake_spec,
                     instance_uuids=[fake_spec.instance_uuid])
-            select_destinations.assert_called_once_with(None, fake_spec,
+            select_destinations.assert_called_once_with(
+                self.context, fake_spec,
                 [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
                 mock.sentinel.p_sums, fake_version, False)
-            mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+            mock_get_ac.assert_called_once_with(
+                self.context, mock_rfrs.return_value)

         # Now call select_destinations() with True values for the params
         # introduced in RPC version 4.5
@@ -196,10 +198,12 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
         with mock.patch.object(self.manager.driver, 'select_destinations'
                 ) as select_destinations:
             self.assertRaises(messaging.rpc.dispatcher.ExpectedException,
-                self.manager.select_destinations, None, spec_obj=fake_spec,
+                self.manager.select_destinations, self.context,
+                spec_obj=fake_spec,
                 instance_uuids=[fake_spec.instance_uuid])
             select_destinations.assert_not_called()
-            mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+            mock_get_ac.assert_called_once_with(
+                self.context, mock_rfrs.return_value)

     def test_select_destination_old_placement(self):
         """Tests that we will raise NoValidhost when the scheduler
@@ -240,11 +244,12 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
         }
         with mock.patch.object(self.manager.driver, 'select_destinations'
                 ) as select_destinations:
-            self.manager.select_destinations(None, spec_obj=fake_spec)
-            select_destinations.assert_called_once_with(None, fake_spec, None,
-                expected_alloc_reqs_by_rp_uuid, mock.sentinel.p_sums, "42.0",
-                False)
-            mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+            self.manager.select_destinations(self.context, spec_obj=fake_spec)
+            select_destinations.assert_called_once_with(self.context,
+                fake_spec, None, expected_alloc_reqs_by_rp_uuid,
+                mock.sentinel.p_sums, "42.0", False)
+            mock_get_ac.assert_called_once_with(
+                self.context, mock_rfrs.return_value)

     # TODO(sbauza): Remove that test once the API v4 is removed
     @mock.patch('nova.scheduler.utils.resources_from_request_spec')
@@ -264,13 +269,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
         }
         with mock.patch.object(self.manager.driver, 'select_destinations'
                 ) as select_destinations:
-            self.manager.select_destinations(None, request_spec='fake_spec',
-                    filter_properties='fake_props',
-                    instance_uuids=[fake_spec.instance_uuid])
-            select_destinations.assert_called_once_with(None, fake_spec,
-                [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
-                mock.sentinel.p_sums, "42.0", False)
-            mock_get_ac.assert_called_once_with(mock_rfrs.return_value)
+            self.manager.select_destinations(
+                self.context, request_spec='fake_spec',
+                filter_properties='fake_props',
+                instance_uuids=[fake_spec.instance_uuid])
+            select_destinations.assert_called_once_with(
+                self.context, fake_spec,
+                [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
+                mock.sentinel.p_sums, "42.0", False)
+            mock_get_ac.assert_called_once_with(
+                self.context, mock_rfrs.return_value)

     def test_update_aggregates(self):
         with mock.patch.object(self.manager.driver.host_manager,
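Review note: in the scheduler manager tests the first positional argument to select_destinations flips from None to self.context, and the tests verify the same context object reaches get_allocation_candidates. A self-contained mock sketch of the property being asserted; names are illustrative, not the manager's exact internals:

from unittest import mock

placement_client = mock.Mock()
resources = mock.sentinel.resources
context = mock.Mock(global_id='req-8c5c23ae-0f64-4e92-b0d8-4fd0cd1a6b05')


def select_destinations(ctxt, spec_obj=None, instance_uuids=None):
    # Forward the RPC context so the placement call can carry its global_id.
    return placement_client.get_allocation_candidates(ctxt, resources)


select_destinations(context, spec_obj=mock.sentinel.spec)
placement_client.get_allocation_candidates.assert_called_once_with(
    context, resources)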
@@ -465,7 +465,7 @@ class TestUtils(test.NoDBTestCase):
             utils.claim_resources_on_destination(
                 self.context, reportclient, instance, source_node, dest_node)
             mock_get_allocs.assert_called_once_with(
-                uuids.source_node, instance.uuid)
+                self.context, uuids.source_node, instance.uuid)

         test()

@@ -505,7 +505,7 @@ class TestUtils(test.NoDBTestCase):
                 self.context, reportclient, instance,
                 source_node, dest_node)
             mock_get_allocs.assert_called_once_with(
-                uuids.source_node, instance.uuid)
+                self.context, uuids.source_node, instance.uuid)
             mock_claim.assert_called_once_with(
                 self.context, instance.uuid, dest_alloc_request,
                 instance.project_id, instance.user_id,
@@ -0,0 +1,9 @@
+---
+fixes:
+  - |
+    The SchedulerReportClient
+    (``nova.scheduler.client.report.SchedulerReportClient``) sends requests
+    with the global request ID in the ``X-Openstack-Request-Id`` header
+    to the placement service. `Bug 1734625`_
+
+    .. _Bug 1734625: https://bugs.launchpad.net/nova/+bug/1734625
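Review note: the operator-visible effect of this release note is end-to-end traceability: one request ID can be followed from the nova API through to the placement service logs. A small usage sketch with oslo.context, assuming the Queens-era API where global_id falls back to the locally generated request_id if the caller supplied no global request ID:

from oslo_context import context

ctxt = context.RequestContext(
    global_request_id='req-8c5c23ae-0f64-4e92-b0d8-4fd0cd1a6b05')
# This is the value the report client sends as X-Openstack-Request-Id.
print(ctxt.global_id)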