Merge "Commonize _update code path"
This commit is contained in:
commit
bcecee9ac9
|
@@ -933,8 +933,8 @@ class ResourceTracker(object):
            context, compute_node.uuid, name=compute_node.hypervisor_hostname)
        # Let the virt driver rearrange the provider tree and set/update
        # the inventory, traits, and aggregates throughout.
        allocs = None
        try:
        allocs = None
        try:
            self.driver.update_provider_tree(prov_tree, nodename)
        except exception.ReshapeNeeded:

@@ -950,27 +950,24 @@ class ResourceTracker(object):
                self.driver.update_provider_tree(prov_tree, nodename,
                                                 allocations=allocs)

            # Flush any changes. If we processed ReshapeNeeded above, allocs is
            # not None, and this will hit placement's POST /reshaper route.
            reportclient.update_from_provider_tree(context, prov_tree,
                                                   allocations=allocs)
        except NotImplementedError:
            # update_provider_tree isn't implemented yet - try get_inventory
            try:
                inv_data = self.driver.get_inventory(nodename)
                _normalize_inventory_from_cn_obj(inv_data, compute_node)
                self.scheduler_client.set_inventory_for_provider(
                    context,
                    compute_node.uuid,
                    compute_node.hypervisor_hostname,
                    inv_data,
                )
            except NotImplementedError:
                # Eventually all virt drivers will return an inventory dict in
                # the format that the placement API expects and we'll be able
                # to remove this code branch
                self.scheduler_client.update_compute_node(context,
                                                          compute_node)
                inv_data = compute_utils.compute_node_to_inventory_dict(
                    compute_node)

            prov_tree.update_inventory(nodename, inv_data)

        # Flush any changes. If we processed ReshapeNeeded above, allocs is not
        # None, and this will hit placement's POST /reshaper route.
        reportclient.update_from_provider_tree(context, prov_tree,
                                               allocations=allocs)


@retrying.retry(stop_max_attempt_number=4,
                retry_on_exception=lambda e: isinstance(
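Net effect of these two hunks: every fallback now feeds inventory into the same provider tree, so there is a single flush call at the end. A minimal sketch of the resulting control flow, using invented stand-ins for the driver and report client (the allocation-fetching call inside the ReshapeNeeded handler is assumed from context, not shown in the hunk):

# Sketch only; FakeDriver/FakeReportClient semantics and
# get_allocations_for_provider_tree are illustrative assumptions.
class ReshapeNeeded(Exception):
    pass

def update_to_placement(driver, reportclient, prov_tree, nodename,
                        compute_node, to_inventory_dict):
    allocs = None
    try:
        try:
            driver.update_provider_tree(prov_tree, nodename)
        except ReshapeNeeded:
            # Fetch current allocations and let the driver move them around.
            allocs = reportclient.get_allocations_for_provider_tree(nodename)
            driver.update_provider_tree(prov_tree, nodename,
                                        allocations=allocs)
    except NotImplementedError:
        # Older drivers: try get_inventory(), then fall back to a plain
        # translation of the ComputeNode record; either way the result goes
        # into the same provider tree.
        try:
            inv_data = driver.get_inventory(nodename)
        except NotImplementedError:
            inv_data = to_inventory_dict(compute_node)
        prov_tree.update_inventory(nodename, inv_data)
    # The single flush point: when allocs is not None this hits placement's
    # POST /reshaper route.
    reportclient.update_from_provider_tree(prov_tree, allocations=allocs)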
@@ -48,6 +48,7 @@ from nova.notifications.objects import server_group as sg_notification
from nova.notifications.objects import volume as volume_notification
from nova import objects
from nova.objects import fields
from nova import rc_fields
from nova import rpc
from nova import safe_utils
from nova import utils

@@ -1291,3 +1292,47 @@ def notify_about_instance_delete(notifier, context, instance,
        source=source,
        action=delete_type,
        phase=fields.NotificationPhase.END)


def compute_node_to_inventory_dict(compute_node):
    """Given a supplied `objects.ComputeNode` object, return a dict, keyed
    by resource class, of various inventory information.

    :param compute_node: `objects.ComputeNode` object to translate
    """
    result = {}

    # NOTE(jaypipes): Ironic virt driver will return 0 values for vcpus,
    # memory_mb and disk_gb if the Ironic node is not available/operable
    if compute_node.vcpus > 0:
        result[rc_fields.ResourceClass.VCPU] = {
            'total': compute_node.vcpus,
            'reserved': CONF.reserved_host_cpus,
            'min_unit': 1,
            'max_unit': compute_node.vcpus,
            'step_size': 1,
            'allocation_ratio': compute_node.cpu_allocation_ratio,
        }
    if compute_node.memory_mb > 0:
        result[rc_fields.ResourceClass.MEMORY_MB] = {
            'total': compute_node.memory_mb,
            'reserved': CONF.reserved_host_memory_mb,
            'min_unit': 1,
            'max_unit': compute_node.memory_mb,
            'step_size': 1,
            'allocation_ratio': compute_node.ram_allocation_ratio,
        }
    if compute_node.local_gb > 0:
        # TODO(johngarbutt) We should either move to reserved_host_disk_gb
        # or start tracking DISK_MB.
        reserved_disk_gb = convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result[rc_fields.ResourceClass.DISK_GB] = {
            'total': compute_node.local_gb,
            'reserved': reserved_disk_gb,
            'min_unit': 1,
            'max_unit': compute_node.local_gb,
            'step_size': 1,
            'allocation_ratio': compute_node.disk_allocation_ratio,
        }
    return result
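For illustration, a hypothetical use of the relocated helper; the reserved figures come from nova config options, so the comments below assume reserved_host_cpus=1 and reserved_host_disk_mb=200 as in the unit test further down:

from nova.compute import utils as compute_utils
from nova import objects

cn = objects.ComputeNode(vcpus=2, cpu_allocation_ratio=16.0,
                         memory_mb=1024, ram_allocation_ratio=1.5,
                         local_gb=10, disk_allocation_ratio=1.0)
inv = compute_utils.compute_node_to_inventory_dict(cn)
# inv['VCPU']['reserved'] == 1 and inv['DISK_GB']['reserved'] == 1,
# since 200 MB rounds up to 1 GB; a node reporting all zeros (e.g. an
# unavailable Ironic node) yields {}.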
@@ -35,19 +35,6 @@ class SchedulerClient(object):
    def delete_aggregate(self, context, aggregate):
        self.queryclient.delete_aggregate(context, aggregate)

    def set_inventory_for_provider(self, context, rp_uuid, rp_name, inv_data,
                                   parent_provider_uuid=None):
        self.reportclient.set_inventory_for_provider(
            context,
            rp_uuid,
            rp_name,
            inv_data,
            parent_provider_uuid=parent_provider_uuid,
        )

    def update_compute_node(self, context, compute_node):
        self.reportclient.update_compute_node(context, compute_node)

    def update_instance_info(self, context, host_name, instance_info):
        self.queryclient.update_instance_info(context, host_name,
                                              instance_info)
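With these pass-through methods deleted, former SchedulerClient callers compose the same behavior from the report client directly; a sketch of the replacement pattern, mirroring the functional-test updates below:

# Old: scheduler_client.update_compute_node(context, compute_node)
# New equivalent, built from surviving report-client primitives:
reportclient._ensure_resource_provider(
    context, compute_node.uuid, name=compute_node.hypervisor_hostname)
reportclient.set_inventory_for_provider(
    context, compute_node.uuid,
    compute_utils.compute_node_to_inventory_dict(compute_node))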
@@ -30,7 +30,6 @@ from oslo_utils import versionutils
import retrying

from nova.compute import provider_tree
from nova.compute import utils as compute_utils
import nova.conf
from nova import exception
from nova.i18n import _

@@ -42,9 +41,6 @@ from nova import utils

CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
VCPU = fields.ResourceClass.VCPU
MEMORY_MB = fields.ResourceClass.MEMORY_MB
DISK_GB = fields.ResourceClass.DISK_GB
_RE_INV_IN_USE = re.compile("Inventory for (.+) on resource provider "
                            "(.+) in use")
WARN_EVERY = 10
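The retained _RE_INV_IN_USE pattern is what turns placement's 409 body into an InventoryInUse exception; a quick standalone demonstration:

import re

_RE_INV_IN_USE = re.compile("Inventory for (.+) on resource provider "
                            "(.+) in use")

msg = ("update conflict: Inventory for VCPU on "
       "resource provider 123 in use")
match = _RE_INV_IN_USE.search(msg)
assert match.group(1) == 'VCPU'   # the in-use resource class
assert match.group(2) == '123'    # the resource provider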
@@ -128,50 +124,6 @@ def retries(f):
    return wrapper


def _compute_node_to_inventory_dict(compute_node):
    """Given a supplied `objects.ComputeNode` object, return a dict, keyed
    by resource class, of various inventory information.

    :param compute_node: `objects.ComputeNode` object to translate
    """
    result = {}

    # NOTE(jaypipes): Ironic virt driver will return 0 values for vcpus,
    # memory_mb and disk_gb if the Ironic node is not available/operable
    if compute_node.vcpus > 0:
        result[VCPU] = {
            'total': compute_node.vcpus,
            'reserved': CONF.reserved_host_cpus,
            'min_unit': 1,
            'max_unit': compute_node.vcpus,
            'step_size': 1,
            'allocation_ratio': compute_node.cpu_allocation_ratio,
        }
    if compute_node.memory_mb > 0:
        result[MEMORY_MB] = {
            'total': compute_node.memory_mb,
            'reserved': CONF.reserved_host_memory_mb,
            'min_unit': 1,
            'max_unit': compute_node.memory_mb,
            'step_size': 1,
            'allocation_ratio': compute_node.ram_allocation_ratio,
        }
    if compute_node.local_gb > 0:
        # TODO(johngarbutt) We should either move to reserved_host_disk_gb
        # or start tracking DISK_MB.
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result[DISK_GB] = {
            'total': compute_node.local_gb,
            'reserved': reserved_disk_gb,
            'min_unit': 1,
            'max_unit': compute_node.local_gb,
            'step_size': 1,
            'allocation_ratio': compute_node.disk_allocation_ratio,
        }
    return result


def _instance_to_allocations_dict(instance):
    """Given an `objects.Instance` object, return a dict, keyed by resource
    class of the amount used by the instance.
@@ -901,143 +853,6 @@ class SchedulerReportClient(object):
            return False
        return (time.time() - refresh_time) > rpar

    def _update_inventory_attempt(self, context, rp_uuid, inv_data):
        """Update the inventory for this resource provider if needed.

        :param context: The security context
        :param rp_uuid: The resource provider UUID for the operation
        :param inv_data: The new inventory for the resource provider
        :returns: True if the inventory was updated (or did not need to be),
                  False otherwise.
        """
        # TODO(jaypipes): Should we really be calling the placement API to get
        # the current inventory for every resource provider each and every time
        # update_resource_stats() is called? :(
        curr = self._refresh_and_get_inventory(context, rp_uuid)
        if curr is None:
            LOG.debug('No inventory for provider: %s', rp_uuid)
            return False

        cur_gen = curr['resource_provider_generation']

        # Check to see if we need to update placement's view
        if not self._provider_tree.has_inventory_changed(rp_uuid, inv_data):
            LOG.debug('Inventory has not changed for provider %s based '
                      'on inventory data: %s', rp_uuid, inv_data)
            return True

        payload = {
            'resource_provider_generation': cur_gen,
            'inventories': inv_data,
        }
        url = '/resource_providers/%s/inventories' % rp_uuid
        # NOTE(vdrok): in microversion 1.26 it is allowed to have inventory
        # records with reserved value equal to total
        version = ALLOW_RESERVED_EQUAL_TOTAL_INVENTORY_VERSION
        result = self.put(url, payload, version=version,
                          global_request_id=context.global_id)
        if result.status_code == 409:
            LOG.info('[%(placement_req_id)s] Inventory update conflict for '
                     '%(resource_provider_uuid)s with generation ID '
                     '%(generation)s',
                     {'placement_req_id': get_placement_request_id(result),
                      'resource_provider_uuid': rp_uuid,
                      'generation': cur_gen})
            # NOTE(jaypipes): There may be cases when we try to set a
            # provider's inventory that results in attempting to delete an
            # inventory record for a resource class that has an active
            # allocation. We need to catch this particular case and raise an
            # exception here instead of returning False, since we should not
            # re-try the operation in this case.
            #
            # A use case for where this can occur is the following:
            #
            # 1) Provider created for each Ironic baremetal node in Newton
            # 2) Inventory records for baremetal node created for VCPU,
            #    MEMORY_MB and DISK_GB
            # 3) A Nova instance consumes the baremetal node and allocation
            #    records are created for VCPU, MEMORY_MB and DISK_GB matching
            #    the total amount of those resources on the baremetal node.
            # 4) Upgrade to Ocata and now resource tracker wants to set the
            #    provider's inventory to a single record of resource class
            #    CUSTOM_IRON_SILVER (or whatever the Ironic node's
            #    "resource_class" attribute is)
            # 5) Scheduler report client sends the inventory list containing a
            #    single CUSTOM_IRON_SILVER record and placement service
            #    attempts to delete the inventory records for VCPU, MEMORY_MB
            #    and DISK_GB. An exception is raised from the placement service
            #    because allocation records exist for those resource classes,
            #    and a 409 Conflict is returned to the compute node. We need to
            #    trigger a delete of the old allocation records and then set
            #    the new inventory, and then set the allocation record to the
            #    new CUSTOM_IRON_SILVER record.
            rc = _extract_inventory_in_use(result.text)
            if rc is not None:
                raise exception.InventoryInUse(
                    resource_classes=rc,
                    resource_provider=rp_uuid,
                )

            # Invalidate our cache and re-fetch the resource provider
            # to be sure to get the latest generation.
            self._provider_tree.remove(rp_uuid)
            # NOTE(jaypipes): We don't need to pass a name parameter to
            # _ensure_resource_provider() because we know the resource provider
            # record already exists. We're just reloading the record here.
            self._ensure_resource_provider(context, rp_uuid)
            return False
        elif not result:
            placement_req_id = get_placement_request_id(result)
            LOG.warning('[%(placement_req_id)s] Failed to update inventory '
                        'for resource provider %(uuid)s: %(status)i %(text)s',
                        {'placement_req_id': placement_req_id,
                         'uuid': rp_uuid,
                         'status': result.status_code,
                         'text': result.text})
            # log the body at debug level
            LOG.debug('[%(placement_req_id)s] Failed inventory update request '
                      'for resource provider %(uuid)s with body: %(payload)s',
                      {'placement_req_id': placement_req_id,
                       'uuid': rp_uuid,
                       'payload': payload})
            return False

        if result.status_code != 200:
            placement_req_id = get_placement_request_id(result)
            LOG.info('[%(placement_req_id)s] Received unexpected response '
                     'code %(code)i while trying to update inventory for '
                     'resource provider %(uuid)s: %(text)s',
                     {'placement_req_id': placement_req_id,
                      'uuid': rp_uuid,
                      'code': result.status_code,
                      'text': result.text})
            return False

        # Update our view of the generation for next time
        updated_inventories_result = result.json()
        new_gen = updated_inventories_result['resource_provider_generation']
        LOG.debug('Updating ProviderTree inventory for provider %s with '
                  'generation %s from _update_inventory_attempt with data: '
                  '%s', rp_uuid, new_gen, inv_data)
        self._provider_tree.update_inventory(rp_uuid, inv_data,
                                             generation=new_gen)
        return True

    @safe_connect
    def _update_inventory(self, context, rp_uuid, inv_data):
        for attempt in (1, 2, 3):
            if not self._provider_tree.exists(rp_uuid):
                # NOTE(danms): Either we failed to fetch/create the RP
                # on our first attempt, or a previous attempt had to
                # invalidate the cache, and we were unable to refresh
                # it. Bail and try again next time.
                LOG.warning('Unable to refresh my resource provider record')
                return False
            if self._update_inventory_attempt(context, rp_uuid, inv_data):
                return True
            time.sleep(1)
        return False

    def get_provider_tree_and_ensure_root(self, context, rp_uuid, name=None,
                                          parent_provider_uuid=None):
        """Returns a fresh ProviderTree representing all providers which are in
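The deleted method's core idea, generation-checked writes, survives elsewhere in the report client. Here is a self-contained sketch of that concurrency control with an invented FakePlacement class:

class Conflict(Exception):
    pass


class FakePlacement:
    """Stand-in for placement's generation-checked inventory PUT."""

    def __init__(self):
        self.generation = 0
        self.inventories = {}

    def put_inventories(self, generation, inventories):
        if generation != self.generation:
            raise Conflict('stale generation')  # placement answers 409
        self.inventories = inventories
        self.generation += 1
        return self.generation


placement = FakePlacement()
placement.put_inventories(0, {'VCPU': {'total': 8}})      # ok, generation -> 1
try:
    placement.put_inventories(0, {'VCPU': {'total': 4}})  # stale: conflict
except Conflict:
    fresh = placement.generation                          # "refetch"
    placement.put_inventories(fresh, {'VCPU': {'total': 4}})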
@@ -1069,67 +884,11 @@ class SchedulerReportClient(object):
        # Return a *copy* of the tree.
        return copy.deepcopy(self._provider_tree)

    def set_inventory_for_provider(self, context, rp_uuid, rp_name, inv_data,
                                   parent_provider_uuid=None):
    def set_inventory_for_provider(self, context, rp_uuid, inv_data):
        """Given the UUID of a provider, set the inventory records for the
        provider to the supplied dict of resources.

        :param context: The security context
        :param rp_uuid: UUID of the resource provider to set inventory for
        :param rp_name: Name of the resource provider in case we need to create
                        a record for it in the placement API
        :param inv_data: Dict, keyed by resource class name, of inventory data
                         to set against the provider
        :param parent_provider_uuid:
            If the provider is not a root, this is required, and represents
            the UUID of the immediate parent, which is a provider for which
            this method has already been invoked.

        :raises: exc.InvalidResourceClass if a supplied custom resource class
                 name does not meet the placement API's format requirements.
        """
        self._ensure_resource_provider(
            context, rp_uuid, rp_name,
            parent_provider_uuid=parent_provider_uuid)

        # Auto-create custom resource classes coming from a virt driver
        self._ensure_resource_classes(context, set(inv_data))

        # NOTE(efried): Do not use the DELETE API introduced in microversion
        # 1.5, even if the new inventory is empty. It provides no way of
        # sending the generation down, so no way to trigger/detect a conflict
        # if an out-of-band update occurs between when we GET the latest and
        # when we invoke the DELETE. See bug #1746374.
        self._update_inventory(context, rp_uuid, inv_data)

    def _set_inventory_for_provider(self, context, rp_uuid, inv_data):
        """Given the UUID of a provider, set the inventory records for the
        provider to the supplied dict of resources.

        Compare and contrast with set_inventory_for_provider above. This one
        is specially formulated for use by update_from_provider_tree. Like the
        other method, we DO need to _ensure_resource_class - i.e. automatically
        create new resource classes specified in the inv_data. However, UNLIKE
        the other method:
        - We don't use the DELETE API when inventory is empty, because that guy
          doesn't return content, and we need to update the cached provider
          tree with the new generation.
        - We raise exceptions (rather than returning a boolean) which are
          handled in a consistent fashion by update_from_provider_tree.
        - We don't invalidate the cache on failure. That's controlled at a
          broader scope (based on errors from ANY of the set_*_for_provider
          methods, etc.) by update_from_provider_tree.
        - We don't retry. In this code path, retries happen at the level of
          the resource tracker on the next iteration.
        - We take advantage of the cache and no-op if inv_data isn't different
          from what we have locally. This is an optimization, not essential.
        - We don't _ensure_resource_provider or refresh_and_get_inventory,
          because that's already been done in the code paths leading up to
          update_from_provider_tree (by get_provider_tree). This is an
          optimization, not essential.

        In short, this version is more in the spirit of set_traits_for_provider
        and set_aggregates_for_provider.
        The provider must exist - this method does not attempt to create it.

        :param context: The security context
        :param rp_uuid: The UUID of the provider whose inventory is to be
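The "no-op if inv_data isn't different" bullet above is the cache optimization in a nutshell; a sketch with invented cache/placement objects:

def set_inventory_if_changed(cache, placement, rp_uuid, inv_data):
    # Skip the PUT entirely when the locally cached inventory matches.
    if not cache.has_inventory_changed(rp_uuid, inv_data):
        return
    generation = cache.data(rp_uuid).generation
    new_generation = placement.put_inventories(rp_uuid, generation, inv_data)
    # Record the generation placement handed back so the next write is
    # not rejected as stale.
    cache.update_inventory(rp_uuid, inv_data, generation=new_generation)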
@@ -1148,8 +907,6 @@ class SchedulerReportClient(object):
        :raises: ResourceProviderUpdateFailed on any other placement API
                 failure.
        """
        # TODO(efried): Consolidate/refactor to one set_inventory_for_provider.

        # NOTE(efried): This is here because _ensure_resource_class already has
        # @safe_connect, so we don't want to decorate this whole method with it
        @safe_connect

@@ -1180,7 +937,7 @@ class SchedulerReportClient(object):

        if resp.status_code == 200:
            LOG.debug('Updated inventory for provider %s with generation %s '
                      'in Placement from _set_inventory_for_provider using '
                      'in Placement from set_inventory_for_provider using '
                      'data: %s', rp_uuid, generation, inv_data)
            json = resp.json()
            self._provider_tree.update_inventory(
@@ -1462,26 +1219,6 @@ class SchedulerReportClient(object):
        LOG.error(msg, args)
        raise exception.InvalidResourceClass(resource_class=name)

    def update_compute_node(self, context, compute_node):
        """Creates or updates stats for the supplied compute node.

        :param context: The security context
        :param compute_node: updated nova.objects.ComputeNode to report
        :raises `exception.InventoryInUse` if the compute node has had changes
                to its inventory but there are still active allocations for
                resource classes that would be deleted by an update to the
                placement API.
        """
        self._ensure_resource_provider(context, compute_node.uuid,
                                       compute_node.hypervisor_hostname)
        inv_data = _compute_node_to_inventory_dict(compute_node)
        # NOTE(efried): Do not use the DELETE API introduced in microversion
        # 1.5, even if the new inventory is empty. It provides no way of
        # sending the generation down, so no way to trigger/detect a conflict
        # if an out-of-band update occurs between when we GET the latest and
        # when we invoke the DELETE. See bug #1746374.
        self._update_inventory(context, compute_node.uuid, inv_data)

    def _reshape(self, context, inventories, allocations):
        """Perform atomic inventory & allocation data migration.
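Why the NOTE(efried) comment rejects the DELETE API (bug #1746374), sketched: an empty PUT still carries the generation, so a concurrent out-of-band write is detected as a 409 instead of being silently clobbered.

cached_generation = 42  # whatever generation our provider cache last saw
payload = {
    'resource_provider_generation': cached_generation,  # conflict detector
    'inventories': {},                                   # "delete everything"
}
# PUT /resource_providers/<rp_uuid>/inventories with this payload either
# succeeds (returning the bumped generation) or fails with 409 if another
# writer updated the provider first; a bare DELETE gives neither signal.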
@@ -1702,7 +1439,7 @@ class SchedulerReportClient(object):
        for uuid in reversed(new_uuids):
            pd = new_tree.data(uuid)
            with catch_all(pd.uuid):
                self._set_inventory_for_provider(
                self.set_inventory_for_provider(
                    context, pd.uuid, pd.inventory)
                self.set_aggregates_for_provider(
                    context, pd.uuid, pd.aggregates)
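From the resource tracker's perspective, the surviving end-to-end sequence is now as follows (names taken from the hunks above; the driver call is whatever the virt layer implements):

prov_tree = reportclient.get_provider_tree_and_ensure_root(
    context, compute_node.uuid, name=compute_node.hypervisor_hostname)
driver.update_provider_tree(prov_tree, nodename)
reportclient.update_from_provider_tree(context, prov_tree, allocations=None)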
@@ -23,6 +23,7 @@ from placement.tests import fixtures as placement_db

from nova.cmd import status
from nova.compute import provider_tree
from nova.compute import utils as compute_utils
from nova import conf
from nova import context
# TODO(cdent): This points to the nova, not placement, exception for

@@ -194,7 +195,12 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
        self.assertEqual([self.compute_uuid], ptree.get_provider_uuids())

        # Now let's update status for our compute node.
        self.client.update_compute_node(self.context, self.compute_node)
        self.client._ensure_resource_provider(
            self.context, self.compute_uuid, name=self.compute_name)
        self.client.set_inventory_for_provider(
            self.context, self.compute_uuid,
            compute_utils.compute_node_to_inventory_dict(
                self.compute_node))

        # So now we have a resource provider
        rp = self.client._get_resource_provider(self.context,
@@ -263,12 +269,19 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
        vcpu_data = usage_data[res_class]
        self.assertEqual(0, vcpu_data)

        # Allocation bumped the generation, so refresh to get the latest
        self.client._refresh_and_get_inventory(self.context,
                                               self.compute_uuid)

        # Trigger the reporting client deleting all inventory by setting
        # the compute node's CPU, RAM and disk amounts to 0.
        self.compute_node.vcpus = 0
        self.compute_node.memory_mb = 0
        self.compute_node.local_gb = 0
        self.client.update_compute_node(self.context, self.compute_node)
        self.client.set_inventory_for_provider(
            self.context, self.compute_uuid,
            compute_utils.compute_node_to_inventory_dict(
                self.compute_node))

        # Check there are no more inventory records
        resp = self.client.get(inventory_url)
@@ -283,80 +296,6 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            # But the inventory is gone
            self.assertFalse(ptree.has_inventory(self.compute_uuid))

            # Try setting some invalid inventory and make sure the report
            # client raises the expected error.
            inv_data = {
                'CUSTOM_BOGU$_CLA$$': {
                    'total': 100,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 100,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            self.assertRaises(exception.InvalidResourceClass,
                              self.client.set_inventory_for_provider,
                              self.context, self.compute_uuid,
                              self.compute_name, inv_data)

    @mock.patch('nova.compute.utils.is_volume_backed_instance',
                new=mock.Mock(return_value=False))
    @mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
    def test_ensure_standard_resource_class(self):
        """Test case for bug #1746615: If placement is running a newer version
        of code than compute, it may have new standard resource classes we
        don't know about. Make sure this scenario doesn't cause errors in
        set_inventory_for_provider.
        """
        inv = {
            'VCPU': {
                'total': 10,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 2,
                'step_size': 1,
                'allocation_ratio': 10.0,
            },
            'MEMORY_MB': {
                'total': 1048576,
                'reserved': 2048,
                'min_unit': 1024,
                'max_unit': 131072,
                'step_size': 1024,
                'allocation_ratio': 1.0,
            },
            'DISK_GB': {
                'total': 100,
                'reserved': 1,
                'min_unit': 1,
                'max_unit': 10,
                'step_size': 2,
                'allocation_ratio': 10.0,
            },
            # A standard resource class known by placement, but not locally
            'PCI_DEVICE': {
                'total': 4,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 4,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'CUSTOM_BANDWIDTH': {
                'total': 1250000,
                'reserved': 10000,
                'min_unit': 5000,
                'max_unit': 250000,
                'step_size': 5000,
                'allocation_ratio': 8.0,
            },
        }
        with self._interceptor():
            self.client.update_compute_node(self.context, self.compute_node)
            self.client.set_inventory_for_provider(
                self.context, self.compute_uuid, self.compute_name, inv)

    def test_global_request_id(self):
        global_request_id = 'req-%s' % uuids.global_request_id
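Why 'CUSTOM_BOGU$_CLA$$' must be rejected: custom resource class names are constrained to the prefix CUSTOM_ followed by uppercase alphanumerics and underscores. An illustrative check (the authoritative validation lives in placement):

import re

CUSTOM_RC = re.compile(r'^CUSTOM_[A-Z0-9_]+$')
assert CUSTOM_RC.match('CUSTOM_BANDWIDTH')
assert not CUSTOM_RC.match('CUSTOM_BOGU$_CLA$$')  # '$' is not allowed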
@@ -397,7 +336,12 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
        """
        with self._interceptor():
            # Register the compute node and its inventory
            self.client.update_compute_node(self.context, self.compute_node)
            self.client._ensure_resource_provider(
                self.context, self.compute_uuid, name=self.compute_name)
            self.client.set_inventory_for_provider(
                self.context, self.compute_uuid,
                compute_utils.compute_node_to_inventory_dict(
                    self.compute_node))
            # The compute node is associated with two of the shared storages
            self.client.set_aggregates_for_provider(
                self.context, self.compute_uuid,

@@ -407,8 +351,11 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            for x in (1, 2):
                name = 'pf%d' % x
                uuid = getattr(uuids, name)
                self.client._ensure_resource_provider(
                    self.context, uuid, name=name,
                    parent_provider_uuid=self.compute_uuid)
                self.client.set_inventory_for_provider(
                    self.context, uuid, name, {
                    self.context, uuid, {
                        fields.ResourceClass.SRIOV_NET_VF: {
                            'total': 24 * x,
                            'reserved': x,
@@ -425,7 +372,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
                            'step_size': 5000,
                            'allocation_ratio': 1.0,
                        },
                    }, parent_provider_uuid=self.compute_uuid)
                    })
                # They're associated with an IP address aggregate
                self.client.set_aggregates_for_provider(self.context, uuid,
                                                        [uuids.agg_ip])

@@ -437,8 +384,10 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            for x in (1, 2, 3):
                name = 'ss%d' % x
                uuid = getattr(uuids, name)
                self.client._ensure_resource_provider(self.context, uuid,
                                                      name=name)
                self.client.set_inventory_for_provider(
                    self.context, uuid, name, {
                    self.context, uuid, {
                        fields.ResourceClass.DISK_GB: {
                            'total': 100 * x,
                            'reserved': x,

@@ -458,8 +407,10 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
                    [agg])

            # Register a shared IP address provider with IP address inventory
            self.client._ensure_resource_provider(self.context, uuids.sip,
                                                  name='sip')
            self.client.set_inventory_for_provider(
                self.context, uuids.sip, 'sip', {
                self.context, uuids.sip, {
                    fields.ResourceClass.IPV4_ADDRESS: {
                        'total': 128,
                        'reserved': 0,

@@ -478,8 +429,10 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
                    [uuids.agg_ip])

            # Register a shared network bandwidth provider
            self.client._ensure_resource_provider(self.context, uuids.sbw,
                                                  name='sbw')
            self.client.set_inventory_for_provider(
                self.context, uuids.sbw, 'sbw', {
                self.context, uuids.sbw, {
                    'CUSTOM_BANDWIDTH': {
                        'total': 1250000,
                        'reserved': 10000,

@@ -500,7 +453,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            prov_tree = self.client.get_provider_tree_and_ensure_root(
                self.context, self.compute_uuid)

            # All providers show up because we used set_inventory_for_provider
            # All providers show up because we used _ensure_resource_provider
            self.assertEqual(set([self.compute_uuid, uuids.ss1, uuids.ss2,
                                  uuids.pf1, uuids.pf2, uuids.sip, uuids.ss3,
                                  uuids.sbw]),
@@ -538,16 +491,15 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
                    'allocation_ratio': 1.0,
                },
            }
            self.client._set_inventory_for_provider(
            self.client.set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

    def test__set_inventory_for_provider(self):
        """Tests for SchedulerReportClient._set_inventory_for_provider, NOT
        set_inventory_for_provider.
    def test_set_inventory_for_provider(self):
        """Tests for SchedulerReportClient.set_inventory_for_provider.
        """
        with self._interceptor():
            inv = {

@@ -563,7 +515,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            # Provider doesn't exist in our cache
            self.assertRaises(
                ValueError,
                self.client._set_inventory_for_provider,
                self.client.set_inventory_for_provider,
                self.context, uuids.cn, inv)
            self.assertIsNone(self.client._get_inventory(
                self.context, uuids.cn))

@@ -577,7 +529,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
                    self.context, uuids.cn)['inventories'])

            # Now set the inventory
            self.client._set_inventory_for_provider(
            self.client.set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,

@@ -603,7 +555,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
                    'allocation_ratio': 1.0,
                },
            }
            self.client._set_inventory_for_provider(
            self.client.set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,

@@ -639,7 +591,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
                    'allocation_ratio': 8.0,
                },
            }
            self.client._set_inventory_for_provider(
            self.client.set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,

@@ -661,7 +613,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            }
            self.assertRaises(
                exception.InvalidResourceClass,
                self.client._set_inventory_for_provider,
                self.client.set_inventory_for_provider,
                self.context, uuids.cn, bogus_inv)
            self.assertFalse(
                self.client.get('/resource_classes/BOGUS'))

@@ -714,7 +666,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            # Cached generation is off, so this will bounce with a conflict.
            self.assertRaises(
                exception.ResourceProviderUpdateConflict,
                self.client._set_inventory_for_provider,
                self.client.set_inventory_for_provider,
                self.context, uuids.cn, inv)
            # Inventory still corresponds to the out-of-band update
            self.assertEqual(

@@ -724,7 +676,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            # Force refresh to get the latest generation
            self.client._refresh_and_get_inventory(self.context, uuids.cn)
            # Now the update should work
            self.client._set_inventory_for_provider(
            self.client.set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,

@@ -752,7 +704,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            self.client._refresh_and_get_inventory(self.context, uuids.cn)
            self.assertRaises(
                exception.InventoryInUse,
                self.client._set_inventory_for_provider,
                self.client.set_inventory_for_provider,
                self.context, uuids.cn, bad_inv)
            self.assertEqual(
                inv,

@@ -763,7 +715,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            bad_inv = {}
            self.assertRaises(
                exception.InventoryInUse,
                self.client._set_inventory_for_provider,
                self.client.set_inventory_for_provider,
                self.context, uuids.cn, bad_inv)
            self.assertEqual(
                inv,

@@ -775,7 +727,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
            # Force refresh to get the latest generation
            self.client._refresh_and_get_inventory(self.context, uuids.cn)
            inv = {}
            self.client._set_inventory_for_provider(
            self.client.set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,
@@ -33,6 +33,7 @@ from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import context
from nova import exception
from nova.image import glance

@@ -54,6 +55,8 @@ from nova.tests.unit.objects import test_flavor

FAKE_IMAGE_REF = uuids.image_ref

CONF = nova.conf.CONF


def create_instance(context, user_id='fake', project_id='fake', params=None):
    """Create a test instance."""

@@ -1451,3 +1454,65 @@ class IsVolumeBackedInstanceTestCase(test.TestCase):
        self.assertFalse(
            compute_utils.is_volume_backed_instance(ctxt, instance, None))
        mock_bdms.assert_called_with(ctxt, instance.uuid)


class TestComputeNodeToInventoryDict(test.NoDBTestCase):
    def test_compute_node_inventory(self):
        uuid = uuids.compute_node
        name = 'computehost'
        compute_node = objects.ComputeNode(uuid=uuid,
                                           hypervisor_hostname=name,
                                           vcpus=2,
                                           cpu_allocation_ratio=16.0,
                                           memory_mb=1024,
                                           ram_allocation_ratio=1.5,
                                           local_gb=10,
                                           disk_allocation_ratio=1.0)

        self.flags(reserved_host_memory_mb=1000)
        self.flags(reserved_host_disk_mb=200)
        self.flags(reserved_host_cpus=1)

        result = compute_utils.compute_node_to_inventory_dict(compute_node)

        expected = {
            'VCPU': {
                'total': compute_node.vcpus,
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': compute_node.vcpus,
                'step_size': 1,
                'allocation_ratio': compute_node.cpu_allocation_ratio,
            },
            'MEMORY_MB': {
                'total': compute_node.memory_mb,
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': compute_node.memory_mb,
                'step_size': 1,
                'allocation_ratio': compute_node.ram_allocation_ratio,
            },
            'DISK_GB': {
                'total': compute_node.local_gb,
                'reserved': 1,  # this is ceil(200/1024)
                'min_unit': 1,
                'max_unit': compute_node.local_gb,
                'step_size': 1,
                'allocation_ratio': compute_node.disk_allocation_ratio,
            },
        }
        self.assertEqual(expected, result)

    def test_compute_node_inventory_empty(self):
        uuid = uuids.compute_node
        name = 'computehost'
        compute_node = objects.ComputeNode(uuid=uuid,
                                           hypervisor_hostname=name,
                                           vcpus=0,
                                           cpu_allocation_ratio=16.0,
                                           memory_mb=0,
                                           ram_allocation_ratio=1.5,
                                           local_gb=0,
                                           disk_allocation_ratio=1.0)
        result = compute_utils.compute_node_to_inventory_dict(compute_node)
        self.assertEqual({}, result)
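The reserved-disk figure the test expects is simple ceiling arithmetic; a standalone sketch mirroring compute_utils.convert_mb_to_ceil_gb:

import math


def convert_mb_to_ceil_gb(mb_value):
    gb_int = 0
    if mb_value:
        # Round up to the nearest whole GB so we never under-reserve.
        gb_int = int(math.ceil(mb_value / 1024.0))
    return gb_int


assert convert_mb_to_ceil_gb(200) == 1    # reserved_host_disk_mb=200 above
assert convert_mb_to_ceil_gb(1000) == 1
assert convert_mb_to_ceil_gb(1025) == 2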
@@ -1313,11 +1313,7 @@ class TestUpdateComputeNode(BaseTestCase):
        self.rt._update(mock.sentinel.ctx, new_compute)
        self.assertFalse(save_mock.called)
        # Even though the compute node is not updated, get_inventory() still
        # gets called. And update_compute_node() is also called when
        # get_inventory() is not implemented.
        self.driver_mock.get_inventory.assert_called_once_with(_NODENAME)
        ucn_mock = self.sched_client_mock.update_compute_node
        ucn_mock.assert_called_once_with(mock.sentinel.ctx, new_compute)

    @mock.patch('nova.objects.ComputeNode.save')
    def test_existing_compute_node_updated_diff_updated_at(self, save_mock):

@@ -1362,10 +1358,8 @@ class TestUpdateComputeNode(BaseTestCase):
        self.rt._update(mock.sentinel.ctx, new_compute)
        save_mock.assert_called_once_with()
        # get_inventory() is not implemented, so it shouldn't call
        # _normalize_inventory_from_cn_obj() but should call
        # update_compute_node().
        self.assertFalse(norm_mock.called)
        ucn_mock = self.sched_client_mock.update_compute_node
        ucn_mock.assert_called_once_with(mock.sentinel.ctx, new_compute)
        # _normalize_inventory_from_cn_obj
        norm_mock.assert_not_called()

    @mock.patch('nova.compute.resource_tracker.'
                '_normalize_inventory_from_cn_obj')

@@ -1374,10 +1368,7 @@ class TestUpdateComputeNode(BaseTestCase):
                norm_mock):
        """The get_inventory() virt driver method is only implemented for some
        virt drivers. This method returns inventory information for a
        node/provider in a way that the placement API better understands, and
        if this method doesn't raise a NotImplementedError, this triggers
        _update() to call the set_inventory_for_provider() method of the
        reporting client instead of the update_compute_node() method.
        node/provider in a way that the placement API better understands.
        """
        self._setup_rt()

@@ -1393,17 +1384,9 @@ class TestUpdateComputeNode(BaseTestCase):
        new_compute = orig_compute.obj_clone()
        new_compute.local_gb = 210000

        ucn_mock = self.sched_client_mock.update_compute_node
        sifp_mock = self.sched_client_mock.set_inventory_for_provider
        self.rt._update(mock.sentinel.ctx, new_compute)
        save_mock.assert_called_once_with()
        sifp_mock.assert_called_once_with(
            mock.sentinel.ctx,
            new_compute.uuid,
            new_compute.hypervisor_hostname,
            mock.sentinel.inv_data,
        )
        self.assertFalse(ucn_mock.called)
        norm_mock.assert_called_once_with(mock.sentinel.inv_data, new_compute)

    @mock.patch('nova.objects.ComputeNode.save')
    def test_existing_node_update_provider_tree_implemented(self, save_mock):

@@ -1411,9 +1394,10 @@ class TestUpdateComputeNode(BaseTestCase):
        for some virt drivers. This method returns inventory, trait, and
        aggregate information for resource providers in a tree associated with
        the compute node. If this method doesn't raise a NotImplementedError,
        it triggers _update() to call the update_from_provider_tree() method of
        the reporting client instead of set_inventory_for_provider() (old) or
        update_compute_node() (older).
        it triggers _update() to try get_inventory() and then
        compute_node_to_inventory_dict() to produce the inventory data with
        which to call the update_from_provider_tree() method of the reporting
        client instead.
        """
        fake_inv = {
            rc_fields.ResourceClass.VCPU: {

@@ -1476,8 +1460,7 @@ class TestUpdateComputeNode(BaseTestCase):
            ptree, new_compute.hypervisor_hostname)
        rc_mock.update_from_provider_tree.assert_called_once_with(
            mock.sentinel.ctx, ptree, allocations=None)
        self.sched_client_mock.update_compute_node.assert_not_called()
        self.sched_client_mock.set_inventory_for_provider.assert_not_called()
        self.driver_mock.get_inventory.assert_not_called()
        exp_inv = copy.deepcopy(fake_inv)
        # These ratios and reserved amounts come from fake_upt
        exp_inv[rc_fields.ResourceClass.VCPU]['allocation_ratio'] = 16.0
@ -3012,725 +3012,6 @@ class TestAssociations(SchedulerReportClientTestCase):
|
|||
self.assert_things_were_called(uuid)
|
||||
|
||||
|
||||
class TestComputeNodeToInventoryDict(test.NoDBTestCase):
|
||||
def test_compute_node_inventory(self):
|
||||
uuid = uuids.compute_node
|
||||
name = 'computehost'
|
||||
compute_node = objects.ComputeNode(uuid=uuid,
|
||||
hypervisor_hostname=name,
|
||||
vcpus=2,
|
||||
cpu_allocation_ratio=16.0,
|
||||
memory_mb=1024,
|
||||
ram_allocation_ratio=1.5,
|
||||
local_gb=10,
|
||||
disk_allocation_ratio=1.0)
|
||||
|
||||
self.flags(reserved_host_memory_mb=1000)
|
||||
self.flags(reserved_host_disk_mb=200)
|
||||
self.flags(reserved_host_cpus=1)
|
||||
|
||||
result = report._compute_node_to_inventory_dict(compute_node)
|
||||
|
||||
expected = {
|
||||
'VCPU': {
|
||||
'total': compute_node.vcpus,
|
||||
'reserved': CONF.reserved_host_cpus,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.vcpus,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.cpu_allocation_ratio,
|
||||
},
|
||||
'MEMORY_MB': {
|
||||
'total': compute_node.memory_mb,
|
||||
'reserved': CONF.reserved_host_memory_mb,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.memory_mb,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.ram_allocation_ratio,
|
||||
},
|
||||
'DISK_GB': {
|
||||
'total': compute_node.local_gb,
|
||||
'reserved': 1, # this is ceil(1000/1024)
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.local_gb,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.disk_allocation_ratio,
|
||||
},
|
||||
}
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
def test_compute_node_inventory_empty(self):
|
||||
uuid = uuids.compute_node
|
||||
name = 'computehost'
|
||||
compute_node = objects.ComputeNode(uuid=uuid,
|
||||
hypervisor_hostname=name,
|
||||
vcpus=0,
|
||||
cpu_allocation_ratio=16.0,
|
||||
memory_mb=0,
|
||||
ram_allocation_ratio=1.5,
|
||||
local_gb=0,
|
||||
disk_allocation_ratio=1.0)
|
||||
result = report._compute_node_to_inventory_dict(compute_node)
|
||||
self.assertEqual({}, result)
|
||||
|
||||
|
||||
class TestInventory(SchedulerReportClientTestCase):
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'_ensure_resource_provider')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'_update_inventory')
|
||||
def test_update_compute_node(self, mock_ui, mock_erp):
|
||||
cn = self.compute_node
|
||||
self.client.update_compute_node(self.context, cn)
|
||||
mock_erp.assert_called_once_with(self.context, cn.uuid,
|
||||
cn.hypervisor_hostname)
|
||||
expected_inv_data = {
|
||||
'VCPU': {
|
||||
'total': 8,
|
||||
'reserved': CONF.reserved_host_cpus,
|
||||
'min_unit': 1,
|
||||
'max_unit': 8,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': 16.0,
|
||||
},
|
||||
'MEMORY_MB': {
|
||||
'total': 1024,
|
||||
'reserved': 512,
|
||||
'min_unit': 1,
|
||||
'max_unit': 1024,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': 1.5,
|
||||
},
|
||||
'DISK_GB': {
|
||||
'total': 10,
|
||||
'reserved': 0,
|
||||
'min_unit': 1,
|
||||
'max_unit': 10,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': 1.0,
|
||||
},
|
||||
}
|
||||
mock_ui.assert_called_once_with(
|
||||
self.context,
|
||||
cn.uuid,
|
||||
expected_inv_data,
|
||||
)
|
||||
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'_ensure_resource_provider')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'_update_inventory')
|
||||
def test_update_compute_node_no_inv(self, mock_ui, mock_erp):
|
||||
"""Ensure that if there are no inventory records, we still call
|
||||
_update_inventory().
|
||||
"""
|
||||
cn = self.compute_node
|
||||
cn.vcpus = 0
|
||||
cn.memory_mb = 0
|
||||
cn.local_gb = 0
|
||||
self.client.update_compute_node(self.context, cn)
|
||||
mock_erp.assert_called_once_with(self.context, cn.uuid,
|
||||
cn.hypervisor_hostname)
|
||||
mock_ui.assert_called_once_with(self.context, cn.uuid, {})
|
||||
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'get')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'put')
|
||||
def test_update_inventory_initial_empty(self, mock_put, mock_get):
|
||||
# Ensure _update_inventory() returns a list of Inventories objects
|
||||
# after creating or updating the existing values
|
||||
uuid = uuids.compute_node
|
||||
compute_node = self.compute_node
|
||||
# Make sure the resource provider exists for preventing to call the API
|
||||
self._init_provider_tree(resources_override={})
|
||||
|
||||
mock_get.return_value.json.return_value = {
|
||||
'resource_provider_generation': 43,
|
||||
'inventories': {
|
||||
'VCPU': {'total': 16},
|
||||
'MEMORY_MB': {'total': 1024},
|
||||
'DISK_GB': {'total': 10},
|
||||
}
|
||||
}
|
||||
mock_put.return_value.status_code = 200
|
||||
mock_put.return_value.json.return_value = {
|
||||
'resource_provider_generation': 44,
|
||||
'inventories': {
|
||||
'VCPU': {'total': 16},
|
||||
'MEMORY_MB': {'total': 1024},
|
||||
'DISK_GB': {'total': 10},
|
||||
}
|
||||
}
|
||||
|
||||
inv_data = report._compute_node_to_inventory_dict(compute_node)
|
||||
result = self.client._update_inventory_attempt(
|
||||
self.context, compute_node.uuid, inv_data
|
||||
)
|
||||
self.assertTrue(result)
|
||||
|
||||
exp_url = '/resource_providers/%s/inventories' % uuid
|
||||
mock_get.assert_called_once_with(
|
||||
exp_url, global_request_id=self.context.global_id)
|
||||
# Updated with the new inventory from the PUT call
|
||||
self._validate_provider(uuid, generation=44)
|
||||
expected = {
|
||||
# Called with the newly-found generation from the existing
|
||||
# inventory
|
||||
'resource_provider_generation': 43,
|
||||
'inventories': {
|
||||
'VCPU': {
|
||||
'total': 8,
|
||||
'reserved': CONF.reserved_host_cpus,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.vcpus,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.cpu_allocation_ratio,
|
||||
},
|
||||
'MEMORY_MB': {
|
||||
'total': 1024,
|
||||
'reserved': CONF.reserved_host_memory_mb,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.memory_mb,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.ram_allocation_ratio,
|
||||
},
|
||||
'DISK_GB': {
|
||||
'total': 10,
|
||||
'reserved': 0, # reserved_host_disk_mb is 0 by default
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.local_gb,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.disk_allocation_ratio,
|
||||
},
|
||||
}
|
||||
}
|
||||
mock_put.assert_called_once_with(
|
||||
exp_url, expected, version='1.26',
|
||||
global_request_id=self.context.global_id)
|
||||
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'get')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'put')
|
||||
def test_update_inventory(self, mock_put, mock_get):
|
||||
self.flags(reserved_host_disk_mb=1000)
|
||||
|
||||
# Ensure _update_inventory() returns a list of Inventories objects
|
||||
# after creating or updating the existing values
|
||||
uuid = uuids.compute_node
|
||||
compute_node = self.compute_node
|
||||
# Make sure the resource provider exists for preventing to call the API
|
||||
self._init_provider_tree()
|
||||
new_vcpus_total = 240
|
||||
|
||||
mock_get.return_value.json.return_value = {
|
||||
'resource_provider_generation': 43,
|
||||
'inventories': {
|
||||
'VCPU': {'total': 16},
|
||||
'MEMORY_MB': {'total': 1024},
|
||||
'DISK_GB': {'total': 10},
|
||||
}
|
||||
}
|
||||
mock_put.return_value.status_code = 200
|
||||
mock_put.return_value.json.return_value = {
|
||||
'resource_provider_generation': 44,
|
||||
'inventories': {
|
||||
'VCPU': {'total': new_vcpus_total},
|
||||
'MEMORY_MB': {'total': 1024},
|
||||
'DISK_GB': {'total': 10},
|
||||
}
|
||||
}
|
||||
|
||||
inv_data = report._compute_node_to_inventory_dict(compute_node)
|
||||
# Make a change to trigger the update...
|
||||
inv_data['VCPU']['total'] = new_vcpus_total
|
||||
result = self.client._update_inventory_attempt(
|
||||
self.context, compute_node.uuid, inv_data
|
||||
)
|
||||
self.assertTrue(result)
|
||||
|
||||
exp_url = '/resource_providers/%s/inventories' % uuid
|
||||
mock_get.assert_called_once_with(
|
||||
exp_url, global_request_id=self.context.global_id)
|
||||
# Updated with the new inventory from the PUT call
|
||||
self._validate_provider(uuid, generation=44)
|
||||
expected = {
|
||||
# Called with the newly-found generation from the existing
|
||||
# inventory
|
||||
'resource_provider_generation': 43,
|
||||
'inventories': {
|
||||
'VCPU': {
|
||||
'total': new_vcpus_total,
|
||||
'reserved': 0,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.vcpus,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.cpu_allocation_ratio,
|
||||
},
|
||||
'MEMORY_MB': {
|
||||
'total': 1024,
|
||||
'reserved': CONF.reserved_host_memory_mb,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.memory_mb,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.ram_allocation_ratio,
|
||||
},
|
||||
'DISK_GB': {
|
||||
'total': 10,
|
||||
'reserved': 1, # this is ceil for 1000MB
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.local_gb,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.disk_allocation_ratio,
|
||||
},
|
||||
}
|
||||
}
|
||||
mock_put.assert_called_once_with(
|
||||
exp_url, expected, version='1.26',
|
||||
global_request_id=self.context.global_id)
|
||||
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'get')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'put')
|
||||
def test_update_inventory_no_update(self, mock_put, mock_get):
|
||||
"""Simulate situation where scheduler client is first starting up and
|
||||
ends up loading information from the placement API via a GET against
|
||||
the resource provider's inventory but has no local cached inventory
|
||||
information for a resource provider.
|
||||
"""
|
||||
uuid = uuids.compute_node
|
||||
compute_node = self.compute_node
|
||||
# Make sure the resource provider exists for preventing to call the API
|
||||
self._init_provider_tree(generation_override=42, resources_override={})
|
||||
mock_get.return_value.json.return_value = {
|
||||
'resource_provider_generation': 43,
|
||||
'inventories': {
|
||||
'VCPU': {
|
||||
'total': 8,
|
||||
'reserved': CONF.reserved_host_cpus,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.vcpus,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.cpu_allocation_ratio,
|
||||
},
|
||||
'MEMORY_MB': {
|
||||
'total': 1024,
|
||||
'reserved': CONF.reserved_host_memory_mb,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.memory_mb,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.ram_allocation_ratio,
|
||||
},
|
||||
'DISK_GB': {
|
||||
'total': 10,
|
||||
'reserved': 0,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.local_gb,
|
||||
'step_size': 1,
|
||||
'allocation_ratio': compute_node.disk_allocation_ratio,
|
||||
},
|
||||
}
|
||||
}
|
||||
inv_data = report._compute_node_to_inventory_dict(compute_node)
|
||||
result = self.client._update_inventory_attempt(
|
||||
self.context, compute_node.uuid, inv_data
|
||||
)
|
||||
self.assertTrue(result)
|
||||
exp_url = '/resource_providers/%s/inventories' % uuid
|
||||
mock_get.assert_called_once_with(
|
||||
exp_url, global_request_id=self.context.global_id)
|
||||
# No update so put should not be called
|
||||
self.assertFalse(mock_put.called)
|
||||
# Make sure we updated the generation from the inventory records
|
||||
self._validate_provider(uuid, generation=43)
|
||||
|
||||
@mock.patch.object(report.LOG, 'info')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'_get_inventory')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'put')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'_ensure_resource_provider')
|
||||
def test_update_inventory_concurrent_update(self, mock_ensure,
|
||||
mock_put, mock_get, mock_info):
|
||||
# Ensure _update_inventory() returns a list of Inventories objects
|
||||
# after creating or updating the existing values
|
||||
uuid = uuids.compute_node
|
||||
compute_node = self.compute_node
|
||||
# Make sure the resource provider exists for preventing to call the API
|
||||
self.client._provider_tree.new_root(
|
||||
compute_node.hypervisor_hostname,
|
||||
compute_node.uuid,
|
||||
generation=42,
|
||||
)
|
||||
|
||||
mock_get.return_value = {
|
||||
'resource_provider_generation': 42,
|
||||
'inventories': {},
|
||||
}
|
||||
mock_put.return_value.status_code = 409
|
||||
mock_put.return_value.text = 'Does not match inventory in use'
|
||||
mock_put.return_value.headers = {'x-openstack-request-id':
|
||||
uuids.request_id}
|
||||
|
||||
inv_data = report._compute_node_to_inventory_dict(compute_node)
|
||||
result = self.client._update_inventory_attempt(
|
||||
self.context, compute_node.uuid, inv_data
|
||||
)
|
||||
self.assertFalse(result)
|
||||
|
||||
# Invalidated the cache
|
||||
self.assertFalse(self.client._provider_tree.exists(uuid))
|
||||
# Refreshed our resource provider
|
||||
mock_ensure.assert_called_once_with(self.context, uuid)
|
||||
# Logged the request id in the log message
|
||||
self.assertEqual(uuids.request_id,
|
||||
mock_info.call_args[0][1]['placement_req_id'])
|
||||
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'_get_inventory')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'put')
|
||||
def test_update_inventory_inventory_in_use(self, mock_put, mock_get):
|
||||
# Ensure _update_inventory() returns a list of Inventories objects
|
||||
# after creating or updating the existing values
|
||||
uuid = uuids.compute_node
|
||||
compute_node = self.compute_node
|
||||
# Make sure the resource provider exists for preventing to call the API
|
||||
self.client._provider_tree.new_root(
|
||||
compute_node.hypervisor_hostname,
|
||||
compute_node.uuid,
|
||||
generation=42,
|
||||
)
|
||||
|
||||
mock_get.return_value = {
|
||||
'resource_provider_generation': 42,
|
||||
'inventories': {},
|
||||
}
|
||||
mock_put.return_value.status_code = 409
|
||||
mock_put.return_value.text = (
|
||||
"update conflict: Inventory for VCPU on "
|
||||
"resource provider 123 in use"
|
||||
)
|
||||
|
||||
inv_data = report._compute_node_to_inventory_dict(compute_node)
|
||||
self.assertRaises(
|
||||
exception.InventoryInUse,
|
||||
self.client._update_inventory_attempt,
|
||||
self.context,
|
||||
compute_node.uuid,
|
||||
inv_data,
|
||||
)
|
||||
|
||||
# Did NOT invalidate the cache
|
||||
self.assertTrue(self.client._provider_tree.exists(uuid))
|
||||
|
||||
@mock.patch.object(report.LOG, 'info')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'_get_inventory')
|
||||
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
|
||||
'put')
|
||||
def test_update_inventory_unknown_response(self, mock_put, mock_get,
|
||||
mock_info):
|
||||
# Ensure _update_inventory() returns a list of Inventories objects
|
||||
# after creating or updating the existing values
|
||||
uuid = uuids.compute_node
|
||||
compute_node = self.compute_node
|
||||
# Make sure the resource provider exists for preventing to call the API
|
||||
self.client._provider_tree.new_root(
|
||||
compute_node.hypervisor_hostname,
|
||||
compute_node.uuid,
|
||||
generation=42,
|
||||
)
|
||||
|
||||
mock_get.return_value = {
|
||||
'resource_provider_generation': 42,
|
||||
'inventories': {},
|
||||
}
|
||||
mock_put.return_value.status_code = 234
|
||||
mock_put.return_value.headers = {'x-openstack-request-id':
|
||||
uuids.request_id}
|
||||
|
||||
inv_data = report._compute_node_to_inventory_dict(compute_node)
|
||||
result = self.client._update_inventory_attempt(
|
||||
self.context, compute_node.uuid, inv_data
|
||||
)
|
||||
self.assertFalse(result)
|
||||
|
||||
# No cache invalidation
|
||||
self.assertTrue(self.client._provider_tree.exists(uuid))
|
||||
|
||||
    @mock.patch.object(report.LOG, 'warning')
    @mock.patch.object(report.LOG, 'debug')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'put')
    def test_update_inventory_failed(self, mock_put, mock_get,
                                     mock_debug, mock_warn):
        # Ensure _update_inventory_attempt() returns False and logs the
        # placement request id when the update request fails outright
        uuid = uuids.compute_node
        compute_node = self.compute_node
        # Seed the cache so the resource provider exists and we don't
        # call the API to create it
        self.client._provider_tree.new_root(
            compute_node.hypervisor_hostname,
            compute_node.uuid,
            generation=42,
        )

        mock_get.return_value = {
            'resource_provider_generation': 42,
            'inventories': {},
        }
        mock_put.return_value = fake_requests.FakeResponse(
            400, headers={'x-openstack-request-id': uuids.request_id})

        inv_data = report._compute_node_to_inventory_dict(compute_node)
        result = self.client._update_inventory_attempt(
            self.context, compute_node.uuid, inv_data
        )
        self.assertFalse(result)

        # No cache invalidation
        self.assertTrue(self.client._provider_tree.exists(uuid))
        # Logged the request id in the log messages
        self.assertEqual(uuids.request_id,
                         mock_debug.call_args[0][1]['placement_req_id'])
        self.assertEqual(uuids.request_id,
                         mock_warn.call_args[0][1]['placement_req_id'])

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory_attempt')
    @mock.patch('time.sleep')
    def test_update_inventory_fails_and_then_succeeds(self, mock_sleep,
                                                      mock_update,
                                                      mock_ensure):
        # Ensure _update_inventory() retries after a failed attempt and
        # reports success once a retry succeeds.
        cn = self.compute_node
        mock_update.side_effect = (False, True)

        self.client._provider_tree.new_root(
            cn.hypervisor_hostname,
            cn.uuid,
            generation=42,
        )
        result = self.client._update_inventory(
            self.context, cn.uuid, mock.sentinel.inv_data
        )
        self.assertTrue(result)

        # Only slept once
        mock_sleep.assert_called_once_with(1)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory_attempt')
    @mock.patch('time.sleep')
    def test_update_inventory_never_succeeds(self, mock_sleep,
                                             mock_update,
                                             mock_ensure):
        # Ensure _update_inventory() gives up and reports failure after
        # exhausting its retry attempts.
        cn = self.compute_node
        mock_update.side_effect = (False, False, False)

        self.client._provider_tree.new_root(
            cn.hypervisor_hostname,
            cn.uuid,
            generation=42,
        )
        result = self.client._update_inventory(
            self.context, cn.uuid, mock.sentinel.inv_data
        )
        self.assertFalse(result)

        # Slept three times
        mock_sleep.assert_has_calls([mock.call(1), mock.call(1), mock.call(1)])

        # Three attempts to update
        mock_update.assert_has_calls([
            mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
            mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
            mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
        ])

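The two retry tests above fully determine the wrapper's shape: failure then success sleeps once; three straight failures sleep three times and give up. A minimal sketch of a loop with that behavior (illustrative; nova's _update_inventory is the real implementation):

import time

def update_with_retries(attempt, context, rp_uuid, inv_data, max_attempts=3):
    # One-second pause after every failed attempt, matching the
    # mock_sleep assertions above.
    for _ in range(max_attempts):
        if attempt(context, rp_uuid, inv_data):
            return True
        time.sleep(1)
    return False

assert update_with_retries(lambda *args: True, None, None, None) is True
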
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_no_custom(self, mock_erp, mock_erc,
                                                  mock_upd):
        """Tests that inventory records of all standard resource classes are
        passed to the report client's _update_inventory() method.
        """
        inv_data = {
            'VCPU': {
                'total': 24,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 24,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 1024,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'DISK_GB': {
                'total': 100,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 100,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
        }
        self.client.set_inventory_for_provider(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            inv_data,
        )
        mock_erp.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            parent_provider_uuid=None,
        )
        # No custom resource classes to ensure...
        mock_erc.assert_called_once_with(
            self.context, set(['VCPU', 'MEMORY_MB', 'DISK_GB']))
        mock_upd.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            inv_data,
        )

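Each inventory record above carries the same six placement fields. A quick validity check one could run over such a record (illustrative helper, not part of nova):

def check_inventory_record(rec):
    # The six fields every record in these tests provides.
    required = {'total', 'reserved', 'min_unit', 'max_unit',
                'step_size', 'allocation_ratio'}
    assert required <= set(rec), 'missing fields: %s' % (required - set(rec))
    # Units must be internally consistent.
    assert rec['min_unit'] <= rec['max_unit'] <= rec['total']

check_inventory_record({'total': 24, 'reserved': 0, 'min_unit': 1,
                        'max_unit': 24, 'step_size': 1,
                        'allocation_ratio': 1.0})
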
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_no_inv(self, mock_erp, mock_erc,
                                               mock_upd):
        """Tests that passing an empty set of inventory records triggers a
        delete of inventory for the provider.
        """
        inv_data = {}
        self.client.set_inventory_for_provider(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            inv_data,
        )
        mock_erp.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            parent_provider_uuid=None,
        )
        mock_erc.assert_called_once_with(self.context, set())
        mock_upd.assert_called_once_with(
            self.context, mock.sentinel.rp_uuid, {})

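Passing an empty dict exercises the same pipeline end to end. A usage sketch of the call chain this test asserts (method names as mocked above):

# client.set_inventory_for_provider(ctx, rp_uuid, rp_name, {}) leads to:
#   _ensure_resource_provider(ctx, rp_uuid, rp_name,
#                             parent_provider_uuid=None)
#   _ensure_resource_classes(ctx, set())   # nothing to create
#   _update_inventory(ctx, rp_uuid, {})    # wipes any existing inventory
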
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_with_custom(self, mock_erp, mock_erc,
                                                    mock_upd):
        """Tests that inventory records that include a custom resource class
        are passed to the report client's _update_inventory() method and that
        the custom resource class is auto-created.
        """
        inv_data = {
            'VCPU': {
                'total': 24,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 24,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 1024,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'DISK_GB': {
                'total': 100,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 100,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'CUSTOM_IRON_SILVER': {
                'total': 1,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 1,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
        }
        self.client.set_inventory_for_provider(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            inv_data,
        )
        mock_erp.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            parent_provider_uuid=None,
        )
        mock_erc.assert_called_once_with(
            self.context,
            set(['VCPU', 'MEMORY_MB', 'DISK_GB', 'CUSTOM_IRON_SILVER']))
        mock_upd.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            inv_data,
        )

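Across the three set_inventory_for_provider tests, the set handed to _ensure_resource_classes is always exactly the keys of the inventory dict, custom classes included; the docstring above notes that unknown CUSTOM_* names are then auto-created. A sketch of that derivation (trivial, but it is the observable contract):

def classes_to_ensure(inv_data):
    # Standard and custom resource classes alike: just the dict keys.
    return set(inv_data)

assert classes_to_ensure({'VCPU': {}, 'CUSTOM_IRON_SILVER': {}}) == {
    'VCPU', 'CUSTOM_IRON_SILVER'}
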
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes', new=mock.Mock())
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_with_parent(self, mock_erp):
        """Ensure parent UUID is sent through."""
        self.client.set_inventory_for_provider(
            self.context, uuids.child, 'junior', {},
            parent_provider_uuid=uuids.parent)
        mock_erp.assert_called_once_with(
            self.context, uuids.child, 'junior',
            parent_provider_uuid=uuids.parent)

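In the parent/child case the cached view ends up as a two-level tree. A sketch seeding such a tree directly, assuming nova.compute.provider_tree's ProviderTree API with the new_root/new_child calls used elsewhere in these tests:

from nova.compute import provider_tree
from oslo_utils.fixture import uuidsentinel as uuids

tree = provider_tree.ProviderTree()
tree.new_root('parent', uuids.parent, generation=0)
# Attach the child under the parent by UUID.
tree.new_child('junior', uuids.parent, uuid=uuids.child, generation=0)
assert tree.exists(uuids.child)
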
class TestAllocations(SchedulerReportClientTestCase):

    @mock.patch('nova.compute.utils.is_volume_backed_instance')

@@ -20,7 +20,6 @@ from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler.client import query as scheduler_query_client
from nova.scheduler.client import report as scheduler_report_client
from nova import test
"""Tests for Scheduler Client."""


@@ -79,44 +78,3 @@ class SchedulerClientTestCase(test.NoDBTestCase):
            aggregate=aggregate)
        mock_delete_agg.assert_called_once_with(
            'context', aggregate)

    @mock.patch.object(scheduler_report_client.SchedulerReportClient,
                       'update_compute_node')
    def test_update_compute_node(self, mock_update_compute_node):
        self.client.update_compute_node(mock.sentinel.ctx, mock.sentinel.cn)

        mock_update_compute_node.assert_called_once_with(
            mock.sentinel.ctx, mock.sentinel.cn)

    @mock.patch.object(scheduler_report_client.SchedulerReportClient,
                       'set_inventory_for_provider')
    def test_set_inventory_for_provider(self, mock_set):
        self.client.set_inventory_for_provider(
            mock.sentinel.ctx,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            mock.sentinel.inv_data,
        )
        mock_set.assert_called_once_with(
            mock.sentinel.ctx,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            mock.sentinel.inv_data,
            parent_provider_uuid=None,
        )
        # Pass the optional parent_provider_uuid
        mock_set.reset_mock()
        self.client.set_inventory_for_provider(
            mock.sentinel.ctx,
            mock.sentinel.child_uuid,
            mock.sentinel.child_name,
            mock.sentinel.inv_data2,
            parent_provider_uuid=mock.sentinel.rp_uuid,
        )
        mock_set.assert_called_once_with(
            mock.sentinel.ctx,
            mock.sentinel.child_uuid,
            mock.sentinel.child_name,
            mock.sentinel.inv_data2,
            parent_provider_uuid=mock.sentinel.rp_uuid,
        )

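The pass-through tests in the hunk above, which this change appears to remove, document the facade pattern in play: SchedulerClient forwards verbatim to SchedulerReportClient. A minimal sketch of that delegation (shape only, not the full class):

class SchedulerClientSketch(object):
    """Illustrative facade; nova's real class wires in the report client."""

    def __init__(self, reportclient):
        self.reportclient = reportclient

    def set_inventory_for_provider(self, context, rp_uuid, rp_name,
                                   inv_data, parent_provider_uuid=None):
        # No added logic: forward everything, defaults included.
        self.reportclient.set_inventory_for_provider(
            context, rp_uuid, rp_name, inv_data,
            parent_provider_uuid=parent_provider_uuid)
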
@@ -197,6 +197,7 @@ class PowerVMDriver(driver.ComputeDriver):
        # otherwise.
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        # TODO(efried): Fix these to reflect something like reality
        cpu_reserved = CONF.reserved_host_cpus
        mem_reserved = CONF.reserved_host_memory_mb
        disk_reserved = self._get_reserved_host_disk_gb_from_config()

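These reserved values and ratios feed the inventory records the driver writes back into the provider tree during update_provider_tree(). A hedged sketch of that final step, continuing the snippet above; total_cpus and the ratio key are assumed stand-ins, and the field names follow the inventory dicts used throughout this change:

        # Illustrative only: compose one inventory record and push it into
        # the tree; the real driver builds VCPU, MEMORY_MB and DISK_GB
        # together from host data.
        total_cpus = 8  # assumed; the driver reads this from the host
        inv = {
            'VCPU': {
                'total': total_cpus,
                'reserved': cpu_reserved,
                'min_unit': 1,
                'max_unit': total_cpus,
                'step_size': 1,
                'allocation_ratio': ratios['VCPU'],  # assumed key shape
            },
        }
        provider_tree.update_inventory(nodename, inv)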