virt: add get_inventory() virt driver API method

Adds a new get_inventory() method to the virt driver API for returning a
dict of inventory records in a format that the placement API
understands.

We also move the ComputeNode.save() call out of the scheduler reporting
client and into the resource tracker. The resource tracker's _update()
method now attempts to call the new get_inventory() virt driver method
and falls back on the existing update_resource_stats() (renamed to
update_compute_node() in this patch) method when get_inventory() is not
implemented.

The next patch implements get_inventory() for the Ironic virt driver.

Change-Id: I921daea7f6d5776b19561f0ca457e604a372eb9e
blueprint: custom-resource-classes-pike
This commit is contained in:
Jay Pipes 2017-03-03 17:34:42 -05:00
parent 841a9b9251
commit fbc5a67de9
9 changed files with 535 additions and 63 deletions

View File

@ -453,7 +453,7 @@ class ResourceTracker(object):
cn = self.compute_nodes[nodename]
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self.scheduler_client.update_resource_stats(cn)
self._update(context, cn)
return
# now try to get the compute node record from the
@ -463,7 +463,7 @@ class ResourceTracker(object):
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self.scheduler_client.update_resource_stats(cn)
self._update(context, cn)
return
# there was no local copy and none in the database
@ -479,7 +479,7 @@ class ResourceTracker(object):
{'host': self.host, 'node': nodename})
self._setup_pci_tracker(context, cn, resources)
self.scheduler_client.update_resource_stats(cn)
self._update(context, cn)
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
@ -741,8 +741,22 @@ class ResourceTracker(object):
"""Update partial stats locally and populate them to Scheduler."""
if not self._resource_change(compute_node):
return
nodename = compute_node.hypervisor_hostname
compute_node.save()
# Persist the stats to the Scheduler
self.scheduler_client.update_resource_stats(compute_node)
try:
inv_data = self.driver.get_inventory(nodename)
self.scheduler_client.set_inventory_for_provider(
compute_node.uuid,
compute_node.hypervisor_hostname,
inv_data,
)
except NotImplementedError:
# Eventually all virt drivers will return an inventory dict in the
# format that the placement API expects and we'll be able to remove
# this code branch
self.scheduler_client.update_compute_node(compute_node)
if self.pci_tracker:
self.pci_tracker.save(context)

View File

@ -2054,6 +2054,10 @@ class InventoryWithResourceClassNotFound(NotFound):
msg_fmt = _("No inventory of class %(resource_class)s found.")
class InvalidResourceClass(Invalid):
    # Raised when the placement API rejects a resource class name (see
    # SchedulerReportClient._create_resource_class, which raises this on a
    # non-2xx/non-409 response to the create request).
    msg_fmt = _("Resource class '%(resource_class)s' invalid.")
class ResourceClassExists(NovaException):
    # Raised when attempting to create a resource class whose name is
    # already registered.
    msg_fmt = _("Resource class %(resource_class)s already exists.")

View File

@ -56,8 +56,15 @@ class SchedulerClient(object):
def delete_aggregate(self, context, aggregate):
self.queryclient.delete_aggregate(context, aggregate)
def update_resource_stats(self, compute_node):
self.reportclient.update_resource_stats(compute_node)
def set_inventory_for_provider(self, rp_uuid, rp_name, inv_data):
    """Forward a provider inventory update to the placement report client.

    :param rp_uuid: UUID of the resource provider whose inventory is set.
    :param rp_name: Name used if a provider record must be created first.
    :param inv_data: Dict of inventory records keyed by resource class name.
    """
    report = self.reportclient
    report.set_inventory_for_provider(rp_uuid, rp_name, inv_data)
def update_compute_node(self, compute_node):
    """Create or update placement-side stats for the supplied compute node.

    :param compute_node: nova.objects.ComputeNode to report to placement.
    """
    report = self.reportclient
    report.update_compute_node(compute_node)
def update_instance_info(self, context, host_name, instance_info):
self.queryclient.update_instance_info(context, host_name,

View File

@ -215,14 +215,22 @@ class SchedulerReportClient(object):
url,
endpoint_filter=self.ks_filter, raise_exc=False, **kwargs)
def post(self, url, data, version=None):
    """POST the supplied data to the placement API at the given URL.

    :param url: Path, relative to the placement endpoint, to POST to.
    :param data: Payload to serialize as the JSON request body.
    :param version: Optional placement microversion string (e.g. "1.2")
        to send in the OpenStack-API-Version header. When None, no
        version header is sent.
    :returns: The response object from the underlying session client.
    """
    # NOTE(sdague): using json= instead of data= sets the
    # media type to application/json for us. Placement API is
    # more sensitive to this than other APIs in the OpenStack
    # ecosystem.
    kwargs = {}
    if version is not None:
        # TODO(mriedem): Perform some version discovery at some point.
        kwargs = {
            'headers': {
                'OpenStack-API-Version': 'placement %s' % version
            },
        }
    return self._client.post(
        url, json=data,
        endpoint_filter=self.ks_filter, raise_exc=False, **kwargs)
def put(self, url, data):
# NOTE(sdague): using json= instead of data= sets the
@ -664,7 +672,96 @@ class SchedulerReportClient(object):
}
LOG.error(msg, msg_args)
def update_resource_stats(self, compute_node):
def set_inventory_for_provider(self, rp_uuid, rp_name, inv_data):
    """Given the UUID of a provider, set the inventory records for the
    provider to the supplied dict of resources.

    :param rp_uuid: UUID of the resource provider to set inventory for
    :param rp_name: Name of the resource provider in case we need to create
        a record for it in the placement API
    :param inv_data: Dict, keyed by resource class name, of inventory data
        to set against the provider
    :raises: exc.InvalidResourceClass if a supplied custom resource class
        name does not meet the placement API's format requirements.
    """
    # Make sure a provider record exists in placement before touching
    # its inventory.
    self._ensure_resource_provider(rp_uuid, rp_name)
    new_inv = {}
    for rc_name, inv in inv_data.items():
        if rc_name not in fields.ResourceClass.STANDARD:
            # Auto-create custom resource classes coming from a virt driver
            self._get_or_create_resource_class(rc_name)
        new_inv[rc_name] = inv
    # An empty inventory dict means the provider reports no resources, so
    # delete any existing inventory instead of updating it.
    if new_inv:
        self._update_inventory(rp_uuid, new_inv)
    else:
        self._delete_inventory(rp_uuid)
@safe_connect
def _get_or_create_resource_class(self, name):
    """Queries the placement API for a resource class with the supplied
    string name. If the resource class does not exist, creates it.

    Returns the resource class name if it exists or was created, else None.

    :param name: String name of the resource class to check/create.
    """
    # Microversion 1.2 is required for the /resource_classes endpoints;
    # see the version-discovery TODO in post().
    resp = self.get("/resource_classes/%s" % name, version="1.2")
    if 200 <= resp.status_code < 300:
        # Already exists; nothing to do.
        return name
    elif resp.status_code == 404:
        # Not found: create it. A creation failure raises
        # InvalidResourceClass from _create_resource_class and propagates.
        self._create_resource_class(name)
        return name
    else:
        # Unexpected error from placement: log it and signal failure with
        # None rather than raising.
        msg = _LE("Failed to retrieve resource class record from "
                  "placement API for resource class %(rc_name)s. "
                  "Got %(status_code)d: %(err_text)s.")
        args = {
            'rc_name': name,
            'status_code': resp.status_code,
            'err_text': resp.text,
        }
        LOG.error(msg, args)
        return None
def _create_resource_class(self, name):
    """Calls the placement API to create a new resource class.

    :param name: String name of the resource class to create.
    :returns: None on successful creation.
    :raises: `exception.InvalidResourceClass` upon error.
    """
    url = "/resource_classes"
    payload = {
        'name': name,
    }
    # Microversion 1.2 is required for the /resource_classes endpoints.
    resp = self.post(url, payload, version="1.2")
    if 200 <= resp.status_code < 300:
        msg = _LI("Created resource class record via placement API "
                  "for resource class %s.")
        LOG.info(msg, name)
    elif resp.status_code == 409:
        # Another thread concurrently created a resource class with the
        # same name. Log a warning and then just return
        msg = _LI("Another thread already created a resource class "
                  "with the name %s. Returning.")
        LOG.info(msg, name)
    else:
        # Any other status (e.g. 400 for a badly-formatted name) is
        # treated as an invalid resource class.
        msg = _LE("Failed to create resource class %(resource_class)s in "
                  "placement API. Got %(status_code)d: %(err_text)s.")
        args = {
            'resource_class': name,
            'status_code': resp.status_code,
            'err_text': resp.text,
        }
        LOG.error(msg, args)
        raise exception.InvalidResourceClass(resource_class=name)
def update_compute_node(self, compute_node):
"""Creates or updates stats for the supplied compute node.
:param compute_node: updated nova.objects.ComputeNode to report
@ -673,7 +770,6 @@ class SchedulerReportClient(object):
resource classes that would be deleted by an update to the
placement API.
"""
compute_node.save()
self._ensure_resource_provider(compute_node.uuid,
compute_node.hypervisor_hostname)
inv_data = _compute_node_to_inventory_dict(compute_node)

View File

@ -102,7 +102,7 @@ class SchedulerReportClientTests(test.TestCase):
self.assertIsNone(rp)
# Now let's update status for our compute node.
self.client.update_resource_stats(self.compute_node)
self.client.update_compute_node(self.compute_node)
# So now we have a resource provider
rp = self.client._get_resource_provider(self.compute_uuid)
@ -156,7 +156,7 @@ class SchedulerReportClientTests(test.TestCase):
self.compute_node.vcpus = 0
self.compute_node.memory_mb = 0
self.compute_node.local_gb = 0
self.client.update_resource_stats(self.compute_node)
self.client.update_compute_node(self.compute_node)
# Check there's no more inventory records
resp = self.client.get(inventory_url)

View File

@ -56,6 +56,7 @@ _VIRT_DRIVER_AVAIL_RESOURCES = {
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
uuid=uuids.cn1,
host=_HOSTNAME,
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
@ -423,6 +424,7 @@ def setup_rt(hostname, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
# Make sure we don't change any global fixtures during tests
virt_resources = copy.deepcopy(virt_resources)
vd.get_available_resource.return_value = virt_resources
vd.get_inventory.side_effect = NotImplementedError
vd.get_host_ip_addr.return_value = _NODENAME
vd.estimate_instance_overhead.side_effect = estimate_overhead
@ -488,18 +490,21 @@ class TestUpdateAvailableResources(BaseTestCase):
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
# This will call _init_compute_node() and create a ComputeNode object
# and will also call through to InstanceList.get_by_host_and_node()
# because the node is available.
self._update_available_resources()
self.assertTrue(update_mock.called)
self.assertTrue(get_mock.called)
update_mock.reset_mock()
get_mock.reset_mock()
# OK, now simulate a node being disabled by the Ironic virt driver.
vd = self.driver_mock
vd.node_is_available.return_value = False
update_mock = self._update_available_resources()
self._update_available_resources()
self.assertFalse(update_mock.called)
self.assertFalse(get_mock.called)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@ -959,7 +964,9 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_no_op_init_compute_node(self, get_mock, service_mock,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_no_op_init_compute_node(self, update_mock, get_mock, service_mock,
create_mock, pci_mock):
self._setup_rt()
@ -973,13 +980,15 @@ class TestInitComputeNode(BaseTestCase):
self.assertFalse(get_mock.called)
self.assertFalse(create_mock.called)
self.assertTrue(pci_mock.called)
self.assertTrue(self.sched_client_mock.update_resource_stats.called)
self.assertTrue(update_mock.called)
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_compute_node_loaded(self, get_mock, create_mock,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_loaded(self, update_mock, get_mock, create_mock,
pci_mock):
self._setup_rt()
@ -995,14 +1004,16 @@ class TestInitComputeNode(BaseTestCase):
get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
self.assertFalse(create_mock.called)
self.assertTrue(self.sched_client_mock.update_resource_stats.called)
self.assertTrue(update_mock.called)
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList(objects=[]))
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_compute_node_created_on_empty(self, get_mock, create_mock,
pci_tracker_mock):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty(self, update_mock, get_mock,
create_mock, pci_tracker_mock):
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
self._setup_rt()
@ -1073,17 +1084,17 @@ class TestInitComputeNode(BaseTestCase):
self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
pci_tracker_mock.assert_called_once_with(mock.sentinel.ctx,
42)
self.assertTrue(self.sched_client_mock.update_resource_stats.called)
self.assertTrue(update_mock.called)
class TestUpdateComputeNode(BaseTestCase):
@mock.patch('nova.objects.Service.get_by_compute_host')
def test_existing_compute_node_updated_same_resources(self, service_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_existing_compute_node_updated_same_resources(self, save_mock):
self._setup_rt()
# This is the same set of resources as the fixture, deliberately. We
# are checking below to see that update_resource_stats() is not
# are checking below to see that update_compute_node() is not
# needlessly called when the resources don't actually change.
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
@ -1095,12 +1106,13 @@ class TestUpdateComputeNode(BaseTestCase):
# are already stored in the resource tracker, that the scheduler client
# won't be called again to update those (unchanged) resources for the
# compute node
urs_mock = self.sched_client_mock.update_resource_stats
ucn_mock = self.sched_client_mock.update_compute_node
self.rt._update(mock.sentinel.ctx, new_compute)
self.assertFalse(urs_mock.called)
self.assertFalse(ucn_mock.called)
self.assertFalse(save_mock.called)
@mock.patch('nova.objects.Service.get_by_compute_host')
def test_existing_compute_node_updated_diff_updated_at(self, service_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_existing_compute_node_updated_diff_updated_at(self, save_mock):
self._setup_rt()
ts1 = timeutils.utcnow()
ts2 = ts1 + datetime.timedelta(seconds=10)
@ -1115,12 +1127,13 @@ class TestUpdateComputeNode(BaseTestCase):
new_compute = orig_compute.obj_clone()
new_compute.updated_at = ts2
urs_mock = self.sched_client_mock.update_resource_stats
ucn_mock = self.sched_client_mock.update_compute_node
self.rt._update(mock.sentinel.ctx, new_compute)
self.assertFalse(urs_mock.called)
self.assertFalse(save_mock.called)
self.assertFalse(ucn_mock.called)
@mock.patch('nova.objects.Service.get_by_compute_host')
def test_existing_compute_node_updated_new_resources(self, service_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_existing_compute_node_updated_new_resources(self, save_mock):
self._setup_rt()
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
@ -1136,9 +1149,44 @@ class TestUpdateComputeNode(BaseTestCase):
new_compute.vcpus_used = 2
new_compute.local_gb_used = 4
urs_mock = self.sched_client_mock.update_resource_stats
ucn_mock = self.sched_client_mock.update_compute_node
self.rt._update(mock.sentinel.ctx, new_compute)
urs_mock.assert_called_once_with(new_compute)
save_mock.assert_called_once_with()
ucn_mock.assert_called_once_with(new_compute)
@mock.patch('nova.objects.ComputeNode.save')
def test_existing_node_get_inventory_implemented(self, save_mock):
"""The get_inventory() virt driver method is only implemented for some
virt drivers. This method returns inventory information for a
node/provider in a way that the placement API better understands, and
if this method doesn't raise a NotImplementedError, this triggers
_update() to call the set_inventory_for_provider() method of the
reporting client instead of the update_compute_node() method.
"""
self._setup_rt()
# Emulate a driver that has implemented the new get_inventory() virt
# driver method
self.driver_mock.get_inventory.side_effect = [mock.sentinel.inv_data]
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
self.rt.old_resources[_NODENAME] = orig_compute
# Deliberately changing local_gb to trigger updating inventory
new_compute = orig_compute.obj_clone()
new_compute.local_gb = 210000
ucn_mock = self.sched_client_mock.update_compute_node
sifp_mock = self.sched_client_mock.set_inventory_for_provider
self.rt._update(mock.sentinel.ctx, new_compute)
save_mock.assert_called_once_with()
sifp_mock.assert_called_once_with(
new_compute.uuid,
new_compute.hypervisor_hostname,
mock.sentinel.inv_data,
)
self.assertFalse(ucn_mock.called)
class TestInstanceClaim(BaseTestCase):
@ -1380,7 +1428,8 @@ class TestInstanceClaim(BaseTestCase):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_abort_context_manager(self, migr_mock, pci_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_claim_abort_context_manager(self, save_mock, migr_mock, pci_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
cn = self.rt.compute_nodes[_NODENAME]
@ -1419,7 +1468,8 @@ class TestInstanceClaim(BaseTestCase):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_abort(self, migr_mock, pci_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_claim_abort(self, save_mock, migr_mock, pci_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
@ -1459,7 +1509,8 @@ class TestInstanceClaim(BaseTestCase):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_limits(self, migr_mock, pci_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_claim_limits(self, save_mock, migr_mock, pci_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
good_limits = {
@ -1477,7 +1528,8 @@ class TestInstanceClaim(BaseTestCase):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_numa(self, migr_mock, pci_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_claim_numa(self, save_mock, migr_mock, pci_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
cn = self.rt.compute_nodes[_NODENAME]
@ -1508,8 +1560,9 @@ class TestResize(BaseTestCase):
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_resize_claim_same_host(self, get_mock, migr_mock, get_cn_mock,
pci_mock, instance_pci_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_resize_claim_same_host(self, save_mock, get_mock, migr_mock,
get_cn_mock, pci_mock, instance_pci_mock):
# Resize an existing instance from its current flavor (instance type
# 1) to a new flavor (instance type 2) and verify that the compute
# node's resources are appropriately updated to account for the new
@ -1609,7 +1662,9 @@ class TestResize(BaseTestCase):
return_value=[])
@mock.patch('nova.objects.InstanceList.get_by_host_and_node',
return_value=[])
@mock.patch('nova.objects.ComputeNode.save')
def _test_instance_build_resize(self,
save_mock,
get_by_host_and_node_mock,
get_in_progress_by_host_and_node_mock,
get_by_host_and_nodename_mock,
@ -1714,8 +1769,9 @@ class TestResize(BaseTestCase):
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_resize_claim_dest_host_with_pci(self, get_mock, migr_mock,
get_cn_mock, pci_mock, pci_req_mock, pci_claim_mock,
@mock.patch('nova.objects.ComputeNode.save')
def test_resize_claim_dest_host_with_pci(self, save_mock, get_mock,
migr_mock, get_cn_mock, pci_mock, pci_req_mock, pci_claim_mock,
pci_dev_save_mock, pci_supports_mock):
# Starting from an empty destination compute node, perform a resize
# operation for an instance containing SR-IOV PCI devices on the
@ -1850,8 +1906,9 @@ class TestResize(BaseTestCase):
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_resize_claim_two_instances(self, get_mock, migr_mock, get_cn_mock,
pci_mock, instance_pci_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_resize_claim_two_instances(self, save_mock, get_mock, migr_mock,
get_cn_mock, pci_mock, instance_pci_mock):
# Issue two resize claims against a destination host with no prior
# instances on it and validate that the accounting for resources is
# correct.
@ -1973,8 +2030,9 @@ class TestRebuild(BaseTestCase):
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_rebuild_claim(self, get_mock, migr_mock, get_cn_mock, pci_mock,
instance_pci_mock):
@mock.patch('nova.objects.ComputeNode.save')
def test_rebuild_claim(self, save_mock, get_mock, migr_mock, get_cn_mock,
pci_mock, instance_pci_mock):
# Rebuild an instance, emulating an evacuate command issued against the
# original instance. The rebuild operation uses the resource tracker's
# _move_claim() method, but unlike with resize_claim(), rebuild_claim()

View File

@ -649,12 +649,9 @@ class TestInventory(SchedulerReportClientTestCase):
'_delete_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_update_inventory')
@mock.patch('nova.objects.ComputeNode.save')
def test_update_resource_stats(self, mock_save, mock_ui, mock_delete,
mock_erp):
def test_update_compute_node(self, mock_ui, mock_delete, mock_erp):
cn = self.compute_node
self.client.update_resource_stats(cn)
mock_save.assert_called_once_with()
self.client.update_compute_node(cn)
mock_erp.assert_called_once_with(cn.uuid, cn.hypervisor_hostname)
expected_inv_data = {
'VCPU': {
@ -694,9 +691,8 @@ class TestInventory(SchedulerReportClientTestCase):
'_delete_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_update_inventory')
@mock.patch('nova.objects.ComputeNode.save')
def test_update_resource_stats_no_inv(self, mock_save, mock_ui,
mock_delete, mock_erp):
def test_update_compute_node_no_inv(self, mock_ui, mock_delete,
mock_erp):
"""Ensure that if there are no inventory records, that we call
_delete_inventory() instead of _update_inventory().
"""
@ -704,8 +700,7 @@ class TestInventory(SchedulerReportClientTestCase):
cn.vcpus = 0
cn.memory_mb = 0
cn.local_gb = 0
self.client.update_resource_stats(cn)
mock_save.assert_called_once_with()
self.client.update_compute_node(cn)
mock_erp.assert_called_once_with(cn.uuid, cn.hypervisor_hostname)
mock_delete.assert_called_once_with(cn.uuid)
self.assertFalse(mock_ui.called)
@ -1174,6 +1169,190 @@ There was a conflict when trying to complete your request.
# Slept three times
mock_sleep.assert_has_calls([mock.call(1), mock.call(1), mock.call(1)])
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_update_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_delete_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_or_create_resource_class')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_ensure_resource_provider')
def test_set_inventory_for_provider_no_custom(self, mock_erp, mock_gocr,
mock_del, mock_upd):
"""Tests that inventory records of all standard resource classes are
passed to the report client's _update_inventory() method.
"""
inv_data = {
'VCPU': {
'total': 24,
'reserved': 0,
'min_unit': 1,
'max_unit': 24,
'step_size': 1,
'allocation_ratio': 1.0,
},
'MEMORY_MB': {
'total': 1024,
'reserved': 0,
'min_unit': 1,
'max_unit': 1024,
'step_size': 1,
'allocation_ratio': 1.0,
},
'DISK_GB': {
'total': 100,
'reserved': 0,
'min_unit': 1,
'max_unit': 100,
'step_size': 1,
'allocation_ratio': 1.0,
},
}
self.client.set_inventory_for_provider(
mock.sentinel.rp_uuid,
mock.sentinel.rp_name,
inv_data,
)
mock_erp.assert_called_once_with(
mock.sentinel.rp_uuid,
mock.sentinel.rp_name,
)
# No custom resource classes to ensure...
self.assertFalse(mock_gocr.called)
mock_upd.assert_called_once_with(
mock.sentinel.rp_uuid,
inv_data,
)
self.assertFalse(mock_del.called)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_update_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_delete_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_or_create_resource_class')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_ensure_resource_provider')
def test_set_inventory_for_provider_bad_custom(self, mock_erp, mock_gocr,
mock_del, mock_upd):
"""Tests that inventory record containing a badly-formatted resource
class results in raising an appropriate exception.
"""
exc = exception.InvalidResourceClass(resource_class='BAD_FOO')
mock_gocr.side_effect = exc
inv_data = {
'BAD_FOO': {
'total': 100,
'reserved': 0,
'min_unit': 1,
'max_unit': 100,
'step_size': 1,
'allocation_ratio': 1.0,
},
}
self.assertRaises(
exception.InvalidResourceClass,
self.client.set_inventory_for_provider,
mock.sentinel.rp_uuid,
mock.sentinel.rp_name,
inv_data,
)
self.assertFalse(mock_upd.called)
self.assertFalse(mock_del.called)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_update_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_delete_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_or_create_resource_class')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_ensure_resource_provider')
def test_set_inventory_for_provider_no_inv(self, mock_erp, mock_gocr,
mock_del, mock_upd):
"""Tests that passing empty set of inventory records triggers a delete
of inventory for the provider.
"""
inv_data = {}
self.client.set_inventory_for_provider(
mock.sentinel.rp_uuid,
mock.sentinel.rp_name,
inv_data,
)
mock_erp.assert_called_once_with(
mock.sentinel.rp_uuid,
mock.sentinel.rp_name,
)
self.assertFalse(mock_gocr.called)
self.assertFalse(mock_upd.called)
mock_del.assert_called_once_with(mock.sentinel.rp_uuid)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_update_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_delete_inventory')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_or_create_resource_class')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_ensure_resource_provider')
def test_set_inventory_for_provider_with_custom(self, mock_erp,
mock_gocr, mock_del, mock_upd):
"""Tests that inventory records that include a custom resource class
are passed to the report client's _update_inventory() method and that
the custom resource class is auto-created.
"""
inv_data = {
'VCPU': {
'total': 24,
'reserved': 0,
'min_unit': 1,
'max_unit': 24,
'step_size': 1,
'allocation_ratio': 1.0,
},
'MEMORY_MB': {
'total': 1024,
'reserved': 0,
'min_unit': 1,
'max_unit': 1024,
'step_size': 1,
'allocation_ratio': 1.0,
},
'DISK_GB': {
'total': 100,
'reserved': 0,
'min_unit': 1,
'max_unit': 100,
'step_size': 1,
'allocation_ratio': 1.0,
},
'CUSTOM_IRON_SILVER': {
'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
'step_size': 1,
'allocation_ratio': 1.0,
}
}
self.client.set_inventory_for_provider(
mock.sentinel.rp_uuid,
mock.sentinel.rp_name,
inv_data,
)
mock_erp.assert_called_once_with(
mock.sentinel.rp_uuid,
mock.sentinel.rp_name,
)
mock_gocr.assert_called_once_with('CUSTOM_IRON_SILVER')
mock_upd.assert_called_once_with(
mock.sentinel.rp_uuid,
inv_data,
)
self.assertFalse(mock_del.called)
class TestAllocations(SchedulerReportClientTestCase):
@ -1458,3 +1637,97 @@ class TestAllocations(SchedulerReportClientTestCase):
# With a 409, only the warning should be called
self.assertEqual(0, mock_log.info.call_count)
self.assertEqual(1, mock_log.warning.call_count)
class TestResourceClass(SchedulerReportClientTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_create_resource_class')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
def test_get_or_create_existing(self, mock_get, mock_crc):
resp_mock = mock.Mock(status_code=200)
mock_get.return_value = resp_mock
rc_name = 'CUSTOM_FOO'
result = self.client._get_or_create_resource_class(rc_name)
mock_get.assert_called_once_with(
'/resource_classes/' + rc_name,
version="1.2",
)
self.assertFalse(mock_crc.called)
self.assertEqual(rc_name, result)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_create_resource_class')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
def test_get_or_create_not_existing(self, mock_get, mock_crc):
resp_mock = mock.Mock(status_code=404)
mock_get.return_value = resp_mock
rc_name = 'CUSTOM_FOO'
result = self.client._get_or_create_resource_class(rc_name)
mock_get.assert_called_once_with(
'/resource_classes/' + rc_name,
version="1.2",
)
mock_crc.assert_called_once_with(rc_name)
self.assertEqual(rc_name, result)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_create_resource_class')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
def test_get_or_create_bad_get(self, mock_get, mock_crc):
resp_mock = mock.Mock(status_code=500, text='server error')
mock_get.return_value = resp_mock
rc_name = 'CUSTOM_FOO'
result = self.client._get_or_create_resource_class(rc_name)
mock_get.assert_called_once_with(
'/resource_classes/' + rc_name,
version="1.2",
)
self.assertFalse(mock_crc.called)
self.assertIsNone(result)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.post')
def test_create_resource_class(self, mock_post):
resp_mock = mock.Mock(status_code=201)
mock_post.return_value = resp_mock
rc_name = 'CUSTOM_FOO'
result = self.client._create_resource_class(rc_name)
mock_post.assert_called_once_with(
'/resource_classes',
{'name': rc_name},
version="1.2",
)
self.assertIsNone(result)
@mock.patch('nova.scheduler.client.report.LOG.info')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.post')
def test_create_resource_class_concurrent_write(self, mock_post, mock_log):
resp_mock = mock.Mock(status_code=409)
mock_post.return_value = resp_mock
rc_name = 'CUSTOM_FOO'
result = self.client._create_resource_class(rc_name)
mock_post.assert_called_once_with(
'/resource_classes',
{'name': rc_name},
version="1.2",
)
self.assertIsNone(result)
self.assertIn('Another thread already', mock_log.call_args[0][0])
@mock.patch('nova.scheduler.client.report.LOG.error')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.post')
def test_create_resource_class_bad_name(self, mock_post, mock_log):
resp_mock = mock.Mock(status_code=400, text='errortext')
mock_post.return_value = resp_mock
rc_name = 'FOO'
self.assertRaises(
exception.InvalidResourceClass,
self.client._create_resource_class,
rc_name,
)
mock_post.assert_called_once_with(
'/resource_classes',
{'name': rc_name},
version="1.2",
)
self.assertIn('Failed to create', mock_log.call_args[0][0])

View File

@ -87,11 +87,25 @@ class SchedulerClientTestCase(test.NoDBTestCase):
'context', aggregate)
@mock.patch.object(scheduler_report_client.SchedulerReportClient,
'update_resource_stats')
def test_update_resource_stats(self, mock_update_resource_stats):
'update_compute_node')
def test_update_compute_node(self, mock_update_compute_node):
self.assertIsNone(self.client.reportclient.instance)
self.client.update_resource_stats(mock.sentinel.cn)
self.client.update_compute_node(mock.sentinel.cn)
self.assertIsNotNone(self.client.reportclient.instance)
mock_update_resource_stats.assert_called_once_with(mock.sentinel.cn)
mock_update_compute_node.assert_called_once_with(mock.sentinel.cn)
@mock.patch.object(scheduler_report_client.SchedulerReportClient,
'set_inventory_for_provider')
def test_set_inventory_for_provider(self, mock_set):
self.client.set_inventory_for_provider(
mock.sentinel.rp_uuid,
mock.sentinel.rp_name,
mock.sentinel.inv_data,
)
mock_set.assert_called_once_with(
mock.sentinel.rp_uuid,
mock.sentinel.rp_name,
mock.sentinel.inv_data,
)

View File

@ -774,6 +774,12 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
def get_inventory(self, nodename):
    """Return a dict, keyed by resource class, of inventory information for
    the supplied node.

    :param nodename: Node for which the caller wants inventory information.
    :raises: NotImplementedError when the virt driver does not support
        placement-style inventory reporting; callers (the resource
        tracker's _update()) catch this and fall back to the legacy
        update_compute_node() reporting path.
    """
    raise NotImplementedError()
def get_available_resource(self, nodename):
"""Retrieve resource information.