From c102600d59ed6cd8751d8012c138cd318866910a Mon Sep 17 00:00:00 2001
From: Sylvain Bauza
Date: Wed, 26 Apr 2017 15:50:49 +0200
Subject: [PATCH] Add reserved_host_cpus option

While discussing the placement-claims blueprint, we reached a consensus on
using reserved values to help operators provide overhead limits for all the
instances. Since we only had two such options, for RAM and disk, we were
missing a CPU one. This patch adds it.

Change-Id: Ic3162f0ab39a703798b4a2a7e860fed628da1e7e
Partially-Implements: blueprint placement-claims
---
 nova/compute/resource_tracker.py                   |  2 +-
 nova/conf/compute.py                               | 14 ++++++++++++++
 nova/scheduler/client/report.py                    |  2 +-
 nova/tests/unit/compute/test_resource_tracker.py   | 10 ++++++----
 nova/tests/unit/scheduler/client/test_report.py    |  9 +++++----
 .../notes/reserved_host_cpus-e7de4aa9b89bd947.yaml |  6 ++++++
 6 files changed, 33 insertions(+), 10 deletions(-)
 create mode 100644 releasenotes/notes/reserved_host_cpus-e7de4aa9b89bd947.yaml

diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 4fdefa3c7f60..a753488a464a 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -982,7 +982,7 @@ class ResourceTracker(object):
         # set some initial values, reserve room for host/hypervisor:
         cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
         cn.memory_mb_used = CONF.reserved_host_memory_mb
-        cn.vcpus_used = 0
+        cn.vcpus_used = CONF.reserved_host_cpus
         cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
         cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
         cn.current_workload = 0
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index 91d6a74a7aff..c13851eb082b 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -393,6 +393,20 @@ Possible values:
 
 * Any positive integer representing amount of memory in MB to reserve
   for the host.
+"""),
+    cfg.IntOpt('reserved_host_cpus',
+        default=0,
+        min=0,
+        help="""
+Number of physical CPUs to reserve for the host. The host resources usage is
+reported back to the scheduler continuously from nova-compute running on the
+compute node. To prevent the host CPU from being considered as available,
+this option is used to reserve random pCPU(s) for the host.
+
+Possible values:
+
+* Any positive integer representing number of physical CPUs to reserve
+  for the host.
"""), ] diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py index 7b45d4dd0dff..99b5f1d2996c 100644 --- a/nova/scheduler/client/report.py +++ b/nova/scheduler/client/report.py @@ -108,7 +108,7 @@ def _compute_node_to_inventory_dict(compute_node): if compute_node.vcpus > 0: result[VCPU] = { 'total': compute_node.vcpus, - 'reserved': 0, + 'reserved': CONF.reserved_host_cpus, 'min_unit': 1, 'max_unit': compute_node.vcpus, 'step_size': 1, diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py index 5be3f99a53c3..b5fdd02b7388 100644 --- a/nova/tests/unit/compute/test_resource_tracker.py +++ b/nova/tests/unit/compute/test_resource_tracker.py @@ -453,7 +453,8 @@ class BaseTestCase(test.NoDBTestCase): self.rt = None self.flags(my_ip='1.1.1.1', reserved_host_disk_mb=0, - reserved_host_memory_mb=0) + reserved_host_memory_mb=0, + reserved_host_cpus=0) def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES, estimate_overhead=overhead_zero): @@ -564,11 +565,12 @@ class TestUpdateAvailableResources(BaseTestCase): @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename') @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') @mock.patch('nova.objects.InstanceList.get_by_host_and_node') - def test_no_instances_no_migrations_reserved_disk_and_ram( + def test_no_instances_no_migrations_reserved_disk_ram_and_cpu( self, get_mock, migr_mock, get_cn_mock, pci_mock, instance_pci_mock): self.flags(reserved_host_disk_mb=1024, - reserved_host_memory_mb=512) + reserved_host_memory_mb=512, + reserved_host_cpus=1) self._setup_rt() get_mock.return_value = [] @@ -585,7 +587,7 @@ class TestUpdateAvailableResources(BaseTestCase): 'local_gb': 6, 'free_ram_mb': 0, # 512MB avail - 512MB reserved 'memory_mb_used': 512, # 0MB used + 512MB reserved - 'vcpus_used': 0, + 'vcpus_used': 1, 'local_gb_used': 1, # 0GB used + 1 GB reserved 'memory_mb': 512, 'current_workload': 0, diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py index ab9e0a388038..48eb149196e7 100644 --- a/nova/tests/unit/scheduler/client/test_report.py +++ b/nova/tests/unit/scheduler/client/test_report.py @@ -592,13 +592,14 @@ class TestComputeNodeToInventoryDict(test.NoDBTestCase): self.flags(reserved_host_memory_mb=1000) self.flags(reserved_host_disk_mb=200) + self.flags(reserved_host_cpus=1) result = report._compute_node_to_inventory_dict(compute_node) expected = { 'VCPU': { 'total': compute_node.vcpus, - 'reserved': 0, + 'reserved': CONF.reserved_host_cpus, 'min_unit': 1, 'max_unit': compute_node.vcpus, 'step_size': 1, @@ -652,7 +653,7 @@ class TestInventory(SchedulerReportClientTestCase): expected_inv_data = { 'VCPU': { 'total': 8, - 'reserved': 0, + 'reserved': CONF.reserved_host_cpus, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, @@ -897,7 +898,7 @@ There was a conflict when trying to complete your request. 'inventories': { 'VCPU': { 'total': 8, - 'reserved': 0, + 'reserved': CONF.reserved_host_cpus, 'min_unit': 1, 'max_unit': compute_node.vcpus, 'step_size': 1, @@ -937,7 +938,7 @@ There was a conflict when trying to complete your request. 
             'inventories': {
                 'VCPU': {
                     'total': 8,
-                    'reserved': 0,
+                    'reserved': CONF.reserved_host_cpus,
                     'min_unit': 1,
                     'max_unit': compute_node.vcpus,
                     'step_size': 1,
diff --git a/releasenotes/notes/reserved_host_cpus-e7de4aa9b89bd947.yaml b/releasenotes/notes/reserved_host_cpus-e7de4aa9b89bd947.yaml
new file mode 100644
index 000000000000..fb530955810a
--- /dev/null
+++ b/releasenotes/notes/reserved_host_cpus-e7de4aa9b89bd947.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - A new configuration option ``reserved_host_cpus`` has been added for
+    compute services. It helps operators to specify how many physical CPUs
+    they would like to reserve for the hypervisor separately from what the
+    instances use.
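
For illustration only, and not part of the patch itself: a minimal sketch of the
VCPU inventory shape that nova-compute reports to the placement service once
this change is applied, assuming a compute node with 8 physical CPUs and
reserved_host_cpus=2. The standalone helper name vcpu_inventory and the 16.0
allocation ratio below are assumptions made for the example, not code from
this change.

    # Sketch only -- mirrors the effect of this patch on the VCPU inventory
    # sent to placement; assumptions: helper name and 16.0 allocation ratio.
    def vcpu_inventory(total_vcpus, reserved_host_cpus, allocation_ratio=16.0):
        # Placement treats (total - reserved) * allocation_ratio as the usable
        # VCPU capacity, so reserving host CPUs shrinks what instances can claim.
        return {
            'total': total_vcpus,
            'reserved': reserved_host_cpus,
            'min_unit': 1,
            'max_unit': total_vcpus,
            'step_size': 1,
            'allocation_ratio': allocation_ratio,
        }

    print(vcpu_inventory(8, 2))
    # {'total': 8, 'reserved': 2, 'min_unit': 1, 'max_unit': 8,
    #  'step_size': 1, 'allocation_ratio': 16.0}

With the default of reserved_host_cpus=0, the reported inventory is the same
as before this patch.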