Add reserved_host_cpus option
While discussing the blueprint placement-claims, we reached a consensus on using reserved values to help operators provide overhead limits for all instances. Since we only had two options, for RAM and disk, we missed a possible CPU one. This patch adds that option. Change-Id: Ic3162f0ab39a703798b4a2a7e860fed628da1e7e Partially-Implements: blueprint placement-claims
This commit is contained in:
parent
7e383b68d3
commit
c102600d59
@ -982,7 +982,7 @@ class ResourceTracker(object):
|
||||
# set some initial values, reserve room for host/hypervisor:
|
||||
cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
|
||||
cn.memory_mb_used = CONF.reserved_host_memory_mb
|
||||
cn.vcpus_used = 0
|
||||
cn.vcpus_used = CONF.reserved_host_cpus
|
||||
cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
|
||||
cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
|
||||
cn.current_workload = 0
|
||||
|
@ -393,6 +393,20 @@ Possible values:
|
||||
|
||||
* Any positive integer representing amount of memory in MB to reserve
|
||||
for the host.
|
||||
"""),
|
||||
cfg.IntOpt('reserved_host_cpus',
|
||||
default=0,
|
||||
min=0,
|
||||
help="""
|
||||
Number of physical CPUs to reserve for the host. The host resources usage is
|
||||
reported back to the scheduler continuously from nova-compute running on the
|
||||
compute node. To prevent the host CPU from being considered as available,
|
||||
this option is used to reserve random pCPU(s) for the host.
|
||||
|
||||
Possible values:
|
||||
|
||||
* Any positive integer representing number of physical CPUs to reserve
|
||||
for the host.
|
||||
"""),
|
||||
]
|
||||
|
||||
|
@ -108,7 +108,7 @@ def _compute_node_to_inventory_dict(compute_node):
|
||||
if compute_node.vcpus > 0:
|
||||
result[VCPU] = {
|
||||
'total': compute_node.vcpus,
|
||||
'reserved': 0,
|
||||
'reserved': CONF.reserved_host_cpus,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.vcpus,
|
||||
'step_size': 1,
|
||||
|
@ -453,7 +453,8 @@ class BaseTestCase(test.NoDBTestCase):
|
||||
self.rt = None
|
||||
self.flags(my_ip='1.1.1.1',
|
||||
reserved_host_disk_mb=0,
|
||||
reserved_host_memory_mb=0)
|
||||
reserved_host_memory_mb=0,
|
||||
reserved_host_cpus=0)
|
||||
|
||||
def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
|
||||
estimate_overhead=overhead_zero):
|
||||
@ -564,11 +565,12 @@ class TestUpdateAvailableResources(BaseTestCase):
|
||||
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
|
||||
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
|
||||
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
|
||||
def test_no_instances_no_migrations_reserved_disk_and_ram(
|
||||
def test_no_instances_no_migrations_reserved_disk_ram_and_cpu(
|
||||
self, get_mock, migr_mock, get_cn_mock, pci_mock,
|
||||
instance_pci_mock):
|
||||
self.flags(reserved_host_disk_mb=1024,
|
||||
reserved_host_memory_mb=512)
|
||||
reserved_host_memory_mb=512,
|
||||
reserved_host_cpus=1)
|
||||
self._setup_rt()
|
||||
|
||||
get_mock.return_value = []
|
||||
@ -585,7 +587,7 @@ class TestUpdateAvailableResources(BaseTestCase):
|
||||
'local_gb': 6,
|
||||
'free_ram_mb': 0, # 512MB avail - 512MB reserved
|
||||
'memory_mb_used': 512, # 0MB used + 512MB reserved
|
||||
'vcpus_used': 0,
|
||||
'vcpus_used': 1,
|
||||
'local_gb_used': 1, # 0GB used + 1 GB reserved
|
||||
'memory_mb': 512,
|
||||
'current_workload': 0,
|
||||
|
@ -592,13 +592,14 @@ class TestComputeNodeToInventoryDict(test.NoDBTestCase):
|
||||
|
||||
self.flags(reserved_host_memory_mb=1000)
|
||||
self.flags(reserved_host_disk_mb=200)
|
||||
self.flags(reserved_host_cpus=1)
|
||||
|
||||
result = report._compute_node_to_inventory_dict(compute_node)
|
||||
|
||||
expected = {
|
||||
'VCPU': {
|
||||
'total': compute_node.vcpus,
|
||||
'reserved': 0,
|
||||
'reserved': CONF.reserved_host_cpus,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.vcpus,
|
||||
'step_size': 1,
|
||||
@ -652,7 +653,7 @@ class TestInventory(SchedulerReportClientTestCase):
|
||||
expected_inv_data = {
|
||||
'VCPU': {
|
||||
'total': 8,
|
||||
'reserved': 0,
|
||||
'reserved': CONF.reserved_host_cpus,
|
||||
'min_unit': 1,
|
||||
'max_unit': 8,
|
||||
'step_size': 1,
|
||||
@ -897,7 +898,7 @@ There was a conflict when trying to complete your request.
|
||||
'inventories': {
|
||||
'VCPU': {
|
||||
'total': 8,
|
||||
'reserved': 0,
|
||||
'reserved': CONF.reserved_host_cpus,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.vcpus,
|
||||
'step_size': 1,
|
||||
@ -937,7 +938,7 @@ There was a conflict when trying to complete your request.
|
||||
'inventories': {
|
||||
'VCPU': {
|
||||
'total': 8,
|
||||
'reserved': 0,
|
||||
'reserved': CONF.reserved_host_cpus,
|
||||
'min_unit': 1,
|
||||
'max_unit': compute_node.vcpus,
|
||||
'step_size': 1,
|
||||
|
@ -0,0 +1,6 @@
|
||||
---
|
||||
features:
|
||||
- A new configuration option ``reserved_host_cpus`` has been added for
|
||||
compute services. It helps operators to provide how many physical CPUs they
|
||||
would like to reserve for the hypervisor separately from what the instances
|
||||
use.
|
Loading…
Reference in New Issue
Block a user