Update ComputeNode values with disk allocation ratios in the RT

Now that we have added the field for persisting the disk allocation ratio, we
can have the ResourceTracker persist it by adding it to the local ComputeNode
object, which is persisted by calling the _update() method.
It will then send 0.0 by default unless the operator explicitly specified an
allocation ratio in the compute nova.conf.

Thanks to the ComputeNode object hydration on the scheduler side, the facade
will make sure that if a default 0.0 is provided by either a compute node or
by the scheduler's nova.conf, it will actually get the original allocation
ratios (i.e. 1.0 for disk).
Since the Scheduler reads the same RT opt but goes through the ComputeNode
object, it will also get the Facade returning 1.0 unless the operator
explicitly provided other ratios in the scheduler's nova.conf.

Amending the release note now that the behaviour is changing.

DocImpact Disk alloc ratio is now per computenode
UpgradeImpact

Change-Id: Ief6fa32429d58b80e70029ed67c7f42e0bdc986d
Implements: blueprint disk-allocation-ratio-to-rt
This commit is contained in:
Sylvain Bauza 2016-02-09 17:35:47 +01:00 committed by John Garbutt
parent 1d92ef40fd
commit ad6654eaa7
3 changed files with 34 additions and 3 deletions

View File

@ -82,7 +82,7 @@ allocation_ratio_opts = [
'set on the scheduler node(s) will be used '
'and defaulted to 1.5'),
cfg.FloatOpt('disk_allocation_ratio',
default=1.0,
default=0.0,
help='This is the virtual disk to physical disk allocation ratio used '
'by the disk_filter.py script to determine if a host has '
'sufficient disk space to fit a requested instance. A ratio '
@ -92,7 +92,10 @@ allocation_ratio_opts = [
'use the entire virtual disk,such as sparse or compressed '
'images. It can be set to a value between 0.0 and 1.0 in order '
'to preserve a percentage of the disk for uses other than '
'instances'),
'instances.'
'NOTE: This can be set per-compute, or if set to 0.0, the value '
'set on the scheduler node(s) will be used '
'and defaulted to 1.0'),
]
@ -151,6 +154,7 @@ class ResourceTracker(object):
self.scheduler_client = scheduler_client.SchedulerClient()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance_ref, limits=None):
@ -445,6 +449,7 @@ class ResourceTracker(object):
# update the allocation ratios for the related ComputeNode object
self.compute_node.ram_allocation_ratio = self.ram_allocation_ratio
self.compute_node.cpu_allocation_ratio = self.cpu_allocation_ratio
self.compute_node.disk_allocation_ratio = self.disk_allocation_ratio
# now copy rest to compute_node
self.compute_node.update_from_virt_driver(resources)

View File

@ -982,6 +982,7 @@ class TestInitComputeNode(BaseTestCase):
get_mock.side_effect = exc.NotFound
cpu_alloc_ratio = 1.0
ram_alloc_ratio = 1.0
disk_alloc_ratio = 1.0
resources = {
'host_ip': '1.1.1.1',
@ -1024,11 +1025,13 @@ class TestInitComputeNode(BaseTestCase):
# NOTE(sbauza): ResourceTracker adds CONF allocation ratios
ram_allocation_ratio=ram_alloc_ratio,
cpu_allocation_ratio=cpu_alloc_ratio,
disk_allocation_ratio=disk_alloc_ratio,
)
# Forcing the flags to the values we know
self.rt.ram_allocation_ratio = ram_alloc_ratio
self.rt.cpu_allocation_ratio = cpu_alloc_ratio
self.rt.disk_allocation_ratio = disk_alloc_ratio
self.rt._init_compute_node(mock.sentinel.ctx, resources)
@ -1040,7 +1043,8 @@ class TestInitComputeNode(BaseTestCase):
self.rt.compute_node))
def test_copy_resources_adds_allocation_ratios(self):
self.flags(cpu_allocation_ratio=4.0, ram_allocation_ratio=3.0)
self.flags(cpu_allocation_ratio=4.0, ram_allocation_ratio=3.0,
disk_allocation_ratio=2.0)
self._setup_rt()
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
@ -1050,6 +1054,7 @@ class TestInitComputeNode(BaseTestCase):
self.rt._copy_resources(resources)
self.assertEqual(4.0, self.rt.compute_node.cpu_allocation_ratio)
self.assertEqual(3.0, self.rt.compute_node.ram_allocation_ratio)
self.assertEqual(2.0, self.rt.compute_node.disk_allocation_ratio)
class TestUpdateComputeNode(BaseTestCase):
@ -1082,6 +1087,7 @@ class TestUpdateComputeNode(BaseTestCase):
running_vms=0,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
)
self.rt.compute_node = compute
self.rt._update(mock.sentinel.ctx)
@ -1129,6 +1135,7 @@ class TestUpdateComputeNode(BaseTestCase):
running_vms=0,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
)
self.rt.compute_node = compute
self.rt._update(mock.sentinel.ctx)

View File

@ -0,0 +1,19 @@
---
feature:
- On Mitaka compute nodes, if you want to modify the default disk allocation
ratio of 1.0, you should set that on every compute node, rather than
setting it in the scheduler. This means the disk, RAM and CPU allocation
ratios now all work in the same way.
upgrade:
- For Liberty compute nodes, the disk_allocation_ratio works as before, you
must set it on the scheduler if you want to change it.
For Mitaka compute nodes, the disk_allocation_ratio set on the compute
nodes will be used only if the configuration is not set on the scheduler.
This is to allow, for backwards compatibility, the ability to still
override the disk allocation ratio by setting the configuration on the
scheduler node.
In Newton, we plan to remove the ability to set the disk allocation ratio
on the scheduler, at which point the compute nodes will always define the
disk allocation ratio, and pass that up to the scheduler. None of this
changes the default disk allocation ratio of 1.0. This matches the
behaviour of the RAM and CPU allocation ratios.