From 2588af87c862cfd02d860f6b860381e907b279ff Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 30 Aug 2018 17:57:24 -0400 Subject: [PATCH] Don't persist zero allocation ratios in ResourceTracker The ComputeNode object itself has a facade which provides the actual default values for cpu, disk and ram allocation ratios when the config option values are left at the default (0.0). When we initially create a ComputeNode in the ResourceTracker, the *_allocation_ratio values in the object are going to be unset, and then _copy_resources, called from _init_compute_node, will set the values to the config option values, again, defaulted to 0.0, but the ComputeNode object, after calling create() or save(), will change those *on the object* to the non-0.0 values we actually know and love (16.0 for cpu, 1.5 for ram, and 1.0 for disk). During the update_available_resource periodic, we'll again go through _init_compute_node and _copy_resources in the ResourceTracker which will set the configured values (default of 0.0) onto the ComputeNode object, which makes the _resource_change method, called from _update, return True and trigger a ComputeNode.save() call from the _update method. At that point we're *persisting* the 0.0 allocation ratios in the database, even though ComputeNode.save will change them to their non-0.0 default values *on the object* because of the _from_db_object call at the end of ComputeNode.save. So even if the ComputeNode object allocation ratio values are the non-0.0 defaults, we'll *always* update the database on every periodic even if nothing else changed in inventory. This change modifies the _copy_resources method to only update the ComputeNode fields if the configured ratios are not the 0.0 default. 
Change-Id: I43a23a3290db0c835fed01b8d6a38962dc61adce Related-Bug: #1789654 --- nova/compute/resource_tracker.py | 29 ++++++++++++--- .../unit/compute/test_resource_tracker.py | 35 +++++++++++++++++++ 2 files changed, 60 insertions(+), 4 deletions(-) diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index 23f1bc36641b..258556c3a025 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -617,10 +617,31 @@ class ResourceTracker(object): stats.digest_stats(resources.get('stats')) compute_node.stats = stats - # update the allocation ratios for the related ComputeNode object - compute_node.ram_allocation_ratio = self.ram_allocation_ratio - compute_node.cpu_allocation_ratio = self.cpu_allocation_ratio - compute_node.disk_allocation_ratio = self.disk_allocation_ratio + # Update the allocation ratios for the related ComputeNode object + # but only if the configured values are not the default 0.0; the + # ComputeNode._from_db_object method takes care of providing default + # allocation ratios when the config is left at the 0.0 default, so + # we'll really end up with something like a + # ComputeNode.cpu_allocation_ratio of 16.0, not 0.0. We want to avoid + # resetting the ComputeNode fields to 0.0 because that will make + # the _resource_change method think something changed when really it + # didn't. + # TODO(mriedem): Will this break any scenarios where an operator is + # trying to *reset* the allocation ratios by changing config from + # non-0.0 back to 0.0? Maybe we should only do this if the fields on + # the ComputeNode object are not already set. For example, let's say + # the cpu_allocation_ratio config was 1.0 and then the operator wants + # to get back to the default (16.0 via the facade), and to do that they + # change the config back to 0.0 (or just unset the config option). 
+ # Should we support that or treat these config options as "sticky" in + # that once you start setting them, you can't go back to the implied + # defaults by unsetting or resetting to 0.0? Sort of like how + # per-tenant quota is sticky once you change it in the API. + for res in ('cpu', 'disk', 'ram'): + attr = '%s_allocation_ratio' % res + conf_alloc_ratio = getattr(self, attr) + if conf_alloc_ratio != 0.0: + setattr(compute_node, attr, conf_alloc_ratio) # now copy rest to compute_node compute_node.update_from_virt_driver(resources) diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py index 7dc54493d6c4..1a30bd337196 100644 --- a/nova/tests/unit/compute/test_resource_tracker.py +++ b/nova/tests/unit/compute/test_resource_tracker.py @@ -1482,6 +1482,41 @@ class TestUpdateComputeNode(BaseTestCase): self.assertRaises(exc.ComputeHostNotFound, self.rt.get_node_uuid, 'foo') + def test_copy_resources_no_update_allocation_ratios(self): + """Tests that a ComputeNode object's allocation ratio fields are + not set if the configured allocation ratio values are default 0.0. + """ + self._setup_rt() + compute = _COMPUTE_NODE_FIXTURES[0].obj_clone() + compute.obj_reset_changes() # make sure we start clean + self.rt._copy_resources( + compute, self.driver_mock.get_available_resource.return_value) + # Assert that the ComputeNode fields were not changed. + changes = compute.obj_get_changes() + for res in ('cpu', 'disk', 'ram'): + attr_name = '%s_allocation_ratio' % res + self.assertNotIn(attr_name, changes) + + def test_copy_resources_update_allocation_ratios_from_config(self): + """Tests that a ComputeNode object's allocation ratio fields are + set if the configured allocation ratio values are not 0.0. + """ + # Set explicit ratio config values to 1.0 (the default is 0.0). 
+ for res in ('cpu', 'disk', 'ram'): + opt_name = '%s_allocation_ratio' % res + CONF.set_override(opt_name, 1.0) + self._setup_rt() + compute = _COMPUTE_NODE_FIXTURES[0].obj_clone() + compute.obj_reset_changes() # make sure we start clean + self.rt._copy_resources( + compute, self.driver_mock.get_available_resource.return_value) + # Assert that the ComputeNode fields were changed. + changes = compute.obj_get_changes() + for res in ('cpu', 'disk', 'ram'): + attr_name = '%s_allocation_ratio' % res + self.assertIn(attr_name, changes) + self.assertEqual(1.0, changes[attr_name]) + class TestNormalizatInventoryFromComputeNode(test.NoDBTestCase): def test_normalize_libvirt(self):