Allow shuffling hosts with the same best weight

This patch adds a new boolean config option
`[filter_scheduler]shuffle_best_same_weighed_hosts` (default False).

Enabling it will improve scheduling for the case when host_subset_size=1
but the list of weighed hosts contains many hosts with the same best weight
(quite often the case for ironic nodes).
On the other hand, enabling it will also make VM packing on hypervisors
less dense even when host weighing is completely disabled.

Change-Id: Icee137e15f264da59a1bdc1dc1ecfeaac82b98c6
Closes-Bug: #1711184
Pavlo Shchelokovskyy 2017-08-16 08:50:49 +00:00
parent be7fb7fe7f
commit 3759f105a7
4 changed files with 77 additions and 0 deletions


@@ -477,6 +477,21 @@ Possible values:
for hosts with group soft anti-affinity. Only a positive value are
meaningful, as negative values would make this behave as a soft affinity
weigher.
"""),
    cfg.BoolOpt(
        "shuffle_best_same_weighed_hosts",
        default=False,
        help="""
Enable spreading the instances between hosts with the same best weight.
Enabling it is beneficial for cases when host_subset_size is 1
(default), but there is a large number of hosts with the same maximal weight.
This scenario is common in Ironic deployments where there are typically many
baremetal nodes with identical weights returned to the scheduler.
In such a case enabling this option will reduce contention and the chances of
rescheduling events.
At the same time it will make the instance packing (even in the unweighed
case) less dense.
"""),
    # TODO(mikal): replace this option with something involving host aggregates
    cfg.ListOpt("isolated_images",
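For reference, enabling the new option is a one-line change in nova.conf (a
minimal sketch; the option group and default follow the definition above):

    [filter_scheduler]
    # Defaults to False; when True, hosts sharing the best weight are shuffled
    # before the scheduler picks from the head of the weighed list.
    shuffle_best_same_weighed_hosts = True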


@@ -302,6 +302,16 @@ class FilterScheduler(driver.Scheduler):
        weighed_hosts = self.host_manager.get_weighed_hosts(filtered_hosts,
            spec_obj)
        if CONF.filter_scheduler.shuffle_best_same_weighed_hosts:
            # NOTE(pas-ha) Randomize best hosts, relying on weighed_hosts
            # being already sorted by weight in descending order.
            # This decreases possible contention and rescheduling attempts
            # when there is a large number of hosts having the same best
            # weight, especially so when host_subset_size is 1 (default)
            best_hosts = [w for w in weighed_hosts
                          if w.weight == weighed_hosts[0].weight]
            random.shuffle(best_hosts)
            weighed_hosts = best_hosts + weighed_hosts[len(best_hosts):]
        # Strip off the WeighedHost wrapper class...
        weighed_hosts = [h.obj for h in weighed_hosts]
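To make the effect of the block above concrete, here is a standalone sketch
(illustration only, not Nova code; the namedtuple merely stands in for nova's
WeighedHost wrapper):

    import collections
    import random

    # Stand-in for nova's WeighedHost wrapper (illustrative only).
    WeighedHost = collections.namedtuple('WeighedHost', ['obj', 'weight'])

    # get_weighed_hosts() returns hosts sorted by weight, best first.
    weighed_hosts = [WeighedHost('node-a', 1.0), WeighedHost('node-b', 1.0),
                     WeighedHost('node-c', 1.0), WeighedHost('node-d', 0.5)]

    # Shuffle only the leading run of hosts sharing the best weight, then
    # reattach the tail so the descending-by-weight ordering is preserved.
    best_hosts = [w for w in weighed_hosts
                  if w.weight == weighed_hosts[0].weight]
    random.shuffle(best_hosts)
    weighed_hosts = best_hosts + weighed_hosts[len(best_hosts):]

    # With host_subset_size=1 the scheduler effectively takes the head of this
    # list, which is now a random one of node-a/node-b/node-c rather than
    # always node-a.
    print([w.obj for w in weighed_hosts])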


@@ -461,6 +461,43 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
        # weighed hosts and thus return [hs1, hs2]
        self.assertEqual([hs1, hs2], results)

    @mock.patch('random.shuffle', side_effect=lambda x: x.reverse())
    @mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
    @mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
    def test_get_sorted_hosts_shuffle_top_equal(self, mock_filt, mock_weighed,
                                                mock_shuffle):
        """Tests that top best weighed hosts are shuffled when enabled.
        """
        self.flags(host_subset_size=1, group='filter_scheduler')
        self.flags(shuffle_best_same_weighed_hosts=True,
                   group='filter_scheduler')
        hs1 = mock.Mock(spec=host_manager.HostState, host='host1')
        hs2 = mock.Mock(spec=host_manager.HostState, host='host2')
        hs3 = mock.Mock(spec=host_manager.HostState, host='host3')
        hs4 = mock.Mock(spec=host_manager.HostState, host='host4')
        all_host_states = [hs1, hs2, hs3, hs4]
        mock_weighed.return_value = [
            weights.WeighedHost(hs1, 1.0),
            weights.WeighedHost(hs2, 1.0),
            weights.WeighedHost(hs3, 0.5),
            weights.WeighedHost(hs4, 0.5),
        ]
        results = self.driver._get_sorted_hosts(mock.sentinel.spec,
            all_host_states, mock.sentinel.index)
        mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
            mock.sentinel.index)
        mock_weighed.assert_called_once_with(mock_filt.return_value,
            mock.sentinel.spec)
        # We override random.shuffle() to reverse the list, thus the
        # head of the list should become [host#2, host#1]
        # (as the host_subset_size is 1) and the tail should stay the same.
        self.assertEqual([hs2, hs1, hs3, hs4], results)

    def test_cleanup_allocations(self):
        instance_uuids = []
        # Check we don't do anything if there's no instance UUIDs to cleanup
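Aside: the side_effect used in the test above turns shuffling into a
deterministic in-place reversal, which is what makes the final ordering
assertable. A minimal sketch of the same trick in isolation:

    import random
    from unittest import mock

    hosts = ['host1', 'host2', 'host3', 'host4']
    # Replace random.shuffle with an in-place reversal, mirroring
    # side_effect=lambda x: x.reverse() in the test above.
    with mock.patch('random.shuffle', side_effect=lambda x: x.reverse()):
        random.shuffle(hosts)
    print(hosts)  # ['host4', 'host3', 'host2', 'host1'] -- fully deterministic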


@@ -0,0 +1,15 @@
---
features:
  - |
    Added a new boolean configuration option
    ``[filter_scheduler]shuffle_best_same_weighed_hosts`` (default is False).
    Enabling it will spread instances between hosts that have the same weight
    according to the request spec. It is mostly useful when the
    ``[filter_scheduler]host_subset_size`` option has its default value of 1,
    but available hosts have the same weight (e.g. ironic nodes using resource
    classes). In this case enabling it will decrease the number of
    rescheduling events.
    On the other hand, enabling it will make packing of VMs on hypervisors
    less dense even when host weighing is disabled.