Implement update_provider_tree

For now, we'll just set the available RAM, disk and vCPU inventory.
This will allow allocation ratios to be set at the placement
inventory level. At the same time, driver capabilities will be
exposed through traits.
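
For reference, the base ComputeDriver's _get_allocation_ratios helper
(used below) roughly resolves ratios in this order: an explicitly
configured *_allocation_ratio option wins, otherwise a ratio already
present on the placement inventory is preserved, otherwise the
initial_*_allocation_ratio default applies. A simplified sketch, not
the verbatim nova implementation:

    import os_resource_classes as orc

    import nova.conf

    CONF = nova.conf.CONF

    def _get_allocation_ratios(inventory):
        # Maps each CONF option prefix to its placement resource class.
        keys = {'cpu': orc.VCPU, 'ram': orc.MEMORY_MB, 'disk': orc.DISK_GB}
        ratios = {}
        for res, rc in keys.items():
            conf_ratio = getattr(CONF, '%s_allocation_ratio' % res)
            if conf_ratio:
                # The operator explicitly configured a ratio on the compute.
                ratios[rc] = conf_ratio
            elif rc in inventory:
                # Preserve whatever was set at the placement inventory
                # level, e.g. through the placement API.
                ratios[rc] = inventory[rc]['allocation_ratio']
            else:
                ratios[rc] = getattr(
                    CONF, 'initial_%s_allocation_ratio' % res)
        return ratios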

Once #1784020 is addressed, we may skip reporting shared storage
capacity.

We're also skipping CPU features for a couple of reasons:
* only a few features are defined by os-traits, and even fewer are
  exposed by Windows APIs (see the sketch after this list). One
  option would be to call cpuid to get a more exhaustive list of
  features, but that would require a C Python extension or a
  separate executable.
* we can't tell for sure which CPU features will actually be exposed
  to the guest, especially when limiting guest CPU features in order
  to support live migration in heterogeneous deployments.
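
Illustrative sketch only (not part of this change): the handful of
CPU features that Windows does expose can be probed through the
documented IsProcessorFeaturePresent API and mapped onto os-traits
constants, e.g.:

    import ctypes

    import os_traits

    # Documented PF_* processor feature constants from winnt.h.
    PF_MMX_INSTRUCTIONS_AVAILABLE = 3
    PF_XMMI_INSTRUCTIONS_AVAILABLE = 6     # SSE
    PF_XMMI64_INSTRUCTIONS_AVAILABLE = 10  # SSE2

    _FEATURE_TRAITS = {
        PF_MMX_INSTRUCTIONS_AVAILABLE: os_traits.HW_CPU_X86_MMX,
        PF_XMMI_INSTRUCTIONS_AVAILABLE: os_traits.HW_CPU_X86_SSE,
        PF_XMMI64_INSTRUCTIONS_AVAILABLE: os_traits.HW_CPU_X86_SSE2,
    }

    def get_host_cpu_traits():
        # Returns the os-traits names for the features the host CPU
        # reports; only works on Windows (ctypes.windll).
        kernel32 = ctypes.windll.kernel32
        return [trait for feature, trait in _FEATURE_TRAITS.items()
                if kernel32.IsProcessorFeaturePresent(feature)]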

NOTE(mriedem): This is a cherry-pick of the out-of-tree hyperv
driver change https://review.opendev.org/655123/. The only
modification is that the os_resource_classes imports are grouped
with the third party imports rather than the nova imports.

Change-Id: Ibfb879751bba038054ba7852dc49f6d9e67027d6
Author: Lucian Petrut
Date: 2019-04-23 04:01:35 -07:00
Committed-by: Matt Riedemann
Parent: 91647a9b71
Commit: 544738d97b
4 changed files with 105 additions and 0 deletions

nova/tests/unit/virt/hyperv/test_driver.py

@@ -486,3 +486,17 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
            mock.sentinel.all_instances)
        self.driver._imagecache.update.assert_called_once_with(
            mock.sentinel.context, mock.sentinel.all_instances)

    @mock.patch.object(driver.HyperVDriver, '_get_allocation_ratios')
    def test_update_provider_tree(self, mock_get_alloc_ratios):
        mock_ptree = mock.Mock()
        mock_inventory = mock_ptree.data.return_value.inventory

        self.driver.update_provider_tree(
            mock_ptree, mock.sentinel.nodename, mock.sentinel.allocations)

        mock_get_alloc_ratios.assert_called_once_with(mock_inventory)
        self.driver._hostops.update_provider_tree.assert_called_once_with(
            mock_ptree, mock.sentinel.nodename,
            mock_get_alloc_ratios.return_value,
            mock.sentinel.allocations)

nova/tests/unit/virt/hyperv/test_hostops.py

@@ -16,6 +16,7 @@
import datetime

import mock
import os_resource_classes as orc
from os_win import constants as os_win_const
from oslo_config import cfg
from oslo_serialization import jsonutils
@@ -268,3 +269,48 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
            str(mock_time()), str(tdelta))
        self.assertEqual(expected, response)

    @mock.patch.object(hostops.HostOps, 'get_available_resource')
    def test_update_provider_tree(self, mock_get_avail_res):
        resources = mock.MagicMock()
        allocation_ratios = mock.MagicMock()
        provider_tree = mock.Mock()

        mock_get_avail_res.return_value = resources

        self.flags(reserved_host_disk_mb=1)

        exp_inventory = {
            orc.VCPU: {
                'total': resources['vcpus'],
                'min_unit': 1,
                'max_unit': resources['vcpus'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.VCPU],
                'reserved': CONF.reserved_host_cpus,
            },
            orc.MEMORY_MB: {
                'total': resources['memory_mb'],
                'min_unit': 1,
                'max_unit': resources['memory_mb'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.MEMORY_MB],
                'reserved': CONF.reserved_host_memory_mb,
            },
            orc.DISK_GB: {
                'total': resources['local_gb'],
                'min_unit': 1,
                'max_unit': resources['local_gb'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.DISK_GB],
                'reserved': 1,
            },
        }

        self._hostops.update_provider_tree(
            provider_tree, mock.sentinel.node_name, allocation_ratios,
            mock.sentinel.allocations)

        provider_tree.update_inventory.assert_called_once_with(
            mock.sentinel.node_name,
            exp_inventory)

nova/virt/hyperv/driver.py

@@ -369,3 +369,10 @@ class HyperVDriver(driver.ComputeDriver):
    def unrescue(self, instance, network_info):
        self._vmops.unrescue_instance(instance)

    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        inventory = provider_tree.data(nodename).inventory
        alloc_ratios = self._get_allocation_ratios(inventory)

        self._hostops.update_provider_tree(
            provider_tree, nodename, alloc_ratios, allocations)
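
For context, a minimal sketch of the ProviderTree interface that the
method above relies on (nova.compute.provider_tree), under the
assumption of a hypothetical 'hyperv-host' provider name:

    from oslo_utils import uuidutils

    from nova.compute import provider_tree

    tree = provider_tree.ProviderTree()
    tree.new_root('hyperv-host', uuidutils.generate_uuid())

    # A freshly created provider starts with empty inventory; this is
    # what update_provider_tree() reads in order to preserve allocation
    # ratios already set in placement.
    assert tree.data('hyperv-host').inventory == {}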

nova/virt/hyperv/hostops.py

@@ -20,12 +20,14 @@ import datetime
import platform
import time

import os_resource_classes as orc
from os_win import constants as os_win_const
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import units

from nova.compute import utils as compute_utils
import nova.conf
from nova.i18n import _
from nova import objects
@@ -251,3 +253,39 @@ class HostOps(object):
        return "%s up %s, 0 users, load average: 0, 0, 0" % (
            str(time.strftime("%H:%M:%S")),
            str(datetime.timedelta(milliseconds=int(tick_count64))))

    def update_provider_tree(self, provider_tree, nodename,
                             allocation_ratios, allocations=None):
        resources = self.get_available_resource()

        inventory = {
            orc.VCPU: {
                'total': resources['vcpus'],
                'min_unit': 1,
                'max_unit': resources['vcpus'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.VCPU],
                'reserved': CONF.reserved_host_cpus,
            },
            orc.MEMORY_MB: {
                'total': resources['memory_mb'],
                'min_unit': 1,
                'max_unit': resources['memory_mb'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.MEMORY_MB],
                'reserved': CONF.reserved_host_memory_mb,
            },
            # TODO(lpetrut): once #1784020 is fixed, we can skip reporting
            # shared storage capacity.
            orc.DISK_GB: {
                'total': resources['local_gb'],
                'min_unit': 1,
                'max_unit': resources['local_gb'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.DISK_GB],
                'reserved': compute_utils.convert_mb_to_ceil_gb(
                    CONF.reserved_host_disk_mb),
            },
        }

        provider_tree.update_inventory(nodename, inventory)
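
As a quick sanity check on the reserved disk math above: to the best
of my reading, compute_utils.convert_mb_to_ceil_gb rounds any non-zero
MB reservation up to a whole GB, which is why the unit test sets
reserved_host_disk_mb=1 and expects 'reserved': 1. A standalone sketch
of that behavior:

    import math

    def convert_mb_to_ceil_gb(mb_value):
        # Mirrors nova.compute.utils.convert_mb_to_ceil_gb: 0 stays 0,
        # anything else is rounded up to the next whole GB.
        return int(math.ceil(mb_value / 1024.0)) if mb_value else 0

    assert convert_mb_to_ceil_gb(0) == 0
    assert convert_mb_to_ceil_gb(1) == 1      # 1 MB reserved -> 1 GB
    assert convert_mb_to_ceil_gb(2048) == 2   # 2048 MB -> 2 GB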