diff --git a/nova/tests/unit/virt/hyperv/test_driver.py b/nova/tests/unit/virt/hyperv/test_driver.py
index 6ae8d27115ef..9449b36b9479 100644
--- a/nova/tests/unit/virt/hyperv/test_driver.py
+++ b/nova/tests/unit/virt/hyperv/test_driver.py
@@ -486,3 +486,17 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
                                        mock.sentinel.all_instances)
         self.driver._imagecache.update.assert_called_once_with(
             mock.sentinel.context, mock.sentinel.all_instances)
+
+    @mock.patch.object(driver.HyperVDriver, '_get_allocation_ratios')
+    def test_update_provider_tree(self, mock_get_alloc_ratios):
+        mock_ptree = mock.Mock()
+        mock_inventory = mock_ptree.data.return_value.inventory
+
+        self.driver.update_provider_tree(
+            mock_ptree, mock.sentinel.nodename, mock.sentinel.allocations)
+
+        mock_get_alloc_ratios.assert_called_once_with(mock_inventory)
+        self.driver._hostops.update_provider_tree.assert_called_once_with(
+            mock_ptree, mock.sentinel.nodename,
+            mock_get_alloc_ratios.return_value,
+            mock.sentinel.allocations)
diff --git a/nova/tests/unit/virt/hyperv/test_hostops.py b/nova/tests/unit/virt/hyperv/test_hostops.py
index 73bb5d8955fe..520c532c95c2 100644
--- a/nova/tests/unit/virt/hyperv/test_hostops.py
+++ b/nova/tests/unit/virt/hyperv/test_hostops.py
@@ -16,6 +16,7 @@
 import datetime
 
 import mock
+import os_resource_classes as orc
 from os_win import constants as os_win_const
 from oslo_config import cfg
 from oslo_serialization import jsonutils
@@ -268,3 +269,48 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
             str(mock_time()), str(tdelta))
 
         self.assertEqual(expected, response)
+
+    @mock.patch.object(hostops.HostOps, 'get_available_resource')
+    def test_update_provider_tree(self, mock_get_avail_res):
+        resources = mock.MagicMock()
+        allocation_ratios = mock.MagicMock()
+        provider_tree = mock.Mock()
+
+        mock_get_avail_res.return_value = resources
+
+        self.flags(reserved_host_disk_mb=1)
+
+        exp_inventory = {
+            orc.VCPU: {
+                'total': resources['vcpus'],
+                'min_unit': 1,
+                'max_unit': resources['vcpus'],
+                'step_size': 1,
+                'allocation_ratio': allocation_ratios[orc.VCPU],
+                'reserved': CONF.reserved_host_cpus,
+            },
+            orc.MEMORY_MB: {
+                'total': resources['memory_mb'],
+                'min_unit': 1,
+                'max_unit': resources['memory_mb'],
+                'step_size': 1,
+                'allocation_ratio': allocation_ratios[orc.MEMORY_MB],
+                'reserved': CONF.reserved_host_memory_mb,
+            },
+            orc.DISK_GB: {
+                'total': resources['local_gb'],
+                'min_unit': 1,
+                'max_unit': resources['local_gb'],
+                'step_size': 1,
+                'allocation_ratio': allocation_ratios[orc.DISK_GB],
+                'reserved': 1,
+            },
+        }
+
+        self._hostops.update_provider_tree(
+            provider_tree, mock.sentinel.node_name, allocation_ratios,
+            mock.sentinel.allocations)
+
+        provider_tree.update_inventory.assert_called_once_with(
+            mock.sentinel.node_name,
+            exp_inventory)
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 6c93687a7f42..7b30ca047457 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -369,3 +369,10 @@ class HyperVDriver(driver.ComputeDriver):
 
     def unrescue(self, instance, network_info):
         self._vmops.unrescue_instance(instance)
+
+    def update_provider_tree(self, provider_tree, nodename, allocations=None):
+        inventory = provider_tree.data(nodename).inventory
+        alloc_ratios = self._get_allocation_ratios(inventory)
+
+        self._hostops.update_provider_tree(
+            provider_tree, nodename, alloc_ratios, allocations)
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index 0ab1c05a7b0a..9689bf5760e4 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -20,12 +20,14 @@ import datetime
 import platform
 import time
 
+import os_resource_classes as orc
 from os_win import constants as os_win_const
 from os_win import utilsfactory
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
 from oslo_utils import units
 
+from nova.compute import utils as compute_utils
 import nova.conf
 from nova.i18n import _
 from nova import objects
@@ -251,3 +253,39 @@ class HostOps(object):
         return "%s up %s, 0 users, load average: 0, 0, 0" % (
                str(time.strftime("%H:%M:%S")),
                str(datetime.timedelta(milliseconds=int(tick_count64))))
+
+    def update_provider_tree(self, provider_tree, nodename,
+                             allocation_ratios, allocations=None):
+        resources = self.get_available_resource()
+
+        inventory = {
+            orc.VCPU: {
+                'total': resources['vcpus'],
+                'min_unit': 1,
+                'max_unit': resources['vcpus'],
+                'step_size': 1,
+                'allocation_ratio': allocation_ratios[orc.VCPU],
+                'reserved': CONF.reserved_host_cpus,
+            },
+            orc.MEMORY_MB: {
+                'total': resources['memory_mb'],
+                'min_unit': 1,
+                'max_unit': resources['memory_mb'],
+                'step_size': 1,
+                'allocation_ratio': allocation_ratios[orc.MEMORY_MB],
+                'reserved': CONF.reserved_host_memory_mb,
+            },
+            # TODO(lpetrut): once #1784020 is fixed, we can skip reporting
+            # shared storage capacity
+            orc.DISK_GB: {
+                'total': resources['local_gb'],
+                'min_unit': 1,
+                'max_unit': resources['local_gb'],
+                'step_size': 1,
+                'allocation_ratio': allocation_ratios[orc.DISK_GB],
+                'reserved': compute_utils.convert_mb_to_ceil_gb(
+                    CONF.reserved_host_disk_mb),
+            },
+        }
+
+        provider_tree.update_inventory(nodename, inventory)
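
Note on the DISK_GB 'reserved' value the test expects: with reserved_host_disk_mb=1, the hostops code runs the reservation through nova.compute.utils.convert_mb_to_ceil_gb, which rounds MB up to whole GB, so exp_inventory hardcodes 1. A standalone sketch of that rounding behavior (mb_to_ceil_gb is a stand-in for the Nova helper, not part of the patch):

import math

def mb_to_ceil_gb(mb_value):
    # Round a MB reservation up to whole GB; 0 stays 0, any non-zero
    # value consumes at least one full GB of reported disk inventory.
    return int(math.ceil(mb_value / 1024.0)) if mb_value else 0

assert mb_to_ceil_gb(1) == 1       # reserved_host_disk_mb=1 -> 'reserved': 1
assert mb_to_ceil_gb(0) == 0
assert mb_to_ceil_gb(2048) == 2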