Remove 'nova.virt.driver.ComputeDriver.estimate_instance_overhead'
With the removal of the Core, Ram and Disk filters in change I8a0d332877fbb9794700081e7954f2501b7e7c09, there's now only a single caller for the 'estimate_instance_overhead' function. This call results in the 'memory_mb_used', 'local_gb_used', 'vcpus_used', 'free_ram_mb' and 'free_disk_gb' fields of a compute node's 'ComputeNode' object being modified when calculating usage as part of the resource tracker to include driver-specific overhead. However, these fields are no longer used for anything except logging and the 'os-hypervisors' API. Since overhead is not reflected in placement (and therefore the scheduler), reporting them as part of the various usage values for both logging and that API is actually a bit of a lie and is likely to cause confusion among users. Remove the whole thing and make our logs and the 'os-hypervisors' API better match what's stored in placement. Change-Id: I033e8269194de54432079cbc970431e3dcea7ce5 Signed-off-by: Stephen Finucane <sfinucan@redhat.com>
This commit is contained in:
parent
5406c8bd9b
commit
97096c8e4a
|
@ -1385,7 +1385,8 @@ XenAPI
|
||||||
You should configure the
|
You should configure the
|
||||||
:oslo.config:option:`reserved_host_memory_mb` config option to
|
:oslo.config:option:`reserved_host_memory_mb` config option to
|
||||||
account for this overhead, based on the size of your hosts and
|
account for this overhead, based on the size of your hosts and
|
||||||
instances.
|
instances. For more information, refer to
|
||||||
|
https://wiki.openstack.org/wiki/XenServer/Overhead.
|
||||||
|
|
||||||
Cells considerations
|
Cells considerations
|
||||||
~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
|
@ -1053,15 +1053,15 @@ class ResourceTracker(object):
|
||||||
self.pci_tracker.save(context)
|
self.pci_tracker.save(context)
|
||||||
|
|
||||||
def _update_usage(self, usage, nodename, sign=1):
|
def _update_usage(self, usage, nodename, sign=1):
|
||||||
|
# TODO(stephenfin): We don't use the CPU, RAM and disk fields for much
|
||||||
|
# except 'Aggregate(Core|Ram|Disk)Filter', the 'os-hypervisors' API,
|
||||||
|
# and perhaps some out-of-tree filters. Once the in-tree stuff is
|
||||||
|
# removed or updated to use information from placement, we can think
|
||||||
|
# about dropping the fields from the 'ComputeNode' object entirely
|
||||||
mem_usage = usage['memory_mb']
|
mem_usage = usage['memory_mb']
|
||||||
disk_usage = usage.get('root_gb', 0)
|
disk_usage = usage.get('root_gb', 0)
|
||||||
vcpus_usage = usage.get('vcpus', 0)
|
vcpus_usage = usage.get('vcpus', 0)
|
||||||
|
|
||||||
overhead = self.driver.estimate_instance_overhead(usage)
|
|
||||||
mem_usage += overhead['memory_mb']
|
|
||||||
disk_usage += overhead.get('disk_gb', 0)
|
|
||||||
vcpus_usage += overhead.get('vcpus', 0)
|
|
||||||
|
|
||||||
cn = self.compute_nodes[nodename]
|
cn = self.compute_nodes[nodename]
|
||||||
cn.memory_mb_used += sign * mem_usage
|
cn.memory_mb_used += sign * mem_usage
|
||||||
cn.local_gb_used += sign * disk_usage
|
cn.local_gb_used += sign * disk_usage
|
||||||
|
|
|
@ -417,26 +417,12 @@ _MIGRATION_CONTEXT_FIXTURES = {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
def overhead_zero(instance):
|
def setup_rt(hostname, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES):
|
||||||
# Emulate that the driver does not adjust the memory
|
|
||||||
# of the instance...
|
|
||||||
return {
|
|
||||||
'memory_mb': 0,
|
|
||||||
'disk_gb': 0,
|
|
||||||
'vcpus': 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def setup_rt(hostname, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
|
|
||||||
estimate_overhead=overhead_zero):
|
|
||||||
"""Sets up the resource tracker instance with mock fixtures.
|
"""Sets up the resource tracker instance with mock fixtures.
|
||||||
|
|
||||||
:param virt_resources: Optional override of the resource representation
|
:param virt_resources: Optional override of the resource representation
|
||||||
returned by the virt driver's
|
returned by the virt driver's
|
||||||
`get_available_resource()` method.
|
`get_available_resource()` method.
|
||||||
:param estimate_overhead: Optional override of a function that should
|
|
||||||
return overhead of memory given an instance
|
|
||||||
object. Defaults to returning zero overhead.
|
|
||||||
"""
|
"""
|
||||||
query_client_mock = mock.MagicMock()
|
query_client_mock = mock.MagicMock()
|
||||||
report_client_mock = mock.MagicMock()
|
report_client_mock = mock.MagicMock()
|
||||||
|
@ -449,7 +435,6 @@ def setup_rt(hostname, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
|
||||||
# TODO(mriedem): Need to make this mocked virt driver implement upt.
|
# TODO(mriedem): Need to make this mocked virt driver implement upt.
|
||||||
vd.update_provider_tree.side_effect = NotImplementedError
|
vd.update_provider_tree.side_effect = NotImplementedError
|
||||||
vd.get_host_ip_addr.return_value = _NODENAME
|
vd.get_host_ip_addr.return_value = _NODENAME
|
||||||
vd.estimate_instance_overhead.side_effect = estimate_overhead
|
|
||||||
vd.rebalances_nodes = False
|
vd.rebalances_nodes = False
|
||||||
|
|
||||||
with test.nested(
|
with test.nested(
|
||||||
|
@ -481,11 +466,9 @@ class BaseTestCase(test.NoDBTestCase):
|
||||||
reserved_host_memory_mb=0,
|
reserved_host_memory_mb=0,
|
||||||
reserved_host_cpus=0)
|
reserved_host_cpus=0)
|
||||||
|
|
||||||
def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
|
def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES):
|
||||||
estimate_overhead=overhead_zero):
|
|
||||||
(self.rt, self.sched_client_mock, self.report_client_mock,
|
(self.rt, self.sched_client_mock, self.report_client_mock,
|
||||||
self.driver_mock) = setup_rt(
|
self.driver_mock) = setup_rt(_HOSTNAME, virt_resources)
|
||||||
_HOSTNAME, virt_resources, estimate_overhead)
|
|
||||||
|
|
||||||
def _setup_ptree(self, compute):
|
def _setup_ptree(self, compute):
|
||||||
"""Set up a ProviderTree with a compute node root, and mock the
|
"""Set up a ProviderTree with a compute node root, and mock the
|
||||||
|
|
|
@ -137,11 +137,6 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
|
||||||
self.driver.list_instances()
|
self.driver.list_instances()
|
||||||
self.driver._vmops.list_instances.assert_called_once_with()
|
self.driver._vmops.list_instances.assert_called_once_with()
|
||||||
|
|
||||||
def test_estimate_instance_overhead(self):
|
|
||||||
self.driver.estimate_instance_overhead(mock.sentinel.instance)
|
|
||||||
self.driver._vmops.estimate_instance_overhead.assert_called_once_with(
|
|
||||||
mock.sentinel.instance)
|
|
||||||
|
|
||||||
def test_spawn(self):
|
def test_spawn(self):
|
||||||
self.driver.spawn(
|
self.driver.spawn(
|
||||||
mock.sentinel.context, mock.sentinel.instance,
|
mock.sentinel.context, mock.sentinel.instance,
|
||||||
|
|
|
@ -82,16 +82,6 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
|
||||||
self._vmops._vmutils.list_instances.assert_called_once_with()
|
self._vmops._vmutils.list_instances.assert_called_once_with()
|
||||||
self.assertEqual(response, [mock_instance])
|
self.assertEqual(response, [mock_instance])
|
||||||
|
|
||||||
def test_estimate_instance_overhead(self):
|
|
||||||
instance_info = {'memory_mb': 512}
|
|
||||||
overhead = self._vmops.estimate_instance_overhead(instance_info)
|
|
||||||
self.assertEqual(0, overhead['memory_mb'])
|
|
||||||
self.assertEqual(1, overhead['disk_gb'])
|
|
||||||
|
|
||||||
instance_info = {'memory_mb': 500}
|
|
||||||
overhead = self._vmops.estimate_instance_overhead(instance_info)
|
|
||||||
self.assertEqual(0, overhead['disk_gb'])
|
|
||||||
|
|
||||||
def _test_get_info(self, vm_exists):
|
def _test_get_info(self, vm_exists):
|
||||||
mock_instance = fake_instance.fake_instance_obj(self.context)
|
mock_instance = fake_instance.fake_instance_obj(self.context)
|
||||||
mock_info = mock.MagicMock(spec_set=dict)
|
mock_info = mock.MagicMock(spec_set=dict)
|
||||||
|
|
|
@ -7305,78 +7305,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
||||||
mock_get_guest.side_effect = exception.InternalError(err='something')
|
mock_get_guest.side_effect = exception.InternalError(err='something')
|
||||||
self.assertFalse(drvr.instance_exists(None))
|
self.assertFalse(drvr.instance_exists(None))
|
||||||
|
|
||||||
def test_estimate_instance_overhead_spawn(self):
|
|
||||||
# test that method when called with instance ref
|
|
||||||
instance_topology = objects.InstanceNUMATopology(
|
|
||||||
emulator_threads_policy=(
|
|
||||||
fields.CPUEmulatorThreadsPolicy.ISOLATE),
|
|
||||||
cells=[objects.InstanceNUMACell(
|
|
||||||
id=0, cpuset=set([0]), memory=1024)])
|
|
||||||
instance_info = objects.Instance(**self.test_instance)
|
|
||||||
instance_info.numa_topology = instance_topology
|
|
||||||
|
|
||||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
||||||
overhead = drvr.estimate_instance_overhead(instance_info)
|
|
||||||
self.assertEqual(1, overhead['vcpus'])
|
|
||||||
|
|
||||||
def test_estimate_instance_overhead_spawn_no_overhead(self):
|
|
||||||
# test that method when called with instance ref, no overhead
|
|
||||||
instance_topology = objects.InstanceNUMATopology(
|
|
||||||
emulator_threads_policy=(
|
|
||||||
fields.CPUEmulatorThreadsPolicy.SHARE),
|
|
||||||
cells=[objects.InstanceNUMACell(
|
|
||||||
id=0, cpuset=set([0]), memory=1024)])
|
|
||||||
instance_info = objects.Instance(**self.test_instance)
|
|
||||||
instance_info.numa_topology = instance_topology
|
|
||||||
|
|
||||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
||||||
overhead = drvr.estimate_instance_overhead(instance_info)
|
|
||||||
self.assertEqual(0, overhead['vcpus'])
|
|
||||||
|
|
||||||
def test_estimate_instance_overhead_migrate(self):
|
|
||||||
# test that method when called with flavor ref
|
|
||||||
instance_info = objects.Flavor(extra_specs={
|
|
||||||
'hw:emulator_threads_policy': (
|
|
||||||
fields.CPUEmulatorThreadsPolicy.ISOLATE),
|
|
||||||
'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
|
|
||||||
})
|
|
||||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
||||||
overhead = drvr.estimate_instance_overhead(instance_info)
|
|
||||||
self.assertEqual(1, overhead['vcpus'])
|
|
||||||
|
|
||||||
def test_estimate_instance_overhead_migrate_no_overhead(self):
|
|
||||||
# test that method when called with flavor ref, no overhead
|
|
||||||
instance_info = objects.Flavor(extra_specs={
|
|
||||||
'hw:emulator_threads_policy': (
|
|
||||||
fields.CPUEmulatorThreadsPolicy.SHARE),
|
|
||||||
'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
|
|
||||||
})
|
|
||||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
||||||
overhead = drvr.estimate_instance_overhead(instance_info)
|
|
||||||
self.assertEqual(0, overhead['vcpus'])
|
|
||||||
|
|
||||||
def test_estimate_instance_overhead_usage(self):
|
|
||||||
# test that method when called with usage dict
|
|
||||||
instance_info = objects.Flavor(extra_specs={
|
|
||||||
'hw:emulator_threads_policy': (
|
|
||||||
fields.CPUEmulatorThreadsPolicy.ISOLATE),
|
|
||||||
'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
|
|
||||||
})
|
|
||||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
||||||
overhead = drvr.estimate_instance_overhead(instance_info)
|
|
||||||
self.assertEqual(1, overhead['vcpus'])
|
|
||||||
|
|
||||||
def test_estimate_instance_overhead_usage_no_overhead(self):
|
|
||||||
# test that method when called with usage dict, no overhead
|
|
||||||
instance_info = objects.Flavor(extra_specs={
|
|
||||||
'hw:emulator_threads_policy': (
|
|
||||||
fields.CPUEmulatorThreadsPolicy.SHARE),
|
|
||||||
'hw:cpu_policy': fields.CPUAllocationPolicy.DEDICATED,
|
|
||||||
})
|
|
||||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
|
||||||
overhead = drvr.estimate_instance_overhead(instance_info)
|
|
||||||
self.assertEqual(0, overhead['vcpus'])
|
|
||||||
|
|
||||||
@mock.patch.object(host.Host, "list_instance_domains")
|
@mock.patch.object(host.Host, "list_instance_domains")
|
||||||
def test_list_instances(self, mock_list):
|
def test_list_instances(self, mock_list):
|
||||||
vm1 = FakeVirtDomain(id=3, name="instance00000001")
|
vm1 = FakeVirtDomain(id=3, name="instance00000001")
|
||||||
|
|
|
@ -12,9 +12,6 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
|
||||||
import math
|
|
||||||
|
|
||||||
import mock
|
import mock
|
||||||
import os_resource_classes as orc
|
import os_resource_classes as orc
|
||||||
from oslo_utils.fixture import uuidsentinel as uuids
|
from oslo_utils.fixture import uuidsentinel as uuids
|
||||||
|
@ -105,19 +102,6 @@ class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
|
||||||
self.assertEqual(1, resources['disk_available_least'])
|
self.assertEqual(1, resources['disk_available_least'])
|
||||||
mock_get.assert_called_once_with(refresh=True)
|
mock_get.assert_called_once_with(refresh=True)
|
||||||
|
|
||||||
def test_overhead(self):
|
|
||||||
driver = self._get_driver()
|
|
||||||
instance = {'memory_mb': 30720, 'vcpus': 4}
|
|
||||||
|
|
||||||
# expected memory overhead per:
|
|
||||||
# https://wiki.openstack.org/wiki/XenServer/Overhead
|
|
||||||
expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
|
|
||||||
(instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
|
|
||||||
xenapi_driver.OVERHEAD_BASE)
|
|
||||||
expected = math.ceil(expected)
|
|
||||||
overhead = driver.estimate_instance_overhead(instance)
|
|
||||||
self.assertEqual(expected, overhead['memory_mb'])
|
|
||||||
|
|
||||||
def test_set_bootable(self):
|
def test_set_bootable(self):
|
||||||
driver = self._get_driver()
|
driver = self._get_driver()
|
||||||
|
|
||||||
|
|
|
@ -255,20 +255,6 @@ class ComputeDriver(object):
|
||||||
except NotImplementedError:
|
except NotImplementedError:
|
||||||
return instance.name in self.list_instances()
|
return instance.name in self.list_instances()
|
||||||
|
|
||||||
def estimate_instance_overhead(self, instance_info):
|
|
||||||
"""Estimate the virtualization overhead required to build an instance
|
|
||||||
of the given flavor.
|
|
||||||
|
|
||||||
Defaults to zero, drivers should override if per-instance overhead
|
|
||||||
calculations are desired.
|
|
||||||
|
|
||||||
:param instance_info: Instance/flavor to calculate overhead for.
|
|
||||||
:returns: Dict of estimated overhead values.
|
|
||||||
"""
|
|
||||||
return {'memory_mb': 0,
|
|
||||||
'disk_gb': 0,
|
|
||||||
'vcpus': 0}
|
|
||||||
|
|
||||||
def list_instances(self):
|
def list_instances(self):
|
||||||
"""Return the names of all the instances known to the virtualization
|
"""Return the names of all the instances known to the virtualization
|
||||||
layer, as a list.
|
layer, as a list.
|
||||||
|
|
|
@ -157,9 +157,6 @@ class HyperVDriver(driver.ComputeDriver):
|
||||||
def list_instances(self):
|
def list_instances(self):
|
||||||
return self._vmops.list_instances()
|
return self._vmops.list_instances()
|
||||||
|
|
||||||
def estimate_instance_overhead(self, instance_info):
|
|
||||||
return self._vmops.estimate_instance_overhead(instance_info)
|
|
||||||
|
|
||||||
def spawn(self, context, instance, image_meta, injected_files,
|
def spawn(self, context, instance, image_meta, injected_files,
|
||||||
admin_password, allocations, network_info=None,
|
admin_password, allocations, network_info=None,
|
||||||
block_device_info=None):
|
block_device_info=None):
|
||||||
|
|
|
@ -116,14 +116,6 @@ class VMOps(object):
|
||||||
def list_instances(self):
|
def list_instances(self):
|
||||||
return self._vmutils.list_instances()
|
return self._vmutils.list_instances()
|
||||||
|
|
||||||
def estimate_instance_overhead(self, instance_info):
|
|
||||||
# NOTE(claudiub): When an instance starts, Hyper-V creates a VM memory
|
|
||||||
# file on the local disk. The file size is the same as the VM's amount
|
|
||||||
# of memory. Since disk_gb must be an integer, and memory is MB, round
|
|
||||||
# up from X512 MB.
|
|
||||||
return {'memory_mb': 0,
|
|
||||||
'disk_gb': (instance_info['memory_mb'] + 512) // units.Ki}
|
|
||||||
|
|
||||||
def get_info(self, instance):
|
def get_info(self, instance):
|
||||||
"""Get information about the VM."""
|
"""Get information about the VM."""
|
||||||
LOG.debug("get_info called for instance", instance=instance)
|
LOG.debug("get_info called for instance", instance=instance)
|
||||||
|
|
|
@ -890,24 +890,6 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||||
except (exception.InternalError, exception.InstanceNotFound):
|
except (exception.InternalError, exception.InstanceNotFound):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def estimate_instance_overhead(self, instance_info):
|
|
||||||
overhead = super(LibvirtDriver, self).estimate_instance_overhead(
|
|
||||||
instance_info)
|
|
||||||
if isinstance(instance_info, objects.Flavor):
|
|
||||||
# A flavor object is passed during case of migrate
|
|
||||||
emu_policy = hardware.get_emulator_thread_policy_constraint(
|
|
||||||
instance_info)
|
|
||||||
if emu_policy == fields.CPUEmulatorThreadsPolicy.ISOLATE:
|
|
||||||
overhead['vcpus'] += 1
|
|
||||||
else:
|
|
||||||
# An instance object is passed during case of spawing or a
|
|
||||||
# dict is passed when computing resource for an instance
|
|
||||||
numa_topology = hardware.instance_topology_from_instance(
|
|
||||||
instance_info)
|
|
||||||
if numa_topology and numa_topology.emulator_threads_isolated:
|
|
||||||
overhead['vcpus'] += 1
|
|
||||||
return overhead
|
|
||||||
|
|
||||||
def list_instances(self):
|
def list_instances(self):
|
||||||
names = []
|
names = []
|
||||||
for guest in self._host.list_guests(only_running=False):
|
for guest in self._host.list_guests(only_running=False):
|
||||||
|
|
|
@ -23,8 +23,6 @@ A driver for XenServer or Xen Cloud Platform.
|
||||||
- suffix "_rec" for record objects
|
- suffix "_rec" for record objects
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import math
|
|
||||||
|
|
||||||
import os_resource_classes as orc
|
import os_resource_classes as orc
|
||||||
from os_xenapi.client import session
|
from os_xenapi.client import session
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
@ -48,10 +46,6 @@ LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
CONF = nova.conf.CONF
|
CONF = nova.conf.CONF
|
||||||
|
|
||||||
OVERHEAD_BASE = 3
|
|
||||||
OVERHEAD_PER_MB = 0.00781
|
|
||||||
OVERHEAD_PER_VCPU = 1.5
|
|
||||||
|
|
||||||
|
|
||||||
def invalid_option(option_name, recommended_value):
|
def invalid_option(option_name, recommended_value):
|
||||||
LOG.exception(_('Current value of '
|
LOG.exception(_('Current value of '
|
||||||
|
@ -155,30 +149,6 @@ class XenAPIDriver(driver.ComputeDriver):
|
||||||
"""
|
"""
|
||||||
return self._vmops.instance_exists(instance.name)
|
return self._vmops.instance_exists(instance.name)
|
||||||
|
|
||||||
def estimate_instance_overhead(self, instance_info):
|
|
||||||
"""Get virtualization overhead required to build an instance of the
|
|
||||||
given flavor.
|
|
||||||
|
|
||||||
:param instance_info: Instance/flavor to calculate overhead for.
|
|
||||||
:returns: Overhead memory in MB.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# XenServer memory overhead is proportional to the size of the
|
|
||||||
# VM. Larger flavor VMs become more efficient with respect to
|
|
||||||
# overhead.
|
|
||||||
|
|
||||||
# interpolated formula to predict overhead required per vm.
|
|
||||||
# based on data from:
|
|
||||||
# https://wiki.openstack.org/wiki/XenServer/Overhead
|
|
||||||
# Some padding is done to each value to fit all available VM data
|
|
||||||
memory_mb = instance_info['memory_mb']
|
|
||||||
vcpus = instance_info.get('vcpus', 1)
|
|
||||||
overhead = ((memory_mb * OVERHEAD_PER_MB) +
|
|
||||||
(vcpus * OVERHEAD_PER_VCPU) +
|
|
||||||
OVERHEAD_BASE)
|
|
||||||
overhead = math.ceil(overhead)
|
|
||||||
return {'memory_mb': overhead}
|
|
||||||
|
|
||||||
def list_instances(self):
|
def list_instances(self):
|
||||||
"""List VM instances."""
|
"""List VM instances."""
|
||||||
return self._vmops.list_instances()
|
return self._vmops.list_instances()
|
||||||
|
|
Loading…
Reference in New Issue