Remove Windows OS support

It was deprecated during the previous cycle following the retirement of
the Winstackers project [1].

The following pollsters are also removed, because they are supported
only by the Hyper-V inspector.
 - disk.device.latency
 - disk.device.iops

[1] 7660339b4a

Change-Id: I872f93b945233a1288c773302be3835db2fd2e4f
This commit is contained in:
Takashi Kajinami 2024-05-01 18:31:05 +09:00
parent a6e43add90
commit 3b8adafbb0
14 changed files with 38 additions and 520 deletions

View File

@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import shlex
import cotyledon
@ -89,16 +88,8 @@ def create_polling_service(worker_id, conf=None):
def main():
    """Entry point for the compute polling agent.

    Builds a cotyledon ServiceManager, wires oslo.config and privsep
    into it, registers the polling service and runs until terminated.
    """
    sm = cotyledon.ServiceManager()
    conf = _prepare_config()
    # privsep needs the project root helper to spawn its privileged daemon.
    priv_context.init(root_helper=shlex.split(utils._get_root_helper()))
    # Propagate config (e.g. SIGHUP-driven reloads) to the child services.
    oslo_config_glue.setup(sm, conf)
    sm.add(create_polling_service, args=(conf,))
    sm.run()

View File

@ -58,20 +58,6 @@ class PerDeviceWriteBytesPollster(PerDeviceDiskPollster):
sample_stats_key = 'write_bytes'
# Pollster emitting per-device 'disk.device.latency' samples (unit: ms)
# built from the inspector's inspect_disk_latency() results.
class PerDeviceDiskLatencyPollster(PerDeviceDiskPollster):
inspector_method = 'inspect_disk_latency'
sample_name = 'disk.device.latency'
sample_unit = 'ms'
sample_stats_key = 'disk_latency'
# Pollster emitting per-device 'disk.device.iops' samples (unit: count/s)
# built from the inspector's inspect_disk_iops() results.
class PerDeviceDiskIOPSPollster(PerDeviceDiskPollster):
inspector_method = 'inspect_disk_iops'
sample_name = 'disk.device.iops'
sample_unit = 'count/s'
sample_stats_key = 'iops_count'
class PerDeviceCapacityPollster(PerDeviceDiskPollster):
inspector_method = 'inspect_disk_info'
sample_name = 'disk.device.capacity'

View File

@ -1,161 +0,0 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of Inspector abstraction for Hyper-V"""
import collections
import functools
import sys
import warnings
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_utils import units
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
def convert_exceptions(exception_map, yields=True):
    """Decorator factory translating expected exceptions into other types.

    :param exception_map: ordered mapping of caught exception type ->
        exception type to raise instead; entries must go from the most
        specialized type to the most generic one.
    :param yields: True when the decorated function is a generator; its
        output is consumed inside the wrapper so exceptions raised during
        iteration are converted too.
    """
    expected_exceptions = tuple(exception_map.keys())

    def _reraise_exception(exc):
        # exception might be a subclass of an expected exception, so scan
        # in declaration order (most specialized first).
        for expected in expected_exceptions:
            if isinstance(exc, expected):
                raised_exception = exception_map[expected]
                break
        else:
            # No mapping matched (only possible if called outside the
            # except clauses below): re-raise unchanged instead of
            # failing with an unbound-name error.
            raise exc
        exc_info = sys.exc_info()
        new_exc = raised_exception(str(exc_info[1]))
        # Keep the original traceback so the failure source stays visible.
        raise new_exc.with_traceback(exc_info[2])

    def decorator(function):
        if yields:
            @functools.wraps(function)
            def wrapper(*args, **kwargs):
                try:
                    # NOTE(claudiub): We're consuming the function's yield
                    # in order to avoid yielding a generator.
                    yield from function(*args, **kwargs)
                except expected_exceptions as ex:
                    _reraise_exception(ex)
        else:
            @functools.wraps(function)
            def wrapper(*args, **kwargs):
                try:
                    return function(*args, **kwargs)
                except expected_exceptions as ex:
                    _reraise_exception(ex)
        return wrapper
    return decorator
# Mapping of os_win exception types to the inspector exception types
# raised in their place by the convert_exceptions decorators.
exception_conversion_map = collections.OrderedDict([
# NOTE(claudiub): order should be from the most specialized exception type
# to the most generic exception type.
# (expected_exception, converted_exception)
(os_win_exc.NotFound, virt_inspector.InstanceNotFoundException),
(os_win_exc.OSWinException, virt_inspector.InspectorException),
])
# NOTE(claudiub): the purpose of the decorators below is to prevent any
# os_win exceptions (subclasses of OSWinException) to leak outside of the
# HyperVInspector.
class HyperVInspector(virt_inspector.Inspector):
    """Hyper-V implementation of the virt inspector abstraction.

    Every public method is wrapped with convert_exceptions so only
    virt_inspector exception types leak out of this class.
    """

    def __init__(self, conf):
        super(HyperVInspector, self).__init__(conf)
        self._utils = utilsfactory.get_metricsutils()
        self._host_max_cpu_clock = self._compute_host_max_cpu_clock()
        warnings.warn('Support for HyperV is deprecated.',
                      category=DeprecationWarning, stacklevel=2)

    def _compute_host_max_cpu_clock(self):
        """Return the aggregated max clock speed across all host CPUs."""
        hostutils = utilsfactory.get_hostutils()
        # host's number of CPUs and CPU clock speed will not change.
        cpus = hostutils.get_cpus_info()
        return float(cpus[0]['MaxClockSpeed'] * len(cpus))

    @convert_exceptions(exception_conversion_map, yields=False)
    def inspect_instance(self, instance, duration):
        """Return CPU and memory statistics for one instance."""
        name = util.instance_name(instance)
        clock_used, cpu_count, uptime = self._utils.get_cpu_metrics(name)
        used_fraction = clock_used / self._host_max_cpu_clock
        # Nanoseconds
        cpu_time = int(uptime * used_fraction) * units.k
        memory_usage = self._utils.get_memory_metrics(name)
        return virt_inspector.InstanceStats(
            cpu_number=cpu_count,
            cpu_time=cpu_time,
            memory_usage=memory_usage)

    @convert_exceptions(exception_conversion_map)
    def inspect_vnics(self, instance, duration):
        """Yield InterfaceStats for each vNIC of the instance."""
        name = util.instance_name(instance)
        for metrics in self._utils.get_vnic_metrics(name):
            yield virt_inspector.InterfaceStats(
                name=metrics["element_name"],
                mac=metrics["address"],
                fref=None,
                parameters=None,
                rx_bytes=metrics['rx_mb'] * units.Mi,
                rx_packets=0,
                rx_drop=0,
                rx_errors=0,
                tx_bytes=metrics['tx_mb'] * units.Mi,
                tx_packets=0,
                tx_drop=0,
                tx_errors=0,
                rx_bytes_delta=0,
                tx_bytes_delta=0)

    @convert_exceptions(exception_conversion_map)
    def inspect_disks(self, instance, duration):
        """Yield DiskStats for each disk of the instance."""
        name = util.instance_name(instance)
        for metrics in self._utils.get_disk_metrics(name):
            yield virt_inspector.DiskStats(
                device=metrics['instance_id'],
                read_requests=0,
                # Return bytes
                read_bytes=metrics['read_mb'] * units.Mi,
                write_requests=0,
                write_bytes=metrics['write_mb'] * units.Mi,
                errors=0, wr_total_times=0, rd_total_times=0)

    @convert_exceptions(exception_conversion_map)
    def inspect_disk_latency(self, instance, duration):
        """Yield DiskLatencyStats for each disk of the instance."""
        name = util.instance_name(instance)
        for metrics in self._utils.get_disk_latency_metrics(name):
            yield virt_inspector.DiskLatencyStats(
                device=metrics['instance_id'],
                disk_latency=metrics['disk_latency'] / 1000)

    @convert_exceptions(exception_conversion_map)
    def inspect_disk_iops(self, instance, duration):
        """Yield DiskIOPSStats for each disk of the instance."""
        name = util.instance_name(instance)
        for metrics in self._utils.get_disk_iops_count(name):
            yield virt_inspector.DiskIOPSStats(
                device=metrics['instance_id'],
                iops_count=metrics['iops_count'])

View File

@ -27,7 +27,7 @@ OPTS = [
cfg.StrOpt('hypervisor_inspector',
default='libvirt',
help='Inspector to use for inspecting the hypervisor layer. '
'Known inspectors are libvirt, hyperv, and vsphere.'),
'Known inspectors are libvirt and vsphere.'),
]
@ -129,21 +129,6 @@ DiskRateStats = collections.namedtuple('DiskRateStats',
'write_bytes_rate',
'write_requests_rate'])
# Named tuple representing disk latency statistics.
#
# device: the disk device the statistics apply to
# disk_latency: average disk latency
#
DiskLatencyStats = collections.namedtuple('DiskLatencyStats',
['device', 'disk_latency'])
# Named tuple representing disk iops statistics.
#
# device: the disk device the statistics apply to
# iops_count: number of iops per second
#
DiskIOPSStats = collections.namedtuple('DiskIOPSStats',
['device', 'iops_count'])
# Named tuple representing disk Information.
#
# capacity: capacity of the disk
@ -237,26 +222,6 @@ class Inspector(object):
"""
raise ceilometer.NotImplementedError
def inspect_disk_latency(self, instance, duration):
"""Inspect the average disk latency for an instance.
:param instance: the target instance
:param duration: the last 'n' seconds, over which the value should be
inspected
:return: for each disk, the average disk latency
"""
raise ceilometer.NotImplementedError
def inspect_disk_iops(self, instance, duration):
"""Inspect the disk IOPS for an instance.
:param instance: the target instance
:param duration: the last 'n' seconds, over which the value should be
inspected
:return: for each disk, the number of iops per second
"""
raise ceilometer.NotImplementedError
def inspect_disk_info(self, instance, duration):
"""Inspect the disk information for an instance.

View File

@ -156,10 +156,8 @@ resources:
archive_policy_name: ceilometer-low-rate
disk.device.write.bytes:
archive_policy_name: ceilometer-low-rate
disk.device.latency:
disk.device.read.latency:
disk.device.write.latency:
disk.device.iops:
disk.device.capacity:
disk.device.allocation:
disk.device.usage:

View File

@ -164,47 +164,6 @@ class TestDiskPollsters(TestBaseDiskIO):
'disk.device.write.latency', 400, 'vda2')
# Exercises PerDeviceDiskLatencyPollster against two fixture
# DiskLatencyStats returned by a mocked inspect_disk_latency().
class TestDiskLatencyPollsters(TestBaseDiskIO):
DISKS = [
virt_inspector.DiskLatencyStats("disk1", 1),
virt_inspector.DiskLatencyStats("disk2", 2)
]
TYPE = 'gauge'
def setUp(self):
super(TestDiskLatencyPollsters, self).setUp()
# Replace the inspector call so the pollster sees the fixture disks.
self.inspector.inspect_disk_latency = mock.Mock(
return_value=self.DISKS)
def test_per_device_latency(self):
self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster,
'disk.device.latency', 1, 'disk1')
self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster,
'disk.device.latency', 2, 'disk2')
# Exercises PerDeviceDiskIOPSPollster against two fixture
# DiskIOPSStats returned by a mocked inspect_disk_iops().
class TestDiskIOPSPollsters(TestBaseDiskIO):
DISKS = [
virt_inspector.DiskIOPSStats("disk1", 10),
virt_inspector.DiskIOPSStats("disk2", 20),
]
TYPE = 'gauge'
def setUp(self):
super(TestDiskIOPSPollsters, self).setUp()
# Replace the inspector call so the pollster sees the fixture disks.
self.inspector.inspect_disk_iops = mock.Mock(return_value=self.DISKS)
def test_per_device_iops(self):
self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster,
'disk.device.iops', 10, 'disk1')
self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster,
'disk.device.iops', 20, 'disk2')
class TestDiskInfoPollsters(TestBaseDiskIO):
DISKS = [

View File

@ -1,212 +0,0 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Hyper-V inspector.
"""
import sys
from unittest import mock
from os_win import exceptions as os_win_exc
from oslo_utils import units
from oslotest import base
from ceilometer.compute.virt.hyperv import inspector as hyperv_inspector
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer import service
# Unit tests for HyperVInspector, driven entirely through a MagicMock
# replacing the os_win metrics utils layer.
class TestHyperVInspection(base.BaseTestCase):
# Patch utilsfactory so building the inspector needs no real Hyper-V
# host; the host clock computation is stubbed out as well.
@mock.patch.object(hyperv_inspector, 'utilsfactory', mock.MagicMock())
@mock.patch.object(hyperv_inspector.HyperVInspector,
'_compute_host_max_cpu_clock')
def setUp(self, mock_compute_host_cpu_clock):
conf = service.prepare_service([], [])
self._inspector = hyperv_inspector.HyperVInspector(conf)
self._inspector._utils = mock.MagicMock()
super(TestHyperVInspection, self).setUp()
# os_win exceptions raised by the utils layer must be converted to the
# matching virt_inspector exception types.
def test_converted_exception(self):
self._inspector._utils.get_cpu_metrics.side_effect = (
os_win_exc.OSWinException)
self.assertRaises(virt_inspector.InspectorException,
self._inspector.inspect_instance,
mock.sentinel.instance, None)
self._inspector._utils.get_cpu_metrics.side_effect = (
os_win_exc.HyperVException)
self.assertRaises(virt_inspector.InspectorException,
self._inspector.inspect_instance,
mock.sentinel.instance, None)
self._inspector._utils.get_cpu_metrics.side_effect = (
os_win_exc.NotFound(resource='foofoo'))
self.assertRaises(virt_inspector.InstanceNotFoundException,
self._inspector.inspect_instance,
mock.sentinel.instance, None)
# Generator methods only raise once consumed, so drain them here.
def _yield_consumer(generator_method, *args, **kwargs):
list(generator_method(*args, **kwargs))
self._inspector._utils.get_vnic_metrics.side_effect = (
os_win_exc.OSWinException)
self.assertRaises(virt_inspector.InspectorException,
_yield_consumer, self._inspector.inspect_vnics,
mock.sentinel.instance, None)
self._inspector._utils.get_vnic_metrics.side_effect = (
os_win_exc.HyperVException)
self.assertRaises(virt_inspector.InspectorException,
_yield_consumer, self._inspector.inspect_vnics,
mock.sentinel.instance, None)
self._inspector._utils.get_vnic_metrics.side_effect = (
os_win_exc.NotFound(resource='foofoo'))
self.assertRaises(virt_inspector.InstanceNotFoundException,
_yield_consumer, self._inspector.inspect_vnics,
mock.sentinel.instance, None)
# The converted exception must preserve the original traceback so the
# true source of the failure stays debuggable.
def test_assert_original_traceback_maintained(self):
def bar(self):
foo = "foofoo"
raise os_win_exc.NotFound(resource=foo)
self._inspector._utils.get_cpu_metrics.side_effect = bar
try:
self._inspector.inspect_instance(mock.sentinel.instance, None)
self.fail("Test expected exception, but it was not raised.")
except virt_inspector.InstanceNotFoundException:
# exception has been raised as expected.
_, _, trace = sys.exc_info()
while trace.tb_next:
# iterate until the original exception source, bar.
trace = trace.tb_next
# original frame will contain the 'foo' variable.
self.assertEqual('foofoo', trace.tb_frame.f_locals['foo'])
@mock.patch.object(hyperv_inspector, 'utilsfactory')
def test_compute_host_max_cpu_clock(self, mock_utilsfactory):
# Two CPUs at 1000 -> aggregated clock of 2000.0.
mock_cpu = {'MaxClockSpeed': 1000}
hostutils = mock_utilsfactory.get_hostutils.return_value.get_cpus_info
hostutils.return_value = [mock_cpu, mock_cpu]
cpu_clock = self._inspector._compute_host_max_cpu_clock()
self.assertEqual(2000.0, cpu_clock)
def test_inspect_instance(self):
fake_instance_name = 'fake_instance_name'
fake_cpu_clock_used = 2000
fake_cpu_count = 3000
fake_uptime = 4000
self._inspector._host_max_cpu_clock = 4000.0
# Expected cpu_time mirrors the inspector's own formula.
fake_cpu_percent_used = (fake_cpu_clock_used /
self._inspector._host_max_cpu_clock)
fake_cpu_time = (int(fake_uptime * fake_cpu_percent_used) *
1000)
self._inspector._utils.get_cpu_metrics.return_value = (
fake_cpu_clock_used, fake_cpu_count, fake_uptime)
fake_usage = self._inspector._utils.get_memory_metrics.return_value
stats = self._inspector.inspect_instance(fake_instance_name, None)
self.assertEqual(fake_cpu_count, stats.cpu_number)
self.assertEqual(fake_cpu_time, stats.cpu_time)
self.assertEqual(fake_usage, stats.memory_usage)
def test_inspect_vnics(self):
fake_instance_name = 'fake_instance_name'
fake_rx_mb = 1000
fake_tx_mb = 2000
fake_element_name = 'fake_element_name'
fake_address = 'fake_address'
self._inspector._utils.get_vnic_metrics.return_value = [{
'rx_mb': fake_rx_mb,
'tx_mb': fake_tx_mb,
'element_name': fake_element_name,
'address': fake_address}]
inspected_vnics = list(self._inspector.inspect_vnics(
fake_instance_name, None))
self.assertEqual(1, len(inspected_vnics))
inspected_stats = inspected_vnics[0]
self.assertEqual(fake_element_name, inspected_stats.name)
self.assertEqual(fake_address, inspected_stats.mac)
# MB values from the utils layer are converted to bytes.
self.assertEqual(fake_rx_mb * units.Mi, inspected_stats.rx_bytes)
self.assertEqual(fake_tx_mb * units.Mi, inspected_stats.tx_bytes)
def test_inspect_disks(self):
fake_instance_name = 'fake_instance_name'
fake_read_mb = 1000
fake_write_mb = 2000
fake_instance_id = "fake_fake_instance_id"
fake_host_resource = "fake_host_resource"
self._inspector._utils.get_disk_metrics.return_value = [{
'read_mb': fake_read_mb,
'write_mb': fake_write_mb,
'instance_id': fake_instance_id,
'host_resource': fake_host_resource}]
inspected_disks = list(self._inspector.inspect_disks(
fake_instance_name, None))
self.assertEqual(1, len(inspected_disks))
inspected_stats = inspected_disks[0]
self.assertEqual(fake_instance_id, inspected_stats.device)
self.assertEqual(fake_read_mb * units.Mi, inspected_stats.read_bytes)
self.assertEqual(fake_write_mb * units.Mi, inspected_stats.write_bytes)
def test_inspect_disk_latency(self):
fake_instance_name = mock.sentinel.INSTANCE_NAME
fake_disk_latency = 1000
fake_instance_id = mock.sentinel.INSTANCE_ID
self._inspector._utils.get_disk_latency_metrics.return_value = [{
'disk_latency': fake_disk_latency,
'instance_id': fake_instance_id}]
inspected_disks = list(self._inspector.inspect_disk_latency(
fake_instance_name, None))
self.assertEqual(1, len(inspected_disks))
inspected_stats = inspected_disks[0]
self.assertEqual(fake_instance_id, inspected_stats.device)
# 1000 from the utils layer is scaled down by 1000 by the inspector.
self.assertEqual(1, inspected_stats.disk_latency)
def test_inspect_disk_iops_count(self):
fake_instance_name = mock.sentinel.INSTANCE_NAME
fake_disk_iops_count = 53
fake_instance_id = mock.sentinel.INSTANCE_ID
self._inspector._utils.get_disk_iops_count.return_value = [{
'iops_count': fake_disk_iops_count,
'instance_id': fake_instance_id}]
inspected_disks = list(self._inspector.inspect_disk_iops(
fake_instance_name, None))
self.assertEqual(1, len(inspected_disks))
inspected_stats = inspected_disks[0]
self.assertEqual(fake_instance_id, inspected_stats.device)
self.assertEqual(53, inspected_stats.iops_count)

View File

@ -85,13 +85,13 @@ The following meters are collected for OpenStack Compute.
+===========+=======+======+==========+==========+=========+==================+
| **Meters added in the Mitaka release or earlier** |
+-----------+-------+------+----------+----------+---------+------------------+
| memory | Gauge | MB | instance | Notific\ | Libvirt,| Volume of RAM |
| | | | ID | ation | Hyper-V | allocated to the |
| memory | Gauge | MB | instance | Notific\ | Libvirt | Volume of RAM |
| | | | ID | ation | | allocated to the |
| | | | | | | instance |
+-----------+-------+------+----------+----------+---------+------------------+
| memory.\ | Gauge | MB | instance | Pollster | Libvirt,| Volume of RAM |
| usage | | | ID | | Hyper-V,| used by the inst\|
| | | | | | vSphere,| ance from the |
| usage | | | ID | | vSphere,| used by the inst\|
| | | | | | | ance from the |
| | | | | | | amount of its |
| | | | | | | allocated memory |
+-----------+-------+------+----------+----------+---------+------------------+
@ -100,41 +100,34 @@ The following meters are collected for OpenStack Compute.
| | | | | | | ance on the phy\ |
| | | | | | | sical machine |
+-----------+-------+------+----------+----------+---------+------------------+
| cpu | Cumu\ | ns | instance | Pollster | Libvirt,| CPU time used |
| | lative| | ID | | Hyper-V | |
| cpu | Cumu\ | ns | instance | Pollster | Libvirt | CPU time used |
| | lative| | ID | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| vcpus | Gauge | vcpu | instance | Notific\ | Libvirt,| Number of virtual|
| | | | ID | ation | Hyper-V | CPUs allocated to|
| vcpus | Gauge | vcpu | instance | Notific\ | Libvirt | Number of virtual|
| | | | ID | ation | | CPUs allocated to|
| | | | | | | the instance |
+-----------+-------+------+----------+----------+---------+------------------+
| disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt,| Number of read |
| ice.read\ | lative| uest | | | Hyper-V | requests |
| disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt | Number of read |
| ice.read\ | lative| uest | | | | requests |
| .requests | | | | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt,| Number of write |
| ice.write\| lative| uest | | | Hyper-V | requests |
| disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt | Number of write |
| ice.write\| lative| uest | | | | requests |
| .requests | | | | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt,| Volume of reads |
| ice.read\ | lative| | | | Hyper-V | |
| disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt | Volume of reads |
| ice.read\ | lative| | | | | |
| .bytes | | | | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt,| Volume of writes |
| ice.write\| lative| | | | Hyper-V | |
| disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt | Volume of writes |
| ice.write\| lative| | | | | |
| .bytes | | | | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| disk.root\| Gauge | GB | instance | Notific\ | Libvirt,| Size of root disk|
| .size | | | ID | ation | Hyper-V | |
| disk.root\| Gauge | GB | instance | Notific\ | Libvirt | Size of root disk|
| .size | | | ID | ation | | |
+-----------+-------+------+----------+----------+---------+------------------+
| disk.ephe\| Gauge | GB | instance | Notific\ | Libvirt,| Size of ephemeral|
| meral.size| | | ID | ation | Hyper-V | disk |
+-----------+-------+------+----------+----------+---------+------------------+
| disk.dev\ | Gauge | ms | disk ID | Pollster | Hyper-V | Average disk la\ |
| ice.late\ | | | | | | tency per device |
| ncy | | | | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| disk.dev\ | Gauge | coun\| disk ID | Pollster | Hyper-V | Average disk io\ |
| ice.iops | | t/s | | | | ps per device |
| disk.ephe\| Gauge | GB | instance | Notific\ | Libvirt | Size of ephemeral|
| meral.size| | | ID | ation | | disk |
+-----------+-------+------+----------+----------+---------+------------------+
| disk.dev\ | Gauge | B | disk ID | Pollster | Libvirt | The amount of d\ |
| ice.capa\ | | | | | | isk per device |
@ -153,20 +146,20 @@ The following meters are collected for OpenStack Compute.
| | | | | | | iner on the hos\ |
| | | | | | | t per device |
+-----------+-------+------+----------+----------+---------+------------------+
| network.\ | Cumu\ | B | interface| Pollster | Libvirt,| Number of |
| incoming.\| lative| | ID | | Hyper-V | incoming bytes |
| network.\ | Cumu\ | B | interface| Pollster | Libvirt | Number of |
| incoming.\| lative| | ID | | | incoming bytes |
| bytes | | | | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| network.\ | Cumu\ | B | interface| Pollster | Libvirt,| Number of |
| outgoing\ | lative| | ID | | Hyper-V | outgoing bytes |
| network.\ | Cumu\ | B | interface| Pollster | Libvirt | Number of |
| outgoing\ | lative| | ID | | | outgoing bytes |
| .bytes | | | | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt,| Number of |
| incoming\ | lative| ket | ID | | Hyper-V | incoming packets |
| network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt | Number of |
| incoming\ | lative| ket | ID | | | incoming packets |
| .packets | | | | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt,| Number of |
| outgoing\ | lative| ket | ID | | Hyper-V | outgoing packets |
| network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt | Number of |
| outgoing\ | lative| ket | ID | | | outgoing packets |
| .packets | | | | | | |
+-----------+-------+------+----------+----------+---------+------------------+
| **Meters added in the Newton release** |

View File

@ -78,8 +78,6 @@ compute hosts.
The following is a list of supported hypervisors.
- `Libvirt supported hypervisors <http://libvirt.org/>`__ such as KVM and QEMU
- `Hyper-V <https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-on-windows-server>`__
- `XEN <http://www.xenproject.org/help/documentation.html>`__
- `VMware vSphere <https://www.vmware.com/support/vsphere-hypervisor.html>`__
.. note::

View File

@ -0,0 +1,5 @@
---
upgrade:
- |
Support for running Ceilometer on Windows operating systems has been
removed. As part of this removal, the Hyper-V inspector has also been
removed.

View File

@ -73,8 +73,6 @@ ceilometer.poll.compute =
disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster
disk.device.read.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskReadLatencyPollster
disk.device.write.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskWriteLatencyPollster
disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster
disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster
cpu = ceilometer.compute.pollsters.instance_stats:CPUPollster
cpu_util = ceilometer.compute.pollsters.instance_stats:CPUUtilPollster
cpu_l3_cache = ceilometer.compute.pollsters.instance_stats:CPUL3CachePollster
@ -176,7 +174,6 @@ ceilometer.poll.central =
ceilometer.compute.virt =
libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector
vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector
ceilometer.sample.publisher =

View File

@ -5,7 +5,6 @@
coverage!=4.4,>=4.0 # Apache-2.0
eventlet>=0.30.1 # MIT
fixtures>=3.0.0 # Apache-2.0/BSD
os-win>=3.0.0 # Apache-2.0
oslo.messaging[kafka]>=8.0.0 # Apache-2.0
oslotest>=3.8.0 # Apache-2.0
oslo.vmware>=2.17.0 # Apache-2.0