diff --git a/ceilometer/compute/discovery.py b/ceilometer/compute/discovery.py index d454b21191..af09e3703d 100644 --- a/ceilometer/compute/discovery.py +++ b/ceilometer/compute/discovery.py @@ -107,7 +107,6 @@ class InstanceDiscovery(plugin_base.DiscoveryBase): self.expiration_time = conf.compute.resource_update_interval self.cache_expiry = conf.compute.resource_cache_expiry if self.method == "libvirt_metadata": - self._connection = None # 4096 instances on a compute should be enough :) self._flavor_cache = cachetools.LRUCache(4096) else: @@ -117,9 +116,7 @@ class InstanceDiscovery(plugin_base.DiscoveryBase): @property def connection(self): - if not self._connection: - self._connection = libvirt_utils.get_libvirt_connection(self.conf) - return self._connection + return libvirt_utils.refresh_libvirt_connection(self.conf, self) def discover(self, manager, param=None): """Discover resources to monitor.""" diff --git a/ceilometer/compute/pollsters/__init__.py b/ceilometer/compute/pollsters/__init__.py index f33602708a..97710b9d82 100644 --- a/ceilometer/compute/pollsters/__init__.py +++ b/ceilometer/compute/pollsters/__init__.py @@ -25,13 +25,18 @@ from ceilometer.compute.virt import inspector as virt_inspector @six.add_metaclass(abc.ABCMeta) class BaseComputePollster(plugin_base.PollsterBase): - @property - def inspector(self): + def setup_environment(self): + super(BaseComputePollster, self).setup_environment() + self.inspector = self._get_inspector(self.conf) + + @classmethod + def _get_inspector(cls, conf): + # FIXME(sileht): This doesn't look threadsafe... 
try: - inspector = self._inspector + inspector = cls._inspector except AttributeError: - inspector = virt_inspector.get_hypervisor_inspector(self.conf) - BaseComputePollster._inspector = inspector + inspector = virt_inspector.get_hypervisor_inspector(conf) + cls._inspector = inspector return inspector @property diff --git a/ceilometer/compute/virt/libvirt/inspector.py b/ceilometer/compute/virt/libvirt/inspector.py index 595749ad7b..7a6e2fed3d 100644 --- a/ceilometer/compute/virt/libvirt/inspector.py +++ b/ceilometer/compute/virt/libvirt/inspector.py @@ -27,7 +27,7 @@ except ImportError: from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.libvirt import utils as libvirt_utils -from ceilometer.i18n import _LW, _LE, _ +from ceilometer.i18n import _LW, _ LOG = logging.getLogger(__name__) @@ -36,37 +36,32 @@ class LibvirtInspector(virt_inspector.Inspector): def __init__(self, conf): super(LibvirtInspector, self).__init__(conf) - self._connection = None + # NOTE(sileht): create a connection on startup + self.connection @property def connection(self): - if not self._connection: - self._connection = libvirt_utils.get_libvirt_connection(self.conf) - return self._connection + return libvirt_utils.refresh_libvirt_connection(self.conf, self) - @libvirt_utils.retry_on_disconnect def _lookup_by_uuid(self, instance): instance_name = util.instance_name(instance) try: return self.connection.lookupByUUIDString(instance.id) - except Exception as ex: - if not libvirt or not isinstance(ex, libvirt.libvirtError): - raise virt_inspector.InspectorException(six.text_type(ex)) - error_code = ex.get_error_code() - if (error_code in (libvirt.VIR_ERR_SYSTEM_ERROR, - libvirt.VIR_ERR_INTERNAL_ERROR) and - ex.get_error_domain() in (libvirt.VIR_FROM_REMOTE, - libvirt.VIR_FROM_RPC)): + except libvirt.libvirtError as ex: + if libvirt_utils.is_disconnection_exception(ex): raise msg = _("Error from libvirt while 
looking up instance " ": " "[Error Code %(error_code)s] " "%(ex)s") % {'name': instance_name, 'id': instance.id, - 'error_code': error_code, + 'error_code': ex.get_error_code(), 'ex': ex} raise virt_inspector.InstanceNotFoundException(msg) + except Exception as ex: + raise virt_inspector.InspectorException(six.text_type(ex)) + @libvirt_utils.retry_on_disconnect def inspect_cpus(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) # TODO(gordc): this can probably be cached since it can be used to get @@ -76,31 +71,15 @@ class LibvirtInspector(virt_inspector.Inspector): return virt_inspector.CPUStats(number=dom_stat['vcpu.current'], time=dom_stat['cpu.time']) + @libvirt_utils.raise_nodata_if_unsupported("l3 cache usage") + @libvirt_utils.retry_on_disconnect def inspect_cpu_l3_cache(self, instance): domain = self._lookup_by_uuid(instance) - try: - stats = self.connection.domainListGetStats( - [domain], libvirt.VIR_DOMAIN_STATS_PERF) - perf = stats[0][1] - usage = perf["perf.cmt"] - return virt_inspector.CPUL3CacheUsageStats(l3_cache_usage=usage) - except (KeyError, AttributeError) as e: - # NOTE(sileht): KeyError if for libvirt >=2.0.0,<2.3.0, the perf - # subsystem ws existing but not these attributes - # https://github.com/libvirt/libvirt/commit/bae660869de0612bee2a740083fb494c27e3f80c - msg = _('Perf is not supported by current version of libvirt, and ' - 'failed to inspect l3 cache usage of %(instance_uuid)s, ' - 'can not get info from libvirt: %(error)s') % { - 'instance_uuid': instance.id, 'error': e} - raise virt_inspector.NoDataException(msg) - # domainListGetStats might launch an exception if the method or - # cmt perf event is not supported by the underlying hypervisor - # being used by libvirt. 
- except libvirt.libvirtError as e: - msg = _('Failed to inspect l3 cache usage of %(instance_uuid)s, ' - 'can not get info from libvirt: %(error)s') % { - 'instance_uuid': instance.id, 'error': e} - raise virt_inspector.NoDataException(msg) + stats = self.connection.domainListGetStats( + [domain], libvirt.VIR_DOMAIN_STATS_PERF) + perf = stats[0][1] + usage = perf["perf.cmt"] + return virt_inspector.CPUL3CacheUsageStats(l3_cache_usage=usage) def _get_domain_not_shut_off_or_raise(self, instance): instance_name = util.instance_name(instance) @@ -116,6 +95,7 @@ class LibvirtInspector(virt_inspector.Inspector): return domain + @libvirt_utils.retry_on_disconnect def inspect_vnics(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) @@ -150,6 +130,7 @@ class LibvirtInspector(virt_inspector.Inspector): tx_errors=dom_stats[7]) yield (interface, stats) + @libvirt_utils.retry_on_disconnect def inspect_disks(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) @@ -167,34 +148,19 @@ class LibvirtInspector(virt_inspector.Inspector): errors=block_stats[4]) yield (disk, stats) + @libvirt_utils.raise_nodata_if_unsupported("memory usage", False) + @libvirt_utils.retry_on_disconnect def inspect_memory_usage(self, instance, duration=None): - instance_name = util.instance_name(instance) domain = self._get_domain_not_shut_off_or_raise(instance) - try: - memory_stats = domain.memoryStats() - if (memory_stats and - memory_stats.get('available') and - memory_stats.get('unused')): - memory_used = (memory_stats.get('available') - - memory_stats.get('unused')) - # Stat provided from libvirt is in KB, converting it to MB. 
- memory_used = memory_used / units.Ki - return virt_inspector.MemoryUsageStats(usage=memory_used) - else: - msg = _('Failed to inspect memory usage of instance ' - ', ' - 'can not get info from libvirt.') % { - 'name': instance_name, 'id': instance.id} - raise virt_inspector.InstanceNoDataException(msg) - # memoryStats might launch an exception if the method is not supported - # by the underlying hypervisor being used by libvirt. - except libvirt.libvirtError as e: - msg = _('Failed to inspect memory usage of %(instance_uuid)s, ' - 'can not get info from libvirt: %(error)s') % { - 'instance_uuid': instance.id, 'error': e} - raise virt_inspector.NoDataException(msg) + memory_stats = domain.memoryStats() + memory_used = (memory_stats['available'] - + memory_stats['unused']) + # Stat provided from libvirt is in KB, converting it to MB. + memory_used = memory_used / units.Ki + return virt_inspector.MemoryUsageStats(usage=memory_used) + @libvirt_utils.retry_on_disconnect def inspect_disk_info(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) tree = etree.fromstring(domain.XMLDesc(0)) @@ -222,65 +188,32 @@ class LibvirtInspector(virt_inspector.Inspector): physical=block_info[2]) yield (dsk, info) + @libvirt_utils.retry_on_disconnect def inspect_memory_resident(self, instance, duration=None): domain = self._get_domain_not_shut_off_or_raise(instance) memory = domain.memoryStats()['rss'] / units.Ki return virt_inspector.MemoryResidentStats(resident=memory) + @libvirt_utils.raise_nodata_if_unsupported("memory bandwidth") + @libvirt_utils.retry_on_disconnect def inspect_memory_bandwidth(self, instance, duration=None): domain = self._get_domain_not_shut_off_or_raise(instance) + stats = self.connection.domainListGetStats( + [domain], libvirt.VIR_DOMAIN_STATS_PERF) + perf = stats[0][1] + return virt_inspector.MemoryBandwidthStats(total=perf["perf.mbmt"], + local=perf["perf.mbml"]) - try: - stats = self.connection.domainListGetStats( - [domain], 
libvirt.VIR_DOMAIN_STATS_PERF) - perf = stats[0][1] - return virt_inspector.MemoryBandwidthStats(total=perf["perf.mbmt"], - local=perf["perf.mbml"]) - except (KeyError, AttributeError) as e: - # NOTE(sileht): KeyError if for libvirt >=2.0.0,<2.3.0, the perf - # subsystem ws existing but not these attributes - # https://github.com/libvirt/libvirt/commit/bae660869de0612bee2a740083fb494c27e3f80c - msg = _('Perf is not supported by current version of libvirt, and ' - 'failed to inspect memory bandwidth of %(instance_uuid)s, ' - 'can not get info from libvirt: %(error)s') % { - 'instance_uuid': instance.id, 'error': e} - raise virt_inspector.NoDataException(msg) - # domainListGetStats might launch an exception if the method or - # mbmt/mbml perf event is not supported by the underlying hypervisor - # being used by libvirt. - except libvirt.libvirtError as e: - msg = _('Failed to inspect memory bandwidth of %(instance_uuid)s, ' - 'can not get info from libvirt: %(error)s') % { - 'instance_uuid': instance.id, 'error': e} - raise virt_inspector.NoDataException(msg) - + @libvirt_utils.raise_nodata_if_unsupported("perf events") + @libvirt_utils.retry_on_disconnect def inspect_perf_events(self, instance, duration=None): domain = self._get_domain_not_shut_off_or_raise(instance) - try: - stats = self.connection.domainListGetStats( - [domain], libvirt.VIR_DOMAIN_STATS_PERF) - perf = stats[0][1] - return virt_inspector.PerfEventsStats( - cpu_cycles=perf["perf.cpu_cycles"], - instructions=perf["perf.instructions"], - cache_references=perf["perf.cache_references"], - cache_misses=perf["perf.cache_misses"]) - # NOTE(sileht): KeyError if for libvirt >=2.0.0,<2.3.0, the perf - # subsystem ws existing but not these attributes - # https://github.com/libvirt/libvirt/commit/bae660869de0612bee2a740083fb494c27e3f80c - except (AttributeError, KeyError) as e: - msg = _LE('Perf is not supported by current version of libvirt, ' - 'and failed to inspect perf events of ' - '%(instance_uuid)s, can 
not get info from libvirt: ' - '%(error)s') % { - 'instance_uuid': instance.id, 'error': e} - raise virt_inspector.NoDataException(msg) - # domainListGetStats might launch an exception if the method or - # mbmt/mbml perf event is not supported by the underlying hypervisor - # being used by libvirt. - except libvirt.libvirtError as e: - msg = _LE('Failed to inspect perf events of %(instance_uuid)s, ' - 'can not get info from libvirt: %(error)s') % { - 'instance_uuid': instance.id, 'error': e} - raise virt_inspector.NoDataException(msg) + stats = self.connection.domainListGetStats( + [domain], libvirt.VIR_DOMAIN_STATS_PERF) + perf = stats[0][1] + return virt_inspector.PerfEventsStats( + cpu_cycles=perf["perf.cpu_cycles"], + instructions=perf["perf.instructions"], + cache_references=perf["perf.cache_references"], + cache_misses=perf["perf.cache_misses"]) diff --git a/ceilometer/compute/virt/libvirt/utils.py b/ceilometer/compute/virt/libvirt/utils.py index 914a6de0ba..e640fbca0c 100644 --- a/ceilometer/compute/virt/libvirt/utils.py +++ b/ceilometer/compute/virt/libvirt/utils.py @@ -15,12 +15,16 @@ from oslo_config import cfg from oslo_log import log as logging +import tenacity try: import libvirt except ImportError: libvirt = None +from ceilometer.compute.virt import inspector as virt_inspector +from ceilometer.i18n import _LE + LOG = logging.getLogger(__name__) OPTS = [ @@ -75,7 +79,7 @@ LIBVIRT_STATUS = { } -def get_libvirt_connection(conf): +def new_libvirt_connection(conf): if not libvirt: raise ImportError("python-libvirt module is missing") uri = (conf.libvirt_uri or LIBVIRT_PER_TYPE_URIS.get(conf.libvirt_type, @@ -84,21 +88,46 @@ def get_libvirt_connection(conf): return libvirt.openReadOnly(uri) -def retry_on_disconnect(function): - def decorator(self, *args, **kwargs): - try: - return function(self, *args, **kwargs) - except ImportError: - # NOTE(sileht): in case of libvirt failed to be imported - raise - except libvirt.libvirtError as e: - if 
(e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, - libvirt.VIR_ERR_INTERNAL_ERROR) and - e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, - libvirt.VIR_FROM_RPC)): - LOG.debug('Connection to libvirt broken') - self.connection = None - return function(self, *args, **kwargs) - else: - raise - return decorator +def refresh_libvirt_connection(conf, klass): + connection = getattr(klass, '_libvirt_connection', None) + if not connection or not connection.isAlive(): + connection = new_libvirt_connection(conf) + setattr(klass, '_libvirt_connection', connection) + return connection + + +def is_disconnection_exception(e): + return (isinstance(e, libvirt.libvirtError) + and e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, + libvirt.VIR_ERR_INTERNAL_ERROR) + and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, + libvirt.VIR_FROM_RPC)) + + +retry_on_disconnect = tenacity.retry( + retry=tenacity.retry_if_exception(is_disconnection_exception), + stop=tenacity.stop_after_attempt(2)) + + +class raise_nodata_if_unsupported(object): + def __init__(self, meter, permanent=True): + self.meter = meter + self.permanent = permanent + + def __call__(self, method): + def inner(in_self, instance, *args, **kwargs): + try: + return method(in_self, instance, *args, **kwargs) + except (libvirt.libvirtError, KeyError, AttributeError) as e: + # NOTE(sileht): At this point libvirt connection error + # have been reraised as tenacity.RetryError() + msg = _LE('Failed to inspect %(meter)s of %(instance_uuid)s, ' + 'can not get info from libvirt: %(error)s') % { + "meter": self.meter, + "instance_uuid": instance.id, + "error": e} + if self.permanent: + raise virt_inspector.NoDataException(msg) + else: + raise virt_inspector.InstanceNoDataException(msg) + return inner diff --git a/ceilometer/tests/unit/compute/pollsters/base.py b/ceilometer/tests/unit/compute/pollsters/base.py index 56b1a6069e..7a718d4f86 100644 --- a/ceilometer/tests/unit/compute/pollsters/base.py +++ 
b/ceilometer/tests/unit/compute/pollsters/base.py @@ -46,16 +46,14 @@ class TestPollsterBase(base.BaseTestCase): 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', 'project_cos': 'dev'} - patch_virt = mockpatch.Patch( + self.useFixture(mockpatch.Patch( 'ceilometer.compute.virt.inspector.get_hypervisor_inspector', - new=mock.Mock(return_value=self.inspector)) - self.useFixture(patch_virt) + new=mock.Mock(return_value=self.inspector))) # as we're having lazy hypervisor inspector singleton object in the # base compute pollster class, that leads to the fact that we # need to mock all this class property to avoid context sharing between # the tests - patch_inspector = mockpatch.Patch( - 'ceilometer.compute.pollsters.BaseComputePollster.inspector', - self.inspector) - self.useFixture(patch_inspector) + self.useFixture(mockpatch.Patch( + 'ceilometer.compute.pollsters.BaseComputePollster._get_inspector', + return_value=self.inspector)) diff --git a/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py b/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py index ce6118f19f..7c9f7155be 100644 --- a/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py +++ b/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py @@ -14,11 +14,6 @@ """Tests for libvirt inspector. 
""" -try: - import contextlib2 as contextlib # for Python < 3.3 -except ImportError: - import contextlib - import fixtures import mock from oslo_config import fixture as fixture_config @@ -30,64 +25,66 @@ from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector from ceilometer.compute.virt.libvirt import utils -class TestLibvirtInspection(base.BaseTestCase): +class FakeLibvirtError(Exception): + pass - class fakeLibvirtError(Exception): - pass + +class VMInstance(object): + id = 'ff58e738-12f4-4c58-acde-77617b68da56' + name = 'instance-00000001' + + +class TestLibvirtInspection(base.BaseTestCase): def setUp(self): super(TestLibvirtInspection, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf - class VMInstance(object): - id = 'ff58e738-12f4-4c58-acde-77617b68da56' - name = 'instance-00000001' - self.instance = VMInstance - self.inspector = libvirt_inspector.LibvirtInspector(self.CONF) + self.instance = VMInstance() libvirt_inspector.libvirt = mock.Mock() libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5 - libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError + libvirt_inspector.libvirt.libvirtError = FakeLibvirtError utils.libvirt = libvirt_inspector.libvirt + with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', return_value=None): + self.inspector = libvirt_inspector.LibvirtInspector(self.CONF) self.domain = mock.Mock() - self.addCleanup(mock.patch.stopall) def test_inspect_cpus(self): - fake_stats = [({}, {'cpu.time': 999999, 'vcpu.current': 2})] - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - None, None))) - stack.enter_context(mock.patch.object(self.inspector.connection, - 'domainListGetStats', - return_value=fake_stats)) + domain = mock.Mock() + domain.info.return_value = (0, 0, 0, None, None) + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain + conn.domainListGetStats.return_value = [({}, {'cpu.time': 999999, + 'vcpu.current': 2})] + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' + 'refresh_libvirt_connection', return_value=conn): cpu_info = self.inspector.inspect_cpus(self.instance) self.assertEqual(2, cpu_info.number) self.assertEqual(999999, cpu_info.time) def test_inspect_cpus_with_domain_shutoff(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999)): - self.assertRaises(virt_inspector.InstanceShutOffException, - self.inspector.inspect_cpus, - self.instance) + domain = mock.Mock() + domain.info.return_value = (5, 0, 0, 2, 999999) + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', return_value=conn): + self.assertRaises(virt_inspector.InstanceShutOffException, + self.inspector.inspect_cpus, + self.instance) def test_inspect_cpu_l3_cache(self): - fake_stats = [({}, {'perf.cmt': 90112})] - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(connection, - 'domainListGetStats', - return_value=fake_stats)) + domain = mock.Mock() + domain.info.return_value = (0, 0, 0, 2, 999999) + conn = mock.Mock() + conn.domainListGetStats.return_value = [({}, {'perf.cmt': 90112})] + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' + 'refresh_libvirt_connection', return_value=conn): cpu_info = self.inspector.inspect_cpu_l3_cache(self.instance) self.assertEqual(90112, cpu_info.l3_cache_usage) @@ -156,19 +153,15 @@ class TestLibvirtInspection(base.BaseTestCase): } interfaceStats = interface_stats.__getitem__ - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, - 'interfaceStats', - side_effect=interfaceStats)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) + domain = mock.Mock() + domain.XMLDesc.return_value = dom_xml + domain.info.return_value = (0, 0, 0, 2, 999999) + domain.interfaceStats.side_effect = interfaceStats + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', return_value=conn): interfaces = list(self.inspector.inspect_vnics(self.instance)) self.assertEqual(3, len(interfaces)) @@ -209,14 +202,13 @@ class TestLibvirtInspection(base.BaseTestCase): self.assertEqual(12, info2.tx_packets) def test_inspect_vnics_with_domain_shutoff(self): - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999))) + domain = mock.Mock() + domain.info.return_value = (5, 0, 0, 2, 999999) + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' + 'refresh_libvirt_connection', return_value=conn): inspect = self.inspector.inspect_vnics self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance)) @@ -236,19 +228,15 @@ class TestLibvirtInspection(base.BaseTestCase): """ + domain = mock.Mock() + domain.XMLDesc.return_value = dom_xml + domain.info.return_value = (0, 0, 0, 2, 999999) + domain.blockStats.return_value = (1, 2, 3, 4, -1) + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockStats', - return_value=(1, 2, 3, - 4, -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) + with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disks(self.instance)) self.assertEqual(1, len(disks)) @@ -260,31 +248,28 @@ class TestLibvirtInspection(base.BaseTestCase): self.assertEqual(4, info0.write_bytes) def test_inspect_disks_with_domain_shutoff(self): - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999))) + domain = mock.Mock() + domain.info.return_value = (5, 0, 0, 2, 999999) + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' + 'refresh_libvirt_connection', return_value=conn): inspect = self.inspector.inspect_disks self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance)) def test_inspect_memory_usage(self): - fake_memory_stats = {'available': 51200, 'unused': 25600} - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(self.domain, 'memoryStats', - return_value=fake_memory_stats): - memory = self.inspector.inspect_memory_usage( - self.instance) - self.assertEqual(25600 / units.Ki, memory.usage) + domain = mock.Mock() + domain.info.return_value = (0, 0, 0, 2, 999999) + domain.memoryStats.return_value = {'available': 51200, 'unused': 25600} + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', return_value=conn): + memory = self.inspector.inspect_memory_usage(self.instance) + self.assertEqual(25600 / units.Ki, memory.usage) def test_inspect_disk_info(self): dom_xml = """ @@ -301,19 +286,15 @@ class TestLibvirtInspection(base.BaseTestCase): """ + domain = mock.Mock() + domain.XMLDesc.return_value = dom_xml + domain.blockInfo.return_value = (1, 2, 3, -1) + domain.info.return_value = (0, 0, 0, 2, 999999) + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockInfo', - return_value=(1, 2, 3, - -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) + with mock.patch('ceilometer.compute.virt.libvirt.utils.' + 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disk_info(self.instance)) self.assertEqual(1, len(disks)) @@ -338,21 +319,16 @@ class TestLibvirtInspection(base.BaseTestCase): """ + domain = mock.Mock() + domain.XMLDesc.return_value = dom_xml + domain.blockInfo.return_value = (1, 2, 3, -1) + domain.info.return_value = (0, 0, 0, 2, 999999) + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockInfo', - return_value=(1, 2, 3, - -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) + with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disk_info(self.instance)) - self.assertEqual(0, len(disks)) def test_inspect_disk_info_without_source_element(self): @@ -371,111 +347,103 @@ class TestLibvirtInspection(base.BaseTestCase): """ + domain = mock.Mock() + domain.XMLDesc.return_value = dom_xml + domain.blockInfo.return_value = (1, 2, 3, -1) + domain.info.return_value = (0, 0, 0, 2, 999999) + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockInfo', - return_value=(1, 2, 3, - -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) + with mock.patch('ceilometer.compute.virt.libvirt.utils.' + 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disk_info(self.instance)) - self.assertEqual(0, len(disks)) def test_inspect_memory_usage_with_domain_shutoff(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999)): - self.assertRaises(virt_inspector.InstanceShutOffException, - self.inspector.inspect_memory_usage, - self.instance) + domain = mock.Mock() + domain.info.return_value = (5, 0, 51200, 2, 999999) + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', return_value=conn): + self.assertRaises(virt_inspector.InstanceShutOffException, + self.inspector.inspect_memory_usage, + self.instance) def test_inspect_memory_usage_with_empty_stats(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(self.domain, 'memoryStats', - return_value={}): - self.assertRaises(virt_inspector.InstanceNoDataException, - self.inspector.inspect_memory_usage, - self.instance) + domain = mock.Mock() + domain.info.return_value = (0, 0, 51200, 2, 999999) + domain.memoryStats.return_value = {} + conn = mock.Mock() + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' + 'refresh_libvirt_connection', return_value=conn): + self.assertRaises(virt_inspector.InstanceNoDataException, + self.inspector.inspect_memory_usage, + self.instance) def test_inspect_memory_bandwidth(self): - fake_stats = [({}, {'perf.mbmt': 1892352, 'perf.mbml': 1802240})] - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(connection, 'domainListGetStats', - return_value=fake_stats): - mb = self.inspector.inspect_memory_bandwidth(self.instance) - self.assertEqual(1892352, mb.total) - self.assertEqual(1802240, mb.local) + domain = mock.Mock() + domain.info.return_value = (0, 0, 51200, 2, 999999) + conn = mock.Mock() + conn.domainListGetStats.return_value = [ + ({}, {'perf.mbmt': 1892352, 'perf.mbml': 1802240})] + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', return_value=conn): + mb = self.inspector.inspect_memory_bandwidth(self.instance) + self.assertEqual(1892352, mb.total) + self.assertEqual(1802240, mb.local) def test_inspect_perf_events(self): - fake_stats = [({}, {'perf.cpu_cycles': 7259361, - 'perf.instructions': 8815623, - 'perf.cache_references': 74184, - 'perf.cache_misses': 16737})] - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(connection, 'domainListGetStats', - return_value=fake_stats): - pe = self.inspector.inspect_perf_events(self.instance) - self.assertEqual(7259361, pe.cpu_cycles) - self.assertEqual(8815623, pe.instructions) - self.assertEqual(74184, pe.cache_references) - self.assertEqual(16737, pe.cache_misses) + domain = mock.Mock() + domain.info.return_value = (0, 0, 51200, 2, 999999) + conn = mock.Mock() + conn.domainListGetStats.return_value = [ + ({}, {'perf.cpu_cycles': 7259361, + 'perf.instructions': 8815623, + 'perf.cache_references': 74184, + 'perf.cache_misses': 16737})] + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', return_value=conn): + pe = self.inspector.inspect_perf_events(self.instance) + self.assertEqual(7259361, pe.cpu_cycles) + self.assertEqual(8815623, pe.instructions) + self.assertEqual(74184, pe.cache_references) + self.assertEqual(16737, pe.cache_misses) def test_inspect_perf_events_libvirt_less_than_2_3_0(self): - fake_stats = [({}, {})] - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(connection, 'domainListGetStats', - return_value=fake_stats): - self.assertRaises(virt_inspector.NoDataException, - self.inspector.inspect_perf_events, - self.instance) + domain = mock.Mock() + domain.info.return_value = (0, 0, 51200, 2, 999999) + conn = mock.Mock() + conn.domainListGetStats.return_value = [({}, {})] + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' + 'refresh_libvirt_connection', return_value=conn): + self.assertRaises(virt_inspector.NoDataException, + self.inspector.inspect_perf_events, + self.instance) class TestLibvirtInspectionWithError(base.BaseTestCase): - class fakeLibvirtError(Exception): - pass - def setUp(self): super(TestLibvirtInspectionWithError, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf - self.inspector = libvirt_inspector.LibvirtInspector(self.CONF) self.useFixture(fixtures.MonkeyPatch( - 'ceilometer.compute.virt.libvirt.inspector.' - 'LibvirtInspector.connection', - mock.MagicMock(side_effect=Exception('dummy')))) + 'ceilometer.compute.virt.libvirt.utils.' 
+ 'refresh_libvirt_connection', + mock.MagicMock(side_effect=[None, Exception('dummy')]))) libvirt_inspector.libvirt = mock.Mock() - libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError + libvirt_inspector.libvirt.libvirtError = FakeLibvirtError utils.libvirt = libvirt_inspector.libvirt + self.inspector = libvirt_inspector.LibvirtInspector(self.CONF) def test_inspect_unknown_error(self): self.assertRaises(virt_inspector.InspectorException, diff --git a/requirements.txt b/requirements.txt index e8bf9888fa..0f97d02227 100644 --- a/requirements.txt +++ b/requirements.txt @@ -43,6 +43,7 @@ six>=1.9.0 # MIT SQLAlchemy<1.1.0,>=1.0.10 # MIT sqlalchemy-migrate>=0.9.6 # Apache-2.0 stevedore>=1.9.0 # Apache-2.0 +tenacity>=3.2.1 # Apache-2.0 tooz>=1.47.0 # Apache-2.0 WebOb>=1.5.0 # MIT WSME>=0.8 # MIT diff --git a/test-requirements.txt b/test-requirements.txt index d426c18664..11c46e4394 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,6 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -contextlib2>=0.4.0 # PSF License coverage>=3.6 # Apache-2.0 fixtures<2.0,>=1.3.1 # Apache-2.0/BSD happybase!=0.7,>=0.5,<1.0.0;python_version=='2.7' # MIT