diff --git a/.gitignore b/.gitignore index 83cf18b2..32e8f748 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,3 @@ doc/build doc/source/api/ etc/ceilometer/ceilometer.conf subunit.log - -# Files created by releasenotes build -releasenotes/build diff --git a/README.rst b/README.rst index 8cd4f6b2..b7314705 100644 --- a/README.rst +++ b/README.rst @@ -1,9 +1,6 @@ ceilometer ========== -Release notes can be read online at: - http://docs.openstack.org/developer/ceilometer/releasenotes/index.html - Documentation for the project can be found at: http://docs.openstack.org/developer/ceilometer/ diff --git a/ceilometer/agent/__init__.py b/ceilometer/agent/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/agent/discovery/__init__.py b/ceilometer/agent/discovery/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/agent/discovery/endpoint.py b/ceilometer/agent/discovery/endpoint.py deleted file mode 100644 index bb177659..00000000 --- a/ceilometer/agent/discovery/endpoint.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2014-2015 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log - -from ceilometer.agent import plugin_base as plugin -from ceilometer.i18n import _LW -from ceilometer import keystone_client - -LOG = log.getLogger(__name__) - -cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') - - -class EndpointDiscovery(plugin.DiscoveryBase): - """Discovery that supplies service endpoints. - - This discovery should be used when the relevant APIs are not well suited - to dividing the pollster's work into smaller pieces than a whole service - at once. - """ - - @staticmethod - def discover(manager, param=None): - endpoints = keystone_client.get_service_catalog( - manager.keystone).get_urls( - service_type=param, - interface=cfg.CONF.service_credentials.interface, - region_name=cfg.CONF.service_credentials.region_name) - if not endpoints: - LOG.warning(_LW('No endpoints found for service %s'), - "" if param is None else param) - return [] - return endpoints diff --git a/ceilometer/agent/discovery/localnode.py b/ceilometer/agent/discovery/localnode.py deleted file mode 100644 index 1de479f3..00000000 --- a/ceilometer/agent/discovery/localnode.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2015 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from ceilometer.agent import plugin_base - - -class LocalNodeDiscovery(plugin_base.DiscoveryBase): - def discover(self, manager, param=None): - """Return local node as resource.""" - return ['local_host'] diff --git a/ceilometer/agent/discovery/tenant.py b/ceilometer/agent/discovery/tenant.py deleted file mode 100644 index 8ae84688..00000000 --- a/ceilometer/agent/discovery/tenant.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from ceilometer.agent import plugin_base as plugin - -cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') - - -class TenantDiscovery(plugin.DiscoveryBase): - """Discovery that supplies keystone tenants. - - This discovery should be used when the pollster's work can't be divided - into smaller pieces than per-tenants. Example of this is the Swift - pollster, which polls account details and does so per-project. - """ - - def discover(self, manager, param=None): - tenants = manager.keystone.projects.list() - return tenants or [] diff --git a/ceilometer/agent/manager.py b/ceilometer/agent/manager.py deleted file mode 100644 index b0784c25..00000000 --- a/ceilometer/agent/manager.py +++ /dev/null @@ -1,509 +0,0 @@ -# -# Copyright 2013 Julien Danjou -# Copyright 2014 Red Hat, Inc -# -# Authors: Julien Danjou -# Eoghan Glynn -# Nejc Saje -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import itertools -import random - -from keystoneauth1 import exceptions as ka_exceptions -from keystoneclient import exceptions as ks_exceptions -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import fnmatch -from oslo_utils import timeutils -from six import moves -from six.moves.urllib import parse as urlparse -from stevedore import extension - -from ceilometer.agent import plugin_base -from ceilometer import coordination -from ceilometer.i18n import _, _LE, _LI, _LW -from ceilometer import keystone_client -from ceilometer import messaging -from ceilometer import pipeline -from ceilometer.publisher import utils as publisher_utils -from ceilometer import service_base -from ceilometer import utils - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.BoolOpt('batch_polled_samples', - default=True, - help='To reduce polling agent load, samples are sent to the ' - 'notification agent in a batch. 
To gain higher ' - 'throughput at the cost of load set this to False.'), - cfg.IntOpt('shuffle_time_before_polling_task', - default=0, - help='To reduce large requests at same time to Nova or other ' - 'components from different compute agents, shuffle ' - 'start time of polling task.'), -] - -POLLING_OPTS = [ - cfg.StrOpt('partitioning_group_prefix', - deprecated_group='central', - help='Work-load partitioning group prefix. Use only if you ' - 'want to run multiple polling agents with different ' - 'config files. For each sub-group of the agent ' - 'pool with the same partitioning_group_prefix a disjoint ' - 'subset of pollsters should be loaded.'), -] - -cfg.CONF.register_opts(OPTS) -cfg.CONF.register_opts(POLLING_OPTS, group='polling') -cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', - group='publisher_notifier') -cfg.CONF.import_group('service_types', 'ceilometer.energy.kwapi') -cfg.CONF.import_group('service_types', 'ceilometer.image.glance') -cfg.CONF.import_group('service_types', 'ceilometer.neutron_client') -cfg.CONF.import_group('service_types', 'ceilometer.nova_client') -cfg.CONF.import_group('service_types', 'ceilometer.objectstore.rgw') -cfg.CONF.import_group('service_types', 'ceilometer.objectstore.swift') - - -class PollsterListForbidden(Exception): - def __init__(self): - msg = ('It is forbidden to use pollster-list option of polling agent ' - 'in case of using coordination between multiple agents. Please ' - 'use either multiple agents being coordinated or polling list ' - 'option for one polling agent.') - super(PollsterListForbidden, self).__init__(msg) - - -class EmptyPollstersList(Exception): - def __init__(self): - msg = ('No valid pollsters can be loaded with the startup parameters' - ' polling-namespaces and pollster-list.') - super(EmptyPollstersList, self).__init__(msg) - - -class Resources(object): - def __init__(self, agent_manager): - self.agent_manager = agent_manager - self._resources = [] - self._discovery = [] - self.blacklist = [] - - def setup(self, source): - self._resources = source.resources - self._discovery = source.discovery - - def get(self, discovery_cache=None): - source_discovery = (self.agent_manager.discover(self._discovery, - discovery_cache) - if self._discovery else []) - static_resources = [] - if self._resources: - static_resources_group = self.agent_manager.construct_group_id( - utils.hash_of_set(self._resources)) - p_coord = self.agent_manager.partition_coordinator - static_resources = p_coord.extract_my_subset( - static_resources_group, self._resources) - return static_resources + source_discovery - - @staticmethod - def key(source_name, pollster): - return '%s-%s' % (source_name, pollster.name) - - -class PollingTask(object): - """Polling task for polling samples and notifying. - - A polling task can be invoked periodically or only once. 
- """ - - def __init__(self, agent_manager): - self.manager = agent_manager - - # elements of the Cartesian product of sources X pollsters - # with a common interval - self.pollster_matches = collections.defaultdict(set) - - # we relate the static resources and per-source discovery to - # each combination of pollster and matching source - resource_factory = lambda: Resources(agent_manager) - self.resources = collections.defaultdict(resource_factory) - - self._batch = cfg.CONF.batch_polled_samples - self._telemetry_secret = cfg.CONF.publisher.telemetry_secret - - def add(self, pollster, source): - self.pollster_matches[source.name].add(pollster) - key = Resources.key(source.name, pollster) - self.resources[key].setup(source) - - def poll_and_notify(self): - """Polling sample and notify.""" - cache = {} - discovery_cache = {} - poll_history = {} - for source_name in self.pollster_matches: - for pollster in self.pollster_matches[source_name]: - key = Resources.key(source_name, pollster) - candidate_res = list( - self.resources[key].get(discovery_cache)) - if not candidate_res and pollster.obj.default_discovery: - candidate_res = self.manager.discover( - [pollster.obj.default_discovery], discovery_cache) - - # Remove duplicated resources and black resources. Using - # set() requires well defined __hash__ for each resource. - # Since __eq__ is defined, 'not in' is safe here. - polling_resources = [] - black_res = self.resources[key].blacklist - history = poll_history.get(pollster.name, []) - for x in candidate_res: - if x not in history: - history.append(x) - if x not in black_res: - polling_resources.append(x) - poll_history[pollster.name] = history - - # If no resources, skip for this pollster - if not polling_resources: - p_context = 'new ' if history else '' - LOG.info(_LI("Skip pollster %(name)s, no %(p_context)s" - "resources found this cycle"), - {'name': pollster.name, 'p_context': p_context}) - continue - - LOG.info(_LI("Polling pollster %(poll)s in the context of " - "%(src)s"), - dict(poll=pollster.name, src=source_name)) - try: - polling_timestamp = timeutils.utcnow().isoformat() - samples = pollster.obj.get_samples( - manager=self.manager, - cache=cache, - resources=polling_resources - ) - sample_batch = [] - - for sample in samples: - # Note(yuywz): Unify the timestamp of polled samples - sample.set_timestamp(polling_timestamp) - sample_dict = ( - publisher_utils.meter_message_from_counter( - sample, self._telemetry_secret - )) - if self._batch: - sample_batch.append(sample_dict) - else: - self._send_notification([sample_dict]) - - if sample_batch: - self._send_notification(sample_batch) - - except plugin_base.PollsterPermanentError as err: - LOG.error(_( - 'Prevent pollster %(name)s for ' - 'polling source %(source)s anymore!') - % ({'name': pollster.name, 'source': source_name})) - self.resources[key].blacklist.extend(err.fail_res_list) - except Exception as err: - LOG.warning(_( - 'Continue after error from %(name)s: %(error)s') - % ({'name': pollster.name, 'error': err}), - exc_info=True) - - def _send_notification(self, samples): - self.manager.notifier.sample( - {}, - 'telemetry.polling', - {'samples': samples} - ) - - -class AgentManager(service_base.PipelineBasedService): - - def __init__(self, namespaces=None, pollster_list=None): - namespaces = namespaces or ['compute', 'central'] - pollster_list = pollster_list or [] - group_prefix = cfg.CONF.polling.partitioning_group_prefix - - # features of using coordination and pollster-list are exclusive, and - # cannot be used at 
one moment to avoid both samples duplication and - # samples being lost - if pollster_list and cfg.CONF.coordination.backend_url: - raise PollsterListForbidden() - - super(AgentManager, self).__init__() - - def _match(pollster): - """Find out if pollster name matches to one of the list.""" - return any(fnmatch.fnmatch(pollster.name, pattern) for - pattern in pollster_list) - - if type(namespaces) is not list: - namespaces = [namespaces] - - # we'll have default ['compute', 'central'] here if no namespaces will - # be passed - extensions = (self._extensions('poll', namespace).extensions - for namespace in namespaces) - # get the extensions from pollster builder - extensions_fb = (self._extensions_from_builder('poll', namespace) - for namespace in namespaces) - if pollster_list: - extensions = (moves.filter(_match, exts) - for exts in extensions) - extensions_fb = (moves.filter(_match, exts) - for exts in extensions_fb) - - self.extensions = list(itertools.chain(*list(extensions))) + list( - itertools.chain(*list(extensions_fb))) - - if self.extensions == []: - raise EmptyPollstersList() - - self.discovery_manager = self._extensions('discover') - self.partition_coordinator = coordination.PartitionCoordinator() - - # Compose coordination group prefix. - # We'll use namespaces as the basement for this partitioning. - namespace_prefix = '-'.join(sorted(namespaces)) - self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix) - if group_prefix else namespace_prefix) - - self.notifier = oslo_messaging.Notifier( - messaging.get_transport(), - driver=cfg.CONF.publisher_notifier.telemetry_driver, - publisher_id="ceilometer.polling") - - self._keystone = None - self._keystone_last_exception = None - - @staticmethod - def _get_ext_mgr(namespace): - def _catch_extension_load_error(mgr, ep, exc): - # Extension raising ExtensionLoadError can be ignored, - # and ignore anything we can't import as a safety measure. - if isinstance(exc, plugin_base.ExtensionLoadError): - LOG.exception(_("Skip loading extension for %s") % ep.name) - return - if isinstance(exc, ImportError): - LOG.error(_("Failed to import extension for %(name)s: " - "%(error)s"), - {'name': ep.name, 'error': exc}) - return - raise exc - - return extension.ExtensionManager( - namespace=namespace, - invoke_on_load=True, - on_load_failure_callback=_catch_extension_load_error, - ) - - def _extensions(self, category, agent_ns=None): - namespace = ('ceilometer.%s.%s' % (category, agent_ns) if agent_ns - else 'ceilometer.%s' % category) - return self._get_ext_mgr(namespace) - - def _extensions_from_builder(self, category, agent_ns=None): - ns = ('ceilometer.builder.%s.%s' % (category, agent_ns) if agent_ns - else 'ceilometer.builder.%s' % category) - mgr = self._get_ext_mgr(ns) - - def _build(ext): - return ext.plugin.get_pollsters_extensions() - - # NOTE: this seems a stevedore bug. if no extensions are found, - # map will raise runtimeError which is not documented. 
- if mgr.names(): - return list(itertools.chain(*mgr.map(_build))) - else: - return [] - - def join_partitioning_groups(self): - self.groups = set([self.construct_group_id(d.obj.group_id) - for d in self.discovery_manager]) - # let each set of statically-defined resources have its own group - static_resource_groups = set([ - self.construct_group_id(utils.hash_of_set(p.resources)) - for p in self.polling_manager.sources - if p.resources - ]) - self.groups.update(static_resource_groups) - for group in self.groups: - self.partition_coordinator.join_group(group) - - def create_polling_task(self): - """Create an initially empty polling task.""" - return PollingTask(self) - - def setup_polling_tasks(self): - polling_tasks = {} - for source in self.polling_manager.sources: - polling_task = None - for pollster in self.extensions: - if source.support_meter(pollster.name): - polling_task = polling_tasks.get(source.get_interval()) - if not polling_task: - polling_task = self.create_polling_task() - polling_tasks[source.get_interval()] = polling_task - polling_task.add(pollster, source) - return polling_tasks - - def construct_group_id(self, discovery_group_id): - return ('%s-%s' % (self.group_prefix, - discovery_group_id) - if discovery_group_id else None) - - def configure_polling_tasks(self): - # allow time for coordination if necessary - delay_start = self.partition_coordinator.is_active() - - # set shuffle time before polling task if necessary - delay_polling_time = random.randint( - 0, cfg.CONF.shuffle_time_before_polling_task) - - pollster_timers = [] - data = self.setup_polling_tasks() - for interval, polling_task in data.items(): - delay_time = (interval + delay_polling_time if delay_start - else delay_polling_time) - pollster_timers.append(self.tg.add_timer(interval, - self.interval_task, - initial_delay=delay_time, - task=polling_task)) - self.tg.add_timer(cfg.CONF.coordination.heartbeat, - self.partition_coordinator.heartbeat) - - return pollster_timers - - def start(self): - super(AgentManager, self).start() - self.polling_manager = pipeline.setup_polling() - - self.partition_coordinator.start() - self.join_partitioning_groups() - - self.pollster_timers = self.configure_polling_tasks() - - self.init_pipeline_refresh() - - def stop(self): - if self.started: - self.partition_coordinator.stop() - super(AgentManager, self).stop() - - def interval_task(self, task): - # NOTE(sileht): remove the previous keystone client - # and exception to get a new one in this polling cycle. 
- self._keystone = None - self._keystone_last_exception = None - - task.poll_and_notify() - - @property - def keystone(self): - # NOTE(sileht): we do lazy loading of the keystone client - # for multiple reasons: - # * don't use it if no plugin need it - # * use only one client for all plugins per polling cycle - if self._keystone is None and self._keystone_last_exception is None: - try: - self._keystone = keystone_client.get_client() - self._keystone_last_exception = None - except (ka_exceptions.ClientException, - ks_exceptions.ClientException) as e: - self._keystone = None - self._keystone_last_exception = e - if self._keystone is not None: - return self._keystone - else: - raise self._keystone_last_exception - - @staticmethod - def _parse_discoverer(url): - s = urlparse.urlparse(url) - return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None) - - def _discoverer(self, name): - for d in self.discovery_manager: - if d.name == name: - return d.obj - return None - - def discover(self, discovery=None, discovery_cache=None): - resources = [] - discovery = discovery or [] - for url in discovery: - if discovery_cache is not None and url in discovery_cache: - resources.extend(discovery_cache[url]) - continue - name, param = self._parse_discoverer(url) - discoverer = self._discoverer(name) - if discoverer: - try: - if discoverer.KEYSTONE_REQUIRED_FOR_SERVICE: - service_type = getattr( - cfg.CONF.service_types, - discoverer.KEYSTONE_REQUIRED_FOR_SERVICE) - if not keystone_client.get_service_catalog( - self.keystone).get_endpoints( - service_type=service_type): - LOG.warning(_LW( - 'Skipping %(name)s, %(service_type)s service ' - 'is not registered in keystone'), - {'name': name, 'service_type': service_type}) - continue - - discovered = discoverer.discover(self, param) - partitioned = self.partition_coordinator.extract_my_subset( - self.construct_group_id(discoverer.group_id), - discovered) - resources.extend(partitioned) - if discovery_cache is not None: - discovery_cache[url] = partitioned - except (ka_exceptions.ClientException, - ks_exceptions.ClientException) as e: - LOG.error(_LE('Skipping %(name)s, keystone issue: ' - '%(exc)s'), {'name': name, 'exc': e}) - except Exception as err: - LOG.exception(_('Unable to discover resources: %s') % err) - else: - LOG.warning(_('Unknown discovery extension: %s') % name) - return resources - - def stop_pollsters(self): - for x in self.pollster_timers: - try: - x.stop() - self.tg.timer_done(x) - except Exception: - LOG.error(_('Error stopping pollster.'), exc_info=True) - self.pollster_timers = [] - - def reload_pipeline(self): - if self.pipeline_validated: - LOG.info(_LI("Reconfiguring polling tasks.")) - - # stop existing pollsters and leave partitioning groups - self.stop_pollsters() - for group in self.groups: - self.partition_coordinator.leave_group(group) - - # re-create partitioning groups according to pipeline - # and configure polling tasks with latest pipeline conf - self.join_partitioning_groups() - self.pollster_timers = self.configure_polling_tasks() diff --git a/ceilometer/agent/plugin_base.py b/ceilometer/agent/plugin_base.py deleted file mode 100644 index e47db093..00000000 --- a/ceilometer/agent/plugin_base.py +++ /dev/null @@ -1,270 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Base class for plugins. -""" - -import abc -import collections - -from oslo_log import log -import oslo_messaging -import six -from stevedore import extension - -from ceilometer.i18n import _LE -from ceilometer import messaging - -LOG = log.getLogger(__name__) - -ExchangeTopics = collections.namedtuple('ExchangeTopics', - ['exchange', 'topics']) - - -class PluginBase(object): - """Base class for all plugins.""" - - -@six.add_metaclass(abc.ABCMeta) -class NotificationBase(PluginBase): - """Base class for plugins that support the notification API.""" - def __init__(self, manager): - super(NotificationBase, self).__init__() - # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch - # messages to an endpoint. - if self.event_types: - self.filter_rule = oslo_messaging.NotificationFilter( - event_type='|'.join(self.event_types)) - self.manager = manager - - @staticmethod - def get_notification_topics(conf): - if 'notification_topics' in conf: - return conf.notification_topics - return conf.oslo_messaging_notifications.topics - - @abc.abstractproperty - def event_types(self): - """Return a sequence of strings. - - Strings are defining the event types to be given to this plugin. - """ - - @abc.abstractmethod - def get_targets(self, conf): - """Return a sequence of oslo.messaging.Target. - - Sequence is defining the exchange and topics to be connected for this - plugin. - :param conf: Configuration. - """ - - @abc.abstractmethod - def process_notification(self, message): - """Return a sequence of Counter instances for the given message. - - :param message: Message to process. - """ - - def info(self, notifications): - """RPC endpoint for notification messages at info level - - When another service sends a notification over the message - bus, this method receives it. - - :param notifications: list of notifications - """ - self._process_notifications('info', notifications) - - def sample(self, notifications): - """RPC endpoint for notification messages at sample level - - When another service sends a notification over the message - bus at sample priority, this method receives it. - - :param notifications: list of notifications - """ - self._process_notifications('sample', notifications) - - def _process_notifications(self, priority, notifications): - for notification in notifications: - try: - notification = messaging.convert_to_old_notification_format( - priority, notification) - self.to_samples_and_publish(notification) - except Exception: - LOG.error(_LE('Fail to process notification'), exc_info=True) - - def to_samples_and_publish(self, notification): - """Return samples produced by *process_notification*. - - Samples produced for the given notification. - :param context: Execution context from the service or RPC call - :param notification: The notification to process. - """ - with self.manager.publisher() as p: - p(list(self.process_notification(notification))) - - -class NonMetricNotificationBase(object): - """Use to mark non-measurement meters - - There are a number of historical non-measurement meters that should really - be captured as events. 
This common base allows us to disable these invalid - meters. - """ - pass - - -class ExtensionLoadError(Exception): - """Error of loading pollster plugin. - - PollsterBase provides a hook, setup_environment, called in pollster loading - to setup required HW/SW dependency. Any exception from it would be - propagated as ExtensionLoadError, then skip loading this pollster. - """ - pass - - -class PollsterPermanentError(Exception): - """Permanent error when polling. - - When unrecoverable error happened in polling, pollster can raise this - exception with failed resource to prevent itself from polling any more. - Resource is one of parameter resources from get_samples that cause polling - error. - """ - - def __init__(self, resources): - self.fail_res_list = resources - - -@six.add_metaclass(abc.ABCMeta) -class PollsterBase(PluginBase): - """Base class for plugins that support the polling API.""" - - def setup_environment(self): - """Setup required environment for pollster. - - Each subclass could overwrite it for specific usage. Any exception - raised in this function would prevent pollster being loaded. - """ - pass - - def __init__(self): - super(PollsterBase, self).__init__() - try: - self.setup_environment() - except Exception as err: - raise ExtensionLoadError(err) - - @abc.abstractproperty - def default_discovery(self): - """Default discovery to use for this pollster. - - There are three ways a pollster can get a list of resources to poll, - listed here in ascending order of precedence: - 1. from the per-agent discovery, - 2. from the per-pollster discovery (defined here) - 3. from the per-pipeline configured discovery and/or per-pipeline - configured static resources. - - If a pollster should only get resources from #1 or #3, this property - should be set to None. - """ - - @abc.abstractmethod - def get_samples(self, manager, cache, resources): - """Return a sequence of Counter instances from polling the resources. - - :param manager: The service manager class invoking the plugin. - :param cache: A dictionary to allow pollsters to pass data - between themselves when recomputing it would be - expensive (e.g., asking another service for a - list of objects). - :param resources: A list of resources the pollster will get data - from. It's up to the specific pollster to decide - how to use it. It is usually supplied by a discovery, - see ``default_discovery`` for more information. - - """ - - @classmethod - def build_pollsters(cls): - """Return a list of tuple (name, pollster). - - The name is the meter name which the pollster would return, the - pollster is a pollster object instance. The pollster which implements - this method should be registered in the namespace of - ceilometer.builder.xxx instead of ceilometer.poll.xxx. - """ - return [] - - @classmethod - def get_pollsters_extensions(cls): - """Return a list of stevedore extensions. - - The returned stevedore extensions wrap the pollster object instances - returned by build_pollsters. - """ - extensions = [] - try: - for name, pollster in cls.build_pollsters(): - ext = extension.Extension(name, None, cls, pollster) - extensions.append(ext) - except Exception as err: - raise ExtensionLoadError(err) - return extensions - - -@six.add_metaclass(abc.ABCMeta) -class DiscoveryBase(object): - KEYSTONE_REQUIRED_FOR_SERVICE = None - """Service type required in keystone catalog to works""" - - @abc.abstractmethod - def discover(self, manager, param=None): - """Discover resources to monitor. 
- - The most fine-grained discovery should be preferred, so the work is - the most evenly distributed among multiple agents (if they exist). - - For example: - if the pollster can separately poll individual resources, it should - have its own discovery implementation to discover those resources. If - it can only poll per-tenant, then the `TenantDiscovery` should be - used. If even that is not possible, use `EndpointDiscovery` (see - their respective docstrings). - - :param manager: The service manager class invoking the plugin. - :param param: an optional parameter to guide the discovery - """ - - @property - def group_id(self): - """Return group id of this discovery. - - All running recoveries with the same group_id should return the same - set of resources at a given point in time. By default, a discovery is - put into a global group, meaning that all discoveries of its type - running anywhere in the cloud, return the same set of resources. - - This property can be overridden to provide correct grouping of - localized discoveries. For example, compute discovery is localized - to a host, which is reflected in its group_id. - - A None value signifies that this discovery does not want to be part - of workload partitioning at all. - """ - return 'global' diff --git a/ceilometer/api/app.py b/ceilometer/api/app.py index adde6d16..1c8181be 100644 --- a/ceilometer/api/app.py +++ b/ceilometer/api/app.py @@ -36,6 +36,7 @@ OPTS = [ ), ] + API_OPTS = [ cfg.BoolOpt('pecan_debug', default=False, @@ -45,6 +46,12 @@ API_OPTS = [ default=100, help='Default maximum number of items returned by API request.' ), + cfg.IntOpt('workers', + default=1, + min=1, + deprecated_group='DEFAULT', + deprecated_name='api_workers', + help='Number of workers for api, default value is 1.'), ] CONF.register_opts(OPTS) @@ -55,7 +62,6 @@ def setup_app(pecan_config=None): # FIXME: Replace DBHook with a hooks.TransactionHook app_hooks = [hooks.ConfigHook(), hooks.DBHook(), - hooks.NotifierHook(), hooks.TranslationHook()] pecan_config = pecan_config or { diff --git a/ceilometer/api/controllers/v2/base.py b/ceilometer/api/controllers/v2/base.py index e3e5f9db..15e30b95 100644 --- a/ceilometer/api/controllers/v2/base.py +++ b/ceilometer/api/controllers/v2/base.py @@ -108,24 +108,6 @@ class Base(wtypes.DynamicBase): getattr(self, k) != wsme.Unset) -class Link(Base): - """A link representation.""" - - href = wtypes.text - "The url of a link" - - rel = wtypes.text - "The name of a link" - - @classmethod - def sample(cls): - return cls(href=('http://localhost:8777/v2/meters/volume?' 
- 'q.field=resource_id&' - 'q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), - rel='volume' - ) - - class Query(Base): """Query filter.""" diff --git a/ceilometer/api/controllers/v2/capabilities.py b/ceilometer/api/controllers/v2/capabilities.py index 2a8c70d4..474d82e5 100644 --- a/ceilometer/api/controllers/v2/capabilities.py +++ b/ceilometer/api/controllers/v2/capabilities.py @@ -39,8 +39,6 @@ class Capabilities(base.Base): api = {wtypes.text: bool} "A flattened dictionary of API capabilities" - storage = {wtypes.text: bool} - "A flattened dictionary of storage capabilities" event_storage = {wtypes.text: bool} "A flattened dictionary of event storage capabilities" @@ -48,30 +46,8 @@ class Capabilities(base.Base): def sample(cls): return cls( api=_flatten_capabilities({ - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True, - 'quartile': False}}}, 'events': {'query': {'simple': True}}, }), - storage=_flatten_capabilities( - {'storage': {'production_ready': True}}), event_storage=_flatten_capabilities( {'storage': {'production_ready': True}}), ) @@ -88,13 +64,10 @@ class CapabilitiesController(rest.RestController): """ # variation in API capabilities is effectively determined by # the lack of strict feature parity across storage drivers - conn = pecan.request.storage_conn event_conn = pecan.request.event_storage_conn - driver_capabilities = conn.get_capabilities().copy() - driver_capabilities['events'] = event_conn.get_capabilities()['events'] - driver_perf = conn.get_storage_capabilities() + driver_capabilities = {'events': + event_conn.get_capabilities()['events']} event_driver_perf = event_conn.get_storage_capabilities() return Capabilities(api=_flatten_capabilities(driver_capabilities), - storage=_flatten_capabilities(driver_perf), event_storage=_flatten_capabilities( event_driver_perf)) diff --git a/ceilometer/api/controllers/v2/meters.py b/ceilometer/api/controllers/v2/meters.py deleted file mode 100644 index 9aa500eb..00000000 --- a/ceilometer/api/controllers/v2/meters.py +++ /dev/null @@ -1,505 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import base64 -import datetime - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import strutils -from oslo_utils import timeutils -import pecan -from pecan import rest -import six -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from ceilometer.api.controllers.v2 import base -from ceilometer.api.controllers.v2 import utils as v2_utils -from ceilometer.api import rbac -from ceilometer.i18n import _ -from ceilometer.publisher import utils as publisher_utils -from ceilometer import sample -from ceilometer import storage -from ceilometer.storage import base as storage_base -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -class OldSample(base.Base): - """A single measurement for a given meter and resource. - - This class is deprecated in favor of Sample. - """ - - source = wtypes.text - "The ID of the source that identifies where the sample comes from" - - counter_name = wsme.wsattr(wtypes.text, mandatory=True) - "The name of the meter" - # FIXME(dhellmann): Make this meter_name? - - counter_type = wsme.wsattr(wtypes.text, mandatory=True) - "The type of the meter (see :ref:`measurements`)" - # FIXME(dhellmann): Make this meter_type? - - counter_unit = wsme.wsattr(wtypes.text, mandatory=True) - "The unit of measure for the value in counter_volume" - # FIXME(dhellmann): Make this meter_unit? - - counter_volume = wsme.wsattr(float, mandatory=True) - "The actual measured value" - - user_id = wtypes.text - "The ID of the user who last triggered an update to the resource" - - project_id = wtypes.text - "The ID of the project or tenant that owns the resource" - - resource_id = wsme.wsattr(wtypes.text, mandatory=True) - "The ID of the :class:`Resource` for which the measurements are taken" - - timestamp = datetime.datetime - "UTC date and time when the measurement was made" - - recorded_at = datetime.datetime - "When the sample has been recorded." 
- - resource_metadata = {wtypes.text: wtypes.text} - "Arbitrary metadata associated with the resource" - - message_id = wtypes.text - "A unique identifier for the sample" - - def __init__(self, counter_volume=None, resource_metadata=None, - timestamp=None, **kwds): - resource_metadata = resource_metadata or {} - if counter_volume is not None: - counter_volume = float(counter_volume) - resource_metadata = v2_utils.flatten_metadata(resource_metadata) - # this is to make it easier for clients to pass a timestamp in - if timestamp and isinstance(timestamp, six.string_types): - timestamp = timeutils.parse_isotime(timestamp) - - super(OldSample, self).__init__(counter_volume=counter_volume, - resource_metadata=resource_metadata, - timestamp=timestamp, **kwds) - - if self.resource_metadata in (wtypes.Unset, None): - self.resource_metadata = {} - - @classmethod - def sample(cls): - return cls(source='openstack', - counter_name='instance', - counter_type='gauge', - counter_unit='instance', - counter_volume=1, - resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - project_id='35b17138-b364-4e6a-a131-8f3099c5be68', - user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', - recorded_at=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - resource_metadata={'name1': 'value1', - 'name2': 'value2'}, - message_id='5460acce-4fd6-480d-ab18-9735ec7b1996', - ) - - -class Statistics(base.Base): - """Computed statistics for a query.""" - - groupby = {wtypes.text: wtypes.text} - "Dictionary of field names for group, if groupby statistics are requested" - - unit = wtypes.text - "The unit type of the data set" - - min = float - "The minimum volume seen in the data" - - max = float - "The maximum volume seen in the data" - - avg = float - "The average of all of the volume values seen in the data" - - sum = float - "The total of all of the volume values seen in the data" - - count = int - "The number of samples seen" - - aggregate = {wtypes.text: float} - "The selectable aggregate value(s)" - - duration = float - "The difference, in seconds, between the oldest and newest timestamp" - - duration_start = datetime.datetime - "UTC date and time of the earliest timestamp, or the query start time" - - duration_end = datetime.datetime - "UTC date and time of the oldest timestamp, or the query end time" - - period = int - "The difference, in seconds, between the period start and end" - - period_start = datetime.datetime - "UTC date and time of the period start" - - period_end = datetime.datetime - "UTC date and time of the period end" - - def __init__(self, start_timestamp=None, end_timestamp=None, **kwds): - super(Statistics, self).__init__(**kwds) - self._update_duration(start_timestamp, end_timestamp) - - def _update_duration(self, start_timestamp, end_timestamp): - # "Clamp" the timestamps we return to the original time - # range, excluding the offset. - if (start_timestamp and - self.duration_start and - self.duration_start < start_timestamp): - self.duration_start = start_timestamp - LOG.debug('clamping min timestamp to range') - if (end_timestamp and - self.duration_end and - self.duration_end > end_timestamp): - self.duration_end = end_timestamp - LOG.debug('clamping max timestamp to range') - - # If we got valid timestamps back, compute a duration in seconds. - # - # If the min > max after clamping then we know the - # timestamps on the samples fell outside of the time - # range we care about for the query, so treat them as - # "invalid." 
- # - # If the timestamps are invalid, return None as a - # sentinel indicating that there is something "funny" - # about the range. - if (self.duration_start and - self.duration_end and - self.duration_start <= self.duration_end): - self.duration = timeutils.delta_seconds(self.duration_start, - self.duration_end) - else: - self.duration_start = self.duration_end = self.duration = None - - @classmethod - def sample(cls): - return cls(unit='GiB', - min=1, - max=9, - avg=4.5, - sum=45, - count=10, - duration_start=datetime.datetime(2013, 1, 4, 16, 42), - duration_end=datetime.datetime(2013, 1, 4, 16, 47), - period=7200, - period_start=datetime.datetime(2013, 1, 4, 16, 00), - period_end=datetime.datetime(2013, 1, 4, 18, 00), - ) - - -class Aggregate(base.Base): - - func = wsme.wsattr(wtypes.text, mandatory=True) - "The aggregation function name" - - param = wsme.wsattr(wtypes.text, default=None) - "The paramter to the aggregation function" - - def __init__(self, **kwargs): - super(Aggregate, self).__init__(**kwargs) - - @staticmethod - def validate(aggregate): - valid_agg = (storage_base.Connection.CAPABILITIES.get('statistics', {}) - .get('aggregation', {}).get('selectable', {}).keys()) - if aggregate.func not in valid_agg: - msg = _('Invalid aggregation function: %s') % aggregate.func - raise base.ClientSideError(msg) - return aggregate - - @classmethod - def sample(cls): - return cls(func='cardinality', - param='resource_id') - - -def _validate_groupby_fields(groupby_fields): - """Checks that the list of groupby fields from request is valid. - - If all fields are valid, returns fields with duplicates removed. - """ - # NOTE(terriyu): Currently, metadata fields are supported in our - # group by statistics implementation only for mongodb - valid_fields = set(['user_id', 'resource_id', 'project_id', 'source', - 'resource_metadata.instance_type']) - - invalid_fields = set(groupby_fields) - valid_fields - if invalid_fields: - raise wsme.exc.UnknownArgument(invalid_fields, - "Invalid groupby fields") - - # Remove duplicate fields - # NOTE(terriyu): This assumes that we don't care about the order of the - # group by fields. - return list(set(groupby_fields)) - - -class MeterController(rest.RestController): - """Manages operations on a single meter.""" - _custom_actions = { - 'statistics': ['GET'], - } - - def __init__(self, meter_name): - pecan.request.context['meter_name'] = meter_name - self.meter_name = meter_name - - @wsme_pecan.wsexpose([OldSample], [base.Query], int) - def get_all(self, q=None, limit=None): - """Return samples for the meter. - - :param q: Filter rules for the data to be returned. - :param limit: Maximum number of samples to return. - """ - - rbac.enforce('get_samples', pecan.request) - - q = q or [] - limit = v2_utils.enforce_limit(limit) - kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__) - kwargs['meter'] = self.meter_name - f = storage.SampleFilter(**kwargs) - return [OldSample.from_db_model(e) - for e in pecan.request.storage_conn.get_samples(f, limit=limit) - ] - - @wsme_pecan.wsexpose([OldSample], str, body=[OldSample], status_code=201) - def post(self, direct='', samples=None): - """Post a list of new Samples to Telemetry. - - :param direct: a flag indicates whether the samples will be posted - directly to storage or not. - :param samples: a list of samples within the request body. 
- """ - rbac.enforce('create_samples', pecan.request) - - direct = strutils.bool_from_string(direct) - if not samples: - msg = _('Samples should be included in request body') - raise base.ClientSideError(msg) - - now = timeutils.utcnow() - auth_project = rbac.get_limited_to_project(pecan.request.headers) - def_source = pecan.request.cfg.sample_source - def_project_id = pecan.request.headers.get('X-Project-Id') - def_user_id = pecan.request.headers.get('X-User-Id') - - published_samples = [] - for s in samples: - if self.meter_name != s.counter_name: - raise wsme.exc.InvalidInput('counter_name', s.counter_name, - 'should be %s' % self.meter_name) - - if s.message_id: - raise wsme.exc.InvalidInput('message_id', s.message_id, - 'The message_id must not be set') - - if s.counter_type not in sample.TYPES: - raise wsme.exc.InvalidInput('counter_type', s.counter_type, - 'The counter type must be: ' + - ', '.join(sample.TYPES)) - - s.user_id = (s.user_id or def_user_id) - s.project_id = (s.project_id or def_project_id) - s.source = '%s:%s' % (s.project_id, (s.source or def_source)) - s.timestamp = (s.timestamp or now) - - if auth_project and auth_project != s.project_id: - # non admin user trying to cross post to another project_id - auth_msg = 'can not post samples to other projects' - raise wsme.exc.InvalidInput('project_id', s.project_id, - auth_msg) - - published_sample = sample.Sample( - name=s.counter_name, - type=s.counter_type, - unit=s.counter_unit, - volume=s.counter_volume, - user_id=s.user_id, - project_id=s.project_id, - resource_id=s.resource_id, - timestamp=s.timestamp.isoformat(), - resource_metadata=utils.restore_nesting(s.resource_metadata, - separator='.'), - source=s.source) - s.message_id = published_sample.id - - sample_dict = publisher_utils.meter_message_from_counter( - published_sample, cfg.CONF.publisher.telemetry_secret) - if direct: - ts = timeutils.parse_isotime(sample_dict['timestamp']) - sample_dict['timestamp'] = timeutils.normalize_time(ts) - pecan.request.storage_conn.record_metering_data(sample_dict) - else: - published_samples.append(sample_dict) - if not direct: - pecan.request.notifier.sample( - {'user': def_user_id, - 'tenant': def_project_id, - 'is_admin': True}, - 'telemetry.api', - {'samples': published_samples}) - - return samples - - @wsme_pecan.wsexpose([Statistics], - [base.Query], [six.text_type], int, [Aggregate]) - def statistics(self, q=None, groupby=None, period=None, aggregate=None): - """Computes the statistics of the samples in the time range given. - - :param q: Filter rules for the data to be returned. - :param groupby: Fields for group by aggregation - :param period: Returned result will be an array of statistics for a - period long of that number of seconds. - :param aggregate: The selectable aggregation functions to be applied. - """ - - rbac.enforce('compute_statistics', pecan.request) - - q = q or [] - groupby = groupby or [] - aggregate = aggregate or [] - - if period and period < 0: - raise base.ClientSideError(_("Period must be positive.")) - - kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__) - kwargs['meter'] = self.meter_name - f = storage.SampleFilter(**kwargs) - g = _validate_groupby_fields(groupby) - - aggregate = utils.uniq(aggregate, ['func', 'param']) - # Find the original timestamp in the query to use for clamping - # the duration returned in the statistics. 
- start = end = None - for i in q: - if i.field == 'timestamp' and i.op in ('lt', 'le'): - end = timeutils.parse_isotime(i.value).replace( - tzinfo=None) - elif i.field == 'timestamp' and i.op in ('gt', 'ge'): - start = timeutils.parse_isotime(i.value).replace( - tzinfo=None) - - try: - computed = pecan.request.storage_conn.get_meter_statistics( - f, period, g, aggregate) - return [Statistics(start_timestamp=start, - end_timestamp=end, - **c.as_dict()) - for c in computed] - except OverflowError as e: - params = dict(period=period, err=e) - raise base.ClientSideError( - _("Invalid period %(period)s: %(err)s") % params) - - -class Meter(base.Base): - """One category of measurements.""" - - name = wtypes.text - "The unique name for the meter" - - type = wtypes.Enum(str, *sample.TYPES) - "The meter type (see :ref:`measurements`)" - - unit = wtypes.text - "The unit of measure" - - resource_id = wtypes.text - "The ID of the :class:`Resource` for which the measurements are taken" - - project_id = wtypes.text - "The ID of the project or tenant that owns the resource" - - user_id = wtypes.text - "The ID of the user who last triggered an update to the resource" - - source = wtypes.text - "The ID of the source that identifies where the meter comes from" - - meter_id = wtypes.text - "The unique identifier for the meter" - - def __init__(self, **kwargs): - meter_id = '%s+%s' % (kwargs['resource_id'], kwargs['name']) - # meter_id is of type Unicode but base64.encodestring() only accepts - # strings. See bug #1333177 - meter_id = base64.b64encode(meter_id.encode('utf-8')) - kwargs['meter_id'] = meter_id - super(Meter, self).__init__(**kwargs) - - @classmethod - def sample(cls): - return cls(name='instance', - type='gauge', - unit='instance', - resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - project_id='35b17138-b364-4e6a-a131-8f3099c5be68', - user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', - source='openstack', - ) - - -class MetersController(rest.RestController): - """Works on meters.""" - - @pecan.expose() - def _lookup(self, meter_name, *remainder): - return MeterController(meter_name), remainder - - @wsme_pecan.wsexpose([Meter], [base.Query], int, str) - def get_all(self, q=None, limit=None, unique=''): - """Return all known meters, based on the data recorded so far. - - :param q: Filter rules for the meters to be returned. - :param unique: flag to indicate unique meters to be returned. - """ - - rbac.enforce('get_meters', pecan.request) - - q = q or [] - - # Timestamp field is not supported for Meter queries - limit = v2_utils.enforce_limit(limit) - kwargs = v2_utils.query_to_kwargs( - q, pecan.request.storage_conn.get_meters, - ['limit'], allow_timestamps=False) - return [Meter.from_db_model(m) - for m in pecan.request.storage_conn.get_meters( - limit=limit, unique=strutils.bool_from_string(unique), - **kwargs)] diff --git a/ceilometer/api/controllers/v2/query.py b/ceilometer/api/controllers/v2/query.py deleted file mode 100644 index 1c5af060..00000000 --- a/ceilometer/api/controllers/v2/query.py +++ /dev/null @@ -1,359 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -import jsonschema -from oslo_log import log -from oslo_utils import timeutils -import pecan -from pecan import rest -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from ceilometer.api.controllers.v2 import base -from ceilometer.api.controllers.v2 import samples -from ceilometer.api.controllers.v2 import utils as v2_utils -from ceilometer.api import rbac -from ceilometer.i18n import _ -from ceilometer import storage -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -class ComplexQuery(base.Base): - """Holds a sample query encoded in json.""" - - filter = wtypes.text - "The filter expression encoded in json." - - orderby = wtypes.text - "List of single-element dicts for specifying the ordering of the results." - - limit = int - "The maximum number of results to be returned." - - @classmethod - def sample(cls): - return cls(filter='{"and": [{"and": [{"=": ' + - '{"counter_name": "cpu_util"}}, ' + - '{">": {"counter_volume": 0.23}}, ' + - '{"<": {"counter_volume": 0.26}}]}, ' + - '{"or": [{"and": [{">": ' + - '{"timestamp": "2013-12-01T18:00:00"}}, ' + - '{"<": ' + - '{"timestamp": "2013-12-01T18:15:00"}}]}, ' + - '{"and": [{">": ' + - '{"timestamp": "2013-12-01T18:30:00"}}, ' + - '{"<": ' + - '{"timestamp": "2013-12-01T18:45:00"}}]}]}]}', - orderby='[{"counter_volume": "ASC"}, ' + - '{"timestamp": "DESC"}]', - limit=42 - ) - - -def _list_to_regexp(items, regexp_prefix=""): - regexp = ["^%s$" % item for item in items] - regexp = regexp_prefix + "|".join(regexp) - return regexp - - -class ValidatedComplexQuery(object): - complex_operators = ["and", "or"] - order_directions = ["asc", "desc"] - simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"] - regexp_prefix = "(?i)" - - complex_ops = _list_to_regexp(complex_operators, regexp_prefix) - simple_ops = _list_to_regexp(simple_ops, regexp_prefix) - order_directions = _list_to_regexp(order_directions, regexp_prefix) - - timestamp_fields = ["timestamp", "state_timestamp"] - - def __init__(self, query, db_model, additional_name_mapping=None, - metadata_allowed=False): - additional_name_mapping = additional_name_mapping or {} - self.name_mapping = {"user": "user_id", - "project": "project_id"} - self.name_mapping.update(additional_name_mapping) - valid_keys = db_model.get_field_names() - valid_keys = list(valid_keys) + list(self.name_mapping.keys()) - valid_fields = _list_to_regexp(valid_keys) - - if metadata_allowed: - valid_filter_fields = valid_fields + "|^metadata\.[\S]+$" - else: - valid_filter_fields = valid_fields - - schema_value = { - "oneOf": [{"type": "string"}, - {"type": "number"}, - {"type": "boolean"}], - "minProperties": 1, - "maxProperties": 1} - - schema_value_in = { - "type": "array", - "items": {"oneOf": [{"type": "string"}, - {"type": "number"}]}, - "minItems": 1} - - schema_field = { - "type": "object", - "patternProperties": {valid_filter_fields: schema_value}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_field_in = { - "type": "object", - "patternProperties": {valid_filter_fields: schema_value_in}, - 
"additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_leaf_in = { - "type": "object", - "patternProperties": {"(?i)^in$": schema_field_in}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_leaf_simple_ops = { - "type": "object", - "patternProperties": {self.simple_ops: schema_field}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_and_or_array = { - "type": "array", - "items": {"$ref": "#"}, - "minItems": 2} - - schema_and_or = { - "type": "object", - "patternProperties": {self.complex_ops: schema_and_or_array}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_not = { - "type": "object", - "patternProperties": {"(?i)^not$": {"$ref": "#"}}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - self.schema = { - "oneOf": [{"$ref": "#/definitions/leaf_simple_ops"}, - {"$ref": "#/definitions/leaf_in"}, - {"$ref": "#/definitions/and_or"}, - {"$ref": "#/definitions/not"}], - "minProperties": 1, - "maxProperties": 1, - "definitions": {"leaf_simple_ops": schema_leaf_simple_ops, - "leaf_in": schema_leaf_in, - "and_or": schema_and_or, - "not": schema_not}} - - self.orderby_schema = { - "type": "array", - "items": { - "type": "object", - "patternProperties": - {valid_fields: - {"type": "string", - "pattern": self.order_directions}}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1}} - - self.original_query = query - - def validate(self, visibility_field): - """Validates the query content and does the necessary conversions.""" - if self.original_query.filter is wtypes.Unset: - self.filter_expr = None - else: - try: - self.filter_expr = json.loads(self.original_query.filter) - self._validate_filter(self.filter_expr) - except (ValueError, jsonschema.exceptions.ValidationError) as e: - raise base.ClientSideError( - _("Filter expression not valid: %s") % e) - self._replace_isotime_with_datetime(self.filter_expr) - self._convert_operator_to_lower_case(self.filter_expr) - self._normalize_field_names_for_db_model(self.filter_expr) - - self._force_visibility(visibility_field) - - if self.original_query.orderby is wtypes.Unset: - self.orderby = None - else: - try: - self.orderby = json.loads(self.original_query.orderby) - self._validate_orderby(self.orderby) - except (ValueError, jsonschema.exceptions.ValidationError) as e: - raise base.ClientSideError( - _("Order-by expression not valid: %s") % e) - self._convert_orderby_to_lower_case(self.orderby) - self._normalize_field_names_in_orderby(self.orderby) - - self.limit = (None if self.original_query.limit is wtypes.Unset - else self.original_query.limit) - - self.limit = v2_utils.enforce_limit(self.limit) - - @staticmethod - def _convert_orderby_to_lower_case(orderby): - for orderby_field in orderby: - utils.lowercase_values(orderby_field) - - def _normalize_field_names_in_orderby(self, orderby): - for orderby_field in orderby: - self._replace_field_names(orderby_field) - - def _traverse_postorder(self, tree, visitor): - op = list(tree.keys())[0] - if op.lower() in self.complex_operators: - for i, operand in enumerate(tree[op]): - self._traverse_postorder(operand, visitor) - if op.lower() == "not": - self._traverse_postorder(tree[op], visitor) - - visitor(tree) - - def _check_cross_project_references(self, own_project_id, - visibility_field): - """Do not allow other than own_project_id.""" - def check_project_id(subfilter): - op, value = 
list(subfilter.items())[0] - if (op.lower() not in self.complex_operators - and list(value.keys())[0] == visibility_field - and value[visibility_field] != own_project_id): - raise base.ProjectNotAuthorized(value[visibility_field]) - - self._traverse_postorder(self.filter_expr, check_project_id) - - def _force_visibility(self, visibility_field): - """Force visibility field. - - If the tenant is not admin insert an extra - "and =" clause to the query. - """ - authorized_project = rbac.get_limited_to_project(pecan.request.headers) - is_admin = authorized_project is None - if not is_admin: - self._restrict_to_project(authorized_project, visibility_field) - self._check_cross_project_references(authorized_project, - visibility_field) - - def _restrict_to_project(self, project_id, visibility_field): - restriction = {"=": {visibility_field: project_id}} - if self.filter_expr is None: - self.filter_expr = restriction - else: - self.filter_expr = {"and": [restriction, self.filter_expr]} - - def _replace_isotime_with_datetime(self, filter_expr): - def replace_isotime(subfilter): - op, value = list(subfilter.items())[0] - if op.lower() not in self.complex_operators: - field = list(value.keys())[0] - if field in self.timestamp_fields: - date_time = self._convert_to_datetime(subfilter[op][field]) - subfilter[op][field] = date_time - - self._traverse_postorder(filter_expr, replace_isotime) - - def _normalize_field_names_for_db_model(self, filter_expr): - def _normalize_field_names(subfilter): - op, value = list(subfilter.items())[0] - if op.lower() not in self.complex_operators: - self._replace_field_names(value) - self._traverse_postorder(filter_expr, - _normalize_field_names) - - def _replace_field_names(self, subfilter): - field, value = list(subfilter.items())[0] - if field in self.name_mapping: - del subfilter[field] - subfilter[self.name_mapping[field]] = value - if field.startswith("metadata."): - del subfilter[field] - subfilter["resource_" + field] = value - - def _convert_operator_to_lower_case(self, filter_expr): - self._traverse_postorder(filter_expr, utils.lowercase_keys) - - @staticmethod - def _convert_to_datetime(isotime): - try: - date_time = timeutils.parse_isotime(isotime) - date_time = date_time.replace(tzinfo=None) - return date_time - except ValueError: - LOG.exception(_("String %s is not a valid isotime") % isotime) - msg = _('Failed to parse the timestamp value %s') % isotime - raise base.ClientSideError(msg) - - def _validate_filter(self, filter_expr): - jsonschema.validate(filter_expr, self.schema) - - def _validate_orderby(self, orderby_expr): - jsonschema.validate(orderby_expr, self.orderby_schema) - - -class QuerySamplesController(rest.RestController): - """Provides complex query possibilities for samples.""" - - @wsme_pecan.wsexpose([samples.Sample], body=ComplexQuery) - def post(self, body): - """Define query for retrieving Sample data. - - :param body: Query rules for the samples to be returned. 
- """ - - rbac.enforce('query_sample', pecan.request) - - sample_name_mapping = {"resource": "resource_id", - "meter": "counter_name", - "type": "counter_type", - "unit": "counter_unit", - "volume": "counter_volume"} - - query = ValidatedComplexQuery(body, - storage.models.Sample, - sample_name_mapping, - metadata_allowed=True) - query.validate(visibility_field="project_id") - conn = pecan.request.storage_conn - return [samples.Sample.from_db_model(s) - for s in conn.query_samples(query.filter_expr, - query.orderby, - query.limit)] - - -class QueryController(rest.RestController): - - samples = QuerySamplesController() diff --git a/ceilometer/api/controllers/v2/resources.py b/ceilometer/api/controllers/v2/resources.py deleted file mode 100644 index b9918c21..00000000 --- a/ceilometer/api/controllers/v2/resources.py +++ /dev/null @@ -1,157 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import urllib - -import pecan -from pecan import rest -import six -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from ceilometer.api.controllers.v2 import base -from ceilometer.api.controllers.v2 import utils -from ceilometer.api import rbac -from ceilometer.i18n import _ - - -class Resource(base.Base): - """An externally defined object for which samples have been received.""" - - resource_id = wtypes.text - "The unique identifier for the resource" - - project_id = wtypes.text - "The ID of the owning project or tenant" - - user_id = wtypes.text - "The ID of the user who created the resource or updated it last" - - first_sample_timestamp = datetime.datetime - "UTC date & time not later than the first sample known for this resource" - - last_sample_timestamp = datetime.datetime - "UTC date & time not earlier than the last sample known for this resource" - - metadata = {wtypes.text: wtypes.text} - "Arbitrary metadata associated with the resource" - - links = [base.Link] - "A list containing a self link and associated meter links" - - source = wtypes.text - "The source where the resource come from" - - def __init__(self, metadata=None, **kwds): - metadata = metadata or {} - metadata = utils.flatten_metadata(metadata) - super(Resource, self).__init__(metadata=metadata, **kwds) - - @classmethod - def sample(cls): - return cls( - resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - project_id='35b17138-b364-4e6a-a131-8f3099c5be68', - user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', - timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - source="openstack", - metadata={'name1': 'value1', - 'name2': 'value2'}, - links=[ - base.Link(href=('http://localhost:8777/v2/resources/' - 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), - rel='self'), - base.Link(href=('http://localhost:8777/v2/meters/volume?' 
- 'q.field=resource_id&q.value=' - 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), - rel='volume') - ], - ) - - -class ResourcesController(rest.RestController): - """Works on resources.""" - - @staticmethod - def _make_link(rel_name, url, type, type_arg, query=None): - query_str = '' - if query: - query_str = '?q.field=%s&q.value=%s' % (query['field'], - query['value']) - return base.Link(href='%s/v2/%s/%s%s' % (url, type, - type_arg, query_str), - rel=rel_name) - - def _resource_links(self, resource_id, meter_links=1): - links = [self._make_link('self', pecan.request.application_url, - 'resources', resource_id)] - if meter_links: - for meter in pecan.request.storage_conn.get_meters( - resource=resource_id): - query = {'field': 'resource_id', 'value': resource_id} - links.append(self._make_link(meter.name, - pecan.request.application_url, - 'meters', meter.name, - query=query)) - return links - - @wsme_pecan.wsexpose(Resource, six.text_type) - def get_one(self, resource_id): - """Retrieve details about one resource. - - :param resource_id: The UUID of the resource. - """ - - rbac.enforce('get_resource', pecan.request) - # In case we have special character in resource id, for example, swift - # can generate samples with resource id like - # 29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance - resource_id = urllib.unquote(resource_id) - - authorized_project = rbac.get_limited_to_project(pecan.request.headers) - resources = list(pecan.request.storage_conn.get_resources( - resource=resource_id, project=authorized_project)) - if not resources: - raise base.EntityNotFound(_('Resource'), resource_id) - return Resource.from_db_and_links(resources[0], - self._resource_links(resource_id)) - - @wsme_pecan.wsexpose([Resource], [base.Query], int, int) - def get_all(self, q=None, limit=None, meter_links=1): - """Retrieve definitions of all of the resources. - - :param q: Filter rules for the resources to be returned. - :param meter_links: option to include related meter links - """ - - rbac.enforce('get_resources', pecan.request) - - q = q or [] - limit = utils.enforce_limit(limit) - kwargs = utils.query_to_kwargs( - q, pecan.request.storage_conn.get_resources, ['limit']) - resources = [ - Resource.from_db_and_links(r, - self._resource_links(r.resource_id, - meter_links)) - for r in pecan.request.storage_conn.get_resources(limit=limit, - **kwargs)] - return resources diff --git a/ceilometer/api/controllers/v2/root.py b/ceilometer/api/controllers/v2/root.py index 2387f9f5..1701325e 100644 --- a/ceilometer/api/controllers/v2/root.py +++ b/ceilometer/api/controllers/v2/root.py @@ -18,82 +18,8 @@ # License for the specific language governing permissions and limitations # under the License. -from keystoneauth1 import exceptions -from oslo_config import cfg -from oslo_log import log -from oslo_utils import strutils -import pecan - from ceilometer.api.controllers.v2 import capabilities from ceilometer.api.controllers.v2 import events -from ceilometer.api.controllers.v2 import meters -from ceilometer.api.controllers.v2 import query -from ceilometer.api.controllers.v2 import resources -from ceilometer.api.controllers.v2 import samples -from ceilometer.i18n import _, _LW -from ceilometer import keystone_client - - -API_OPTS = [ - cfg.BoolOpt('gnocchi_is_enabled', - default=None, - help=('Set True to disable resource/meter/sample URLs. ' - 'Default autodetection by querying keystone.')), - cfg.BoolOpt('aodh_is_enabled', - default=None, - help=('Set True to redirect alarms URLs to aodh. 
' - 'Default autodetection by querying keystone.')), - cfg.StrOpt('aodh_url', - default=None, - help=('The endpoint of Aodh to redirect alarms URLs ' - 'to Aodh API. Default autodetection by querying ' - 'keystone.')), -] - -cfg.CONF.register_opts(API_OPTS, group='api') -cfg.CONF.import_opt('meter_dispatchers', 'ceilometer.dispatcher') - -LOG = log.getLogger(__name__) - - -def gnocchi_abort(): - pecan.abort(410, ("This telemetry installation is configured to use " - "Gnocchi. Please use the Gnocchi API available on " - "the metric endpoint to retrieve data.")) - - -def aodh_abort(): - pecan.abort(410, _("alarms URLs is unavailable when Aodh is " - "disabled or unavailable.")) - - -def aodh_redirect(url): - # NOTE(sileht): we use 307 and not 301 or 302 to allow - # client to redirect POST/PUT/DELETE/... - # FIXME(sileht): it would be better to use 308, but webob - # doesn't handle it :( - # https://github.com/Pylons/webob/pull/207 - pecan.redirect(location=url + pecan.request.path_qs, - code=307) - - -class QueryController(object): - def __init__(self, gnocchi_is_enabled=False, aodh_url=None): - self.gnocchi_is_enabled = gnocchi_is_enabled - self.aodh_url = aodh_url - - @pecan.expose() - def _lookup(self, kind, *remainder): - if kind == 'alarms' and self.aodh_url: - aodh_redirect(self.aodh_url) - elif kind == 'alarms': - aodh_abort() - elif kind == 'samples' and self.gnocchi_is_enabled: - gnocchi_abort() - elif kind == 'samples': - return query.QuerySamplesController(), remainder - else: - pecan.abort(404) class V2Controller(object): @@ -102,94 +28,3 @@ class V2Controller(object): event_types = events.EventTypesController() events = events.EventsController() capabilities = capabilities.CapabilitiesController() - - def __init__(self): - self._gnocchi_is_enabled = None - self._aodh_is_enabled = None - self._aodh_url = None - - @property - def gnocchi_is_enabled(self): - if self._gnocchi_is_enabled is None: - if cfg.CONF.api.gnocchi_is_enabled is not None: - self._gnocchi_is_enabled = cfg.CONF.api.gnocchi_is_enabled - - elif ("gnocchi" not in cfg.CONF.meter_dispatchers - or "database" in cfg.CONF.meter_dispatchers): - self._gnocchi_is_enabled = False - else: - try: - catalog = keystone_client.get_service_catalog( - keystone_client.get_client()) - catalog.url_for(service_type='metric') - except exceptions.EndpointNotFound: - self._gnocchi_is_enabled = False - except exceptions.ClientException: - LOG.warning(_LW("Can't connect to keystone, assuming " - "gnocchi is disabled and retry later")) - else: - self._gnocchi_is_enabled = True - LOG.warning(_LW("ceilometer-api started with gnocchi " - "enabled. The resources/meters/samples " - "URLs are disabled.")) - return self._gnocchi_is_enabled - - @property - def aodh_url(self): - if self._aodh_url is None: - if cfg.CONF.api.aodh_is_enabled is False: - self._aodh_url = "" - elif cfg.CONF.api.aodh_url is not None: - self._aodh_url = self._normalize_aodh_url( - cfg.CONF.api.aodh_url) - else: - try: - catalog = keystone_client.get_service_catalog( - keystone_client.get_client()) - self._aodh_url = self._normalize_aodh_url( - catalog.url_for(service_type='alarming')) - except exceptions.EndpointNotFound: - self._aodh_url = "" - except exceptions.ClientException: - LOG.warning(_LW("Can't connect to keystone, assuming aodh " - "is disabled and retry later.")) - else: - LOG.warning(_LW("ceilometer-api started with aodh " - "enabled. 
Alarms URLs will be redirected " - "to aodh endpoint.")) - return self._aodh_url - - @pecan.expose() - def _lookup(self, kind, *remainder): - if (kind in ['meters', 'resources', 'samples'] - and self.gnocchi_is_enabled): - if kind == 'meters' and pecan.request.method == 'POST': - direct = pecan.request.params.get('direct', '') - if strutils.bool_from_string(direct): - pecan.abort(400, _('direct option cannot be true when ' - 'Gnocchi is enabled.')) - return meters.MetersController(), remainder - gnocchi_abort() - elif kind == 'meters': - return meters.MetersController(), remainder - elif kind == 'resources': - return resources.ResourcesController(), remainder - elif kind == 'samples': - return samples.SamplesController(), remainder - elif kind == 'query': - return QueryController( - gnocchi_is_enabled=self.gnocchi_is_enabled, - aodh_url=self.aodh_url, - ), remainder - elif kind == 'alarms' and (not self.aodh_url): - aodh_abort() - elif kind == 'alarms' and self.aodh_url: - aodh_redirect(self.aodh_url) - else: - pecan.abort(404) - - @staticmethod - def _normalize_aodh_url(url): - if url.endswith("/"): - return url[:-1] - return url diff --git a/ceilometer/api/controllers/v2/samples.py b/ceilometer/api/controllers/v2/samples.py deleted file mode 100644 index 05ded82f..00000000 --- a/ceilometer/api/controllers/v2/samples.py +++ /dev/null @@ -1,145 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -import pecan -from pecan import rest -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from ceilometer.api.controllers.v2 import base -from ceilometer.api.controllers.v2 import utils -from ceilometer.api import rbac -from ceilometer.i18n import _ -from ceilometer import sample -from ceilometer import storage - - -class Sample(base.Base): - """One measurement.""" - - id = wtypes.text - "The unique identifier for the sample." - - meter = wtypes.text - "The meter name this sample is for." - - type = wtypes.Enum(str, *sample.TYPES) - "The meter type (see :ref:`meter_types`)" - - unit = wtypes.text - "The unit of measure." - - volume = float - "The metered value." - - user_id = wtypes.text - "The user this sample was taken for." - - project_id = wtypes.text - "The project this sample was taken for." - - resource_id = wtypes.text - "The :class:`Resource` this sample was taken for." - - source = wtypes.text - "The source that identifies where the sample comes from." - - timestamp = datetime.datetime - "When the sample has been generated." - - recorded_at = datetime.datetime - "When the sample has been recorded." - - metadata = {wtypes.text: wtypes.text} - "Arbitrary metadata associated with the sample." 
- - @classmethod - def from_db_model(cls, m): - return cls(id=m.message_id, - meter=m.counter_name, - type=m.counter_type, - unit=m.counter_unit, - volume=m.counter_volume, - user_id=m.user_id, - project_id=m.project_id, - resource_id=m.resource_id, - source=m.source, - timestamp=m.timestamp, - recorded_at=m.recorded_at, - metadata=utils.flatten_metadata(m.resource_metadata)) - - @classmethod - def sample(cls): - return cls(id=str(uuid.uuid1()), - meter='instance', - type='gauge', - unit='instance', - volume=1, - resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - project_id='35b17138-b364-4e6a-a131-8f3099c5be68', - user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', - timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - recorded_at=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - source='openstack', - metadata={'name1': 'value1', - 'name2': 'value2'}, - ) - - -class SamplesController(rest.RestController): - """Controller managing the samples.""" - - @wsme_pecan.wsexpose([Sample], [base.Query], int) - def get_all(self, q=None, limit=None): - """Return all known samples, based on the data recorded so far. - - :param q: Filter rules for the samples to be returned. - :param limit: Maximum number of samples to be returned. - """ - - rbac.enforce('get_samples', pecan.request) - - q = q or [] - - limit = utils.enforce_limit(limit) - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - f = storage.SampleFilter(**kwargs) - return map(Sample.from_db_model, - pecan.request.storage_conn.get_samples(f, limit=limit)) - - @wsme_pecan.wsexpose(Sample, wtypes.text) - def get_one(self, sample_id): - """Return a sample. - - :param sample_id: the id of the sample. - """ - - rbac.enforce('get_sample', pecan.request) - - f = storage.SampleFilter(message_id=sample_id) - - samples = list(pecan.request.storage_conn.get_samples(f)) - if len(samples) < 1: - raise base.EntityNotFound(_('Sample'), sample_id) - - return Sample.from_db_model(samples[0]) diff --git a/ceilometer/api/controllers/v2/utils.py b/ceilometer/api/controllers/v2/utils.py index 88142cbd..03f89a2a 100644 --- a/ceilometer/api/controllers/v2/utils.py +++ b/ceilometer/api/controllers/v2/utils.py @@ -18,22 +18,15 @@ # License for the specific language governing permissions and limitations # under the License. -import copy -import datetime import functools -import inspect from oslo_config import cfg from oslo_log import log -from oslo_utils import timeutils import pecan -import six -import wsme from ceilometer.api.controllers.v2 import base from ceilometer.api import rbac from ceilometer.i18n import _, _LI -from ceilometer import utils LOG = log.getLogger(__name__) cfg.CONF.import_opt('default_api_return_limit', 'ceilometer.api.app', @@ -61,265 +54,6 @@ def get_auth_project(on_behalf_of=None): return auth_project -def sanitize_query(query, db_func, on_behalf_of=None): - """Check the query. - - See if: - 1) the request is coming from admin - then allow full visibility - 2) non-admin - make sure that the query includes the requester's project. - """ - q = copy.copy(query) - - auth_project = get_auth_project(on_behalf_of) - if auth_project: - _verify_query_segregation(q, auth_project) - - proj_q = [i for i in q if i.field == 'project_id'] - valid_keys = inspect.getargspec(db_func)[0] - if not proj_q and 'on_behalf_of' not in valid_keys: - # The user is restricted, but they didn't specify a project - # so add it for them. 
- q.append(base.Query(field='project_id', - op='eq', - value=auth_project)) - return q - - -def _verify_query_segregation(query, auth_project=None): - """Ensure non-admin queries are not constrained to another project.""" - auth_project = (auth_project or - rbac.get_limited_to_project(pecan.request.headers)) - - if not auth_project: - return - - for q in query: - if q.field in ('project', 'project_id') and auth_project != q.value: - raise base.ProjectNotAuthorized(q.value) - - -def validate_query(query, db_func, internal_keys=None, - allow_timestamps=True): - """Validates the syntax of the query and verifies the query. - - Verification check if the query request is authorized for the included - project. - :param query: Query expression that should be validated - :param db_func: the function on the storage level, of which arguments - will form the valid_keys list, which defines the valid fields for a - query expression - :param internal_keys: internally used field names, that should not be - used for querying - :param allow_timestamps: defines whether the timestamp-based constraint is - applicable for this query or not - - :raises InvalidInput: if an operator is not supported for a given field - :raises InvalidInput: if timestamp constraints are allowed, but - search_offset was included without timestamp constraint - :raises: UnknownArgument: if a field name is not a timestamp field, nor - in the list of valid keys - """ - - internal_keys = internal_keys or [] - _verify_query_segregation(query) - - valid_keys = inspect.getargspec(db_func)[0] - - internal_timestamp_keys = ['end_timestamp', 'start_timestamp', - 'end_timestamp_op', 'start_timestamp_op'] - if 'start_timestamp' in valid_keys: - internal_keys += internal_timestamp_keys - valid_keys += ['timestamp', 'search_offset'] - internal_keys.append('self') - internal_keys.append('metaquery') - valid_keys = set(valid_keys) - set(internal_keys) - translation = {'user_id': 'user', - 'project_id': 'project', - 'resource_id': 'resource'} - - has_timestamp_query = _validate_timestamp_fields(query, - 'timestamp', - ('lt', 'le', 'gt', 'ge'), - allow_timestamps) - has_search_offset_query = _validate_timestamp_fields(query, - 'search_offset', - 'eq', - allow_timestamps) - - if has_search_offset_query and not has_timestamp_query: - raise wsme.exc.InvalidInput('field', 'search_offset', - "search_offset cannot be used without " + - "timestamp") - - def _is_field_metadata(field): - return (field.startswith('metadata.') or - field.startswith('resource_metadata.')) - - for i in query: - if i.field not in ('timestamp', 'search_offset'): - key = translation.get(i.field, i.field) - operator = i.op - if key in valid_keys or _is_field_metadata(i.field): - if operator == 'eq': - if key == 'enabled': - i._get_value_as_type('boolean') - elif _is_field_metadata(key): - i._get_value_as_type() - else: - raise wsme.exc.InvalidInput('op', i.op, - 'unimplemented operator for ' - '%s' % i.field) - else: - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (query, sorted(valid_keys)) - raise wsme.exc.UnknownArgument(key, msg) - - -def _validate_timestamp_fields(query, field_name, operator_list, - allow_timestamps): - """Validates the timestamp related constraints in a query if there are any. 
- - :param query: query expression that may contain the timestamp fields - :param field_name: timestamp name, which should be checked (timestamp, - search_offset) - :param operator_list: list of operators that are supported for that - timestamp, which was specified in the parameter field_name - :param allow_timestamps: defines whether the timestamp-based constraint is - applicable to this query or not - - :returns: True, if there was a timestamp constraint, containing - a timestamp field named as defined in field_name, in the query and it - was allowed and syntactically correct. - :returns: False, if there wasn't timestamp constraint, containing a - timestamp field named as defined in field_name, in the query - - :raises InvalidInput: if an operator is unsupported for a given timestamp - field - :raises UnknownArgument: if the timestamp constraint is not allowed in - the query - """ - - for item in query: - if item.field == field_name: - # If *timestamp* or *search_offset* field was specified in the - # query, but timestamp is not supported on that resource, on - # which the query was invoked, then raise an exception. - if not allow_timestamps: - raise wsme.exc.UnknownArgument(field_name, - "not valid for " + - "this resource") - if item.op not in operator_list: - raise wsme.exc.InvalidInput('op', item.op, - 'unimplemented operator for %s' % - item.field) - return True - return False - - -def query_to_kwargs(query, db_func, internal_keys=None, - allow_timestamps=True): - validate_query(query, db_func, internal_keys=internal_keys, - allow_timestamps=allow_timestamps) - query = sanitize_query(query, db_func) - translation = {'user_id': 'user', - 'project_id': 'project', - 'resource_id': 'resource'} - stamp = {} - metaquery = {} - kwargs = {} - for i in query: - if i.field == 'timestamp': - if i.op in ('lt', 'le'): - stamp['end_timestamp'] = i.value - stamp['end_timestamp_op'] = i.op - elif i.op in ('gt', 'ge'): - stamp['start_timestamp'] = i.value - stamp['start_timestamp_op'] = i.op - else: - if i.op == 'eq': - if i.field == 'search_offset': - stamp['search_offset'] = i.value - elif i.field == 'enabled': - kwargs[i.field] = i._get_value_as_type('boolean') - elif i.field.startswith('metadata.'): - metaquery[i.field] = i._get_value_as_type() - elif i.field.startswith('resource_metadata.'): - metaquery[i.field[9:]] = i._get_value_as_type() - else: - key = translation.get(i.field, i.field) - kwargs[key] = i.value - - if metaquery and 'metaquery' in inspect.getargspec(db_func)[0]: - kwargs['metaquery'] = metaquery - if stamp: - kwargs.update(_get_query_timestamps(stamp)) - return kwargs - - -def _get_query_timestamps(args=None): - """Return any optional timestamp information in the request. - - Determine the desired range, if any, from the GET arguments. Set - up the query range using the specified offset. - - [query_start ... start_timestamp ... end_timestamp ... 
query_end] - - Returns a dictionary containing: - - start_timestamp: First timestamp to use for query - start_timestamp_op: First timestamp operator to use for query - end_timestamp: Final timestamp to use for query - end_timestamp_op: Final timestamp operator to use for query - """ - - if args is None: - return {} - search_offset = int(args.get('search_offset', 0)) - - def _parse_timestamp(timestamp): - if not timestamp: - return None - try: - iso_timestamp = timeutils.parse_isotime(timestamp) - iso_timestamp = iso_timestamp.replace(tzinfo=None) - except ValueError: - raise wsme.exc.InvalidInput('timestamp', timestamp, - 'invalid timestamp format') - return iso_timestamp - - start_timestamp = _parse_timestamp(args.get('start_timestamp')) - end_timestamp = _parse_timestamp(args.get('end_timestamp')) - start_timestamp = start_timestamp - datetime.timedelta( - minutes=search_offset) if start_timestamp else None - end_timestamp = end_timestamp + datetime.timedelta( - minutes=search_offset) if end_timestamp else None - return {'start_timestamp': start_timestamp, - 'end_timestamp': end_timestamp, - 'start_timestamp_op': args.get('start_timestamp_op'), - 'end_timestamp_op': args.get('end_timestamp_op')} - - -def flatten_metadata(metadata): - """Return flattened resource metadata. - - Metadata is returned with flattened nested structures (except nested sets) - and with all values converted to unicode strings. - """ - if metadata: - # After changing recursive_keypairs` output we need to keep - # flattening output unchanged. - # Example: recursive_keypairs({'a': {'b':{'c':'d'}}}, '.') - # output before: a.b:c=d - # output now: a.b.c=d - # So to keep the first variant just replace all dots except the first - return dict((k.replace('.', ':').replace(':', '.', 1), - six.text_type(v)) - for k, v in utils.recursive_keypairs(metadata, - separator='.') - if type(v) is not set) - return {} - - # TODO(fabiog): this decorator should disappear and have a more unified # way of controlling access and scope. Before messing with this, though # I feel this file should be re-factored in smaller chunks one for each diff --git a/ceilometer/api/hooks.py b/ceilometer/api/hooks.py index 003a2363..8a28587f 100644 --- a/ceilometer/api/hooks.py +++ b/ceilometer/api/hooks.py @@ -14,20 +14,11 @@ # under the License. from oslo_config import cfg -from oslo_log import log -import oslo_messaging from pecan import hooks -from ceilometer.i18n import _LE -from ceilometer import messaging from ceilometer import storage -LOG = log.getLogger(__name__) - -cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', - group='publisher_notifier') - class ConfigHook(hooks.PecanHook): """Attach the configuration object to the request. @@ -43,45 +34,12 @@ class ConfigHook(hooks.PecanHook): class DBHook(hooks.PecanHook): def __init__(self): - self.storage_connection = DBHook.get_connection('metering') - self.event_storage_connection = DBHook.get_connection('event') - - if (not self.storage_connection - and not self.event_storage_connection): - raise Exception("Api failed to start. 
Failed to connect to " - "databases, purpose: %s" % - ', '.join(['metering', 'event'])) + self.event_storage_connection = storage.get_connection_from_config( + cfg.CONF) def before(self, state): - state.request.storage_conn = self.storage_connection state.request.event_storage_conn = self.event_storage_connection - @staticmethod - def get_connection(purpose): - try: - return storage.get_connection_from_config(cfg.CONF, purpose) - except Exception as err: - params = {"purpose": purpose, "err": err} - LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s " - "retry later: %(err)s") % params) - - -class NotifierHook(hooks.PecanHook): - """Create and attach a notifier to the request. - - Usually, samples will be push to notification bus by notifier when they - are posted via /v2/meters/ API. - """ - - def __init__(self): - transport = messaging.get_transport() - self.notifier = oslo_messaging.Notifier( - transport, driver=cfg.CONF.publisher_notifier.telemetry_driver, - publisher_id="ceilometer.api") - - def before(self, state): - state.request.notifier = self.notifier - class TranslationHook(hooks.PecanHook): diff --git a/ceilometer/cmd/agent_notification.py b/ceilometer/cmd/agent_notification.py deleted file mode 100644 index 08b16464..00000000 --- a/ceilometer/cmd/agent_notification.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_service import service as os_service - -from ceilometer import notification -from ceilometer import service - -CONF = cfg.CONF - - -def main(): - service.prepare_service() - os_service.launch(CONF, notification.NotificationService(), - workers=CONF.notification.workers).wait() diff --git a/ceilometer/cmd/collector.py b/ceilometer/cmd/collector.py deleted file mode 100644 index 0a56a7f5..00000000 --- a/ceilometer/cmd/collector.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_service import service as os_service - -from ceilometer import collector -from ceilometer import service - -CONF = cfg.CONF - - -def main(): - service.prepare_service() - os_service.launch(CONF, collector.CollectorService(), - workers=CONF.collector.workers).wait() diff --git a/ceilometer/cmd/polling.py b/ceilometer/cmd/polling.py deleted file mode 100644 index e4bb583f..00000000 --- a/ceilometer/cmd/polling.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014-2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log -from oslo_service import service as os_service - -from ceilometer.agent import manager -from ceilometer.i18n import _LW -from ceilometer import service - -LOG = log.getLogger(__name__) - -CONF = cfg.CONF - - -class MultiChoicesOpt(cfg.Opt): - def __init__(self, name, choices=None, **kwargs): - super(MultiChoicesOpt, self).__init__( - name, type=DeduplicatedCfgList(choices), **kwargs) - self.choices = choices - - def _get_argparse_kwargs(self, group, **kwargs): - """Extends the base argparse keyword dict for multi choices options.""" - kwargs = super(MultiChoicesOpt, self)._get_argparse_kwargs(group) - kwargs['nargs'] = '+' - choices = kwargs.get('choices', self.choices) - if choices: - kwargs['choices'] = choices - return kwargs - - -class DeduplicatedCfgList(cfg.types.List): - def __init__(self, choices=None, **kwargs): - super(DeduplicatedCfgList, self).__init__(**kwargs) - self.choices = choices or [] - - def __call__(self, *args, **kwargs): - result = super(DeduplicatedCfgList, self).__call__(*args, **kwargs) - result_set = set(result) - if len(result) != len(result_set): - LOG.warning(_LW("Duplicated values: %s found in CLI options, " - "auto de-duplicated"), result) - result = list(result_set) - if self.choices and not (result_set <= set(self.choices)): - raise Exception('Valid values are %s, but found %s' - % (self.choices, result)) - return result - - -CLI_OPTS = [ - MultiChoicesOpt('polling-namespaces', - default=['compute', 'central'], - choices=['compute', 'central', 'ipmi'], - dest='polling_namespaces', - help='Polling namespace(s) to be used while ' - 'resource polling'), - MultiChoicesOpt('pollster-list', - default=[], - dest='pollster_list', - help='List of pollsters (or wildcard templates) to be ' - 'used while polling'), -] - -CONF.register_cli_opts(CLI_OPTS) - - -def main(): - service.prepare_service() - os_service.launch(CONF, manager.AgentManager(CONF.polling_namespaces, - CONF.pollster_list)).wait() diff --git a/ceilometer/cmd/sample.py b/ceilometer/cmd/sample.py deleted file mode 100644 index 6157f3c9..00000000 --- a/ceilometer/cmd/sample.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2012-2014 Julien Danjou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Command line tool for creating meter for Ceilometer. -""" -import logging -import sys - -from oslo_config import cfg -from oslo_utils import timeutils -from stevedore import extension - -from ceilometer import pipeline -from ceilometer import sample -from ceilometer import service - - -def send_sample(): - cfg.CONF.register_cli_opts([ - cfg.StrOpt('sample-name', - short='n', - help='Meter name.', - required=True), - cfg.StrOpt('sample-type', - short='y', - help='Meter type (gauge, delta, cumulative).', - default='gauge', - required=True), - cfg.StrOpt('sample-unit', - short='U', - help='Meter unit.'), - cfg.IntOpt('sample-volume', - short='l', - help='Meter volume value.', - default=1), - cfg.StrOpt('sample-resource', - short='r', - help='Meter resource id.', - required=True), - cfg.StrOpt('sample-user', - short='u', - help='Meter user id.'), - cfg.StrOpt('sample-project', - short='p', - help='Meter project id.'), - cfg.StrOpt('sample-timestamp', - short='i', - help='Meter timestamp.', - default=timeutils.utcnow().isoformat()), - cfg.StrOpt('sample-metadata', - short='m', - help='Meter metadata.'), - ]) - - service.prepare_service() - - # Set up logging to use the console - console = logging.StreamHandler(sys.stderr) - console.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(message)s') - console.setFormatter(formatter) - root_logger = logging.getLogger('') - root_logger.addHandler(console) - root_logger.setLevel(logging.DEBUG) - - pipeline_manager = pipeline.setup_pipeline( - extension.ExtensionManager('ceilometer.transformer')) - - with pipeline_manager.publisher() as p: - p([sample.Sample( - name=cfg.CONF.sample_name, - type=cfg.CONF.sample_type, - unit=cfg.CONF.sample_unit, - volume=cfg.CONF.sample_volume, - user_id=cfg.CONF.sample_user, - project_id=cfg.CONF.sample_project, - resource_id=cfg.CONF.sample_resource, - timestamp=cfg.CONF.sample_timestamp, - resource_metadata=cfg.CONF.sample_metadata and eval( - cfg.CONF.sample_metadata))]) diff --git a/ceilometer/cmd/storage.py b/ceilometer/cmd/storage.py index 977b1929..bc28f256 100644 --- a/ceilometer/cmd/storage.py +++ b/ceilometer/cmd/storage.py @@ -27,25 +27,15 @@ LOG = log.getLogger(__name__) def dbsync(): service.prepare_service() - storage.get_connection_from_config(cfg.CONF, 'metering').upgrade() - storage.get_connection_from_config(cfg.CONF, 'event').upgrade() + storage.get_connection_from_config(cfg.CONF).upgrade() def expirer(): service.prepare_service() - if cfg.CONF.database.metering_time_to_live > 0: - LOG.debug("Clearing expired metering data") - storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering') - storage_conn.clear_expired_metering_data( - cfg.CONF.database.metering_time_to_live) - else: - LOG.info(_LI("Nothing to clean, database metering time to live " - "is disabled")) - if cfg.CONF.database.event_time_to_live > 0: LOG.debug("Clearing expired event data") - event_conn = storage.get_connection_from_config(cfg.CONF, 'event') + event_conn = storage.get_connection_from_config(cfg.CONF) event_conn.clear_expired_event_data( cfg.CONF.database.event_time_to_live) else: diff --git 
a/ceilometer/collector.py b/ceilometer/collector.py deleted file mode 100644 index 4ec3bd99..00000000 --- a/ceilometer/collector.py +++ /dev/null @@ -1,184 +0,0 @@ -# -# Copyright 2012-2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from itertools import chain -import socket - -import msgpack -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import netutils -from oslo_utils import units - -from ceilometer import dispatcher -from ceilometer.i18n import _, _LE, _LW -from ceilometer import messaging -from ceilometer import service_base -from ceilometer import utils - -OPTS = [ - cfg.StrOpt('udp_address', - default='0.0.0.0', - help='Address to which the UDP socket is bound. Set to ' - 'an empty string to disable.'), - cfg.PortOpt('udp_port', - default=4952, - help='Port to which the UDP socket is bound.'), - cfg.IntOpt('batch_size', - default=1, - help='Number of notification messages to wait before ' - 'dispatching them'), - cfg.IntOpt('batch_timeout', - default=None, - help='Number of seconds to wait before dispatching samples' - 'when batch_size is not reached (None means indefinitely)'), -] - -cfg.CONF.register_opts(OPTS, group="collector") -cfg.CONF.import_opt('metering_topic', 'ceilometer.publisher.messaging', - group='publisher_notifier') -cfg.CONF.import_opt('event_topic', 'ceilometer.publisher.messaging', - group='publisher_notifier') -cfg.CONF.import_opt('store_events', 'ceilometer.notification', - group='notification') - - -LOG = log.getLogger(__name__) - - -class CollectorService(service_base.ServiceBase): - """Listener for the collector service.""" - def start(self): - """Bind the UDP socket and handle incoming data.""" - # ensure dispatcher is configured before starting other services - dispatcher_managers = dispatcher.load_dispatcher_manager() - (self.meter_manager, self.event_manager) = dispatcher_managers - self.sample_listener = None - self.event_listener = None - self.udp_thread = None - super(CollectorService, self).start() - - if cfg.CONF.collector.udp_address: - self.udp_thread = utils.spawn_thread(self.start_udp) - - transport = messaging.get_transport(optional=True) - if transport: - if list(self.meter_manager): - sample_target = oslo_messaging.Target( - topic=cfg.CONF.publisher_notifier.metering_topic) - self.sample_listener = ( - messaging.get_batch_notification_listener( - transport, [sample_target], - [SampleEndpoint(self.meter_manager)], - allow_requeue=True, - batch_size=cfg.CONF.collector.batch_size, - batch_timeout=cfg.CONF.collector.batch_timeout)) - self.sample_listener.start() - - if cfg.CONF.notification.store_events and list(self.event_manager): - event_target = oslo_messaging.Target( - topic=cfg.CONF.publisher_notifier.event_topic) - self.event_listener = ( - messaging.get_batch_notification_listener( - transport, [event_target], - [EventEndpoint( - EventDispatcherVerificator(self.event_manager))], - allow_requeue=True, - batch_size=cfg.CONF.collector.batch_size, - 
batch_timeout=cfg.CONF.collector.batch_timeout)) - self.event_listener.start() - - def start_udp(self): - address_family = socket.AF_INET - if netutils.is_valid_ipv6(cfg.CONF.collector.udp_address): - address_family = socket.AF_INET6 - udp = socket.socket(address_family, socket.SOCK_DGRAM) - udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - udp.bind((cfg.CONF.collector.udp_address, - cfg.CONF.collector.udp_port)) - - self.udp_run = True - while self.udp_run: - # NOTE(jd) Arbitrary limit of 64K because that ought to be - # enough for anybody. - data, source = udp.recvfrom(64 * units.Ki) - try: - sample = msgpack.loads(data, encoding='utf-8') - except Exception: - LOG.warning(_("UDP: Cannot decode data sent by %s"), source) - else: - try: - LOG.debug("UDP: Storing %s", sample) - self.meter_manager.map_method( - 'verify_and_record_metering_data', sample) - except Exception: - LOG.exception(_("UDP: Unable to store meter")) - - def stop(self): - if self.sample_listener: - utils.kill_listeners([self.sample_listener]) - if self.event_listener: - utils.kill_listeners([self.event_listener]) - if self.udp_thread: - self.udp_run = False - self.udp_thread.join() - super(CollectorService, self).stop() - - -class CollectorEndpoint(object): - def __init__(self, dispatcher_manager): - self.dispatcher_manager = dispatcher_manager - - def sample(self, messages): - """RPC endpoint for notification messages - - When another service sends a notification over the message - bus, this method receives it. - """ - samples = list(chain.from_iterable(m["payload"] for m in messages)) - try: - self.dispatcher_manager.map_method(self.method, samples) - except Exception: - LOG.exception(_LE("Dispatcher failed to handle the %s, " - "requeue it."), self.ep_type) - return oslo_messaging.NotificationResult.REQUEUE - - -class SampleEndpoint(CollectorEndpoint): - method = 'verify_and_record_metering_data' - ep_type = 'sample' - - -class EventDispatcherVerificator(object): - def __init__(self, dispatcher): - self.dispatcher = dispatcher - - def verify_and_record_events(self, events): - """Verify event signature and record them.""" - goods = [] - for event in events: - if utils.verify_signature( - event, self.conf.publisher.telemetry_secret): - goods.append(event) - else: - LOG.warning(_LW( - 'event signature invalid, discarding event: %s'), event) - return self.dispatcher.record_events(goods) - - -class EventEndpoint(CollectorEndpoint): - method = 'verify_and_record_events' - ep_type = 'event' diff --git a/ceilometer/compute/__init__.py b/ceilometer/compute/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/discovery.py b/ceilometer/compute/discovery.py deleted file mode 100644 index d00de2a3..00000000 --- a/ceilometer/compute/discovery.py +++ /dev/null @@ -1,87 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_utils import timeutils - -from ceilometer.agent import plugin_base -from ceilometer import nova_client - -OPTS = [ - cfg.BoolOpt('workload_partitioning', - default=False, - help='Enable work-load partitioning, allowing multiple ' - 'compute agents to be run simultaneously.'), - cfg.IntOpt('resource_update_interval', - default=0, - min=0, - help="New instances will be discovered periodically based" - " on this option (in seconds). By default, " - "the agent discovers instances according to pipeline " - "polling interval. If option is greater than 0, " - "the instance list to poll will be updated based " - "on this option's interval. Measurements relating " - "to the instances will match intervals " - "defined in pipeline.") -] -cfg.CONF.register_opts(OPTS, group='compute') - - -class InstanceDiscovery(plugin_base.DiscoveryBase): - def __init__(self): - super(InstanceDiscovery, self).__init__() - self.nova_cli = nova_client.Client() - self.last_run = None - self.instances = {} - self.expiration_time = cfg.CONF.compute.resource_update_interval - - def discover(self, manager, param=None): - """Discover resources to monitor.""" - secs_from_last_update = 0 - if self.last_run: - secs_from_last_update = timeutils.delta_seconds( - self.last_run, timeutils.utcnow(True)) - - instances = [] - # NOTE(ityaptin) we update make a nova request only if - # it's a first discovery or resources expired - if not self.last_run or secs_from_last_update >= self.expiration_time: - try: - utc_now = timeutils.utcnow(True) - since = self.last_run.isoformat() if self.last_run else None - instances = self.nova_cli.instance_get_all_by_host( - cfg.CONF.host, since) - self.last_run = utc_now - except Exception: - # NOTE(zqfan): instance_get_all_by_host is wrapped and will log - # exception when there is any error. It is no need to raise it - # again and print one more time. - return [] - - for instance in instances: - if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted', - 'error']: - self.instances.pop(instance.id, None) - else: - self.instances[instance.id] = instance - - return self.instances.values() - - @property - def group_id(self): - if cfg.CONF.compute.workload_partitioning: - return cfg.CONF.host - else: - return None diff --git a/ceilometer/compute/notifications/__init__.py b/ceilometer/compute/notifications/__init__.py deleted file mode 100644 index fac3ff28..00000000 --- a/ceilometer/compute/notifications/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# -# Copyright 2013 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base - - -OPTS = [ - cfg.StrOpt('nova_control_exchange', - default='nova', - help="Exchange name for Nova notifications."), -] - - -cfg.CONF.register_opts(OPTS) - - -class ComputeNotificationBase(plugin_base.NotificationBase): - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - This sequence is defining the exchange and topics to be connected for - this plugin. - """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.nova_control_exchange) - for topic in self.get_notification_topics(conf)] diff --git a/ceilometer/compute/notifications/instance.py b/ceilometer/compute/notifications/instance.py deleted file mode 100644 index 356fbea5..00000000 --- a/ceilometer/compute/notifications/instance.py +++ /dev/null @@ -1,89 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Converters for producing compute sample messages from notification events. -""" - -import abc - -import six - -from ceilometer.agent import plugin_base -from ceilometer.compute import notifications -from ceilometer.compute import util -from ceilometer import sample - - -@six.add_metaclass(abc.ABCMeta) -class UserMetadataAwareInstanceNotificationBase( - notifications.ComputeNotificationBase): - """Consumes notifications containing instance user metadata.""" - - def process_notification(self, message): - instance_properties = self.get_instance_properties(message) - if isinstance(instance_properties.get('metadata'), dict): - src_metadata = instance_properties['metadata'] - del instance_properties['metadata'] - util.add_reserved_user_metadata(src_metadata, instance_properties) - return self.get_sample(message) - - def get_instance_properties(self, message): - """Retrieve instance properties from notification payload.""" - return message['payload'] - - @abc.abstractmethod - def get_sample(self, message): - """Derive sample from notification payload.""" - - -class InstanceScheduled(UserMetadataAwareInstanceNotificationBase, - plugin_base.NonMetricNotificationBase): - event_types = ['scheduler.run_instance.scheduled'] - - def get_instance_properties(self, message): - """Retrieve instance properties from notification payload.""" - return message['payload']['request_spec']['instance_properties'] - - def get_sample(self, message): - yield sample.Sample.from_notification( - name='instance.scheduled', - type=sample.TYPE_DELTA, - volume=1, - unit='instance', - user_id=None, - project_id=message['payload']['request_spec'] - ['instance_properties']['project_id'], - resource_id=message['payload']['instance_id'], - message=message) - - -class ComputeInstanceNotificationBase( - UserMetadataAwareInstanceNotificationBase): - """Convert compute.instance.* notifications into Samples.""" - event_types = ['compute.instance.*'] - - -class Instance(ComputeInstanceNotificationBase, - plugin_base.NonMetricNotificationBase): - def 
get_sample(self, message): - yield sample.Sample.from_notification( - name='instance', - type=sample.TYPE_GAUGE, - unit='instance', - volume=1, - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['instance_id'], - message=message) diff --git a/ceilometer/compute/pollsters/__init__.py b/ceilometer/compute/pollsters/__init__.py deleted file mode 100644 index f56122b1..00000000 --- a/ceilometer/compute/pollsters/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_utils import timeutils -import six - -from ceilometer.agent import plugin_base -from ceilometer.compute.virt import inspector as virt_inspector - - -@six.add_metaclass(abc.ABCMeta) -class BaseComputePollster(plugin_base.PollsterBase): - - def setup_environment(self): - super(BaseComputePollster, self).setup_environment() - # propagate exception from check_sanity - self.inspector.check_sanity() - - @property - def inspector(self): - try: - inspector = self._inspector - except AttributeError: - inspector = virt_inspector.get_hypervisor_inspector() - BaseComputePollster._inspector = inspector - return inspector - - @property - def default_discovery(self): - return 'local_instances' - - @staticmethod - def _populate_cache_create(_i_cache, _instance, _inspector, - _DiskData, _inspector_attr, _stats_attr): - """Settings and return cache.""" - if _instance.id not in _i_cache: - _data = 0 - _per_device_data = {} - disk_rates = getattr(_inspector, _inspector_attr)(_instance) - for disk, stats in disk_rates: - _data += getattr(stats, _stats_attr) - _per_device_data[disk.device] = ( - getattr(stats, _stats_attr)) - _per_disk_data = { - _stats_attr: _per_device_data - } - _i_cache[_instance.id] = _DiskData( - _data, - _per_disk_data - ) - return _i_cache[_instance.id] - - def _record_poll_time(self): - """Method records current time as the poll time. - - :return: time in seconds since the last poll time was recorded - """ - current_time = timeutils.utcnow() - duration = None - if hasattr(self, '_last_poll_time'): - duration = timeutils.delta_seconds(self._last_poll_time, - current_time) - self._last_poll_time = current_time - return duration diff --git a/ceilometer/compute/pollsters/cpu.py b/ceilometer/compute/pollsters/cpu.py deleted file mode 100644 index d8ee3671..00000000 --- a/ceilometer/compute/pollsters/cpu.py +++ /dev/null @@ -1,93 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -import ceilometer -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class CPUPollster(pollsters.BaseComputePollster): - - def get_samples(self, manager, cache, resources): - for instance in resources: - LOG.debug('checking instance %s', instance.id) - try: - cpu_info = self.inspector.inspect_cpus(instance) - LOG.debug("CPUTIME USAGE: %(instance)s %(time)d", - {'instance': instance, - 'time': cpu_info.time}) - cpu_num = {'cpu_number': cpu_info.number} - yield util.make_sample_from_instance( - instance, - name='cpu', - type=sample.TYPE_CUMULATIVE, - unit='ns', - volume=cpu_info.time, - additional_metadata=cpu_num, - ) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('Obtaining CPU time is not implemented for %s', - self.inspector.__class__.__name__) - except Exception as err: - LOG.exception(_('could not get CPU time for %(id)s: %(e)s'), - {'id': instance.id, 'e': err}) - - -class CPUUtilPollster(pollsters.BaseComputePollster): - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - LOG.debug('Checking CPU util for instance %s', instance.id) - try: - cpu_info = self.inspector.inspect_cpu_util( - instance, self._inspection_duration) - LOG.debug("CPU UTIL: %(instance)s %(util)d", - {'instance': instance, - 'util': cpu_info.util}) - yield util.make_sample_from_instance( - instance, - name='cpu_util', - type=sample.TYPE_GAUGE, - unit='%', - volume=cpu_info.util, - ) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('Obtaining CPU Util is not implemented for %s', - self.inspector.__class__.__name__) - except Exception as err: - LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'), - {'id': instance.id, 'e': err}) diff --git a/ceilometer/compute/pollsters/disk.py b/ceilometer/compute/pollsters/disk.py deleted file mode 100644 index d277cbca..00000000 --- a/ceilometer/compute/pollsters/disk.py +++ /dev/null @@ -1,694 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# Copyright 2014 Cisco Systems, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -import abc -import collections - -from oslo_log import log -import six - -import ceilometer -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -DiskIOData = collections.namedtuple( - 'DiskIOData', - 'r_bytes r_requests w_bytes w_requests per_disk_requests', -) - -DiskRateData = collections.namedtuple('DiskRateData', - ['read_bytes_rate', - 'read_requests_rate', - 'write_bytes_rate', - 'write_requests_rate', - 'per_disk_rate']) - -DiskLatencyData = collections.namedtuple('DiskLatencyData', - ['disk_latency', - 'per_disk_latency']) - -DiskIOPSData = collections.namedtuple('DiskIOPSData', - ['iops_count', - 'per_disk_iops']) - -DiskInfoData = collections.namedtuple('DiskInfoData', - ['capacity', - 'allocation', - 'physical', - 'per_disk_info']) - - -@six.add_metaclass(abc.ABCMeta) -class _Base(pollsters.BaseComputePollster): - - DISKIO_USAGE_MESSAGE = ' '.join(["DISKIO USAGE:", - "%s %s:", - "read-requests=%d", - "read-bytes=%d", - "write-requests=%d", - "write-bytes=%d", - "errors=%d", - ]) - - CACHE_KEY_DISK = 'diskio' - - def _populate_cache(self, inspector, cache, instance): - i_cache = cache.setdefault(self.CACHE_KEY_DISK, {}) - if instance.id not in i_cache: - r_bytes = 0 - r_requests = 0 - w_bytes = 0 - w_requests = 0 - per_device_read_bytes = {} - per_device_read_requests = {} - per_device_write_bytes = {} - per_device_write_requests = {} - for disk, info in inspector.inspect_disks(instance): - LOG.debug(self.DISKIO_USAGE_MESSAGE, - instance, disk.device, info.read_requests, - info.read_bytes, info.write_requests, - info.write_bytes, info.errors) - r_bytes += info.read_bytes - r_requests += info.read_requests - w_bytes += info.write_bytes - w_requests += info.write_requests - # per disk data - per_device_read_bytes[disk.device] = info.read_bytes - per_device_read_requests[disk.device] = info.read_requests - per_device_write_bytes[disk.device] = info.write_bytes - per_device_write_requests[disk.device] = info.write_requests - per_device_requests = { - 'read_bytes': per_device_read_bytes, - 'read_requests': per_device_read_requests, - 'write_bytes': per_device_write_bytes, - 'write_requests': per_device_write_requests, - } - i_cache[instance.id] = DiskIOData( - r_bytes=r_bytes, - r_requests=r_requests, - w_bytes=w_bytes, - w_requests=w_requests, - per_disk_requests=per_device_requests, - ) - return i_cache[instance.id] - - @abc.abstractmethod - def _get_samples(instance, c_data): - """Return one or more Sample.""" - - @staticmethod - def _get_sample_read_and_write(instance, _name, _unit, c_data, - _volume, _metadata): - """Read / write Pollster and return one Sample""" - return [util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_CUMULATIVE, - unit=_unit, - volume=getattr(c_data, _volume), - additional_metadata={ - 'device': c_data.per_disk_requests[_metadata].keys()}, - )] - - @staticmethod - def _get_samples_per_device(c_data, _attr, instance, _name, _unit): - """Return one or more Samples for meter 'disk.device.*'""" - samples = [] - for disk, value in six.iteritems(c_data.per_disk_requests[_attr]): - samples.append(util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_CUMULATIVE, - unit=_unit, - volume=value, - resource_id="%s-%s" % 
(instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - def get_samples(self, manager, cache, resources): - for instance in resources: - instance_name = util.instance_name(instance) - try: - c_data = self._populate_cache( - self.inspector, - cache, - instance, - ) - for s in self._get_samples(instance, c_data): - yield s - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - -class ReadRequestsPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_sample_read_and_write( - instance, 'disk.read.requests', 'request', c_data, - 'r_requests', 'read_requests') - - -class PerDeviceReadRequestsPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_samples_per_device( - c_data, 'read_requests', instance, - 'disk.device.read.requests', 'request') - - -class ReadBytesPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_sample_read_and_write( - instance, 'disk.read.bytes', 'B', c_data, - 'r_bytes', 'read_bytes') - - -class PerDeviceReadBytesPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_samples_per_device( - c_data, 'read_bytes', instance, - 'disk.device.read.bytes', 'B') - - -class WriteRequestsPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_sample_read_and_write( - instance, 'disk.write.requests', 'request', - c_data, 'w_requests', 'write_requests') - - -class PerDeviceWriteRequestsPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_samples_per_device( - c_data, 'write_requests', instance, - 'disk.device.write.requests', 'request') - - -class WriteBytesPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_sample_read_and_write( - instance, 'disk.write.bytes', 'B', - c_data, 'w_bytes', 'write_bytes') - - -class PerDeviceWriteBytesPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_samples_per_device( - c_data, 'write_bytes', instance, - 'disk.device.write.bytes', 'B') - - -@six.add_metaclass(abc.ABCMeta) -class _DiskRatesPollsterBase(pollsters.BaseComputePollster): - - CACHE_KEY_DISK_RATE = 'diskio-rate' - - def _populate_cache(self, inspector, cache, instance): - i_cache = cache.setdefault(self.CACHE_KEY_DISK_RATE, {}) - if instance.id not in i_cache: - r_bytes_rate = 0 - r_requests_rate = 0 - w_bytes_rate = 0 - w_requests_rate = 0 - per_disk_r_bytes_rate = {} - per_disk_r_requests_rate = {} - per_disk_w_bytes_rate = {} - per_disk_w_requests_rate = {} - disk_rates = inspector.inspect_disk_rates( - instance, self._inspection_duration) - for disk, info in disk_rates: - r_bytes_rate += info.read_bytes_rate - r_requests_rate += info.read_requests_rate - w_bytes_rate += info.write_bytes_rate - 
w_requests_rate += info.write_requests_rate - - per_disk_r_bytes_rate[disk.device] = info.read_bytes_rate - per_disk_r_requests_rate[disk.device] = info.read_requests_rate - per_disk_w_bytes_rate[disk.device] = info.write_bytes_rate - per_disk_w_requests_rate[disk.device] = ( - info.write_requests_rate) - per_disk_rate = { - 'read_bytes_rate': per_disk_r_bytes_rate, - 'read_requests_rate': per_disk_r_requests_rate, - 'write_bytes_rate': per_disk_w_bytes_rate, - 'write_requests_rate': per_disk_w_requests_rate, - } - i_cache[instance.id] = DiskRateData( - r_bytes_rate, - r_requests_rate, - w_bytes_rate, - w_requests_rate, - per_disk_rate - ) - return i_cache[instance.id] - - @abc.abstractmethod - def _get_samples(self, instance, disk_rates_info): - """Return one or more Sample.""" - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - try: - disk_rates_info = self._populate_cache( - self.inspector, - cache, - instance, - ) - for disk_rate in self._get_samples(instance, disk_rates_info): - yield disk_rate - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - instance_name = util.instance_name(instance) - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - def _get_samples_per_device(self, disk_rates_info, _attr, instance, - _name, _unit): - """Return one or more Samples for meter 'disk.device.*'.""" - samples = [] - for disk, value in six.iteritems(disk_rates_info.per_disk_rate[ - _attr]): - samples.append(util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_GAUGE, - unit=_unit, - volume=value, - resource_id="%s-%s" % (instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - def _get_sample_read_and_write(self, instance, _name, _unit, _element, - _attr1, _attr2): - """Read / write Pollster and return one Sample""" - return [util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_GAUGE, - unit=_unit, - volume=getattr(_element, _attr1), - additional_metadata={ - 'device': getattr(_element, _attr2)[_attr1].keys()}, - )] - - -class ReadBytesRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_sample_read_and_write( - instance, 'disk.read.bytes.rate', 'B/s', disk_rates_info, - 'read_bytes_rate', 'per_disk_rate') - - -class PerDeviceReadBytesRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_samples_per_device( - disk_rates_info, 'read_bytes_rate', instance, - 'disk.device.read.bytes.rate', 'B/s') - - -class ReadRequestsRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_sample_read_and_write( - instance, 'disk.read.requests.rate', 'requests/s', disk_rates_info, - 'read_requests_rate', 'per_disk_rate') - - -class PerDeviceReadRequestsRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_samples_per_device( - disk_rates_info, 'read_requests_rate', 
instance, - 'disk.device.read.requests.rate', 'requests/s') - - -class WriteBytesRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_sample_read_and_write( - instance, 'disk.write.bytes.rate', 'B/s', disk_rates_info, - 'write_bytes_rate', 'per_disk_rate') - - -class PerDeviceWriteBytesRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_samples_per_device( - disk_rates_info, 'write_bytes_rate', instance, - 'disk.device.write.bytes.rate', 'B/s') - - -class WriteRequestsRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_sample_read_and_write( - instance, 'disk.write.requests.rate', 'requests/s', - disk_rates_info, 'write_requests_rate', 'per_disk_rate') - - -class PerDeviceWriteRequestsRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_samples_per_device( - disk_rates_info, 'write_requests_rate', instance, - 'disk.device.write.requests.rate', 'requests/s') - - -@six.add_metaclass(abc.ABCMeta) -class _DiskLatencyPollsterBase(pollsters.BaseComputePollster): - - CACHE_KEY_DISK_LATENCY = 'disk-latency' - - def _populate_cache(self, inspector, cache, instance): - return self._populate_cache_create( - cache.setdefault(self.CACHE_KEY_DISK_LATENCY, {}), - instance, inspector, DiskLatencyData, - 'inspect_disk_latency', 'disk_latency') - - @abc.abstractmethod - def _get_samples(self, instance, disk_rates_info): - """Return one or more Sample.""" - - def get_samples(self, manager, cache, resources): - for instance in resources: - try: - disk_latency_info = self._populate_cache( - self.inspector, - cache, - instance, - ) - for disk_latency in self._get_samples(instance, - disk_latency_info): - yield disk_latency - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. 
- LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - instance_name = util.instance_name(instance) - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - -class DiskLatencyPollster(_DiskLatencyPollsterBase): - - def _get_samples(self, instance, disk_latency_info): - return [util.make_sample_from_instance( - instance, - name='disk.latency', - type=sample.TYPE_GAUGE, - unit='ms', - volume=disk_latency_info.disk_latency / 1000 - )] - - -class PerDeviceDiskLatencyPollster(_DiskLatencyPollsterBase): - - def _get_samples(self, instance, disk_latency_info): - samples = [] - for disk, value in six.iteritems(disk_latency_info.per_disk_latency[ - 'disk_latency']): - samples.append(util.make_sample_from_instance( - instance, - name='disk.device.latency', - type=sample.TYPE_GAUGE, - unit='ms', - volume=value / 1000, - resource_id="%s-%s" % (instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - -class _DiskIOPSPollsterBase(pollsters.BaseComputePollster): - - CACHE_KEY_DISK_IOPS = 'disk-iops' - - def _populate_cache(self, inspector, cache, instance): - return self._populate_cache_create( - cache.setdefault(self.CACHE_KEY_DISK_IOPS, {}), - instance, inspector, DiskIOPSData, - 'inspect_disk_iops', 'iops_count') - - @abc.abstractmethod - def _get_samples(self, instance, disk_rates_info): - """Return one or more Sample.""" - - def get_samples(self, manager, cache, resources): - for instance in resources: - try: - disk_iops_info = self._populate_cache( - self.inspector, - cache, - instance, - ) - for disk_iops in self._get_samples(instance, - disk_iops_info): - yield disk_iops - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. 
- LOG.debug('%(inspector)s does not provide data for ' - '%(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - instance_name = util.instance_name(instance) - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - -class DiskIOPSPollster(_DiskIOPSPollsterBase): - - def _get_samples(self, instance, disk_iops_info): - return [util.make_sample_from_instance( - instance, - name='disk.iops', - type=sample.TYPE_GAUGE, - unit='count/s', - volume=disk_iops_info.iops_count - )] - - -class PerDeviceDiskIOPSPollster(_DiskIOPSPollsterBase): - - def _get_samples(self, instance, disk_iops_info): - samples = [] - for disk, value in six.iteritems(disk_iops_info.per_disk_iops[ - 'iops_count']): - samples.append(util.make_sample_from_instance( - instance, - name='disk.device.iops', - type=sample.TYPE_GAUGE, - unit='count/s', - volume=value, - resource_id="%s-%s" % (instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - -@six.add_metaclass(abc.ABCMeta) -class _DiskInfoPollsterBase(pollsters.BaseComputePollster): - - CACHE_KEY_DISK_INFO = 'diskinfo' - - def _populate_cache(self, inspector, cache, instance): - i_cache = cache.setdefault(self.CACHE_KEY_DISK_INFO, {}) - if instance.id not in i_cache: - all_capacity = 0 - all_allocation = 0 - all_physical = 0 - per_disk_capacity = {} - per_disk_allocation = {} - per_disk_physical = {} - disk_info = inspector.inspect_disk_info( - instance) - for disk, info in disk_info: - all_capacity += info.capacity - all_allocation += info.allocation - all_physical += info.physical - - per_disk_capacity[disk.device] = info.capacity - per_disk_allocation[disk.device] = info.allocation - per_disk_physical[disk.device] = info.physical - per_disk_info = { - 'capacity': per_disk_capacity, - 'allocation': per_disk_allocation, - 'physical': per_disk_physical, - } - i_cache[instance.id] = DiskInfoData( - all_capacity, - all_allocation, - all_physical, - per_disk_info - ) - return i_cache[instance.id] - - @abc.abstractmethod - def _get_samples(self, instance, disk_info): - """Return one or more Sample.""" - - def _get_samples_per_device(self, disk_info, _attr, instance, _name): - """Return one or more Samples for meter 'disk.device.*'.""" - samples = [] - for disk, value in six.iteritems(disk_info.per_disk_info[_attr]): - samples.append(util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_GAUGE, - unit='B', - volume=value, - resource_id="%s-%s" % (instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - def _get_samples_task(self, instance, _name, disk_info, _attr1, _attr2): - """Return one or more Samples for meter 'disk.task.*'.""" - return [util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_GAUGE, - unit='B', - volume=getattr(disk_info, _attr1), - additional_metadata={ - 'device': disk_info.per_disk_info[_attr2].keys()}, - )] - - def get_samples(self, manager, cache, resources): - for instance in resources: - try: - disk_size_info = self._populate_cache( - self.inspector, - cache, - instance, - ) - for disk_info in self._get_samples(instance, disk_size_info): - yield disk_info - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. 
- LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - instance_name = util.instance_name(instance) - LOG.exception(_('Ignoring instance %(name)s ' - '(%(instance_id)s) : %(error)s') % ( - {'name': instance_name, - 'instance_id': instance.id, - 'error': err})) - - -class CapacityPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_task( - instance, 'disk.capacity', disk_info, - 'capacity', 'capacity') - - -class PerDeviceCapacityPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_per_device( - disk_info, 'capacity', instance, 'disk.device.capacity') - - -class AllocationPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_task( - instance, 'disk.allocation', disk_info, - 'allocation', 'allocation') - - -class PerDeviceAllocationPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_per_device( - disk_info, 'allocation', instance, 'disk.device.allocation') - - -class PhysicalPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_task( - instance, 'disk.usage', disk_info, - 'physical', 'physical') - - -class PerDevicePhysicalPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_per_device( - disk_info, 'physical', instance, 'disk.device.usage') diff --git a/ceilometer/compute/pollsters/instance.py b/ceilometer/compute/pollsters/instance.py deleted file mode 100644 index cc9e7eb7..00000000 --- a/ceilometer/compute/pollsters/instance.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer import sample - - -class InstancePollster(pollsters.BaseComputePollster): - - @staticmethod - def get_samples(manager, cache, resources): - for instance in resources: - yield util.make_sample_from_instance( - instance, - name='instance', - type=sample.TYPE_GAUGE, - unit='instance', - volume=1, - ) diff --git a/ceilometer/compute/pollsters/memory.py b/ceilometer/compute/pollsters/memory.py deleted file mode 100644 index 9f126336..00000000 --- a/ceilometer/compute/pollsters/memory.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -import ceilometer -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _, _LE, _LW -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class MemoryUsagePollster(pollsters.BaseComputePollster): - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - LOG.debug('Checking memory usage for instance %s', instance.id) - try: - memory_info = self.inspector.inspect_memory_usage( - instance, self._inspection_duration) - LOG.debug("MEMORY USAGE: %(instance)s %(usage)f", - {'instance': instance, - 'usage': memory_info.usage}) - yield util.make_sample_from_instance( - instance, - name='memory.usage', - type=sample.TYPE_GAUGE, - unit='MB', - volume=memory_info.usage, - ) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except virt_inspector.NoDataException as e: - LOG.warning(_LW('Cannot inspect data of %(pollster)s for ' - '%(instance_id)s, non-fatal reason: %(exc)s'), - {'pollster': self.__class__.__name__, - 'instance_id': instance.id, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('Obtaining Memory Usage is not implemented for %s', - self.inspector.__class__.__name__) - except Exception as err: - LOG.exception(_('Could not get Memory Usage for ' - '%(id)s: %(e)s'), {'id': instance.id, - 'e': err}) - - -class MemoryResidentPollster(pollsters.BaseComputePollster): - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - LOG.debug('Checking resident memory for instance %s', - instance.id) - try: - memory_info = self.inspector.inspect_memory_resident( - instance, self._inspection_duration) - LOG.debug("RESIDENT MEMORY: %(instance)s %(resident)f", - {'instance': instance, - 'resident': memory_info.resident}) - yield util.make_sample_from_instance( - instance, - name='memory.resident', - type=sample.TYPE_GAUGE, - unit='MB', - volume=memory_info.resident, - ) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. 
- LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except virt_inspector.NoDataException as e: - LOG.warning(_LW('Cannot inspect data of %(pollster)s for ' - '%(instance_id)s, non-fatal reason: %(exc)s'), - {'pollster': self.__class__.__name__, - 'instance_id': instance.id, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('Obtaining Resident Memory is not implemented' - ' for %s', self.inspector.__class__.__name__) - except Exception as err: - LOG.exception(_LE('Could not get Resident Memory Usage for ' - '%(id)s: %(e)s'), {'id': instance.id, - 'e': err}) diff --git a/ceilometer/compute/pollsters/net.py b/ceilometer/compute/pollsters/net.py deleted file mode 100644 index e3131f99..00000000 --- a/ceilometer/compute/pollsters/net.py +++ /dev/null @@ -1,210 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from oslo_log import log - -import ceilometer -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class _Base(pollsters.BaseComputePollster): - - NET_USAGE_MESSAGE = ' '.join(["NETWORK USAGE:", "%s %s:", "read-bytes=%d", - "write-bytes=%d"]) - - @staticmethod - def make_vnic_sample(instance, name, type, unit, volume, vnic_data): - metadata = copy.copy(vnic_data) - additional_metadata = dict(zip(metadata._fields, metadata)) - if vnic_data.fref is not None: - rid = vnic_data.fref - additional_metadata['vnic_name'] = vnic_data.fref - else: - instance_name = util.instance_name(instance) - rid = "%s-%s-%s" % (instance_name, instance.id, vnic_data.name) - additional_metadata['vnic_name'] = vnic_data.name - - return util.make_sample_from_instance( - instance=instance, - name=name, - type=type, - unit=unit, - volume=volume, - resource_id=rid, - additional_metadata=additional_metadata - ) - - CACHE_KEY_VNIC = 'vnics' - - def _get_vnic_info(self, inspector, instance): - return inspector.inspect_vnics(instance) - - @staticmethod - def _get_rx_info(info): - return info.rx_bytes - - @staticmethod - def _get_tx_info(info): - return info.tx_bytes - - def _get_vnics_for_instance(self, cache, inspector, instance): - i_cache = cache.setdefault(self.CACHE_KEY_VNIC, {}) - if instance.id not in i_cache: - i_cache[instance.id] = list( - self._get_vnic_info(inspector, instance) - ) - return i_cache[instance.id] - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - instance_name = util.instance_name(instance) - LOG.debug('checking net info 
for instance %s', instance.id) - try: - vnics = self._get_vnics_for_instance( - cache, - self.inspector, - instance, - ) - for vnic, info in vnics: - LOG.debug(self.NET_USAGE_MESSAGE, instance_name, - vnic.name, self._get_rx_info(info), - self._get_tx_info(info)) - yield self._get_sample(instance, vnic, info) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - -class _RateBase(_Base): - - NET_USAGE_MESSAGE = ' '.join(["NETWORK RATE:", "%s %s:", - "read-bytes-rate=%d", - "write-bytes-rate=%d"]) - - CACHE_KEY_VNIC = 'vnic-rates' - - def _get_vnic_info(self, inspector, instance): - return inspector.inspect_vnic_rates(instance, - self._inspection_duration) - - @staticmethod - def _get_rx_info(info): - return info.rx_bytes_rate - - @staticmethod - def _get_tx_info(info): - return info.tx_bytes_rate - - -class IncomingBytesPollster(_Base): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.incoming.bytes', - type=sample.TYPE_CUMULATIVE, - unit='B', - volume=info.rx_bytes, - vnic_data=vnic, - ) - - -class IncomingPacketsPollster(_Base): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.incoming.packets', - type=sample.TYPE_CUMULATIVE, - unit='packet', - volume=info.rx_packets, - vnic_data=vnic, - ) - - -class OutgoingBytesPollster(_Base): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.outgoing.bytes', - type=sample.TYPE_CUMULATIVE, - unit='B', - volume=info.tx_bytes, - vnic_data=vnic, - ) - - -class OutgoingPacketsPollster(_Base): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.outgoing.packets', - type=sample.TYPE_CUMULATIVE, - unit='packet', - volume=info.tx_packets, - vnic_data=vnic, - ) - - -class IncomingBytesRatePollster(_RateBase): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.incoming.bytes.rate', - type=sample.TYPE_GAUGE, - unit='B/s', - volume=info.rx_bytes_rate, - vnic_data=vnic, - ) - - -class OutgoingBytesRatePollster(_RateBase): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.outgoing.bytes.rate', - type=sample.TYPE_GAUGE, - unit='B/s', - volume=info.tx_bytes_rate, - vnic_data=vnic, - ) diff --git a/ceilometer/compute/pollsters/util.py b/ceilometer/compute/pollsters/util.py deleted file mode 100644 index 1fd4f95f..00000000 --- a/ceilometer/compute/pollsters/util.py +++ /dev/null @@ -1,96 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the 
License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.compute import util as compute_util -from ceilometer import sample - - -INSTANCE_PROPERTIES = [ - # Identity properties - 'reservation_id', - # Type properties - 'architecture', - 'OS-EXT-AZ:availability_zone', - 'kernel_id', - 'os_type', - 'ramdisk_id', -] - - -def _get_metadata_from_object(instance): - """Return a metadata dictionary for the instance.""" - instance_type = instance.flavor['name'] if instance.flavor else None - metadata = { - 'display_name': instance.name, - 'name': getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', u''), - 'instance_id': instance.id, - 'instance_type': instance_type, - 'host': instance.hostId, - 'instance_host': getattr(instance, 'OS-EXT-SRV-ATTR:host', u''), - 'flavor': instance.flavor, - 'status': instance.status.lower(), - 'state': getattr(instance, 'OS-EXT-STS:vm_state', u''), - } - - # Image properties - if instance.image: - metadata['image'] = instance.image - metadata['image_ref'] = instance.image['id'] - # Images that come through the conductor API in the nova notifier - # plugin will not have links. - if instance.image.get('links'): - metadata['image_ref_url'] = instance.image['links'][0]['href'] - else: - metadata['image_ref_url'] = None - else: - metadata['image'] = None - metadata['image_ref'] = None - metadata['image_ref_url'] = None - - for name in INSTANCE_PROPERTIES: - if hasattr(instance, name): - metadata[name] = getattr(instance, name) - - metadata['vcpus'] = instance.flavor['vcpus'] - metadata['memory_mb'] = instance.flavor['ram'] - metadata['disk_gb'] = instance.flavor['disk'] - metadata['ephemeral_gb'] = instance.flavor['ephemeral'] - metadata['root_gb'] = (int(metadata['disk_gb']) - - int(metadata['ephemeral_gb'])) - - return compute_util.add_reserved_user_metadata(instance.metadata, metadata) - - -def make_sample_from_instance(instance, name, type, unit, volume, - resource_id=None, additional_metadata=None): - additional_metadata = additional_metadata or {} - resource_metadata = _get_metadata_from_object(instance) - resource_metadata.update(additional_metadata) - return sample.Sample( - name=name, - type=type, - unit=unit, - volume=volume, - user_id=instance.user_id, - project_id=instance.tenant_id, - resource_id=resource_id or instance.id, - resource_metadata=resource_metadata, - ) - - -def instance_name(instance): - """Shortcut to get instance name.""" - return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None) diff --git a/ceilometer/compute/util.py b/ceilometer/compute/util.py deleted file mode 100644 index 6c253839..00000000 --- a/ceilometer/compute/util.py +++ /dev/null @@ -1,66 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import six - - -# Below config is for collecting metadata which user defined in nova or else, -# and then storing it to Sample for future use according to user's requirement. -# Such as using it as OpenTSDB tags for metrics. -OPTS = [ - cfg.ListOpt('reserved_metadata_namespace', - default=['metering.'], - help='List of metadata prefixes reserved for metering use.'), - cfg.IntOpt('reserved_metadata_length', - default=256, - help='Limit on length of reserved metadata values.'), - cfg.ListOpt('reserved_metadata_keys', - default=[], - help='List of metadata keys reserved for metering use. And ' - 'these keys are additional to the ones included in the ' - 'namespace.'), -] - -cfg.CONF.register_opts(OPTS) - - -def add_reserved_user_metadata(src_metadata, dest_metadata): - limit = cfg.CONF.reserved_metadata_length - user_metadata = {} - for prefix in cfg.CONF.reserved_metadata_namespace: - md = dict( - (k[len(prefix):].replace('.', '_'), - v[:limit] if isinstance(v, six.string_types) else v) - for k, v in src_metadata.items() - if (k.startswith(prefix) and - k[len(prefix):].replace('.', '_') not in dest_metadata) - ) - user_metadata.update(md) - - for metadata_key in cfg.CONF.reserved_metadata_keys: - md = dict( - (k.replace('.', '_'), - v[:limit] if isinstance(v, six.string_types) else v) - for k, v in src_metadata.items() - if (k == metadata_key and - k.replace('.', '_') not in dest_metadata) - ) - user_metadata.update(md) - - if user_metadata: - dest_metadata['user_metadata'] = user_metadata - - return dest_metadata diff --git a/ceilometer/compute/virt/__init__.py b/ceilometer/compute/virt/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/hyperv/__init__.py b/ceilometer/compute/virt/hyperv/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/hyperv/inspector.py b/ceilometer/compute/virt/hyperv/inspector.py deleted file mode 100644 index 38409295..00000000 --- a/ceilometer/compute/virt/hyperv/inspector.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Implementation of Inspector abstraction for Hyper-V""" - -import collections -import functools -import sys - -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_utils import units -import six - -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector - - -def convert_exceptions(function, exception_map): - expected_exceptions = tuple(exception_map.keys()) - - @functools.wraps(function) - def wrapper(*args, **kwargs): - try: - return function(*args, **kwargs) - except expected_exceptions as ex: - # exception might be a subclass of an expected exception. 
- for expected in expected_exceptions: - if isinstance(ex, expected): - raised_exception = exception_map[expected] - break - - exc_info = sys.exc_info() - # NOTE(claudiub): Python 3 raises the exception object given as - # the second argument in six.reraise. - # The original message will be maintained by passing the original - # exception. - exc = raised_exception(six.text_type(exc_info[1])) - six.reraise(raised_exception, exc, exc_info[2]) - return wrapper - - -def decorate_all_methods(decorator, *args, **kwargs): - def decorate(cls): - for attr in cls.__dict__: - class_member = getattr(cls, attr) - if callable(class_member): - setattr(cls, attr, decorator(class_member, *args, **kwargs)) - return cls - - return decorate - - -exception_conversion_map = collections.OrderedDict([ - # NOTE(claudiub): order should be from the most specialized exception type - # to the most generic exception type. - # (expected_exception, converted_exception) - (os_win_exc.NotFound, virt_inspector.InstanceNotFoundException), - (os_win_exc.OSWinException, virt_inspector.InspectorException), -]) - -# NOTE(claudiub): the purpose of the decorator below is to prevent any -# os_win exceptions (subclasses of OSWinException) to leak outside of the -# HyperVInspector. - - -@decorate_all_methods(convert_exceptions, exception_conversion_map) -class HyperVInspector(virt_inspector.Inspector): - - def __init__(self): - super(HyperVInspector, self).__init__() - self._utils = utilsfactory.get_metricsutils() - self._host_max_cpu_clock = self._compute_host_max_cpu_clock() - - def _compute_host_max_cpu_clock(self): - hostutils = utilsfactory.get_hostutils() - # host's number of CPUs and CPU clock speed will not change. - cpu_info = hostutils.get_cpus_info() - host_cpu_count = len(cpu_info) - host_cpu_clock = cpu_info[0]['MaxClockSpeed'] - - return float(host_cpu_clock * host_cpu_count) - - def inspect_cpus(self, instance): - instance_name = util.instance_name(instance) - (cpu_clock_used, - cpu_count, uptime) = self._utils.get_cpu_metrics(instance_name) - - cpu_percent_used = cpu_clock_used / self._host_max_cpu_clock - - # Nanoseconds - cpu_time = (int(uptime * cpu_percent_used) * units.k) - - return virt_inspector.CPUStats(number=cpu_count, time=cpu_time) - - def inspect_memory_usage(self, instance, duration=None): - instance_name = util.instance_name(instance) - usage = self._utils.get_memory_metrics(instance_name) - return virt_inspector.MemoryUsageStats(usage=usage) - - def inspect_vnics(self, instance): - instance_name = util.instance_name(instance) - for vnic_metrics in self._utils.get_vnic_metrics(instance_name): - interface = virt_inspector.Interface( - name=vnic_metrics["element_name"], - mac=vnic_metrics["address"], - fref=None, - parameters=None) - - stats = virt_inspector.InterfaceStats( - rx_bytes=vnic_metrics['rx_mb'] * units.Mi, - rx_packets=0, - tx_bytes=vnic_metrics['tx_mb'] * units.Mi, - tx_packets=0) - - yield (interface, stats) - - def inspect_disks(self, instance): - instance_name = util.instance_name(instance) - for disk_metrics in self._utils.get_disk_metrics(instance_name): - disk = virt_inspector.Disk(device=disk_metrics['instance_id']) - stats = virt_inspector.DiskStats( - read_requests=0, - # Return bytes - read_bytes=disk_metrics['read_mb'] * units.Mi, - write_requests=0, - write_bytes=disk_metrics['write_mb'] * units.Mi, - errors=0) - - yield (disk, stats) - - def inspect_disk_latency(self, instance): - instance_name = util.instance_name(instance) - for disk_metrics in 
self._utils.get_disk_latency_metrics( - instance_name): - disk = virt_inspector.Disk(device=disk_metrics['instance_id']) - stats = virt_inspector.DiskLatencyStats( - disk_latency=disk_metrics['disk_latency']) - - yield (disk, stats) - - def inspect_disk_iops(self, instance): - instance_name = util.instance_name(instance) - for disk_metrics in self._utils.get_disk_iops_count(instance_name): - disk = virt_inspector.Disk(device=disk_metrics['instance_id']) - stats = virt_inspector.DiskIOPSStats( - iops_count=disk_metrics['iops_count']) - - yield (disk, stats) diff --git a/ceilometer/compute/virt/inspector.py b/ceilometer/compute/virt/inspector.py deleted file mode 100644 index 484bf82f..00000000 --- a/ceilometer/compute/virt/inspector.py +++ /dev/null @@ -1,315 +0,0 @@ -# -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Inspector abstraction for read-only access to hypervisors.""" - -import collections - -from oslo_config import cfg -from oslo_log import log -from stevedore import driver - -import ceilometer -from ceilometer.i18n import _ - - -OPTS = [ - cfg.StrOpt('hypervisor_inspector', - default='libvirt', - help='Inspector to use for inspecting the hypervisor layer. ' - 'Known inspectors are libvirt, hyperv, vmware, xenapi ' - 'and powervm.'), -] - -cfg.CONF.register_opts(OPTS) - - -LOG = log.getLogger(__name__) - -# Named tuple representing instances. -# -# name: the name of the instance -# uuid: the UUID associated with the instance -# -Instance = collections.namedtuple('Instance', ['name', 'UUID']) - - -# Named tuple representing CPU statistics. -# -# number: number of CPUs -# time: cumulative CPU time -# -CPUStats = collections.namedtuple('CPUStats', ['number', 'time']) - -# Named tuple representing CPU Utilization statistics. -# -# util: CPU utilization in percentage -# -CPUUtilStats = collections.namedtuple('CPUUtilStats', ['util']) - -# Named tuple representing Memory usage statistics. -# -# usage: Amount of memory used -# -MemoryUsageStats = collections.namedtuple('MemoryUsageStats', ['usage']) - - -# Named tuple representing Resident Memory usage statistics. -# -# resident: Amount of resident memory -# -MemoryResidentStats = collections.namedtuple('MemoryResidentStats', - ['resident']) - - -# Named tuple representing vNICs. -# -# name: the name of the vNIC -# mac: the MAC address -# fref: the filter ref -# parameters: miscellaneous parameters -# -Interface = collections.namedtuple('Interface', ['name', 'mac', - 'fref', 'parameters']) - - -# Named tuple representing vNIC statistics. -# -# rx_bytes: number of received bytes -# rx_packets: number of received packets -# tx_bytes: number of transmitted bytes -# tx_packets: number of transmitted packets -# -InterfaceStats = collections.namedtuple('InterfaceStats', - ['rx_bytes', 'rx_packets', - 'tx_bytes', 'tx_packets']) - - -# Named tuple representing vNIC rate statistics. 
-# -# rx_bytes_rate: rate of received bytes -# tx_bytes_rate: rate of transmitted bytes -# -InterfaceRateStats = collections.namedtuple('InterfaceRateStats', - ['rx_bytes_rate', 'tx_bytes_rate']) - - -# Named tuple representing disks. -# -# device: the device name for the disk -# -Disk = collections.namedtuple('Disk', ['device']) - - -# Named tuple representing disk statistics. -# -# read_bytes: number of bytes read -# read_requests: number of read operations -# write_bytes: number of bytes written -# write_requests: number of write operations -# errors: number of errors -# -DiskStats = collections.namedtuple('DiskStats', - ['read_bytes', 'read_requests', - 'write_bytes', 'write_requests', - 'errors']) - -# Named tuple representing disk rate statistics. -# -# read_bytes_rate: number of bytes read per second -# read_requests_rate: number of read operations per second -# write_bytes_rate: number of bytes written per second -# write_requests_rate: number of write operations per second -# -DiskRateStats = collections.namedtuple('DiskRateStats', - ['read_bytes_rate', - 'read_requests_rate', - 'write_bytes_rate', - 'write_requests_rate']) - -# Named tuple representing disk latency statistics. -# -# disk_latency: average disk latency -# -DiskLatencyStats = collections.namedtuple('DiskLatencyStats', - ['disk_latency']) - -# Named tuple representing disk iops statistics. -# -# iops: number of iops per second -# -DiskIOPSStats = collections.namedtuple('DiskIOPSStats', - ['iops_count']) - - -# Named tuple representing disk Information. -# -# capacity: capacity of the disk -# allocation: allocation of the disk -# physical: usage of the disk - -DiskInfo = collections.namedtuple('DiskInfo', - ['capacity', - 'allocation', - 'physical']) - - -# Exception types -# -class InspectorException(Exception): - def __init__(self, message=None): - super(InspectorException, self).__init__(message) - - -class InstanceNotFoundException(InspectorException): - pass - - -class InstanceShutOffException(InspectorException): - pass - - -class NoDataException(InspectorException): - pass - - -class NoSanityException(InspectorException): - pass - - -# Main virt inspector abstraction layering over the hypervisor API. -# -class Inspector(object): - - def check_sanity(self): - """Check the sanity of hypervisor inspector. - - Each subclass could overwrite it to throw any exception - when detecting mis-configured inspector - """ - pass - - def inspect_cpus(self, instance): - """Inspect the CPU statistics for an instance. - - :param instance: the target instance - :return: the number of CPUs and cumulative CPU time - """ - raise ceilometer.NotImplementedError - - def inspect_cpu_util(self, instance, duration=None): - """Inspect the CPU Utilization (%) for an instance. - - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: the percentage of CPU utilization - """ - raise ceilometer.NotImplementedError - - def inspect_vnics(self, instance): - """Inspect the vNIC statistics for an instance. - - :param instance: the target instance - :return: for each vNIC, the number of bytes & packets - received and transmitted - """ - raise ceilometer.NotImplementedError - - def inspect_vnic_rates(self, instance, duration=None): - """Inspect the vNIC rate statistics for an instance. 
- - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: for each vNIC, the rate of bytes & packets - received and transmitted - """ - raise ceilometer.NotImplementedError - - def inspect_disks(self, instance): - """Inspect the disk statistics for an instance. - - :param instance: the target instance - :return: for each disk, the number of bytes & operations - read and written, and the error count - """ - raise ceilometer.NotImplementedError - - def inspect_memory_usage(self, instance, duration=None): - """Inspect the memory usage statistics for an instance. - - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: the amount of memory used - """ - raise ceilometer.NotImplementedError - - def inspect_memory_resident(self, instance, duration=None): - """Inspect the resident memory statistics for an instance. - - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: the amount of resident memory - """ - raise ceilometer.NotImplementedError - - def inspect_disk_rates(self, instance, duration=None): - """Inspect the disk statistics as rates for an instance. - - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: for each disk, the number of bytes & operations - read and written per second, with the error count - """ - raise ceilometer.NotImplementedError - - def inspect_disk_latency(self, instance): - """Inspect the disk statistics as rates for an instance. - - :param instance: the target instance - :return: for each disk, the average disk latency - """ - raise ceilometer.NotImplementedError - - def inspect_disk_iops(self, instance): - """Inspect the disk statistics as rates for an instance. - - :param instance: the target instance - :return: for each disk, the number of iops per second - """ - raise ceilometer.NotImplementedError - - def inspect_disk_info(self, instance): - """Inspect the disk information for an instance. - - :param instance: the target instance - :return: for each disk , capacity , alloaction and usage - """ - raise ceilometer.NotImplementedError - - -def get_hypervisor_inspector(): - try: - namespace = 'ceilometer.compute.virt' - mgr = driver.DriverManager(namespace, - cfg.CONF.hypervisor_inspector, - invoke_on_load=True) - return mgr.driver - except ImportError as e: - LOG.error(_("Unable to load the hypervisor inspector: %s") % e) - return Inspector() diff --git a/ceilometer/compute/virt/libvirt/__init__.py b/ceilometer/compute/virt/libvirt/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/libvirt/inspector.py b/ceilometer/compute/virt/libvirt/inspector.py deleted file mode 100644 index 8220d997..00000000 --- a/ceilometer/compute/virt/libvirt/inspector.py +++ /dev/null @@ -1,228 +0,0 @@ -# -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Implementation of Inspector abstraction for libvirt.""" - -from lxml import etree -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units -import six - -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ - -libvirt = None - -LOG = logging.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('libvirt_type', - default='kvm', - choices=['kvm', 'lxc', 'qemu', 'uml', 'xen'], - help='Libvirt domain type.'), - cfg.StrOpt('libvirt_uri', - default='', - help='Override the default libvirt URI ' - '(which is dependent on libvirt_type).'), -] - -CONF = cfg.CONF -CONF.register_opts(OPTS) - - -def retry_on_disconnect(function): - def decorator(self, *args, **kwargs): - try: - return function(self, *args, **kwargs) - except ImportError: - # NOTE(sileht): in case of libvirt failed to be imported - raise - except libvirt.libvirtError as e: - if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, - libvirt.VIR_ERR_INTERNAL_ERROR) and - e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, - libvirt.VIR_FROM_RPC)): - LOG.debug('Connection to libvirt broken') - self.connection = None - return function(self, *args, **kwargs) - else: - raise - return decorator - - -class LibvirtInspector(virt_inspector.Inspector): - - per_type_uris = dict(uml='uml:///system', xen='xen:///', lxc='lxc:///') - - def __init__(self): - self.uri = self._get_uri() - self.connection = None - - def _get_uri(self): - return CONF.libvirt_uri or self.per_type_uris.get(CONF.libvirt_type, - 'qemu:///system') - - def _get_connection(self): - if not self.connection: - global libvirt - if libvirt is None: - libvirt = __import__('libvirt') - LOG.debug('Connecting to libvirt: %s', self.uri) - self.connection = libvirt.openReadOnly(self.uri) - - return self.connection - - def check_sanity(self): - if not self._get_connection(): - raise virt_inspector.NoSanityException() - - @retry_on_disconnect - def _lookup_by_uuid(self, instance): - instance_name = util.instance_name(instance) - try: - return self._get_connection().lookupByUUIDString(instance.id) - except Exception as ex: - if not libvirt or not isinstance(ex, libvirt.libvirtError): - raise virt_inspector.InspectorException(six.text_type(ex)) - error_code = ex.get_error_code() - if (error_code in (libvirt.VIR_ERR_SYSTEM_ERROR, - libvirt.VIR_ERR_INTERNAL_ERROR) and - ex.get_error_domain() in (libvirt.VIR_FROM_REMOTE, - libvirt.VIR_FROM_RPC)): - raise - msg = _("Error from libvirt while looking up instance " - ": " - "[Error Code %(error_code)s] " - "%(ex)s") % {'name': instance_name, - 'id': instance.id, - 'error_code': error_code, - 'ex': ex} - raise virt_inspector.InstanceNotFoundException(msg) - - def inspect_cpus(self, instance): - domain = self._get_domain_not_shut_off_or_raise(instance) - dom_info = domain.info() - return virt_inspector.CPUStats(number=dom_info[3], time=dom_info[4]) - - def _get_domain_not_shut_off_or_raise(self, instance): - instance_name = util.instance_name(instance) - domain = self._lookup_by_uuid(instance) - - state = domain.info()[0] - if state == libvirt.VIR_DOMAIN_SHUTOFF: - msg = _('Failed to inspect data of instance ' - ', ' - 'domain state is SHUTOFF.') % { - 'name': instance_name, 'id': instance.id} - raise virt_inspector.InstanceShutOffException(msg) - - return domain - - def inspect_vnics(self, instance): - domain = 
self._get_domain_not_shut_off_or_raise(instance) - - tree = etree.fromstring(domain.XMLDesc(0)) - for iface in tree.findall('devices/interface'): - target = iface.find('target') - if target is not None: - name = target.get('dev') - else: - continue - mac = iface.find('mac') - if mac is not None: - mac_address = mac.get('address') - else: - continue - fref = iface.find('filterref') - if fref is not None: - fref = fref.get('filter') - - params = dict((p.get('name').lower(), p.get('value')) - for p in iface.findall('filterref/parameter')) - interface = virt_inspector.Interface(name=name, mac=mac_address, - fref=fref, parameters=params) - dom_stats = domain.interfaceStats(name) - stats = virt_inspector.InterfaceStats(rx_bytes=dom_stats[0], - rx_packets=dom_stats[1], - tx_bytes=dom_stats[4], - tx_packets=dom_stats[5]) - yield (interface, stats) - - def inspect_disks(self, instance): - domain = self._get_domain_not_shut_off_or_raise(instance) - - tree = etree.fromstring(domain.XMLDesc(0)) - for device in filter( - bool, - [target.get("dev") - for target in tree.findall('devices/disk/target')]): - disk = virt_inspector.Disk(device=device) - block_stats = domain.blockStats(device) - stats = virt_inspector.DiskStats(read_requests=block_stats[0], - read_bytes=block_stats[1], - write_requests=block_stats[2], - write_bytes=block_stats[3], - errors=block_stats[4]) - yield (disk, stats) - - def inspect_memory_usage(self, instance, duration=None): - instance_name = util.instance_name(instance) - domain = self._get_domain_not_shut_off_or_raise(instance) - - try: - memory_stats = domain.memoryStats() - if (memory_stats and - memory_stats.get('available') and - memory_stats.get('unused')): - memory_used = (memory_stats.get('available') - - memory_stats.get('unused')) - # Stat provided from libvirt is in KB, converting it to MB. - memory_used = memory_used / units.Ki - return virt_inspector.MemoryUsageStats(usage=memory_used) - else: - msg = _('Failed to inspect memory usage of instance ' - '<name=%(name)s, id=%(id)s>, ' - 'can not get info from libvirt.') % { - 'name': instance_name, 'id': instance.id} - raise virt_inspector.NoDataException(msg) - # memoryStats might launch an exception if the method is not supported - # by the underlying hypervisor being used by libvirt. 
- except libvirt.libvirtError as e: - msg = _('Failed to inspect memory usage of %(instance_uuid)s, ' - 'can not get info from libvirt: %(error)s') % { - 'instance_uuid': instance.id, 'error': e} - raise virt_inspector.NoDataException(msg) - - def inspect_disk_info(self, instance): - domain = self._get_domain_not_shut_off_or_raise(instance) - - tree = etree.fromstring(domain.XMLDesc(0)) - for device in filter( - bool, - [target.get("dev") - for target in tree.findall('devices/disk/target')]): - disk = virt_inspector.Disk(device=device) - block_info = domain.blockInfo(device) - info = virt_inspector.DiskInfo(capacity=block_info[0], - allocation=block_info[1], - physical=block_info[2]) - - yield (disk, info) - - def inspect_memory_resident(self, instance, duration=None): - domain = self._get_domain_not_shut_off_or_raise(instance) - memory = domain.memoryStats()['rss'] / units.Ki - return virt_inspector.MemoryResidentStats(resident=memory) diff --git a/ceilometer/compute/virt/vmware/__init__.py b/ceilometer/compute/virt/vmware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/vmware/inspector.py b/ceilometer/compute/virt/vmware/inspector.py deleted file mode 100644 index 0009defa..00000000 --- a/ceilometer/compute/virt/vmware/inspector.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of Inspector abstraction for VMware vSphere""" - -from oslo_config import cfg -from oslo_utils import units -from oslo_vmware import api -import six - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.vmware import vsphere_operations -from ceilometer.i18n import _ - - -opt_group = cfg.OptGroup(name='vmware', - title='Options for VMware') - -OPTS = [ - cfg.StrOpt('host_ip', - default='', - help='IP address of the VMware vSphere host.'), - cfg.PortOpt('host_port', - default=443, - help='Port of the VMware vSphere host.'), - cfg.StrOpt('host_username', - default='', - help='Username of VMware vSphere.'), - cfg.StrOpt('host_password', - default='', - help='Password of VMware vSphere.', - secret=True), - cfg.StrOpt('ca_file', - help='CA bundle file to use in verifying the vCenter server ' - 'certificate.'), - cfg.BoolOpt('insecure', - default=False, - help='If true, the vCenter server certificate is not ' - 'verified. If false, then the default CA truststore is ' - 'used for verification. This option is ignored if ' - '"ca_file" is set.'), - cfg.IntOpt('api_retry_count', - default=10, - help='Number of times a VMware vSphere API may be retried.'), - cfg.FloatOpt('task_poll_interval', - default=0.5, - help='Sleep time in seconds for polling an ongoing async ' - 'task.'), - cfg.StrOpt('wsdl_location', - help='Optional vim service WSDL location ' - 'e.g http:///vimService.wsdl. 
' - 'Optional over-ride to default location for bug ' - 'work-arounds.'), -] - -cfg.CONF.register_group(opt_group) -cfg.CONF.register_opts(OPTS, group=opt_group) - -VC_AVERAGE_MEMORY_CONSUMED_CNTR = 'mem:consumed:average' -VC_AVERAGE_CPU_CONSUMED_CNTR = 'cpu:usage:average' -VC_NETWORK_RX_COUNTER = 'net:received:average' -VC_NETWORK_TX_COUNTER = 'net:transmitted:average' -VC_DISK_READ_RATE_CNTR = "disk:read:average" -VC_DISK_READ_REQUESTS_RATE_CNTR = "disk:numberReadAveraged:average" -VC_DISK_WRITE_RATE_CNTR = "disk:write:average" -VC_DISK_WRITE_REQUESTS_RATE_CNTR = "disk:numberWriteAveraged:average" - - -def get_api_session(): - api_session = api.VMwareAPISession( - cfg.CONF.vmware.host_ip, - cfg.CONF.vmware.host_username, - cfg.CONF.vmware.host_password, - cfg.CONF.vmware.api_retry_count, - cfg.CONF.vmware.task_poll_interval, - wsdl_loc=cfg.CONF.vmware.wsdl_location, - port=cfg.CONF.vmware.host_port, - cacert=cfg.CONF.vmware.ca_file, - insecure=cfg.CONF.vmware.insecure) - return api_session - - -class VsphereInspector(virt_inspector.Inspector): - - def __init__(self): - super(VsphereInspector, self).__init__() - self._ops = vsphere_operations.VsphereOperations( - get_api_session(), 1000) - - def inspect_cpu_util(self, instance, duration=None): - vm_moid = self._ops.get_vm_moid(instance.id) - if vm_moid is None: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in VMware vSphere') % instance.id) - cpu_util_counter_id = self._ops.get_perf_counter_id( - VC_AVERAGE_CPU_CONSUMED_CNTR) - cpu_util = self._ops.query_vm_aggregate_stats( - vm_moid, cpu_util_counter_id, duration) - - # For this counter vSphere returns values scaled-up by 100, since the - # corresponding API can't return decimals, but only longs. - # For e.g. if the utilization is 12.34%, the value returned is 1234. - # Hence, dividing by 100. - cpu_util = cpu_util / 100 - return virt_inspector.CPUUtilStats(util=cpu_util) - - def inspect_vnic_rates(self, instance, duration=None): - vm_moid = self._ops.get_vm_moid(instance.id) - if not vm_moid: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in VMware vSphere') % instance.id) - - vnic_stats = {} - vnic_ids = set() - - for net_counter in (VC_NETWORK_RX_COUNTER, VC_NETWORK_TX_COUNTER): - net_counter_id = self._ops.get_perf_counter_id(net_counter) - vnic_id_to_stats_map = self._ops.query_vm_device_stats( - vm_moid, net_counter_id, duration) - vnic_stats[net_counter] = vnic_id_to_stats_map - vnic_ids.update(six.iterkeys(vnic_id_to_stats_map)) - - # Stats provided from vSphere are in KB/s, converting it to B/s. - for vnic_id in vnic_ids: - rx_bytes_rate = (vnic_stats[VC_NETWORK_RX_COUNTER] - .get(vnic_id, 0) * units.Ki) - tx_bytes_rate = (vnic_stats[VC_NETWORK_TX_COUNTER] - .get(vnic_id, 0) * units.Ki) - - stats = virt_inspector.InterfaceRateStats(rx_bytes_rate, - tx_bytes_rate) - interface = virt_inspector.Interface( - name=vnic_id, - mac=None, - fref=None, - parameters=None) - yield (interface, stats) - - def inspect_memory_usage(self, instance, duration=None): - vm_moid = self._ops.get_vm_moid(instance.id) - if vm_moid is None: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in VMware vSphere') % instance.id) - mem_counter_id = self._ops.get_perf_counter_id( - VC_AVERAGE_MEMORY_CONSUMED_CNTR) - memory = self._ops.query_vm_aggregate_stats( - vm_moid, mem_counter_id, duration) - # Stat provided from vSphere is in KB, converting it to MB. 
- memory = memory / units.Ki - return virt_inspector.MemoryUsageStats(usage=memory) - - def inspect_disk_rates(self, instance, duration=None): - vm_moid = self._ops.get_vm_moid(instance.id) - if not vm_moid: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in VMware vSphere') % instance.id) - - disk_stats = {} - disk_ids = set() - disk_counters = [ - VC_DISK_READ_RATE_CNTR, - VC_DISK_READ_REQUESTS_RATE_CNTR, - VC_DISK_WRITE_RATE_CNTR, - VC_DISK_WRITE_REQUESTS_RATE_CNTR - ] - - for disk_counter in disk_counters: - disk_counter_id = self._ops.get_perf_counter_id(disk_counter) - disk_id_to_stat_map = self._ops.query_vm_device_stats( - vm_moid, disk_counter_id, duration) - disk_stats[disk_counter] = disk_id_to_stat_map - disk_ids.update(six.iterkeys(disk_id_to_stat_map)) - - for disk_id in disk_ids: - - def stat_val(counter_name): - return disk_stats[counter_name].get(disk_id, 0) - - disk = virt_inspector.Disk(device=disk_id) - # Stats provided from vSphere are in KB/s, converting it to B/s. - disk_rate_info = virt_inspector.DiskRateStats( - read_bytes_rate=stat_val(VC_DISK_READ_RATE_CNTR) * units.Ki, - read_requests_rate=stat_val(VC_DISK_READ_REQUESTS_RATE_CNTR), - write_bytes_rate=stat_val(VC_DISK_WRITE_RATE_CNTR) * units.Ki, - write_requests_rate=stat_val(VC_DISK_WRITE_REQUESTS_RATE_CNTR) - ) - yield(disk, disk_rate_info) diff --git a/ceilometer/compute/virt/vmware/vsphere_operations.py b/ceilometer/compute/virt/vmware/vsphere_operations.py deleted file mode 100644 index 16b92b8b..00000000 --- a/ceilometer/compute/virt/vmware/vsphere_operations.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_vmware import vim_util - - -PERF_MANAGER_TYPE = "PerformanceManager" -PERF_COUNTER_PROPERTY = "perfCounter" -VM_INSTANCE_ID_PROPERTY = 'config.extraConfig["nvp.vm-uuid"].value' - -# ESXi Servers sample performance data every 20 seconds. 20-second interval -# data is called instance data or real-time data. To retrieve instance data, -# we need to specify a value of 20 seconds for the "PerfQuerySpec.intervalId" -# property. In that case the "QueryPerf" method operates as a raw data feed -# that bypasses the vCenter database and instead retrieves performance data -# from an ESXi host. -# The following value is time interval for real-time performance stats -# in seconds and it is not configurable. -VC_REAL_TIME_SAMPLING_INTERVAL = 20 - - -class VsphereOperations(object): - """Class to invoke vSphere APIs calls. - - vSphere APIs calls are required by various pollsters, collecting data from - VMware infrastructure. - """ - def __init__(self, api_session, max_objects): - self._api_session = api_session - self._max_objects = max_objects - # Mapping between "VM's Nova instance Id" -> "VM's MOID" - # In case a VM is deployed by Nova, then its name is instance ID. - # So this map essentially has VM names as keys. 
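# A minimal, hypothetical sketch of the lazy lookup cache described in the
# comment above: keys are Nova instance UUIDs (read from the VM's
# "nvp.vm-uuid" extra config), values are vCenter managed object ids
# ("moids"). The identifiers and the refresh helper below are illustrative
# assumptions, not part of this module.
def lookup_moid(lookup_map, vm_instance_id, refresh):
    if vm_instance_id not in lookup_map:
        refresh(lookup_map)  # re-walk the VirtualMachine objects on a miss
    return lookup_map.get(vm_instance_id)

def _fake_refresh(lookup_map):
    lookup_map['0f4e5b3c-8d5a-4b6e-9a21-1c2d3e4f5a6b'] = 'vm-1234'

assert lookup_moid({}, '0f4e5b3c-8d5a-4b6e-9a21-1c2d3e4f5a6b',
                   _fake_refresh) == 'vm-1234'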
- self._vm_moid_lookup_map = {} - - # Mapping from full name -> ID, for VC Performance counters - self._perf_counter_id_lookup_map = None - - def _init_vm_moid_lookup_map(self): - session = self._api_session - result = session.invoke_api(vim_util, "get_objects", session.vim, - "VirtualMachine", self._max_objects, - [VM_INSTANCE_ID_PROPERTY], - False) - while result: - for vm_object in result.objects: - vm_moid = vm_object.obj.value - # propSet will be set only if the server provides value - if hasattr(vm_object, 'propSet') and vm_object.propSet: - vm_instance_id = vm_object.propSet[0].val - if vm_instance_id: - self._vm_moid_lookup_map[vm_instance_id] = vm_moid - - result = session.invoke_api(vim_util, "continue_retrieval", - session.vim, result) - - def get_vm_moid(self, vm_instance_id): - """Method returns VC MOID of the VM by its NOVA instance ID.""" - if vm_instance_id not in self._vm_moid_lookup_map: - self._init_vm_moid_lookup_map() - - return self._vm_moid_lookup_map.get(vm_instance_id, None) - - def _init_perf_counter_id_lookup_map(self): - - # Query details of all the performance counters from VC - session = self._api_session - client_factory = session.vim.client.factory - perf_manager = session.vim.service_content.perfManager - - prop_spec = vim_util.build_property_spec( - client_factory, PERF_MANAGER_TYPE, [PERF_COUNTER_PROPERTY]) - - obj_spec = vim_util.build_object_spec( - client_factory, perf_manager, None) - - filter_spec = vim_util.build_property_filter_spec( - client_factory, [prop_spec], [obj_spec]) - - options = client_factory.create('ns0:RetrieveOptions') - options.maxObjects = 1 - - prop_collector = session.vim.service_content.propertyCollector - result = session.invoke_api(session.vim, "RetrievePropertiesEx", - prop_collector, specSet=[filter_spec], - options=options) - - perf_counter_infos = result.objects[0].propSet[0].val.PerfCounterInfo - - # Extract the counter Id for each counter and populate the map - self._perf_counter_id_lookup_map = {} - for perf_counter_info in perf_counter_infos: - - counter_group = perf_counter_info.groupInfo.key - counter_name = perf_counter_info.nameInfo.key - counter_rollup_type = perf_counter_info.rollupType - counter_id = perf_counter_info.key - - counter_full_name = (counter_group + ":" + counter_name + ":" + - counter_rollup_type) - self._perf_counter_id_lookup_map[counter_full_name] = counter_id - - def get_perf_counter_id(self, counter_full_name): - """Method returns the ID of VC performance counter by its full name. - - A VC performance counter is uniquely identified by the - tuple {'Group Name', 'Counter Name', 'Rollup Type'}. - It will have an id - counter ID (changes from one VC to another), - which is required to query performance stats from that VC. - This method returns the ID for a counter, - assuming 'CounterFullName' => 'Group Name:CounterName:RollupType'. - """ - if not self._perf_counter_id_lookup_map: - self._init_perf_counter_id_lookup_map() - return self._perf_counter_id_lookup_map[counter_full_name] - - # TODO(akhils@vmware.com) Move this method to common library - # when it gets checked-in - def query_vm_property(self, vm_moid, property_name): - """Method returns the value of specified property for a VM. 
- - :param vm_moid: moid of the VM whose property is to be queried - :param property_name: path of the property - """ - vm_mobj = vim_util.get_moref(vm_moid, "VirtualMachine") - session = self._api_session - return session.invoke_api(vim_util, "get_object_property", - session.vim, vm_mobj, property_name) - - def query_vm_aggregate_stats(self, vm_moid, counter_id, duration): - """Method queries the aggregated real-time stat value for a VM. - - This method should be used for aggregate counters. - - :param vm_moid: moid of the VM - :param counter_id: id of the perf counter in VC - :param duration: in seconds from current time, - over which the stat value was applicable - :return: the aggregated stats value for the counter - """ - # For aggregate counters, device_name should be "" - stats = self._query_vm_perf_stats(vm_moid, counter_id, "", duration) - - # Performance manager provides the aggregated stats value - # with device name -> None - return stats.get(None, 0) - - def query_vm_device_stats(self, vm_moid, counter_id, duration): - """Method queries the real-time stat values for a VM, for all devices. - - This method should be used for device(non-aggregate) counters. - - :param vm_moid: moid of the VM - :param counter_id: id of the perf counter in VC - :param duration: in seconds from current time, - over which the stat value was applicable - :return: a map containing the stat values keyed by the device ID/name - """ - # For device counters, device_name should be "*" to get stat values - # for all devices. - stats = self._query_vm_perf_stats(vm_moid, counter_id, "*", duration) - - # For some device counters, in addition to the per device value - # the Performance manager also returns the aggregated value. - # Just to be consistent, deleting the aggregated value if present. - stats.pop(None, None) - return stats - - def _query_vm_perf_stats(self, vm_moid, counter_id, device_name, duration): - """Method queries the real-time stat values for a VM. - - :param vm_moid: moid of the VM for which stats are needed - :param counter_id: id of the perf counter in VC - :param device_name: name of the device for which stats are to be - queried. For aggregate counters pass empty string (""). - For device counters pass "*", if stats are required over all - devices. 
- :param duration: in seconds from current time, - over which the stat value was applicable - :return: a map containing the stat values keyed by the device ID/name - """ - - session = self._api_session - client_factory = session.vim.client.factory - - # Construct the QuerySpec - metric_id = client_factory.create('ns0:PerfMetricId') - metric_id.counterId = counter_id - metric_id.instance = device_name - - query_spec = client_factory.create('ns0:PerfQuerySpec') - query_spec.entity = vim_util.get_moref(vm_moid, "VirtualMachine") - query_spec.metricId = [metric_id] - query_spec.intervalId = VC_REAL_TIME_SAMPLING_INTERVAL - # We query all samples which are applicable over the specified duration - samples_cnt = (int(duration / VC_REAL_TIME_SAMPLING_INTERVAL) - if duration and - duration >= VC_REAL_TIME_SAMPLING_INTERVAL else 1) - query_spec.maxSample = samples_cnt - - perf_manager = session.vim.service_content.perfManager - perf_stats = session.invoke_api(session.vim, 'QueryPerf', perf_manager, - querySpec=[query_spec]) - - stat_values = {} - if perf_stats: - entity_metric = perf_stats[0] - sample_infos = entity_metric.sampleInfo - - if len(sample_infos) > 0: - for metric_series in entity_metric.value: - # Take the average of all samples to improve the accuracy - # of the stat value - stat_value = float(sum(metric_series.value)) / samples_cnt - device_id = metric_series.id.instance - stat_values[device_id] = stat_value - - return stat_values diff --git a/ceilometer/compute/virt/xenapi/__init__.py b/ceilometer/compute/virt/xenapi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/xenapi/inspector.py b/ceilometer/compute/virt/xenapi/inspector.py deleted file mode 100644 index 19405dd0..00000000 --- a/ceilometer/compute/virt/xenapi/inspector.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
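# A minimal sketch of the sampling arithmetic used by
# VsphereOperations._query_vm_perf_stats above: vCenter real-time stats are
# sampled every 20 seconds, so a duration of 300 seconds maps to 15 samples,
# and the reported value is their plain average. The sample values below are
# hypothetical.
REAL_TIME_SAMPLING_INTERVAL = 20  # seconds, fixed by vCenter

def average_over_duration(sample_values, duration):
    samples_cnt = (int(duration / REAL_TIME_SAMPLING_INTERVAL)
                   if duration and duration >= REAL_TIME_SAMPLING_INTERVAL
                   else 1)
    return float(sum(sample_values)) / samples_cnt

# 300 seconds of hypothetical 20-second CPU-usage samples:
assert average_over_duration([10.0] * 15, 300) == 10.0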
-"""Implementation of Inspector abstraction for XenAPI.""" - -from oslo_config import cfg -from oslo_utils import units -import six.moves.urllib.parse as urlparse -try: - import XenAPI as api -except ImportError: - api = None - -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ - -opt_group = cfg.OptGroup(name='xenapi', - title='Options for XenAPI') - -OPTS = [ - cfg.StrOpt('connection_url', - help='URL for connection to XenServer/Xen Cloud Platform.'), - cfg.StrOpt('connection_username', - default='root', - help='Username for connection to XenServer/Xen Cloud ' - 'Platform.'), - cfg.StrOpt('connection_password', - help='Password for connection to XenServer/Xen Cloud Platform.', - secret=True), -] - -CONF = cfg.CONF -CONF.register_group(opt_group) -CONF.register_opts(OPTS, group=opt_group) - - -class XenapiException(virt_inspector.InspectorException): - pass - - -def swap_xapi_host(url, host_addr): - """Replace the XenServer address present in 'url' with 'host_addr'.""" - temp_url = urlparse.urlparse(url) - # The connection URL is served by XAPI and doesn't support having a - # path for the connection url after the port. And username/password - # will be pass separately. So the URL like "http://abc:abc@abc:433/abc" - # should not appear for XAPI case. - temp_netloc = temp_url.netloc.replace(temp_url.hostname, '%s' % host_addr) - replaced = temp_url._replace(netloc=temp_netloc) - return urlparse.urlunparse(replaced) - - -def get_api_session(): - if not api: - raise ImportError(_('XenAPI not installed')) - - url = CONF.xenapi.connection_url - username = CONF.xenapi.connection_username - password = CONF.xenapi.connection_password - if not url or password is None: - raise XenapiException(_('Must specify connection_url, and ' - 'connection_password to use')) - - try: - session = (api.xapi_local() if url == 'unix://local' - else api.Session(url)) - session.login_with_password(username, password) - except api.Failure as e: - if e.details[0] == 'HOST_IS_SLAVE': - master = e.details[1] - url = swap_xapi_host(url, master) - try: - session = api.Session(url) - session.login_with_password(username, password) - except api.Failure as es: - raise XenapiException(_('Could not connect slave host: %s ') % - es.details[0]) - else: - msg = _("Could not connect to XenAPI: %s") % e.details[0] - raise XenapiException(msg) - return session - - -class XenapiInspector(virt_inspector.Inspector): - - def __init__(self): - super(XenapiInspector, self).__init__() - self.session = get_api_session() - - def _get_host_ref(self): - """Return the xenapi host on which nova-compute runs on.""" - return self.session.xenapi.session.get_this_host(self.session.handle) - - def _call_xenapi(self, method, *args): - return self.session.xenapi_request(method, args) - - def _lookup_by_name(self, instance_name): - vm_refs = self._call_xenapi("VM.get_by_name_label", instance_name) - n = len(vm_refs) - if n == 0: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in XenServer') % instance_name) - elif n > 1: - raise XenapiException( - _('Multiple VM %s found in XenServer') % instance_name) - else: - return vm_refs[0] - - def inspect_cpu_util(self, instance, duration=None): - instance_name = util.instance_name(instance) - vm_ref = self._lookup_by_name(instance_name) - metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref) - metrics_rec = self._call_xenapi("VM_metrics.get_record", - metrics_ref) - vcpus_number = 
metrics_rec['VCPUs_number'] - vcpus_utils = metrics_rec['VCPUs_utilisation'] - if len(vcpus_utils) == 0: - msg = _("Could not get VM %s CPU Utilization") % instance_name - raise XenapiException(msg) - - utils = 0.0 - for num in range(int(vcpus_number)): - utils += vcpus_utils.get(str(num)) - utils = utils / int(vcpus_number) * 100 - return virt_inspector.CPUUtilStats(util=utils) - - def inspect_memory_usage(self, instance, duration=None): - instance_name = util.instance_name(instance) - vm_ref = self._lookup_by_name(instance_name) - metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref) - metrics_rec = self._call_xenapi("VM_metrics.get_record", - metrics_ref) - # Stat provided from XenServer is in B, converting it to MB. - memory = int(metrics_rec['memory_actual']) / units.Mi - return virt_inspector.MemoryUsageStats(usage=memory) - - def inspect_vnic_rates(self, instance, duration=None): - instance_name = util.instance_name(instance) - vm_ref = self._lookup_by_name(instance_name) - vif_refs = self._call_xenapi("VM.get_VIFs", vm_ref) - if vif_refs: - for vif_ref in vif_refs: - vif_rec = self._call_xenapi("VIF.get_record", vif_ref) - vif_metrics_ref = self._call_xenapi( - "VIF.get_metrics", vif_ref) - vif_metrics_rec = self._call_xenapi( - "VIF_metrics.get_record", vif_metrics_ref) - - interface = virt_inspector.Interface( - name=vif_rec['uuid'], - mac=vif_rec['MAC'], - fref=None, - parameters=None) - rx_rate = float(vif_metrics_rec['io_read_kbs']) * units.Ki - tx_rate = float(vif_metrics_rec['io_write_kbs']) * units.Ki - stats = virt_inspector.InterfaceRateStats(rx_rate, tx_rate) - yield (interface, stats) - - def inspect_disk_rates(self, instance, duration=None): - instance_name = util.instance_name(instance) - vm_ref = self._lookup_by_name(instance_name) - vbd_refs = self._call_xenapi("VM.get_VBDs", vm_ref) - if vbd_refs: - for vbd_ref in vbd_refs: - vbd_rec = self._call_xenapi("VBD.get_record", vbd_ref) - vbd_metrics_ref = self._call_xenapi("VBD.get_metrics", - vbd_ref) - vbd_metrics_rec = self._call_xenapi("VBD_metrics.get_record", - vbd_metrics_ref) - - disk = virt_inspector.Disk(device=vbd_rec['device']) - # Stats provided from XenServer are in KB/s, - # converting it to B/s. - read_rate = float(vbd_metrics_rec['io_read_kbs']) * units.Ki - write_rate = float(vbd_metrics_rec['io_write_kbs']) * units.Ki - disk_rate_info = virt_inspector.DiskRateStats( - read_bytes_rate=read_rate, - read_requests_rate=0, - write_bytes_rate=write_rate, - write_requests_rate=0) - yield(disk, disk_rate_info) diff --git a/ceilometer/coordination.py b/ceilometer/coordination.py deleted file mode 100644 index 2bb584bf..00000000 --- a/ceilometer/coordination.py +++ /dev/null @@ -1,229 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
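# A minimal sketch of the per-vCPU averaging performed by
# XenapiInspector.inspect_cpu_util above: 'VCPUs_utilisation' maps the vCPU
# index (as a string) to a busy fraction between 0.0 and 1.0, and the
# reported figure is the mean of those fractions expressed as a percentage.
# The values below are hypothetical.
def average_vcpu_util(vcpus_utilisation):
    values = [vcpus_utilisation[str(i)] for i in range(len(vcpus_utilisation))]
    return sum(values) / len(values) * 100

# Two hypothetical vCPUs at 25% and 75% busy average out to 50%.
assert average_vcpu_util({'0': 0.25, '1': 0.75}) == 50.0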
- -import uuid - -from oslo_config import cfg -from oslo_log import log -import retrying -import tooz.coordination - -from ceilometer.i18n import _LE, _LI, _LW -from ceilometer import utils - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('backend_url', - help='The backend URL to use for distributed coordination. If ' - 'left empty, per-deployment central agent and per-host ' - 'compute agent won\'t do workload ' - 'partitioning and will only function correctly if a ' - 'single instance of that service is running.'), - cfg.FloatOpt('heartbeat', - default=1.0, - help='Number of seconds between heartbeats for distributed ' - 'coordination.'), - cfg.FloatOpt('check_watchers', - default=10.0, - help='Number of seconds between checks to see if group ' - 'membership has changed'), - cfg.IntOpt('retry_backoff', - default=1, - help='Retry backoff factor when retrying to connect with' - 'coordination backend'), - cfg.IntOpt('max_retry_interval', - default=30, - help='Maximum number of seconds between retry to join ' - 'partitioning group') -] -cfg.CONF.register_opts(OPTS, group='coordination') - - -class ErrorJoiningPartitioningGroup(Exception): - def __init__(self): - super(ErrorJoiningPartitioningGroup, self).__init__(_LE( - 'Coordination join_group Error joining partitioning group')) - - -class MemberNotInGroupError(Exception): - def __init__(self, group_id, members, my_id): - super(MemberNotInGroupError, self).__init__(_LE( - 'Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: ' - 'Current agent is not part of group and cannot take tasks') % - {'group_id': group_id, 'members': members, 'me': my_id}) - - -def retry_on_error_joining_partition(exception): - return isinstance(exception, ErrorJoiningPartitioningGroup) - - -def retry_on_member_not_in_group(exception): - return isinstance(exception, MemberNotInGroupError) - - -class PartitionCoordinator(object): - """Workload partitioning coordinator. - - This class uses the `tooz` library to manage group membership. - - To ensure that the other agents know this agent is still alive, - the `heartbeat` method should be called periodically. - - Coordination errors and reconnects are handled under the hood, so the - service using the partition coordinator need not care whether the - coordination backend is down. The `extract_my_subset` will simply return an - empty iterable in this case. 
- """ - - def __init__(self, my_id=None): - self._coordinator = None - self._groups = set() - self._my_id = my_id or str(uuid.uuid4()) - - def start(self): - backend_url = cfg.CONF.coordination.backend_url - if backend_url: - try: - self._coordinator = tooz.coordination.get_coordinator( - backend_url, self._my_id) - self._coordinator.start() - LOG.info(_LI('Coordination backend started successfully.')) - except tooz.coordination.ToozError: - LOG.exception(_LE('Error connecting to coordination backend.')) - - def stop(self): - if not self._coordinator: - return - - for group in list(self._groups): - self.leave_group(group) - - try: - self._coordinator.stop() - except tooz.coordination.ToozError: - LOG.exception(_LE('Error connecting to coordination backend.')) - finally: - self._coordinator = None - - def is_active(self): - return self._coordinator is not None - - def heartbeat(self): - if self._coordinator: - if not self._coordinator.is_started: - # re-connect - self.start() - try: - self._coordinator.heartbeat() - except tooz.coordination.ToozError: - LOG.exception(_LE('Error sending a heartbeat to coordination ' - 'backend.')) - - def watch_group(self, namespace, callback): - if self._coordinator: - self._coordinator.watch_join_group(namespace, callback) - self._coordinator.watch_leave_group(namespace, callback) - - def run_watchers(self): - if self._coordinator: - self._coordinator.run_watchers() - - def join_group(self, group_id): - if (not self._coordinator or not self._coordinator.is_started - or not group_id): - return - - retry_backoff = cfg.CONF.coordination.retry_backoff * 1000 - max_retry_interval = cfg.CONF.coordination.max_retry_interval * 1000 - - @retrying.retry( - wait_exponential_multiplier=retry_backoff, - wait_exponential_max=max_retry_interval, - retry_on_exception=retry_on_error_joining_partition, - wrap_exception=True) - def _inner(): - try: - join_req = self._coordinator.join_group(group_id) - join_req.get() - LOG.info(_LI('Joined partitioning group %s'), group_id) - except tooz.coordination.MemberAlreadyExist: - return - except tooz.coordination.GroupNotCreated: - create_grp_req = self._coordinator.create_group(group_id) - try: - create_grp_req.get() - except tooz.coordination.GroupAlreadyExist: - pass - raise ErrorJoiningPartitioningGroup() - except tooz.coordination.ToozError: - LOG.exception(_LE('Error joining partitioning group %s,' - ' re-trying'), group_id) - raise ErrorJoiningPartitioningGroup() - self._groups.add(group_id) - - return _inner() - - def leave_group(self, group_id): - if group_id not in self._groups: - return - if self._coordinator: - self._coordinator.leave_group(group_id) - self._groups.remove(group_id) - LOG.info(_LI('Left partitioning group %s'), group_id) - - def _get_members(self, group_id): - if not self._coordinator: - return [self._my_id] - - while True: - get_members_req = self._coordinator.get_members(group_id) - try: - return get_members_req.get() - except tooz.coordination.GroupNotCreated: - self.join_group(group_id) - - @retrying.retry(stop_max_attempt_number=5, wait_random_max=2000, - retry_on_exception=retry_on_member_not_in_group) - def extract_my_subset(self, group_id, iterable, attempt=0): - """Filters an iterable, returning only objects assigned to this agent. - - We have a list of objects and get a list of active group members from - `tooz`. We then hash all the objects into buckets and return only - the ones that hashed into *our* bucket. 
- """ - if not group_id: - return iterable - if group_id not in self._groups: - self.join_group(group_id) - try: - members = self._get_members(group_id) - LOG.debug('Members of group: %s, Me: %s', members, self._my_id) - if self._my_id not in members: - LOG.warning(_LW('Cannot extract tasks because agent failed to ' - 'join group properly. Rejoining group.')) - self.join_group(group_id) - members = self._get_members(group_id) - if self._my_id not in members: - raise MemberNotInGroupError(group_id, members, self._my_id) - hr = utils.HashRing(members) - filtered = [v for v in iterable - if hr.get_node(str(v)) == self._my_id] - LOG.debug('My subset: %s', [str(f) for f in filtered]) - return filtered - except tooz.coordination.ToozError: - LOG.exception(_LE('Error getting group membership info from ' - 'coordination backend.')) - return [] diff --git a/ceilometer/declarative.py b/ceilometer/declarative.py deleted file mode 100644 index 47ebbe73..00000000 --- a/ceilometer/declarative.py +++ /dev/null @@ -1,188 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from jsonpath_rw_ext import parser -from oslo_config import cfg -from oslo_log import log -import six -import yaml - -from ceilometer.i18n import _, _LI - -LOG = log.getLogger(__name__) - - -class DefinitionException(Exception): - def __init__(self, message, definition_cfg): - msg = '%s %s: %s' % (self.__class__.__name__, definition_cfg, message) - super(DefinitionException, self).__init__(msg) - self.brief_message = message - - -class MeterDefinitionException(DefinitionException): - pass - - -class EventDefinitionException(DefinitionException): - pass - - -class ResourceDefinitionException(DefinitionException): - pass - - -class Definition(object): - JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser() - GETTERS_CACHE = {} - - def __init__(self, name, cfg, plugin_manager): - self.cfg = cfg - self.name = name - self.plugin = None - if isinstance(cfg, dict): - if 'fields' not in cfg: - raise DefinitionException( - _("The field 'fields' is required for %s") % name, - self.cfg) - - if 'plugin' in cfg: - plugin_cfg = cfg['plugin'] - if isinstance(plugin_cfg, six.string_types): - plugin_name = plugin_cfg - plugin_params = {} - else: - try: - plugin_name = plugin_cfg['name'] - except KeyError: - raise DefinitionException( - _('Plugin specified, but no plugin name supplied ' - 'for %s') % name, self.cfg) - plugin_params = plugin_cfg.get('parameters') - if plugin_params is None: - plugin_params = {} - try: - plugin_ext = plugin_manager[plugin_name] - except KeyError: - raise DefinitionException( - _('No plugin named %(plugin)s available for ' - '%(name)s') % dict( - plugin=plugin_name, - name=name), self.cfg) - plugin_class = plugin_ext.plugin - self.plugin = plugin_class(**plugin_params) - - fields = cfg['fields'] - else: - # Simple definition "foobar: jsonpath" - fields = cfg - - if isinstance(fields, list): - # NOTE(mdragon): if not a string, we assume a list. 
- if len(fields) == 1: - fields = fields[0] - else: - fields = '|'.join('(%s)' % path for path in fields) - - if isinstance(fields, six.integer_types): - self.getter = fields - else: - try: - self.getter = self.make_getter(fields) - except Exception as e: - raise DefinitionException( - _("Parse error in JSONPath specification " - "'%(jsonpath)s' for %(name)s: %(err)s") - % dict(jsonpath=fields, name=name, err=e), self.cfg) - - def _get_path(self, match): - if match.context is not None: - for path_element in self._get_path(match.context): - yield path_element - yield str(match.path) - - def parse(self, obj, return_all_values=False): - if callable(self.getter): - values = self.getter(obj) - else: - return self.getter - - values = [match for match in values - if return_all_values or match.value is not None] - - if self.plugin is not None: - if return_all_values and not self.plugin.support_return_all_values: - raise DefinitionException("Plugin %s don't allows to " - "return multiple values" % - self.cfg["plugin"]["name"], self.cfg) - values_map = [('.'.join(self._get_path(match)), match.value) for - match in values] - values = [v for v in self.plugin.trait_values(values_map) - if v is not None] - else: - values = [match.value for match in values if match is not None] - if return_all_values: - return values - else: - return values[0] if values else None - - def make_getter(self, fields): - if fields in self.GETTERS_CACHE: - return self.GETTERS_CACHE[fields] - else: - getter = self.JSONPATH_RW_PARSER.parse(fields).find - self.GETTERS_CACHE[fields] = getter - return getter - - -def load_definitions(defaults, config_file, fallback_file=None): - """Setup a definitions from yaml config file.""" - - if not os.path.exists(config_file): - config_file = cfg.CONF.find_file(config_file) - if not config_file and fallback_file is not None: - LOG.debug("No Definitions configuration file found!" - "Using default config.") - config_file = fallback_file - - if config_file is not None: - LOG.debug("Loading definitions configuration file: %s", config_file) - - with open(config_file) as cf: - config = cf.read() - - try: - definition_cfg = yaml.safe_load(config) - except yaml.YAMLError as err: - if hasattr(err, 'problem_mark'): - mark = err.problem_mark - errmsg = (_("Invalid YAML syntax in Definitions file " - "%(file)s at line: %(line)s, column: %(column)s.") - % dict(file=config_file, - line=mark.line + 1, - column=mark.column + 1)) - else: - errmsg = (_("YAML error reading Definitions file " - "%(file)s") - % dict(file=config_file)) - LOG.error(errmsg) - raise - - else: - LOG.debug("No Definitions configuration file found!" 
- "Using default config.") - definition_cfg = defaults - - LOG.info(_LI("Definitions: %s"), definition_cfg) - return definition_cfg diff --git a/ceilometer/dispatcher/__init__.py b/ceilometer/dispatcher/__init__.py index bd8e42b8..d9cddb1f 100644 --- a/ceilometer/dispatcher/__init__.py +++ b/ceilometer/dispatcher/__init__.py @@ -16,26 +16,8 @@ import abc from oslo_config import cfg -from oslo_log import log import six -from stevedore import named -from ceilometer.i18n import _LW - - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.MultiStrOpt('meter_dispatchers', - deprecated_name='dispatcher', - default=['database'], - help='Dispatchers to process metering data.'), - cfg.MultiStrOpt('event_dispatchers', - default=['database'], - deprecated_name='dispatcher', - help='Dispatchers to process event data.'), -] -cfg.CONF.register_opts(OPTS) STORAGE_OPTS = [ cfg.IntOpt('max_retries', @@ -51,59 +33,11 @@ STORAGE_OPTS = [ cfg.CONF.register_opts(STORAGE_OPTS, group='storage') -def _load_dispatcher_manager(dispatcher_type): - namespace = 'ceilometer.dispatcher.%s' % dispatcher_type - conf_name = '%s_dispatchers' % dispatcher_type - - LOG.debug('loading dispatchers from %s', namespace) - # set propagate_map_exceptions to True to enable stevedore - # to propagate exceptions. - dispatcher_manager = named.NamedExtensionManager( - namespace=namespace, - names=getattr(cfg.CONF, conf_name), - invoke_on_load=True, - invoke_args=[cfg.CONF], - propagate_map_exceptions=True) - if not list(dispatcher_manager): - LOG.warning(_LW('Failed to load any dispatchers for %s'), - namespace) - return dispatcher_manager - - -def load_dispatcher_manager(): - return (_load_dispatcher_manager('meter'), - _load_dispatcher_manager('event')) - - -class Base(object): +@six.add_metaclass(abc.ABCMeta) +class EventDispatcherBase(object): def __init__(self, conf): self.conf = conf - -@six.add_metaclass(abc.ABCMeta) -class MeterDispatcherBase(Base): - @abc.abstractmethod - def record_metering_data(self, data): - """Recording metering data interface.""" - - def verify_and_record_metering_data(self, datapoints): - """Verify metering data's signature and record valid ones.""" - if not isinstance(datapoints, list): - datapoints = [datapoints] - - valid_datapoints = [] - for datapoint in datapoints: - if utils.verify_signature(datapoint, - self.conf.publisher.telemetry_secret): - valid_datapoints.append(datapoint) - else: - LOG.warning(_LW('Message signature is invalid, discarding ' - 'it: <%r>.'), datapoint) - return self.record_metering_data(valid_datapoints) - - -@six.add_metaclass(abc.ABCMeta) -class EventDispatcherBase(Base): @abc.abstractmethod def record_events(self, events): """Record events.""" diff --git a/ceilometer/dispatcher/database.py b/ceilometer/dispatcher/database.py index 6d734d05..aa383a74 100644 --- a/ceilometer/dispatcher/database.py +++ b/ceilometer/dispatcher/database.py @@ -24,8 +24,7 @@ from ceilometer import storage LOG = log.getLogger(__name__) -class DatabaseDispatcher(dispatcher.MeterDispatcherBase, - dispatcher.EventDispatcherBase): +class DatabaseDispatcher(dispatcher.EventDispatcherBase): """Dispatcher class for recording metering data into database. 
The dispatcher class which records each meter into a database configured @@ -41,59 +40,7 @@ class DatabaseDispatcher(dispatcher.MeterDispatcherBase, def __init__(self, conf): super(DatabaseDispatcher, self).__init__(conf) - - self._meter_conn = self._get_db_conn('metering', True) - self._event_conn = self._get_db_conn('event', True) - - def _get_db_conn(self, purpose, ignore_exception=False): - try: - return storage.get_connection_from_config(self.conf, purpose) - except Exception as err: - params = {"purpose": purpose, "err": err} - LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s " - "re-try later: %(err)s") % params) - if not ignore_exception: - raise - - @property - def meter_conn(self): - if not self._meter_conn: - self._meter_conn = self._get_db_conn('metering') - - return self._meter_conn - - @property - def event_conn(self): - if not self._event_conn: - self._event_conn = self._get_db_conn('event') - - return self._event_conn - - def record_metering_data(self, data): - # We may have receive only one counter on the wire - if not isinstance(data, list): - data = [data] - - for meter in data: - LOG.debug( - 'metering data %(counter_name)s ' - 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s', - {'counter_name': meter['counter_name'], - 'resource_id': meter['resource_id'], - 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), - 'counter_volume': meter['counter_volume']}) - try: - # Convert the timestamp to a datetime instance. - # Storage engines are responsible for converting - # that value to something they can store. - if meter.get('timestamp'): - ts = timeutils.parse_isotime(meter['timestamp']) - meter['timestamp'] = timeutils.normalize_time(ts) - self.meter_conn.record_metering_data(meter) - except Exception as err: - LOG.error(_LE('Failed to record metering data: %s.'), err) - # raise the exception to propagate it up in the chain. - raise + self.event_conn = storage.get_connection_from_config(self.conf) def record_events(self, events): if not isinstance(events, list): diff --git a/ceilometer/dispatcher/file.py b/ceilometer/dispatcher/file.py deleted file mode 100644 index a4da54d0..00000000 --- a/ceilometer/dispatcher/file.py +++ /dev/null @@ -1,85 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import logging.handlers - -from oslo_config import cfg - -from ceilometer import dispatcher - -OPTS = [ - cfg.StrOpt('file_path', - help='Name and the location of the file to record ' - 'meters.'), - cfg.IntOpt('max_bytes', - default=0, - help='The max size of the file.'), - cfg.IntOpt('backup_count', - default=0, - help='The max number of the files to keep.'), -] - -cfg.CONF.register_opts(OPTS, group="dispatcher_file") - - -class FileDispatcher(dispatcher.MeterDispatcherBase, - dispatcher.EventDispatcherBase): - """Dispatcher class for recording metering data to a file. - - The dispatcher class which logs each meter and/or event into a file - configured in ceilometer configuration file. 
An example configuration may - look like the following: - - [dispatcher_file] - file_path = /tmp/meters - - To enable this dispatcher, the following section needs to be present in - ceilometer.conf file - - [DEFAULT] - meter_dispatchers = file - event_dispatchers = file - """ - - def __init__(self, conf): - super(FileDispatcher, self).__init__(conf) - self.log = None - - # if the directory and path are configured, then log to the file - if self.conf.dispatcher_file.file_path: - dispatcher_logger = logging.Logger('dispatcher.file') - dispatcher_logger.setLevel(logging.INFO) - # create rotating file handler which logs meters - rfh = logging.handlers.RotatingFileHandler( - self.conf.dispatcher_file.file_path, - maxBytes=self.conf.dispatcher_file.max_bytes, - backupCount=self.conf.dispatcher_file.backup_count, - encoding='utf8') - - rfh.setLevel(logging.INFO) - # Only wanted the meters to be saved in the file, not the - # project root logger. - dispatcher_logger.propagate = False - dispatcher_logger.addHandler(rfh) - self.log = dispatcher_logger - - def record_metering_data(self, data): - if self.log: - self.log.info(data) - - def record_events(self, events): - if self.log: - self.log.info(events) diff --git a/ceilometer/dispatcher/gnocchi.py b/ceilometer/dispatcher/gnocchi.py deleted file mode 100644 index 4e2b011b..00000000 --- a/ceilometer/dispatcher/gnocchi.py +++ /dev/null @@ -1,469 +0,0 @@ -# -# Copyright 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from collections import defaultdict -from hashlib import md5 -import itertools -import operator -import re -import threading -import uuid - -from gnocchiclient import client -from gnocchiclient import exceptions as gnocchi_exc -from gnocchiclient import utils as gnocchi_utils -from keystoneauth1 import exceptions as ka_exceptions -from keystoneauth1 import session as ka_session -from oslo_config import cfg -from oslo_log import log -from oslo_utils import fnmatch -import requests -import retrying -import six -from stevedore import extension - -from ceilometer import declarative -from ceilometer import dispatcher -from ceilometer.i18n import _, _LE, _LW -from ceilometer import keystone_client - -NAME_ENCODED = __name__.encode('utf-8') -CACHE_NAMESPACE = uuid.UUID(bytes=md5(NAME_ENCODED).digest()) -LOG = log.getLogger(__name__) - -dispatcher_opts = [ - cfg.BoolOpt('filter_service_activity', - default=True, - help='Filter out samples generated by Gnocchi ' - 'service activity'), - cfg.StrOpt('filter_project', - default='gnocchi', - help='Gnocchi project used to filter out samples ' - 'generated by Gnocchi service activity'), - cfg.StrOpt('url', - deprecated_for_removal=True, - help='URL to Gnocchi. 
default: autodetection'), - cfg.StrOpt('archive_policy', - help='The archive policy to use when the dispatcher ' - 'create a new metric.'), - cfg.StrOpt('resources_definition_file', - default='gnocchi_resources.yaml', - help=_('The Yaml file that defines mapping between samples ' - 'and gnocchi resources/metrics')), -] - -cfg.CONF.register_opts(dispatcher_opts, group="dispatcher_gnocchi") - - -def cache_key_mangler(key): - """Construct an opaque cache key.""" - if six.PY2: - key = key.encode('utf-8') - return uuid.uuid5(CACHE_NAMESPACE, key).hex - - -class ResourcesDefinition(object): - - MANDATORY_FIELDS = {'resource_type': six.string_types, - 'metrics': list} - - def __init__(self, definition_cfg, default_archive_policy, plugin_manager): - self._default_archive_policy = default_archive_policy - self.cfg = definition_cfg - - for field, field_type in self.MANDATORY_FIELDS.items(): - if field not in self.cfg: - raise declarative.ResourceDefinitionException( - _LE("Required field %s not specified") % field, self.cfg) - if not isinstance(self.cfg[field], field_type): - raise declarative.ResourceDefinitionException( - _LE("Required field %(field)s should be a %(type)s") % - {'field': field, 'type': field_type}, self.cfg) - - self._attributes = {} - for name, attr_cfg in self.cfg.get('attributes', {}).items(): - self._attributes[name] = declarative.Definition(name, attr_cfg, - plugin_manager) - - self.metrics = {} - for t in self.cfg['metrics']: - archive_policy = self.cfg.get('archive_policy', - self._default_archive_policy) - if archive_policy is None: - self.metrics[t] = {} - else: - self.metrics[t] = dict(archive_policy_name=archive_policy) - - def match(self, metric_name): - for t in self.cfg['metrics']: - if fnmatch.fnmatch(metric_name, t): - return True - return False - - def attributes(self, sample): - attrs = {} - for name, definition in self._attributes.items(): - value = definition.parse(sample) - if value is not None: - attrs[name] = value - return attrs - - -def get_gnocchiclient(conf): - requests_session = requests.session() - for scheme in requests_session.adapters.keys(): - requests_session.mount(scheme, ka_session.TCPKeepAliveAdapter( - pool_block=True)) - - session = keystone_client.get_session(requests_session=requests_session) - return client.Client('1', session, - interface=conf.service_credentials.interface, - region_name=conf.service_credentials.region_name, - endpoint_override=conf.dispatcher_gnocchi.url) - - -class LockedDefaultDict(defaultdict): - """defaultdict with lock to handle threading - - Dictionary only deletes if nothing is accessing dict and nothing is holding - lock to be deleted. If both cases are not true, it will skip delete. - """ - def __init__(self, *args, **kwargs): - self.lock = threading.Lock() - super(LockedDefaultDict, self).__init__(*args, **kwargs) - - def __getitem__(self, key): - with self.lock: - return super(LockedDefaultDict, self).__getitem__(key) - - def pop(self, key, *args): - with self.lock: - key_lock = super(LockedDefaultDict, self).__getitem__(key) - if key_lock.acquire(False): - try: - super(LockedDefaultDict, self).pop(key, *args) - finally: - key_lock.release() - - -class GnocchiDispatcher(dispatcher.MeterDispatcherBase): - """Dispatcher class for recording metering data into database. - - The dispatcher class records each meter into the gnocchi service - configured in ceilometer configuration file. 
An example configuration may - look like the following: - - [dispatcher_gnocchi] - url = http://localhost:8041 - archive_policy = low - - To enable this dispatcher, the following section needs to be present in - ceilometer.conf file - - [DEFAULT] - meter_dispatchers = gnocchi - """ - def __init__(self, conf): - super(GnocchiDispatcher, self).__init__(conf) - self.conf = conf - self.filter_service_activity = ( - conf.dispatcher_gnocchi.filter_service_activity) - self._ks_client = keystone_client.get_client() - self.resources_definition = self._load_resources_definitions(conf) - - self.cache = None - try: - import oslo_cache - oslo_cache.configure(self.conf) - # NOTE(cdent): The default cache backend is a real but - # noop backend. We don't want to use that here because - # we want to avoid the cache pathways entirely if the - # cache has not been configured explicitly. - if 'null' not in self.conf.cache.backend: - cache_region = oslo_cache.create_region() - self.cache = oslo_cache.configure_cache_region( - self.conf, cache_region) - self.cache.key_mangler = cache_key_mangler - except ImportError: - pass - except oslo_cache.exception.ConfigurationError as exc: - LOG.warning(_LW('unable to configure oslo_cache: %s') % exc) - - self._gnocchi_project_id = None - self._gnocchi_project_id_lock = threading.Lock() - self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock) - - self._gnocchi = get_gnocchiclient(conf) - # Convert retry_interval secs to msecs for retry decorator - retries = conf.storage.max_retries - - @retrying.retry(wait_fixed=conf.storage.retry_interval * 1000, - stop_max_attempt_number=(retries if retries >= 0 - else None)) - def _get_connection(): - self._gnocchi.capabilities.list() - - try: - _get_connection() - except Exception: - LOG.error(_LE('Failed to connect to Gnocchi.')) - raise - - @classmethod - def _load_resources_definitions(cls, conf): - plugin_manager = extension.ExtensionManager( - namespace='ceilometer.event.trait_plugin') - data = declarative.load_definitions( - {}, conf.dispatcher_gnocchi.resources_definition_file) - resource_defs = [] - for resource in data.get('resources', []): - try: - resource_defs.append(ResourcesDefinition( - resource, - conf.dispatcher_gnocchi.archive_policy, plugin_manager)) - except Exception as exc: - LOG.error(_LE("Failed to load resource due to error %s") % - exc) - return resource_defs - - @property - def gnocchi_project_id(self): - if self._gnocchi_project_id is not None: - return self._gnocchi_project_id - with self._gnocchi_project_id_lock: - if self._gnocchi_project_id is None: - try: - project = self._ks_client.projects.find( - name=self.conf.dispatcher_gnocchi.filter_project) - except ka_exceptions.NotFound: - LOG.warning(_LW('gnocchi project not found in keystone,' - ' ignoring the filter_service_activity ' - 'option')) - self.filter_service_activity = False - return None - except Exception: - LOG.exception('fail to retrieve user of Gnocchi service') - raise - self._gnocchi_project_id = project.id - LOG.debug("gnocchi project found: %s", self.gnocchi_project_id) - return self._gnocchi_project_id - - def _is_swift_account_sample(self, sample): - return bool([rd for rd in self.resources_definition - if rd.cfg['resource_type'] == 'swift_account' - and rd.match(sample['counter_name'])]) - - def _is_gnocchi_activity(self, sample): - return (self.filter_service_activity and self.gnocchi_project_id and ( - # avoid anything from the user used by gnocchi - sample['project_id'] == self.gnocchi_project_id or - # avoid anything in 
the swift account used by gnocchi - (sample['resource_id'] == self.gnocchi_project_id and - self._is_swift_account_sample(sample)) - )) - - def _get_resource_definition(self, metric_name): - for rd in self.resources_definition: - if rd.match(metric_name): - return rd - - def record_metering_data(self, data): - # We may have receive only one counter on the wire - if not isinstance(data, list): - data = [data] - # NOTE(sileht): skip sample generated by gnocchi itself - data = [s for s in data if not self._is_gnocchi_activity(s)] - - # FIXME(sileht): This method bulk the processing of samples - # grouped by resource_id and metric_name but this is not - # efficient yet because the data received here doesn't often - # contains a lot of different kind of samples - # So perhaps the next step will be to pool the received data from - # message bus. - data.sort(key=lambda s: (s['resource_id'], s['counter_name'])) - - resource_grouped_samples = itertools.groupby( - data, key=operator.itemgetter('resource_id')) - - gnocchi_data = {} - measures = {} - stats = dict(measures=0, resources=0, metrics=0) - for resource_id, samples_of_resource in resource_grouped_samples: - stats['resources'] += 1 - metric_grouped_samples = itertools.groupby( - list(samples_of_resource), - key=operator.itemgetter('counter_name')) - - # NOTE(sileht): We convert resource id to Gnocchi format - # because batch_resources_metrics_measures exception - # returns this id and not the ceilometer one - gnocchi_id = gnocchi_utils.encode_resource_id(resource_id) - res_info = gnocchi_data[gnocchi_id] = {} - for metric_name, samples in metric_grouped_samples: - stats['metrics'] += 1 - - samples = list(samples) - rd = self._get_resource_definition(metric_name) - if rd is None: - LOG.warning(_LW("metric %s is not handled by Gnocchi") % - metric_name) - continue - if rd.cfg.get("ignore"): - continue - - res_info['resource_type'] = rd.cfg['resource_type'] - res_info.setdefault("resource", {}).update({ - "id": resource_id, - "user_id": samples[0]['user_id'], - "project_id": samples[0]['project_id'], - "metrics": rd.metrics, - }) - - for sample in samples: - res_info.setdefault("resource_extra", {}).update( - rd.attributes(sample)) - m = measures.setdefault(gnocchi_id, {}).setdefault( - metric_name, []) - m.append({'timestamp': sample['timestamp'], - 'value': sample['counter_volume']}) - unit = sample['counter_unit'] - metric = sample['counter_name'] - res_info['resource']['metrics'][metric]['unit'] = unit - - stats['measures'] += len(measures[gnocchi_id][metric_name]) - res_info["resource"].update(res_info["resource_extra"]) - - try: - self.batch_measures(measures, gnocchi_data, stats) - except gnocchi_exc.ClientException as e: - LOG.error(six.text_type(e)) - except Exception as e: - LOG.error(six.text_type(e), exc_info=True) - - for gnocchi_id, info in gnocchi_data.items(): - resource = info["resource"] - resource_type = info["resource_type"] - resource_extra = info["resource_extra"] - if not resource_extra: - continue - try: - self._if_not_cached("update", resource_type, resource, - self._update_resource, resource_extra) - except gnocchi_exc.ClientException as e: - LOG.error(six.text_type(e)) - except Exception as e: - LOG.error(six.text_type(e), exc_info=True) - - RE_UNKNOW_METRICS = re.compile("Unknown metrics: (.*) \(HTTP 400\)") - RE_UNKNOW_METRICS_LIST = re.compile("([^/ ,]*)/([^,]*)") - - def batch_measures(self, measures, resource_infos, stats): - # NOTE(sileht): We don't care about error here, we want - # resources metadata always 
been updated - try: - self._gnocchi.metric.batch_resources_metrics_measures(measures) - except gnocchi_exc.BadRequest as e: - m = self.RE_UNKNOW_METRICS.match(six.text_type(e)) - if m is None: - raise - - # NOTE(sileht): Create all missing resources and metrics - metric_list = self.RE_UNKNOW_METRICS_LIST.findall(m.group(1)) - gnocchi_ids_freshly_handled = set() - for gnocchi_id, metric_name in metric_list: - if gnocchi_id in gnocchi_ids_freshly_handled: - continue - resource = resource_infos[gnocchi_id]['resource'] - resource_type = resource_infos[gnocchi_id]['resource_type'] - try: - self._if_not_cached("create", resource_type, resource, - self._create_resource) - except gnocchi_exc.ResourceAlreadyExists: - metric = {'resource_id': resource['id'], - 'name': metric_name} - metric.update(resource["metrics"][metric_name]) - try: - self._gnocchi.metric.create(metric) - except gnocchi_exc.NamedMetricAlreadyExists: - # NOTE(sileht): metric created in the meantime - pass - except gnocchi_exc.ClientException as e: - LOG.error(six.text_type(e)) - # We cannot post measures for this metric - del measures[gnocchi_id][metric_name] - if not measures[gnocchi_id]: - del measures[gnocchi_id] - except gnocchi_exc.ClientException as e: - LOG.error(six.text_type(e)) - # We cannot post measures for this resource - del measures[gnocchi_id] - gnocchi_ids_freshly_handled.add(gnocchi_id) - else: - gnocchi_ids_freshly_handled.add(gnocchi_id) - - # NOTE(sileht): we have created missing resources/metrics, - # now retry to post measures - self._gnocchi.metric.batch_resources_metrics_measures(measures) - - # FIXME(sileht): take care of measures removed in stats - LOG.debug("%(measures)d measures posted against %(metrics)d " - "metrics through %(resources)d resources", stats) - - def _create_resource(self, resource_type, resource): - self._gnocchi.resource.create(resource_type, resource) - LOG.debug('Resource %s created', resource["id"]) - - def _update_resource(self, resource_type, resource, resource_extra): - self._gnocchi.resource.update(resource_type, - resource["id"], - resource_extra) - LOG.debug('Resource %s updated', resource["id"]) - - def _if_not_cached(self, operation, resource_type, resource, method, - *args, **kwargs): - if self.cache: - cache_key = resource['id'] - attribute_hash = self._check_resource_cache(cache_key, resource) - hit = False - if attribute_hash: - with self._gnocchi_resource_lock[cache_key]: - # NOTE(luogangyi): there is a possibility that the - # resource was already built in cache by another - # ceilometer-collector when we get the lock here. 
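The NOTE just above describes the double-checked locking used around the resource cache: check the attribute hash, take a per-resource lock, then re-check before creating or updating anything in Gnocchi. A minimal stand-alone sketch of that pattern follows; the plain dict cache and the write_backend callable are illustrative stand-ins for the oslo_cache region and the Gnocchi client calls.

    import threading

    class ResourceSync(object):
        """Double-checked caching: only hit the backend when attributes change."""

        def __init__(self):
            self.cache = {}   # stands in for the oslo_cache region
            self.locks = {}   # per-resource locks (LockedDefaultDict in the real code)

        @staticmethod
        def attribute_hash(resource):
            # Hash everything except the volatile 'metrics' entry, the same idea
            # as _check_resource_cache; values are stringified to stay hashable.
            return hash(frozenset((k, str(v)) for k, v in resource.items()
                                  if k != 'metrics'))

        def sync(self, resource, write_backend):
            key = resource['id']
            new_hash = self.attribute_hash(resource)
            if self.cache.get(key) == new_hash:
                return False                      # fast path: nothing changed
            with self.locks.setdefault(key, threading.Lock()):
                # Re-check: another collector may have synced while we waited.
                if self.cache.get(key) == new_hash:
                    return False
                write_backend(resource)           # e.g. resource create/update
                self.cache[key] = new_hash
                return True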
- attribute_hash = self._check_resource_cache(cache_key, - resource) - if attribute_hash: - method(resource_type, resource, *args, **kwargs) - self.cache.set(cache_key, attribute_hash) - else: - hit = True - LOG.debug('resource cache recheck hit for ' - '%s %s', operation, cache_key) - self._gnocchi_resource_lock.pop(cache_key, None) - else: - hit = True - LOG.debug('Resource cache hit for %s %s', operation, cache_key) - if hit and operation == "create": - raise gnocchi_exc.ResourceAlreadyExists() - else: - method(resource_type, resource, *args, **kwargs) - - def _check_resource_cache(self, key, resource_data): - cached_hash = self.cache.get(key) - attribute_hash = hash(frozenset(filter(lambda x: x[0] != "metrics", - resource_data.items()))) - if not cached_hash or cached_hash != attribute_hash: - return attribute_hash - else: - return None diff --git a/ceilometer/dispatcher/http.py b/ceilometer/dispatcher/http.py deleted file mode 100644 index b69c473d..00000000 --- a/ceilometer/dispatcher/http.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from oslo_config import cfg -from oslo_log import log -import requests - -from ceilometer import dispatcher -from ceilometer.i18n import _LE - -LOG = log.getLogger(__name__) - -http_dispatcher_opts = [ - cfg.StrOpt('target', - default='', - help='The target where the http request will be sent. ' - 'If this is not set, no data will be posted. For ' - 'example: target = http://hostname:1234/path'), - cfg.StrOpt('event_target', - help='The target for event data where the http request ' - 'will be sent to. If this is not set, it will default ' - 'to same as Sample target.'), - cfg.IntOpt('timeout', - default=5, - help='The max time in seconds to wait for a request to ' - 'timeout.'), -] - -cfg.CONF.register_opts(http_dispatcher_opts, group="dispatcher_http") - - -class HttpDispatcher(dispatcher.MeterDispatcherBase, - dispatcher.EventDispatcherBase): - """Dispatcher class for posting metering/event data into a http target. - - To enable this dispatcher, the following option needs to be present in - ceilometer.conf file:: - - [DEFAULT] - meter_dispatchers = http - event_dispatchers = http - - Dispatcher specific options can be added as follows:: - - [dispatcher_http] - target = www.example.com - event_target = www.example.com - timeout = 2 - """ - - def __init__(self, conf): - super(HttpDispatcher, self).__init__(conf) - self.headers = {'Content-type': 'application/json'} - self.timeout = self.conf.dispatcher_http.timeout - self.target = self.conf.dispatcher_http.target - self.event_target = (self.conf.dispatcher_http.event_target or - self.target) - - def record_metering_data(self, data): - if self.target == '': - # if the target was not set, do not do anything - LOG.error(_LE('Dispatcher target was not set, no meter will ' - 'be posted. 
Set the target in the ceilometer.conf ' - 'file.')) - return - - # We may have receive only one counter on the wire - if not isinstance(data, list): - data = [data] - - for meter in data: - LOG.debug( - 'metering data %(counter_name)s ' - 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s', - {'counter_name': meter['counter_name'], - 'resource_id': meter['resource_id'], - 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), - 'counter_volume': meter['counter_volume']}) - try: - # Every meter should be posted to the target - res = requests.post(self.target, - data=json.dumps(meter), - headers=self.headers, - timeout=self.timeout) - LOG.debug('Message posting finished with status code ' - '%d.', res.status_code) - except Exception as err: - LOG.exception(_LE('Failed to record metering data: %s.'), err) - - def record_events(self, events): - if not isinstance(events, list): - events = [events] - - for event in events: - res = None - try: - res = requests.post(self.event_target, data=event, - headers=self.headers, - timeout=self.timeout) - res.raise_for_status() - except Exception: - error_code = res.status_code if res else 'unknown' - LOG.exception(_LE('Status Code: %{code}s. Failed to' - 'dispatch event: %{event}s'), - {'code': error_code, 'event': event}) diff --git a/ceilometer/energy/__init__.py b/ceilometer/energy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/energy/kwapi.py b/ceilometer/energy/kwapi.py deleted file mode 100644 index 9bb8caa8..00000000 --- a/ceilometer/energy/kwapi.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
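The HTTP dispatcher above amounts to serializing each sample as JSON and POSTing it to the configured target, logging failures instead of raising. A condensed, self-contained sketch of that loop (the target URL and the sample dict are invented for illustration, and raise_for_status is borrowed from the event path):

    import json
    import requests

    def post_samples(samples, target, timeout=5):
        """POST each sample as JSON to an HTTP target; never raise to the caller."""
        if not target:
            return                      # mirror the dispatcher: no target, do nothing
        headers = {'Content-type': 'application/json'}
        for meter in samples:
            try:
                res = requests.post(target, data=json.dumps(meter),
                                    headers=headers, timeout=timeout)
                res.raise_for_status()
            except Exception as err:
                print('Failed to record metering data: %s' % err)

    post_samples([{'counter_name': 'cpu', 'counter_volume': 1.0}],
                 'http://hostname:1234/path')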
- -from keystoneauth1 import exceptions -from oslo_config import cfg -from oslo_log import log -import requests -import six - -from ceilometer.agent import plugin_base -from ceilometer import keystone_client -from ceilometer import sample - - -LOG = log.getLogger(__name__) - -SERVICE_OPTS = [ - cfg.StrOpt('kwapi', - default='energy', - help='Kwapi service type.'), -] - -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') - - -class KwapiClient(object): - """Kwapi API client.""" - - def __init__(self, url, token=None): - """Initializes client.""" - self.url = url - self.token = token - - def iter_probes(self): - """Returns a list of dicts describing all probes.""" - probes_url = self.url + '/probes/' - headers = {} - if self.token is not None: - headers = {'X-Auth-Token': self.token} - timeout = cfg.CONF.http_timeout - request = requests.get(probes_url, headers=headers, timeout=timeout) - message = request.json() - probes = message['probes'] - for key, value in six.iteritems(probes): - probe_dict = value - probe_dict['id'] = key - yield probe_dict - - -class _Base(plugin_base.PollsterBase): - """Base class for the Kwapi pollster, derived from PollsterBase.""" - - @property - def default_discovery(self): - return 'endpoint:%s' % cfg.CONF.service_types.kwapi - - @staticmethod - def get_kwapi_client(ksclient, endpoint): - """Returns a KwapiClient configured with the proper url and token.""" - return KwapiClient(endpoint, keystone_client.get_auth_token(ksclient)) - - CACHE_KEY_PROBE = 'kwapi.probes' - - def _iter_probes(self, ksclient, cache, endpoint): - """Iterate over all probes.""" - key = '%s-%s' % (endpoint, self.CACHE_KEY_PROBE) - if key not in cache: - cache[key] = self._get_probes(ksclient, endpoint) - return iter(cache[key]) - - def _get_probes(self, ksclient, endpoint): - try: - client = self.get_kwapi_client(ksclient, endpoint) - except exceptions.EndpointNotFound: - LOG.debug("Kwapi endpoint not found") - return [] - return list(client.iter_probes()) - - -class EnergyPollster(_Base): - """Measures energy consumption.""" - def get_samples(self, manager, cache, resources): - """Returns all samples.""" - for endpoint in resources: - for probe in self._iter_probes(manager.keystone, cache, endpoint): - yield sample.Sample( - name='energy', - type=sample.TYPE_CUMULATIVE, - unit='kWh', - volume=probe['kwh'], - user_id=None, - project_id=None, - resource_id=probe['id'], - resource_metadata={} - ) - - -class PowerPollster(_Base): - """Measures power consumption.""" - def get_samples(self, manager, cache, resources): - """Returns all samples.""" - for endpoint in resources: - for probe in self._iter_probes(manager.keystone, cache, endpoint): - yield sample.Sample( - name='power', - type=sample.TYPE_GAUGE, - unit='W', - volume=probe['w'], - user_id=None, - project_id=None, - resource_id=probe['id'], - resource_metadata={} - ) diff --git a/ceilometer/event/converter.py b/ceilometer/event/converter.py deleted file mode 100644 index 6806909c..00000000 --- a/ceilometer/event/converter.py +++ /dev/null @@ -1,294 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import fnmatch -from oslo_utils import timeutils -import six - -from ceilometer import declarative -from ceilometer.event.storage import models -from ceilometer.i18n import _ - -OPTS = [ - cfg.StrOpt('definitions_cfg_file', - default="event_definitions.yaml", - help="Configuration file for event definitions." - ), - cfg.BoolOpt('drop_unmatched_notifications', - default=False, - help='Drop notifications if no event definition matches. ' - '(Otherwise, we convert them with just the default traits)'), - cfg.MultiStrOpt('store_raw', - default=[], - help='Store the raw notification for select priority ' - 'levels (info and/or error). By default, raw details are ' - 'not captured.') -] - -cfg.CONF.register_opts(OPTS, group='event') - -LOG = log.getLogger(__name__) - - -class TraitDefinition(declarative.Definition): - def __init__(self, name, trait_cfg, plugin_manager): - super(TraitDefinition, self).__init__(name, trait_cfg, plugin_manager) - type_name = (trait_cfg.get('type', 'text') - if isinstance(trait_cfg, dict) else 'text') - self.trait_type = models.Trait.get_type_by_name(type_name) - if self.trait_type is None: - raise declarative.EventDefinitionException( - _("Invalid trait type '%(type)s' for trait %(trait)s") - % dict(type=type_name, trait=name), self.cfg) - - def to_trait(self, notification_body): - value = self.parse(notification_body) - if value is None: - return None - - # NOTE(mdragon): some openstack projects (mostly Nova) emit '' - # for null fields for things like dates. 
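The NOTE above, and the check that follows it, handle notifications that carry an empty string where a null date or number was meant. A stripped-down version of the same guard plus the type coercion, using a plain converter map instead of models.Trait (the map itself is illustrative):

    CONVERTERS = {'text': str, 'int': int, 'float': float}   # illustrative subset

    def to_trait_value(trait_type, raw_value):
        """Return a typed trait value, or None when the field is absent/empty."""
        if raw_value is None:
            return None
        # Some projects emit '' for null dates/numbers; drop those unless the
        # trait really is a text trait.
        if trait_type != 'text' and raw_value == '':
            return None
        return CONVERTERS[trait_type](raw_value)

    assert to_trait_value('int', '') is None
    assert to_trait_value('int', '42') == 42
    assert to_trait_value('text', '') == ''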
- if self.trait_type != models.Trait.TEXT_TYPE and value == '': - return None - - value = models.Trait.convert_value(self.trait_type, value) - return models.Trait(self.name, self.trait_type, value) - - -class EventDefinition(object): - - DEFAULT_TRAITS = dict( - service=dict(type='text', fields='publisher_id'), - request_id=dict(type='text', fields='_context_request_id'), - project_id=dict(type='text', fields=['payload.tenant_id', - '_context_tenant']), - user_id=dict(type='text', fields=['payload.user_id', - '_context_user_id']), - # TODO(dikonoor):tenant_id is old terminology and should - # be deprecated - tenant_id=dict(type='text', fields=['payload.tenant_id', - '_context_tenant']), - ) - - def __init__(self, definition_cfg, trait_plugin_mgr): - self._included_types = [] - self._excluded_types = [] - self.traits = dict() - self.cfg = definition_cfg - self.raw_levels = [level.lower() for level in cfg.CONF.event.store_raw] - - try: - event_type = definition_cfg['event_type'] - traits = definition_cfg['traits'] - except KeyError as err: - raise declarative.EventDefinitionException( - _("Required field %s not specified") % err.args[0], self.cfg) - - if isinstance(event_type, six.string_types): - event_type = [event_type] - - for t in event_type: - if t.startswith('!'): - self._excluded_types.append(t[1:]) - else: - self._included_types.append(t) - - if self._excluded_types and not self._included_types: - self._included_types.append('*') - - for trait_name in self.DEFAULT_TRAITS: - self.traits[trait_name] = TraitDefinition( - trait_name, - self.DEFAULT_TRAITS[trait_name], - trait_plugin_mgr) - for trait_name in traits: - self.traits[trait_name] = TraitDefinition( - trait_name, - traits[trait_name], - trait_plugin_mgr) - - def included_type(self, event_type): - for t in self._included_types: - if fnmatch.fnmatch(event_type, t): - return True - return False - - def excluded_type(self, event_type): - for t in self._excluded_types: - if fnmatch.fnmatch(event_type, t): - return True - return False - - def match_type(self, event_type): - return (self.included_type(event_type) - and not self.excluded_type(event_type)) - - @property - def is_catchall(self): - return '*' in self._included_types and not self._excluded_types - - @staticmethod - def _extract_when(body): - """Extract the generated datetime from the notification.""" - # NOTE: I am keeping the logic the same as it was in the collector, - # However, *ALL* notifications should have a 'timestamp' field, it's - # part of the notification envelope spec. If this was put here because - # some openstack project is generating notifications without a - # timestamp, then that needs to be filed as a bug with the offending - # project (mdragon) - when = body.get('timestamp', body.get('_context_timestamp')) - if when: - return timeutils.normalize_time(timeutils.parse_isotime(when)) - - return timeutils.utcnow() - - def to_event(self, notification_body): - event_type = notification_body['event_type'] - message_id = notification_body['message_id'] - when = self._extract_when(notification_body) - - traits = (self.traits[t].to_trait(notification_body) - for t in self.traits) - # Only accept non-None value traits ... 
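The include/exclude handling set up in EventDefinition.__init__ a little above (a leading '!' marks an exclusion, and exclusions with no explicit inclusions imply a '*' include) can be exercised on its own. A small sketch using the stdlib fnmatch module in place of oslo_utils.fnmatch:

    import fnmatch

    def build_matcher(event_types):
        """Return a predicate implementing the '!'-exclusion rules."""
        if isinstance(event_types, str):
            event_types = [event_types]
        included = [t for t in event_types if not t.startswith('!')]
        excluded = [t[1:] for t in event_types if t.startswith('!')]
        if excluded and not included:
            included = ['*']

        def match(event_type):
            return (any(fnmatch.fnmatch(event_type, t) for t in included)
                    and not any(fnmatch.fnmatch(event_type, t) for t in excluded))

        return match

    match = build_matcher(['*.start', '*.end', '!scheduler.*'])
    assert match('compute.instance.create.start')
    assert not match('scheduler.run_instance.start')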
- traits = [trait for trait in traits if trait is not None] - raw = (notification_body - if notification_body.get('priority') in self.raw_levels else {}) - event = models.Event(message_id, event_type, when, traits, raw) - return event - - -class NotificationEventsConverter(object): - """Notification Event Converter - - The NotificationEventsConverter handles the conversion of Notifications - from openstack systems into Ceilometer Events. - - The conversion is handled according to event definitions in a config file. - - The config is a list of event definitions. Order is significant, a - notification will be processed according to the LAST definition that - matches it's event_type. (We use the last matching definition because that - allows you to use YAML merge syntax in the definitions file.) - Each definition is a dictionary with the following keys (all are - required): - - - event_type: this is a list of notification event_types this definition - will handle. These can be wildcarded with unix shell glob (not regex!) - wildcards. - An exclusion listing (starting with a '!') will exclude any types listed - from matching. If ONLY exclusions are listed, the definition will match - anything not matching the exclusions. - This item can also be a string, which will be taken as equivalent to 1 - item list. - - Examples: - - * ['compute.instance.exists'] will only match - compute.instance.exists notifications - * "compute.instance.exists" Same as above. - * ["image.create", "image.delete"] will match - image.create and image.delete, but not anything else. - * "compute.instance.*" will match - compute.instance.create.start but not image.upload - * ['*.start','*.end', '!scheduler.*'] will match - compute.instance.create.start, and image.delete.end, - but NOT compute.instance.exists or - scheduler.run_instance.start - * '!image.*' matches any notification except image - notifications. - * ['*', '!image.*'] same as above. - - - traits: (dict) The keys are trait names, the values are the trait - definitions. Each trait definition is a dictionary with the following - keys: - - - type (optional): The data type for this trait. (as a string) - Valid options are: 'text', 'int', 'float' and 'datetime', defaults to - 'text' if not specified. - - fields: a path specification for the field(s) in the notification you - wish to extract. The paths can be specified with a dot syntax - (e.g. 'payload.host') or dictionary syntax (e.g. 'payload[host]') is - also supported. - In either case, if the key for the field you are looking for contains - special characters, like '.', it will need to be quoted (with double - or single quotes) like so:: - - "payload.image_meta.'org.openstack__1__architecture'" - - The syntax used for the field specification is a variant of JSONPath, - and is fairly flexible. - (see: https://github.com/kennknowles/python-jsonpath-rw for more info) - Specifications can be written to match multiple possible fields, the - value for the trait will be derived from the matching fields that - exist and have a non-null (i.e. is not None) values in the - notification. - By default the value will be the first such field. 
(plugins can alter - that, if they wish) - - This configuration value is normally a string, for convenience, it can - be specified as a list of specifications, which will be OR'ed together - (a union query in jsonpath terms) - - plugin (optional): (dictionary) with the following keys: - - - name: (string) name of a plugin to load - - parameters: (optional) Dictionary of keyword args to pass - to the plugin on initialization. See documentation on each plugin to - see what arguments it accepts. - - For convenience, this value can also be specified as a string, which is - interpreted as a plugin name, which will be loaded with no parameters. - - """ - - def __init__(self, events_config, trait_plugin_mgr, add_catchall=True): - self.definitions = [ - EventDefinition(event_def, trait_plugin_mgr) - for event_def in reversed(events_config)] - if add_catchall and not any(d.is_catchall for d in self.definitions): - event_def = dict(event_type='*', traits={}) - self.definitions.append(EventDefinition(event_def, - trait_plugin_mgr)) - - def to_event(self, notification_body): - event_type = notification_body['event_type'] - message_id = notification_body['message_id'] - edef = None - for d in self.definitions: - if d.match_type(event_type): - edef = d - break - - if edef is None: - msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)') - % dict(type=event_type, msgid=message_id)) - if cfg.CONF.event.drop_unmatched_notifications: - LOG.debug(msg) - else: - # If drop_unmatched_notifications is False, this should - # never happen. (mdragon) - LOG.error(msg) - return None - - return edef.to_event(notification_body) - - -def setup_events(trait_plugin_mgr): - """Setup the event definitions from yaml config file.""" - return NotificationEventsConverter( - declarative.load_definitions([], cfg.CONF.event.definitions_cfg_file), - trait_plugin_mgr, - add_catchall=not cfg.CONF.event.drop_unmatched_notifications) diff --git a/ceilometer/event/endpoint.py b/ceilometer/event/endpoint.py deleted file mode 100644 index bd78bc91..00000000 --- a/ceilometer/event/endpoint.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2012-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from stevedore import extension - -from ceilometer.event import converter as event_converter -from ceilometer.i18n import _LE -from ceilometer import messaging - -LOG = log.getLogger(__name__) - - -class EventsNotificationEndpoint(object): - def __init__(self, manager): - super(EventsNotificationEndpoint, self).__init__() - LOG.debug('Loading event definitions') - self.event_converter = event_converter.setup_events( - extension.ExtensionManager( - namespace='ceilometer.event.trait_plugin')) - self.manager = manager - - def info(self, notifications): - """Convert message at info level to Ceilometer Event. 
- - :param notifications: list of notifications - """ - return self.process_notification('info', notifications) - - def error(self, notifications): - """Convert message at error level to Ceilometer Event. - - :param notifications: list of notifications - """ - return self.process_notification('error', notifications) - - def process_notification(self, priority, notifications): - for notification in notifications: - # NOTE: the rpc layer currently rips out the notification - # delivery_info, which is critical to determining the - # source of the notification. This will have to get added back - # later. - notification = messaging.convert_to_old_notification_format( - priority, notification) - try: - event = self.event_converter.to_event(notification) - if event is not None: - with self.manager.publisher() as p: - p(event) - except Exception: - if not cfg.CONF.notification.ack_on_event_error: - return oslo_messaging.NotificationResult.REQUEUE - LOG.error(_LE('Fail to process a notification'), exc_info=True) - return oslo_messaging.NotificationResult.HANDLED diff --git a/ceilometer/event/storage/impl_mongodb.py b/ceilometer/event/storage/impl_mongodb.py index 892ce4bf..11a7edaa 100644 --- a/ceilometer/event/storage/impl_mongodb.py +++ b/ceilometer/event/storage/impl_mongodb.py @@ -18,7 +18,6 @@ import pymongo from ceilometer.event.storage import pymongo_base from ceilometer import storage -from ceilometer.storage import impl_mongodb from ceilometer.storage.mongo import utils as pymongo_utils LOG = log.getLogger(__name__) @@ -52,6 +51,31 @@ class Connection(pymongo_base.Connection): # needed. self.upgrade() + @staticmethod + def update_ttl(ttl, ttl_index_name, index_field, coll): + """Update or create time_to_live indexes. + + :param ttl: time to live in seconds. + :param ttl_index_name: name of the index we want to update or create. + :param index_field: field with the index that we need to update. + :param coll: collection which indexes need to be updated. 
+ """ + indexes = coll.index_information() + if ttl <= 0: + if ttl_index_name in indexes: + coll.drop_index(ttl_index_name) + return + + if ttl_index_name in indexes: + return coll.database.command( + 'collMod', coll.name, + index={'keyPattern': {index_field: pymongo.ASCENDING}, + 'expireAfterSeconds': ttl}) + + coll.create_index([(index_field, pymongo.ASCENDING)], + expireAfterSeconds=ttl, + name=ttl_index_name) + def upgrade(self): # create collection if not present if 'event' not in self.db.conn.collection_names(): @@ -65,8 +89,7 @@ class Connection(pymongo_base.Connection): name='event_type_idx' ) ttl = cfg.CONF.database.event_time_to_live - impl_mongodb.Connection.update_ttl(ttl, 'event_ttl', 'timestamp', - self.db.event) + self.update_ttl(ttl, 'event_ttl', 'timestamp', self.db.event) def clear(self): self.conn.drop_database(self.db.name) diff --git a/ceilometer/event/storage/impl_sqlalchemy.py b/ceilometer/event/storage/impl_sqlalchemy.py index b53e0f18..0326c1c6 100644 --- a/ceilometer/event/storage/impl_sqlalchemy.py +++ b/ceilometer/event/storage/impl_sqlalchemy.py @@ -15,7 +15,6 @@ from __future__ import absolute_import import datetime -import os from oslo_config import cfg from oslo_db import exception as dbexc @@ -137,12 +136,8 @@ class Connection(base.Connection): self._engine_facade = db_session.EngineFacade(url, **options) def upgrade(self): - # NOTE(gordc): to minimise memory, only import migration when needed - from oslo_db.sqlalchemy import migration - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - '..', '..', 'storage', 'sqlalchemy', - 'migrate_repo') - migration.db_sync(self._engine_facade.get_engine(), path) + engine = self._engine_facade.get_engine() + models.Base.metadata.create_all(engine) def clear(self): engine = self._engine_facade.get_engine() diff --git a/ceilometer/event/trait_plugins.py b/ceilometer/event/trait_plugins.py deleted file mode 100644 index a4b5fa1d..00000000 --- a/ceilometer/event/trait_plugins.py +++ /dev/null @@ -1,230 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from debtcollector import moves -from oslo_log import log -from oslo_utils import timeutils -import six - -from ceilometer.i18n import _LW - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class TraitPluginBase(object): - """Base class for plugins. - - It converts notification fields to Trait values. - """ - - support_return_all_values = False - """If True, an exception will be raised if the user expect - the plugin to return one trait per match_list, but - the plugin doesn't allow/support that. - """ - - def __init__(self, **kw): - """Setup the trait plugin. - - For each Trait definition a plugin is used on in a conversion - definition, a new instance of the plugin will be created, and - initialized with the parameters (if any) specified in the - config file. - - :param kw: the parameters specified in the event definitions file. 
- - """ - super(TraitPluginBase, self).__init__() - - @moves.moved_method('trait_values', version=6.0, removal_version="?") - def trait_value(self, match_list): - pass - - def trait_values(self, match_list): - """Convert a set of fields to one or multiple Trait values. - - This method is called each time a trait is attempted to be extracted - from a notification. It will be called *even if* no matching fields - are found in the notification (in that case, the match_list will be - empty). If this method returns None, the trait *will not* be added to - the event. Any other value returned by this method will be used as - the value for the trait. Values returned will be coerced to the - appropriate type for the trait. - - :param match_list: A list (may be empty if no matches) of *tuples*. - Each tuple is (field_path, value) where field_path is the jsonpath - for that specific field. - - Example:: - - trait's fields definition: ['payload.foobar', - 'payload.baz', - 'payload.thing.*'] - notification body: - { - 'message_id': '12345', - 'publisher': 'someservice.host', - 'payload': { - 'foobar': 'test', - 'thing': { - 'bar': 12, - 'boing': 13, - } - } - } - match_list will be: [('payload.foobar','test'), - ('payload.thing.bar',12), - ('payload.thing.boing',13)] - - Here is a plugin that emulates the default (no plugin) behavior: - - .. code-block:: python - - class DefaultPlugin(TraitPluginBase): - "Plugin that returns the first field value." - - def __init__(self, **kw): - super(DefaultPlugin, self).__init__() - - def trait_value(self, match_list): - if not match_list: - return None - return [ match[1] for match in match_list] - """ - - # For backwards compatibility for the renamed method. - return [self.trait_value(match_list)] - - -class SplitterTraitPlugin(TraitPluginBase): - """Plugin that splits a piece off of a string value.""" - - support_return_all_values = True - - def __init__(self, separator=".", segment=0, max_split=None, **kw): - """Setup how do split the field. - - :param separator: String to split on. default "." - :param segment: Which segment to return. (int) default 0 - :param max_split: Limit number of splits. Default: None (no limit) - """ - LOG.warning(_LW('split plugin is deprecated, ' - 'add ".`split(%(sep)s, %(segment)d, ' - '%(max_split)d)`" to your jsonpath instead') % - dict(sep=separator, - segment=segment, - max_split=(-1 if max_split is None - else max_split))) - - self.separator = separator - self.segment = segment - self.max_split = max_split - super(SplitterTraitPlugin, self).__init__(**kw) - - def trait_values(self, match_list): - return [self._trait_value(match) - for match in match_list] - - def _trait_value(self, match): - value = six.text_type(match[1]) - if self.max_split is not None: - values = value.split(self.separator, self.max_split) - else: - values = value.split(self.separator) - try: - return values[self.segment] - except IndexError: - return None - - -class BitfieldTraitPlugin(TraitPluginBase): - """Plugin to set flags on a bitfield.""" - def __init__(self, initial_bitfield=0, flags=None, **kw): - """Setup bitfield trait. - - :param initial_bitfield: (int) initial value for the bitfield - Flags that are set will be OR'ed with this. - :param flags: List of dictionaries defining bitflags to set depending - on data in the notification. Each one has the following - keys: - path: jsonpath of field to match. - bit: (int) number of bit to set (lsb is bit 0) - value: set bit if corresponding field's value - matches this. 
If value is not provided, - bit will be set if the field exists (and - is non-null), regardless of its value. - - """ - self.initial_bitfield = initial_bitfield - if flags is None: - flags = [] - self.flags = flags - super(BitfieldTraitPlugin, self).__init__(**kw) - - def trait_values(self, match_list): - matches = dict(match_list) - bitfield = self.initial_bitfield - for flagdef in self.flags: - path = flagdef['path'] - bit = 2 ** int(flagdef['bit']) - if path in matches: - if 'value' in flagdef: - if matches[path] == flagdef['value']: - bitfield |= bit - else: - bitfield |= bit - return [bitfield] - - -class TimedeltaPluginMissedFields(Exception): - def __init__(self): - msg = ('It is required to use two timestamp field with Timedelta ' - 'plugin.') - super(TimedeltaPluginMissedFields, self).__init__(msg) - - -class TimedeltaPlugin(TraitPluginBase): - """Setup timedelta meter volume of two timestamps fields. - - Example:: - - trait's fields definition: ['payload.created_at', - 'payload.launched_at'] - value is been created as total seconds between 'launched_at' and - 'created_at' timestamps. - """ - # TODO(idegtiarov): refactor code to have meter_plugins separate from - # trait_plugins - - def trait_value(self, match_list): - if len(match_list) != 2: - LOG.warning(_LW('Timedelta plugin is required two timestamp fields' - ' to create timedelta value.')) - return - start, end = match_list - try: - start_time = timeutils.parse_isotime(start[1]) - end_time = timeutils.parse_isotime(end[1]) - except Exception as err: - LOG.warning(_LW('Failed to parse date from set fields, both ' - 'fields %(start)s and %(end)s must be datetime: ' - '%(err)s') % - dict(start=start[0], end=end[0], err=err) - ) - return - return abs((end_time - start_time).total_seconds()) diff --git a/ceilometer/exchange_control.py b/ceilometer/exchange_control.py deleted file mode 100644 index 717cdc12..00000000 --- a/ceilometer/exchange_control.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
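The TimedeltaPlugin shown a little above reduces to parsing two timestamp fields and taking the absolute difference in seconds. A minimal stand-alone sketch of that computation, substituting datetime.strptime for timeutils.parse_isotime and using invented field names:

    from datetime import datetime

    def timedelta_seconds(match_list):
        """Return abs(end - start) in seconds for a two-field match_list."""
        if len(match_list) != 2:
            return None                 # the plugin warns and bails out here
        (start_field, start), (end_field, end) = match_list
        fmt = '%Y-%m-%dT%H:%M:%S'
        return abs((datetime.strptime(end, fmt)
                    - datetime.strptime(start, fmt)).total_seconds())

    print(timedelta_seconds([('payload.created_at', '2016-01-01T00:00:00'),
                             ('payload.launched_at', '2016-01-01T00:02:30')]))
    # -> 150.0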
- -from oslo_config import cfg - -EXCHANGE_OPTS = [ - cfg.StrOpt('heat_control_exchange', - default='heat', - help="Exchange name for Heat notifications"), - cfg.StrOpt('glance_control_exchange', - default='glance', - help="Exchange name for Glance notifications."), - cfg.StrOpt('keystone_control_exchange', - default='keystone', - help="Exchange name for Keystone notifications."), - cfg.StrOpt('cinder_control_exchange', - default='cinder', - help="Exchange name for Cinder notifications."), - cfg.StrOpt('sahara_control_exchange', - default='sahara', - help="Exchange name for Data Processing notifications."), - cfg.StrOpt('swift_control_exchange', - default='swift', - help="Exchange name for Swift notifications."), - cfg.StrOpt('magnum_control_exchange', - default='magnum', - help="Exchange name for Magnum notifications."), - cfg.StrOpt('trove_control_exchange', - default='trove', - help="Exchange name for DBaaS notifications."), - cfg.StrOpt('zaqar_control_exchange', - default='zaqar', - help="Exchange name for Messaging service notifications."), - cfg.StrOpt('dns_control_exchange', - default='central', - help="Exchange name for DNS service notifications."), -] diff --git a/ceilometer/hardware/__init__.py b/ceilometer/hardware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/hardware/discovery.py b/ceilometer/hardware/discovery.py deleted file mode 100644 index b15896c7..00000000 --- a/ceilometer/hardware/discovery.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils - -from ceilometer.agent import plugin_base -from ceilometer.i18n import _ -from ceilometer import nova_client - - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('url_scheme', - default='snmp://', - help='URL scheme to use for hardware nodes.'), - cfg.StrOpt('readonly_user_name', - default='ro_snmp_user', - help='SNMPd user name of all nodes running in the cloud.'), - cfg.StrOpt('readonly_user_password', - default='password', - help='SNMPd password of all the nodes running in the cloud.', - secret=True), -] -cfg.CONF.register_opts(OPTS, group='hardware') - - -class NodesDiscoveryTripleO(plugin_base.DiscoveryBase): - def __init__(self): - super(NodesDiscoveryTripleO, self).__init__() - self.nova_cli = nova_client.Client() - self.last_run = None - self.instances = {} - - @staticmethod - def _address(instance, field): - return instance.addresses['ctlplane'][0].get(field) - - def discover(self, manager, param=None): - """Discover resources to monitor. - - instance_get_all will return all instances if last_run is None, - and will return only the instances changed since the last_run time. - """ - try: - instances = self.nova_cli.instance_get_all(self.last_run) - except Exception: - # NOTE(zqfan): instance_get_all is wrapped and will log exception - # when there is any error. It is no need to raise it again and - # print one more time. 
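Further down in discover(), each remaining instance is turned into a resource dict whose resource_url packs the SNMP scheme, credentials and ctlplane address into a single URL. A hedged sketch of just that assembly, with the defaults taken from the hardware options above and an invented address:

    def snmp_resource_url(ip_address, scheme='snmp://',
                          user='ro_snmp_user', password='password'):
        """Build the polling URL handed to the SNMP inspector."""
        return '%s%s:%s@%s' % (scheme, user, password, ip_address)

    print(snmp_resource_url('192.0.2.10'))
    # -> snmp://ro_snmp_user:password@192.0.2.10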
- return [] - - for instance in instances: - if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted', - 'error']: - self.instances.pop(instance.id, None) - else: - self.instances[instance.id] = instance - self.last_run = timeutils.utcnow(True).isoformat() - - resources = [] - for instance in self.instances.values(): - try: - ip_address = self._address(instance, 'addr') - final_address = ( - cfg.CONF.hardware.url_scheme + - cfg.CONF.hardware.readonly_user_name + ':' + - cfg.CONF.hardware.readonly_user_password + '@' + - ip_address) - - resource = { - 'resource_id': instance.id, - 'resource_url': final_address, - 'mac_addr': self._address(instance, - 'OS-EXT-IPS-MAC:mac_addr'), - 'image_id': instance.image['id'], - 'flavor_id': instance.flavor['id'] - } - - resources.append(resource) - except KeyError: - LOG.error(_("Couldn't obtain IP address of " - "instance %s") % instance.id) - - return resources diff --git a/ceilometer/hardware/inspector/__init__.py b/ceilometer/hardware/inspector/__init__.py deleted file mode 100644 index 7e83d028..00000000 --- a/ceilometer/hardware/inspector/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from stevedore import driver - - -def get_inspector(parsed_url, namespace='ceilometer.hardware.inspectors'): - """Get inspector driver and load it. - - :param parsed_url: urlparse.SplitResult object for the inspector - :param namespace: Namespace to use to look for drivers. - """ - loaded_driver = driver.DriverManager(namespace, parsed_url.scheme) - return loaded_driver.driver() diff --git a/ceilometer/hardware/inspector/base.py b/ceilometer/hardware/inspector/base.py deleted file mode 100644 index 9963085a..00000000 --- a/ceilometer/hardware/inspector/base.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Copyright 2014 ZHAW SoE -# -# Authors: Lucas Graf -# Toni Zehnder -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Inspector abstraction for read-only access to hardware components""" - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class Inspector(object): - @abc.abstractmethod - def inspect_generic(self, host, cache, extra_metadata, param): - """A generic inspect function. 
- - :param host: the target host - :param cache: cache passed from the pollster - :param extra_metadata: extra dict to be used as metadata - :param param: a dict of inspector specific param - :return: an iterator of (value, metadata, extra) - :return value: the sample value - :return metadata: dict to construct sample's metadata - :return extra: dict of extra metadata to help constructing sample - """ - - def prepare_params(self, param): - """Parse the params to a format which the inspector itself recognizes. - - :param param: inspector params from meter definition file - :return: a dict of param which the inspector recognized - """ - return {} diff --git a/ceilometer/hardware/inspector/snmp.py b/ceilometer/hardware/inspector/snmp.py deleted file mode 100644 index 88220658..00000000 --- a/ceilometer/hardware/inspector/snmp.py +++ /dev/null @@ -1,313 +0,0 @@ -# -# Copyright 2014 ZHAW SoE -# Copyright 2014 Intel Corp -# -# Authors: Lucas Graf -# Toni Zehnder -# Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Inspector for collecting data over SNMP""" - -import copy -from pysnmp.entity.rfc3413.oneliner import cmdgen - -import six - -from ceilometer.hardware.inspector import base - - -class SNMPException(Exception): - pass - - -def parse_snmp_return(ret, is_bulk=False): - """Check the return value of snmp operations - - :param ret: a tuple of (errorIndication, errorStatus, errorIndex, data) - returned by pysnmp - :param is_bulk: True if the ret value is from GetBulkRequest - :return: a tuple of (err, data) - err: True if error found, or False if no error found - data: a string of error description if error found, or the - actual return data of the snmp operation - """ - err = True - (errIndication, errStatus, errIdx, varBinds) = ret - if errIndication: - data = errIndication - elif errStatus: - if is_bulk: - varBinds = varBinds[-1] - data = "%s at %s" % (errStatus.prettyPrint(), - errIdx and varBinds[int(errIdx) - 1] or "?") - else: - err = False - data = varBinds - return err, data - - -EXACT = 'type_exact' -PREFIX = 'type_prefix' - - -class SNMPInspector(base.Inspector): - # Default port - _port = 161 - - _CACHE_KEY_OID = "snmp_cached_oid" - - # NOTE: The following mapping has been moved to the yaml file identified - # by the config options hardware.meter_definitions_file. However, we still - # keep the description here for code reading purpose. - - """ - - The following mapping define how to construct - (value, metadata, extra) returned by inspect_generic - MAPPING = { - 'identifier: { - 'matching_type': EXACT or PREFIX, - 'metric_oid': (oid, value_converter) - 'metadata': { - metadata_name1: (oid1, value_converter), - metadata_name2: (oid2, value_converter), - }, - 'post_op': special func to modify the return data, - }, - } - - For matching_type of EXACT, each item in the above mapping will - return exact one (value, metadata, extra) tuple. 
The value would be - returned from SNMP request GetRequest for oid of 'metric_oid', the - metadata dict would be constructed based on the returning from SNMP - GetRequest for oids of 'metadata'. - - For matching_type of PREFIX, SNMP request GetBulkRequest - would be sent to get values for oids of 'metric_oid' and - 'metadata' of each item in the above mapping. And each item might - return multiple (value, metadata, extra) tuples, e.g. - Suppose we have the following mapping: - MAPPING = { - 'disk.size.total': { - 'matching_type': PREFIX, - 'metric_oid': ("1.3.6.1.4.1.2021.9.1.6", int) - 'metadata': { - 'device': ("1.3.6.1.4.1.2021.9.1.3", str), - 'path': ("1.3.6.1.4.1.2021.9.1.2", str), - }, - 'post_op': None, - }, - and the SNMP have the following oid/value(s): - { - '1.3.6.1.4.1.2021.9.1.6.1': 19222656, - '1.3.6.1.4.1.2021.9.1.3.1': "/dev/sda2", - '1.3.6.1.4.1.2021.9.1.2.1': "/" - '1.3.6.1.4.1.2021.9.1.6.2': 808112, - '1.3.6.1.4.1.2021.9.1.3.2': "tmpfs", - '1.3.6.1.4.1.2021.9.1.2.2': "/run", - } - So here we'll return 2 instances of (value, metadata, extra): - (19222656, {'device': "/dev/sda2", 'path': "/"}, None) - (808112, {'device': "tmpfs", 'path': "/run"}, None) - - The post_op is assumed to be implemented by new metric developer. It - could be used to add additional special metadata(e.g. ip address), or - it could be used to add information into extra dict to be returned - to construct the pollster how to build final sample, e.g. - extra.update('project_id': xy, 'user_id': zw) - """ - - def __init__(self): - super(SNMPInspector, self).__init__() - self._cmdGen = cmdgen.CommandGenerator() - - def _query_oids(self, host, oids, cache, is_bulk): - # send GetRequest or GetBulkRequest to get oids values and - # populate the values into cache - authData = self._get_auth_strategy(host) - transport = cmdgen.UdpTransportTarget((host.hostname, - host.port or self._port)) - oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) - - if is_bulk: - ret = self._cmdGen.bulkCmd(authData, - transport, - 0, 100, - *oids, - lookupValues=True) - else: - ret = self._cmdGen.getCmd(authData, - transport, - *oids, - lookupValues=True) - (error, data) = parse_snmp_return(ret, is_bulk) - if error: - raise SNMPException("An error occurred, oids %(oid)s, " - "host %(host)s, %(err)s" % - dict(oid=oids, - host=host.hostname, - err=data)) - # save result into cache - if is_bulk: - for var_bind_table_row in data: - for name, val in var_bind_table_row: - oid_cache[str(name)] = val - else: - for name, val in data: - oid_cache[str(name)] = val - - @staticmethod - def find_matching_oids(oid_cache, oid, match_type, find_one=True): - matched = [] - if match_type == PREFIX: - for key in oid_cache.keys(): - if key.startswith(oid): - matched.append(key) - if find_one: - break - else: - if oid in oid_cache: - matched.append(oid) - return matched - - @staticmethod - def get_oid_value(oid_cache, oid_def, suffix=''): - oid, converter = oid_def - value = oid_cache[oid + suffix] - if converter: - value = converter(value) - return value - - @classmethod - def construct_metadata(cls, oid_cache, meta_defs, suffix=''): - metadata = {} - for key, oid_def in six.iteritems(meta_defs): - metadata[key] = cls.get_oid_value(oid_cache, oid_def, suffix) - return metadata - - @classmethod - def _find_missing_oids(cls, meter_def, cache): - # find oids have not been queried and cached - new_oids = [] - oid_cache = cache.setdefault(cls._CACHE_KEY_OID, {}) - # check metric_oid - if not cls.find_matching_oids(oid_cache, - meter_def['metric_oid'][0], 
- meter_def['matching_type']): - new_oids.append(meter_def['metric_oid'][0]) - for metadata in meter_def['metadata'].values(): - if not cls.find_matching_oids(oid_cache, - metadata[0], - meter_def['matching_type']): - new_oids.append(metadata[0]) - return new_oids - - def inspect_generic(self, host, cache, extra_metadata, param): - # the snmp definition for the corresponding meter - meter_def = param - # collect oids that needs to be queried - oids_to_query = self._find_missing_oids(meter_def, cache) - # query oids and populate into caches - if oids_to_query: - self._query_oids(host, oids_to_query, cache, - meter_def['matching_type'] == PREFIX) - # construct (value, metadata, extra) - oid_cache = cache[self._CACHE_KEY_OID] - # find all oids which needed to construct final sample values - # for matching type of EXACT, only 1 sample would be generated - # for matching type of PREFIX, multiple samples could be generated - oids_for_sample_values = self.find_matching_oids( - oid_cache, - meter_def['metric_oid'][0], - meter_def['matching_type'], - False) - input_extra_metadata = extra_metadata - - for oid in oids_for_sample_values: - suffix = oid[len(meter_def['metric_oid'][0]):] - value = self.get_oid_value(oid_cache, - meter_def['metric_oid'], - suffix) - # get the metadata for this sample value - metadata = self.construct_metadata(oid_cache, - meter_def['metadata'], - suffix) - extra_metadata = copy.deepcopy(input_extra_metadata) or {} - # call post_op for special cases - if meter_def['post_op']: - func = getattr(self, meter_def['post_op'], None) - if func: - value = func(host, cache, meter_def, - value, metadata, extra_metadata, - suffix) - yield (value, metadata, extra_metadata) - - def _post_op_memory_avail_to_used(self, host, cache, meter_def, - value, metadata, extra, suffix): - _memory_total_oid = "1.3.6.1.4.1.2021.4.5.0" - if _memory_total_oid not in cache[self._CACHE_KEY_OID]: - self._query_oids(host, [_memory_total_oid], cache, False) - value = int(cache[self._CACHE_KEY_OID][_memory_total_oid]) - value - return value - - def _post_op_net(self, host, cache, meter_def, - value, metadata, extra, suffix): - # add ip address into metadata - _interface_ip_oid = "1.3.6.1.2.1.4.20.1.2" - oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) - if not self.find_matching_oids(oid_cache, - _interface_ip_oid, - PREFIX): - # populate the oid into cache - self._query_oids(host, [_interface_ip_oid], cache, True) - ip_addr = '' - for k, v in six.iteritems(oid_cache): - if k.startswith(_interface_ip_oid) and v == int(suffix[1:]): - ip_addr = k.replace(_interface_ip_oid + ".", "") - metadata.update(ip=ip_addr) - # update resource_id for each nic interface - self._suffix_resource_id(host, metadata, 'name', extra) - return value - - def _post_op_disk(self, host, cache, meter_def, - value, metadata, extra, suffix): - self._suffix_resource_id(host, metadata, 'device', extra) - return value - - @staticmethod - def _suffix_resource_id(host, metadata, key, extra): - prefix = metadata.get(key) - if prefix: - res_id = extra.get('resource_id') or host.hostname - res_id = res_id + ".%s" % metadata.get(key) - extra.update(resource_id=res_id) - - @staticmethod - def _get_auth_strategy(host): - if host.password: - auth_strategy = cmdgen.UsmUserData(host.username, - authKey=host.password) - else: - auth_strategy = cmdgen.CommunityData(host.username or 'public') - return auth_strategy - - def prepare_params(self, param): - processed = {} - processed['matching_type'] = param['matching_type'] - processed['metric_oid'] = 
(param['oid'], eval(param['type'])) - processed['post_op'] = param.get('post_op', None) - processed['metadata'] = {} - for k, v in six.iteritems(param.get('metadata', {})): - processed['metadata'][k] = (v['oid'], eval(v['type'])) - return processed diff --git a/ceilometer/hardware/pollsters/__init__.py b/ceilometer/hardware/pollsters/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/hardware/pollsters/data/snmp.yaml b/ceilometer/hardware/pollsters/data/snmp.yaml deleted file mode 100644 index 60f84af1..00000000 --- a/ceilometer/hardware/pollsters/data/snmp.yaml +++ /dev/null @@ -1,189 +0,0 @@ ---- - -metric: -# cpu - - name: hardware.cpu.load.1min - unit: process - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.10.1.3.1" - type: "lambda x: float(str(x))" - - - name: hardware.cpu.load.5min - unit: process - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.10.1.3.2" - type: "lambda x: float(str(x))" - - - name: hardware.cpu.load.15min - unit: process - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.10.1.3.3" - type: "lambda x: float(str(x))" - - - name: hardware.cpu.util - unit: "%" - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.11.9.0" - type: "int" -# disk - - name: hardware.disk.size.total - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.4.1.2021.9.1.6" - type: "int" - metadata: &disk_metadata - path: - oid: "1.3.6.1.4.1.2021.9.1.2" - type: "str" - device: - oid: "1.3.6.1.4.1.2021.9.1.3" - type: "str" - post_op: "_post_op_disk" - - - name: hardware.disk.size.used - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.4.1.2021.9.1.8" - type: "int" - metadata: *disk_metadata - post_op: "_post_op_disk" -# memory - - name: hardware.memory.total - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.5.0" - type: "int" - - - name: hardware.memory.used - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.6.0" - type: "int" - post_op: "_post_op_memory_avail_to_used" - - - name: hardware.memory.swap.total - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.3.0" - type: "int" - - - name: hardware.memory.swap.avail - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.4.0" - type: "int" - - - name: hardware.memory.buffer - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.14.0" - type: "int" - - - name: hardware.memory.cached - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.15.0" - type: "int" -# network interface - - name: hardware.network.incoming.bytes - unit: B - type: cumulative - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.2.1.2.2.1.10" - type: "int" - metadata: &net_metadata - name: - oid: "1.3.6.1.2.1.2.2.1.2" - type: "str" - speed: - oid: "1.3.6.1.2.1.2.2.1.5" - type: "lambda x: int(x) / 8" - mac: - oid: "1.3.6.1.2.1.2.2.1.6" - type: "lambda x: x.prettyPrint().replace('0x', '')" - post_op: "_post_op_net" - - - name: hardware.network.outgoing.bytes - unit: B - type: cumulative - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.2.1.2.2.1.16" - type: "int" - metadata: *net_metadata - post_op: "_post_op_net" - - - name: 
hardware.network.outgoing.errors - unit: packet - type: cumulative - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.2.1.2.2.1.20" - type: "int" - metadata: *net_metadata - post_op: "_post_op_net" -#network aggregate - - name: hardware.network.ip.outgoing.datagrams - unit: datagrams - type: cumulative - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.2.1.4.10.0" - type: "int" - - - name: hardware.network.ip.incoming.datagrams - unit: datagrams - type: cumulative - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.2.1.4.3.0" - type: "int" -#system stats - - name: hardware.system_stats.cpu.idle - unit: "%" - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.11.11.0" - type: "int" - - - name: hardware.system_stats.io.outgoing.blocks - unit: blocks - type: cumulative - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.11.57.0" - type: "int" - - - name: hardware.system_stats.io.incoming.blocks - unit: blocks - type: cumulative - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.11.58.0" - type: "int" diff --git a/ceilometer/hardware/pollsters/generic.py b/ceilometer/hardware/pollsters/generic.py deleted file mode 100644 index 86245204..00000000 --- a/ceilometer/hardware/pollsters/generic.py +++ /dev/null @@ -1,218 +0,0 @@ -# -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools -import pkg_resources - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import netutils -import six - -from ceilometer.agent import plugin_base -from ceilometer import declarative -from ceilometer.hardware import inspector as insloader -from ceilometer.hardware.pollsters import util -from ceilometer.i18n import _LE, _LW -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('meter_definitions_file', - default="snmp.yaml", - help="Configuration file for defining hardware snmp meters." 
- ), -] - -cfg.CONF.register_opts(OPTS, group='hardware') - -LOG = log.getLogger(__name__) - - -class MeterDefinition(object): - required_fields = ['name', 'unit', 'type'] - - def __init__(self, definition_cfg): - self.cfg = definition_cfg - for fname, fval in self.cfg.items(): - if (isinstance(fname, six.string_types) and - (fname in self.required_fields or - fname.endswith('_inspector'))): - setattr(self, fname, fval) - else: - LOG.warning(_LW("Ignore unrecognized field %s"), fname) - for fname in self.required_fields: - if not getattr(self, fname, None): - raise declarative.MeterDefinitionException( - _LE("Missing field %s") % fname, self.cfg) - if self.type not in sample.TYPES: - raise declarative.MeterDefinitionException( - _LE("Unrecognized type value %s") % self.type, self.cfg) - - -class GenericHardwareDeclarativePollster(plugin_base.PollsterBase): - CACHE_KEY = 'hardware.generic' - mapping = None - - def __init__(self): - super(GenericHardwareDeclarativePollster, self).__init__() - self.inspectors = {} - - def _update_meter_definition(self, definition): - self.meter_definition = definition - self.cached_inspector_params = {} - - @property - def default_discovery(self): - return 'tripleo_overcloud_nodes' - - @staticmethod - def _parse_resource(res): - """Parse resource from discovery. - - Either URL can be given or dict. Dict has to contain at least - keys 'resource_id' and 'resource_url', all the dict keys will be stored - as metadata. - - :param res: URL or dict containing all resource info. - :return parsed_url, resource_id, metadata: Returns parsed URL used for - SNMP query, unique identifier of the resource and metadata - of the resource. - """ - parsed_url, resource_id, metadata = (None, None, None) - if isinstance(res, dict): - if 'resource_url' not in res or 'resource_id' not in res: - LOG.error(_LE('Passed resource dict must contain keys ' - 'resource_id and resource_url.')) - else: - metadata = res - parsed_url = netutils.urlsplit(res['resource_url']) - resource_id = res['resource_id'] - else: - metadata = {} - parsed_url = netutils.urlsplit(res) - resource_id = res - - return parsed_url, resource_id, metadata - - def _get_inspector(self, parsed_url): - if parsed_url.scheme not in self.inspectors: - try: - driver = insloader.get_inspector(parsed_url) - self.inspectors[parsed_url.scheme] = driver - except Exception as err: - LOG.exception(_LE("Cannot load inspector %(name)s: %(err)s"), - dict(name=parsed_url.scheme, - err=err)) - raise err - return self.inspectors[parsed_url.scheme] - - def get_samples(self, manager, cache, resources=None): - """Return an iterable of Sample instances from polling the resources. - - :param manager: The service manager invoking the plugin - :param cache: A dictionary for passing data between plugins - :param resources: end point to poll data from - """ - resources = resources or [] - h_cache = cache.setdefault(self.CACHE_KEY, {}) - sample_iters = [] - - # Get the meter identifiers to poll - identifier = self.meter_definition.name - - for resource in resources: - parsed_url, res, extra_metadata = self._parse_resource(resource) - if parsed_url is None: - LOG.error(_LE("Skip invalid resource %s"), resource) - continue - ins = self._get_inspector(parsed_url) - try: - # Call hardware inspector to poll for the data - i_cache = h_cache.setdefault(res, {}) - - # Prepare inspector parameters and cache it for performance - param_key = parsed_url.scheme + '.' 
+ identifier - inspector_param = self.cached_inspector_params.get(param_key) - if not inspector_param: - param = getattr(self.meter_definition, - parsed_url.scheme + '_inspector', {}) - inspector_param = ins.prepare_params(param) - self.cached_inspector_params[param_key] = inspector_param - - if identifier not in i_cache: - i_cache[identifier] = list(ins.inspect_generic( - host=parsed_url, - cache=i_cache, - extra_metadata=extra_metadata, - param=inspector_param)) - # Generate samples - if i_cache[identifier]: - sample_iters.append(self.generate_samples( - parsed_url, - i_cache[identifier])) - except Exception as err: - LOG.exception(_LE('inspector call failed for %(ident)s ' - 'host %(host)s: %(err)s'), - dict(ident=identifier, - host=parsed_url.hostname, - err=err)) - return itertools.chain(*sample_iters) - - def generate_samples(self, host_url, data): - """Generate a list of Sample from the data returned by inspector - - :param host_url: host url of the endpoint - :param data: list of data returned by the corresponding inspector - """ - samples = [] - definition = self.meter_definition - for (value, metadata, extra) in data: - s = util.make_sample_from_host(host_url, - name=definition.name, - sample_type=definition.type, - unit=definition.unit, - volume=value, - res_metadata=metadata, - extra=extra, - name_prefix=None) - samples.append(s) - return samples - - @classmethod - def build_pollsters(cls): - if not cls.mapping: - definition_cfg = declarative.load_definitions( - {}, cfg.CONF.hardware.meter_definitions_file, - pkg_resources.resource_filename(__name__, "data/snmp.yaml")) - cls.mapping = load_definition(definition_cfg) - - pollsters = [] - for name in cls.mapping: - pollster = cls() - pollster._update_meter_definition(cls.mapping[name]) - pollsters.append((name, pollster)) - return pollsters - - -def load_definition(config_def): - mappings = {} - for meter_def in config_def.get('metric', []): - try: - meter = MeterDefinition(meter_def) - mappings[meter.name] = meter - except declarative.DefinitionException as e: - errmsg = _LE("Error loading meter definition: %s") - LOG.error(errmsg, e.brief_message) - return mappings diff --git a/ceilometer/hardware/pollsters/util.py b/ceilometer/hardware/pollsters/util.py deleted file mode 100644 index 5a68c658..00000000 --- a/ceilometer/hardware/pollsters/util.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright 2013 ZHAW SoE -# Copyright 2014 Intel Corp. -# -# Authors: Lucas Graf -# Toni Zehnder -# Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -from six.moves.urllib import parse as urlparse - -from ceilometer import sample - - -def get_metadata_from_host(host_url): - return {'resource_url': urlparse.urlunsplit(host_url)} - - -def make_resource_metadata(res_metadata=None, host_url=None): - resource_metadata = dict() - if res_metadata is not None: - metadata = copy.copy(res_metadata) - resource_metadata.update(metadata) - resource_metadata.update(get_metadata_from_host(host_url)) - return resource_metadata - - -def make_sample_from_host(host_url, name, sample_type, unit, volume, - project_id=None, user_id=None, resource_id=None, - res_metadata=None, extra=None, - name_prefix='hardware'): - - extra = extra or {} - resource_metadata = make_resource_metadata(res_metadata, host_url) - resource_metadata.update(extra) - - res_id = resource_id or extra.get('resource_id') or host_url.hostname - if name_prefix: - name = name_prefix + '.' + name - return sample.Sample( - name=name, - type=sample_type, - unit=unit, - volume=volume, - user_id=user_id or extra.get('user_id'), - project_id=project_id or extra.get('project_id'), - resource_id=res_id, - resource_metadata=resource_metadata, - source='hardware', - ) diff --git a/ceilometer/image/__init__.py b/ceilometer/image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/image/glance.py b/ceilometer/image/glance.py deleted file mode 100644 index e25aaedd..00000000 --- a/ceilometer/image/glance.py +++ /dev/null @@ -1,129 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Common code for working with images -""" - -from __future__ import absolute_import - -import glanceclient -from oslo_config import cfg - -from ceilometer.agent import plugin_base -from ceilometer import keystone_client -from ceilometer import sample - - -OPTS = [ - cfg.IntOpt('glance_page_size', - default=0, - help="Number of items to request in " - "each paginated Glance API request " - "(parameter used by glanceclient). 
" - "If this is less than or equal to 0, " - "page size is not specified " - "(default value in glanceclient is used)."), -] - -SERVICE_OPTS = [ - cfg.StrOpt('glance', - default='image', - help='Glance service type.'), -] - -cfg.CONF.register_opts(OPTS) -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') - - -class _Base(plugin_base.PollsterBase): - - @property - def default_discovery(self): - return 'endpoint:%s' % cfg.CONF.service_types.glance - - @staticmethod - def get_glance_client(ksclient, endpoint): - # hard-code v1 glance API version selection while v2 API matures - return glanceclient.Client('1', - session=keystone_client.get_session(), - endpoint=endpoint, - auth=ksclient.session.auth) - - def _get_images(self, ksclient, endpoint): - client = self.get_glance_client(ksclient, endpoint) - page_size = cfg.CONF.glance_page_size - kwargs = {} - if page_size > 0: - kwargs['page_size'] = page_size - return client.images.list(filters={"is_public": None}, **kwargs) - - def _iter_images(self, ksclient, cache, endpoint): - """Iterate over all images.""" - key = '%s-images' % endpoint - if key not in cache: - cache[key] = list(self._get_images(ksclient, endpoint)) - return iter(cache[key]) - - @staticmethod - def extract_image_metadata(image): - return dict((k, getattr(image, k)) - for k in - [ - "status", - "is_public", - "name", - "deleted", - "container_format", - "created_at", - "disk_format", - "updated_at", - "properties", - "min_disk", - "protected", - "checksum", - "deleted_at", - "min_ram", - "size", ]) - - -class ImagePollster(_Base): - def get_samples(self, manager, cache, resources): - for endpoint in resources: - for image in self._iter_images(manager.keystone, cache, endpoint): - yield sample.Sample( - name='image', - type=sample.TYPE_GAUGE, - unit='image', - volume=1, - user_id=None, - project_id=image.owner, - resource_id=image.id, - resource_metadata=self.extract_image_metadata(image), - ) - - -class ImageSizePollster(_Base): - def get_samples(self, manager, cache, resources): - for endpoint in resources: - for image in self._iter_images(manager.keystone, cache, endpoint): - yield sample.Sample( - name='image.size', - type=sample.TYPE_GAUGE, - unit='B', - volume=image.size, - user_id=None, - project_id=image.owner, - resource_id=image.id, - resource_metadata=self.extract_image_metadata(image), - ) diff --git a/ceilometer/ipmi/__init__.py b/ceilometer/ipmi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/ipmi/notifications/__init__.py b/ceilometer/ipmi/notifications/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/ipmi/notifications/ironic.py b/ceilometer/ipmi/notifications/ironic.py deleted file mode 100644 index 51a00fef..00000000 --- a/ceilometer/ipmi/notifications/ironic.py +++ /dev/null @@ -1,174 +0,0 @@ -# -# Copyright 2014 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Converters for producing hardware sensor data sample messages from -notification events. 
-""" - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging as messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('ironic_exchange', - default='ironic', - help='Exchange name for Ironic notifications.'), -] - - -cfg.CONF.register_opts(OPTS) - - -# Map unit name to SI -UNIT_MAP = { - 'Watts': 'W', - 'Volts': 'V', -} - - -def validate_reading(data): - """Some sensors read "Disabled".""" - return data != 'Disabled' - - -def transform_id(data): - return data.lower().replace(' ', '_') - - -def parse_reading(data): - try: - volume, unit = data.split(' ', 1) - unit = unit.rsplit(' ', 1)[-1] - return float(volume), UNIT_MAP.get(unit, unit) - except ValueError: - raise InvalidSensorData('unable to parse sensor reading: %s' % - data) - - -class InvalidSensorData(ValueError): - pass - - -class SensorNotification(plugin_base.NotificationBase): - """A generic class for extracting samples from sensor data notifications. - - A notification message can contain multiple samples from multiple - sensors, all with the same basic structure: the volume for the sample - is found as part of the value of a 'Sensor Reading' key. The unit - is in the same value. - - Subclasses exist solely to allow flexibility with stevedore configuration. - """ - - event_types = ['hardware.ipmi.*'] - metric = None - - def get_targets(self, conf): - """oslo.messaging.TargetS for this plugin.""" - return [messaging.Target(topic=topic, - exchange=conf.ironic_exchange) - for topic in self.get_notification_topics(conf)] - - def _get_sample(self, message): - try: - return (payload for _, payload - in message['payload'][self.metric].items()) - except KeyError: - return [] - - @staticmethod - def _package_payload(message, payload): - # NOTE(chdent): How much of the payload should we keep? - payload['node'] = message['payload']['node_uuid'] - info = {'publisher_id': message['publisher_id'], - 'timestamp': message['payload']['timestamp'], - 'event_type': message['payload']['event_type'], - 'user_id': message['payload'].get('user_id'), - 'project_id': message['payload'].get('project_id'), - 'payload': payload} - return info - - def process_notification(self, message): - """Read and process a notification. - - The guts of a message are in dict value of a 'payload' key - which then itself has a payload key containing a dict of - multiple sensor readings. - - If expected keys in the payload are missing or values - are not in the expected form for transformations, - KeyError and ValueError are caught and the current - sensor payload is skipped. - """ - payloads = self._get_sample(message['payload']) - for payload in payloads: - try: - # Provide a fallback resource_id in case parts are missing. 
- resource_id = 'missing id' - try: - resource_id = '%(nodeid)s-%(sensorid)s' % { - 'nodeid': message['payload']['node_uuid'], - 'sensorid': transform_id(payload['Sensor ID']) - } - except KeyError as exc: - raise InvalidSensorData('missing key in payload: %s' % exc) - - info = self._package_payload(message, payload) - - try: - sensor_reading = info['payload']['Sensor Reading'] - except KeyError as exc: - raise InvalidSensorData( - "missing 'Sensor Reading' in payload" - ) - - if validate_reading(sensor_reading): - volume, unit = parse_reading(sensor_reading) - yield sample.Sample.from_notification( - name='hardware.ipmi.%s' % self.metric.lower(), - type=sample.TYPE_GAUGE, - unit=unit, - volume=volume, - resource_id=resource_id, - message=info, - user_id=info['user_id'], - project_id=info['project_id']) - - except InvalidSensorData as exc: - LOG.warning( - 'invalid sensor data for %(resource)s: %(error)s' % - dict(resource=resource_id, error=exc) - ) - continue - - -class TemperatureSensorNotification(SensorNotification): - metric = 'Temperature' - - -class CurrentSensorNotification(SensorNotification): - metric = 'Current' - - -class FanSensorNotification(SensorNotification): - metric = 'Fan' - - -class VoltageSensorNotification(SensorNotification): - metric = 'Voltage' diff --git a/ceilometer/ipmi/platform/__init__.py b/ceilometer/ipmi/platform/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/ipmi/platform/exception.py b/ceilometer/ipmi/platform/exception.py deleted file mode 100644 index bc8c13dc..00000000 --- a/ceilometer/ipmi/platform/exception.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2014 Intel Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class NodeManagerException(Exception): - pass - - -class IPMIException(Exception): - pass diff --git a/ceilometer/ipmi/platform/intel_node_manager.py b/ceilometer/ipmi/platform/intel_node_manager.py deleted file mode 100644 index 21da987a..00000000 --- a/ceilometer/ipmi/platform/intel_node_manager.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright 2014 Intel Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Node manager engine to collect power and temperature of compute node. - -Intel Node Manager Technology enables the datacenter IT to monitor and control -actual server power, thermal and compute utilization behavior through industry -defined standard IPMI. 
This file provides Node Manager engine to get simple -system power and temperature data based on ipmitool. -""" - -import binascii -import collections -import tempfile -import time - -from oslo_config import cfg -import six - -from ceilometer.i18n import _ -from ceilometer.ipmi.platform import exception as nmexcept -from ceilometer.ipmi.platform import ipmitool - - -OPTS = [ - cfg.IntOpt('node_manager_init_retry', - default=3, - help='Number of retries upon Intel Node ' - 'Manager initialization failure') -] - - -CONF = cfg.CONF -CONF.register_opts(OPTS, group='ipmi') - -IPMICMD = {"sdr_dump": "sdr dump", - "sdr_info": "sdr info", - "sensor_dump": "sdr -v"} -IPMIRAWCMD = {"get_device_id": "raw 0x06 0x01", - "get_nm_version": "raw 0x2e 0xca 0x57 0x01 0x00", - "init_sensor_agent": "raw 0x0a 0x2c 0x01", - "init_complete": "raw 0x0a 0x2c 0x00", - "init_sensor_agent_status": "raw 0x0a 0x2c 0x00", - "read_power_all": "raw 0x2e 0xc8 0x57 0x01 0x00 0x01 0x00 0x00", - "read_inlet_temperature": - "raw 0x2e 0xc8 0x57 0x01 0x00 0x02 0x00 0x00", - "read_outlet_temperature": - "raw 0x2e 0xc8 0x57 0x01 0x00 0x05 0x00 0x00", - "read_airflow": "raw 0x2e 0xc8 0x57 0x01 0x00 0x04 0x00 0x00", - "read_cups_utilization": "raw 0x2e 0x65 0x57 0x01 0x00 0x05", - "read_cups_index": "raw 0x2e 0x65 0x57 0x01 0x00 0x01"} - -MANUFACTURER_ID_INTEL = ['57', '01', '00'] -INTEL_PREFIX = '5701000d01' - -# The template dict are made according to the spec. It contains the expected -# length of each item. And it can be used to parse the output of IPMI command. - -ONE_RETURN_TEMPLATE = {"ret": 1} - -BMC_INFO_TEMPLATE = collections.OrderedDict() -BMC_INFO_TEMPLATE['Device_ID'] = 1 -BMC_INFO_TEMPLATE['Device_Revision'] = 1 -BMC_INFO_TEMPLATE['Firmware_Revision_1'] = 1 -BMC_INFO_TEMPLATE['Firmware_Revision_2'] = 1 -BMC_INFO_TEMPLATE['IPMI_Version'] = 1 -BMC_INFO_TEMPLATE['Additional_Device_support'] = 1 -BMC_INFO_TEMPLATE['Manufacturer_ID'] = 3 -BMC_INFO_TEMPLATE['Product_ID'] = 2 -BMC_INFO_TEMPLATE['Auxiliary_Firmware_Revision'] = 4 - -NM_STATISTICS_TEMPLATE = collections.OrderedDict() -NM_STATISTICS_TEMPLATE['Manufacturer_ID'] = 3 -NM_STATISTICS_TEMPLATE['Current_value'] = 2 -NM_STATISTICS_TEMPLATE['Minimum_value'] = 2 -NM_STATISTICS_TEMPLATE['Maximum_value'] = 2 -NM_STATISTICS_TEMPLATE['Average_value'] = 2 -NM_STATISTICS_TEMPLATE['Time_stamp'] = 4 -NM_STATISTICS_TEMPLATE['Report_period'] = 4 -NM_STATISTICS_TEMPLATE["DomainID_PolicyState"] = 1 - -NM_GET_DEVICE_ID_TEMPLATE = collections.OrderedDict() -NM_GET_DEVICE_ID_TEMPLATE['Device_ID'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Device_revision'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Firmware_revision_1'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Firmware_Revision_2'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['IPMI_Version'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Additinal_Device_support'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Manufacturer_ID'] = 3 -NM_GET_DEVICE_ID_TEMPLATE['Product_ID_min_version'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Product_ID_major_version'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Implemented_firmware'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Firmware_build_number'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Last_digit_firmware_build_number'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Image_flags'] = 1 - -NM_GET_VERSION_TEMPLATE = collections.OrderedDict() -NM_GET_VERSION_TEMPLATE['Manufacturer_ID'] = 3 -NM_GET_VERSION_TEMPLATE['NM_Version'] = 1 -NM_GET_VERSION_TEMPLATE['IPMI_Version'] = 1 -NM_GET_VERSION_TEMPLATE['Patch_Version'] = 1 -NM_GET_VERSION_TEMPLATE['Firmware_Revision_Major'] = 1 -NM_GET_VERSION_TEMPLATE['Firmware_Revision_Minor'] = 1 - 
-NM_CUPS_UTILIZATION_TEMPLATE = collections.OrderedDict() -NM_CUPS_UTILIZATION_TEMPLATE['Manufacturer_ID'] = 3 -NM_CUPS_UTILIZATION_TEMPLATE['CPU_Utilization'] = 8 -NM_CUPS_UTILIZATION_TEMPLATE['Mem_Utilization'] = 8 -NM_CUPS_UTILIZATION_TEMPLATE['IO_Utilization'] = 8 - -NM_CUPS_INDEX_TEMPLATE = collections.OrderedDict() -NM_CUPS_INDEX_TEMPLATE['Manufacturer_ID'] = 3 -NM_CUPS_INDEX_TEMPLATE['CUPS_Index'] = 2 - - -def _hex(list=None): - """Format the return value in list into hex.""" - - list = list or [] - if list: - list.reverse() - return int(''.join(list), 16) - - return 0 - - -class NodeManager(object): - """The python implementation of Intel Node Manager engine using ipmitool - - The class implements the engine to read power and temperature of - compute node. It uses ipmitool to execute the IPMI command and parse - the output into dict. - """ - _inited = False - _instance = None - - def __new__(cls, *args, **kwargs): - """Singleton to avoid duplicated initialization.""" - if not cls._instance: - cls._instance = super(NodeManager, cls).__new__(cls, *args, - **kwargs) - return cls._instance - - def __init__(self): - if not (self._instance and self._inited): - # As singleton, only the 1st NM pollster would trigger its - # initialization. nm_version indicate init result, and is shared - # across all pollsters - self._inited = True - self.nm_version = 0 - self.channel_slave = '' - - self.nm_version = self.check_node_manager() - - @staticmethod - def _parse_slave_and_channel(file_path): - """Parse the dumped file to get slave address and channel number. - - :param file_path: file path of dumped SDR file. - :return: slave address and channel number of target device or None if - not found. - """ - prefix = INTEL_PREFIX - # According to Intel Node Manager spec, section 4.5, for Intel NM - # discovery OEM SDR records are type C0h. It contains manufacture ID - # and OEM data in the record body. - # 0-2 bytes are OEM ID, byte 3 is 0Dh and byte 4 is 01h. Byte 5, 6 - # is Intel NM device slave address and channel number/sensor owner LUN. - with open(file_path, 'rb') as bin_fp: - data_str = binascii.hexlify(bin_fp.read()) - - if six.PY3: - data_str = data_str.decode('ascii') - oem_id_index = data_str.find(prefix) - if oem_id_index != -1: - ret = data_str[oem_id_index + len(prefix): - oem_id_index + len(prefix) + 4] - # Byte 5 is slave address. [7:4] from byte 6 is channel - # number, so just pick ret[2] here. - return (ret[0:2], ret[2]) - - @ipmitool.execute_ipmi_cmd(BMC_INFO_TEMPLATE) - def get_device_id(self): - """IPMI command GET_DEVICE_ID.""" - return IPMIRAWCMD["get_device_id"] - - @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) - def _init_sensor_agent(self): - """Run initialization agent.""" - return IPMIRAWCMD["init_sensor_agent"] - - @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) - def _init_sensor_agent_process(self): - """Check the status of initialization agent.""" - return IPMIRAWCMD["init_sensor_agent_status"] - - @ipmitool.execute_ipmi_cmd() - def _dump_sdr_file(self, data_file=""): - """Dump SDR into a file.""" - return IPMICMD["sdr_dump"] + " " + data_file - - @ipmitool.execute_ipmi_cmd(NM_GET_DEVICE_ID_TEMPLATE) - def _node_manager_get_device_id(self): - """GET_DEVICE_ID command in Intel Node Manager - - Different from IPMI command GET_DEVICE_ID, it contains more information - of Intel Node Manager. 
- """ - return self.channel_slave + ' ' + IPMIRAWCMD["get_device_id"] - - @ipmitool.execute_ipmi_cmd(NM_GET_VERSION_TEMPLATE) - def _node_manager_get_version(self): - """GET_NODE_MANAGER_VERSION command in Intel Node Manager - - Byte 4 of the response: - 01h - Intel NM 1.0 - 02h - Intel NM 1.5 - 03h - Intel NM 2.0 - 04h - Intel NM 2.5 - 05h - Intel NM 3.0 - """ - return self.channel_slave + ' ' + IPMIRAWCMD["get_nm_version"] - - @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) - def _read_power_all(self): - """Get the power consumption of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_power_all'] - - @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) - def _read_inlet_temperature(self): - """Get the inlet temperature info of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_inlet_temperature'] - - @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) - def _read_outlet_temperature(self): - """Get the outlet temperature info of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_outlet_temperature'] - - @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) - def _read_airflow(self): - """Get the volumetric airflow of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_airflow'] - - @ipmitool.execute_ipmi_cmd(NM_CUPS_UTILIZATION_TEMPLATE) - def _read_cups_utilization(self): - """Get the average CUPS utilization of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_utilization'] - - @ipmitool.execute_ipmi_cmd(NM_CUPS_INDEX_TEMPLATE) - def _read_cups_index(self): - """Get the CUPS Index of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_index'] - - def read_power_all(self): - return self._read_power_all() if self.nm_version > 0 else {} - - def read_inlet_temperature(self): - return self._read_inlet_temperature() if self.nm_version > 0 else {} - - def read_outlet_temperature(self): - return self._read_outlet_temperature() if self.nm_version >= 5 else {} - - def read_airflow(self): - # only available after NM 3.0 - return self._read_airflow() if self.nm_version >= 5 else {} - - def read_cups_utilization(self): - # only available after NM 3.0 - return self._read_cups_utilization() if self.nm_version >= 5 else {} - - def read_cups_index(self): - # only available after NM 3.0 - return self._read_cups_index() if self.nm_version >= 5 else {} - - def init_node_manager(self): - if self._init_sensor_agent_process()['ret'] == ['01']: - return - # Run sensor initialization agent - for i in range(CONF.ipmi.node_manager_init_retry): - self._init_sensor_agent() - time.sleep(1) - if self._init_sensor_agent_process()['ret'] == ['01']: - return - - raise nmexcept.NodeManagerException(_('Node Manager init failed')) - - def discover_slave_channel(self): - """Discover target slave address and channel number.""" - file_path = tempfile.mkstemp()[1] - self._dump_sdr_file(data_file=file_path) - ret = self._parse_slave_and_channel(file_path) - slave_address = ''.join(['0x', ret[0]]) - channel = ''.join(['0x', ret[1]]) - # String of channel and slave_address - self.channel_slave = '-b ' + channel + ' -t ' + slave_address - - def node_manager_version(self): - """Intel Node Manager capability checking - - This function is used to detect if compute node support Intel Node - Manager(return version number) or not(return -1) and parse out the - slave address and channel number of node manager. 
- """ - self.manufacturer_id = self.get_device_id()['Manufacturer_ID'] - if MANUFACTURER_ID_INTEL != self.manufacturer_id: - # If the manufacturer is not Intel, just set False and return. - return 0 - - self.discover_slave_channel() - support = self._node_manager_get_device_id()['Implemented_firmware'] - # According to Intel Node Manager spec, return value of GET_DEVICE_ID, - # bits 3 to 0 shows if Intel NM implemented or not. - if int(support[0], 16) & 0xf == 0: - return 0 - - return _hex(self._node_manager_get_version()['NM_Version']) - - def check_node_manager(self): - """Intel Node Manager init and check - - This function is used to initialize Intel Node Manager and check the - capability without throwing exception. It's safe to call it on - non-NodeManager platform. - """ - try: - self.init_node_manager() - nm_version = self.node_manager_version() - except (nmexcept.NodeManagerException, nmexcept.IPMIException): - return 0 - return nm_version diff --git a/ceilometer/ipmi/platform/ipmi_sensor.py b/ceilometer/ipmi/platform/ipmi_sensor.py deleted file mode 100644 index e6d32f19..00000000 --- a/ceilometer/ipmi/platform/ipmi_sensor.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2014 Intel Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""IPMI sensor to collect various sensor data of compute node""" - -from ceilometer.i18n import _ -from ceilometer.ipmi.platform import exception as ipmiexcept -from ceilometer.ipmi.platform import ipmitool - -IPMICMD = {"sdr_dump": "sdr dump", - "sdr_info": "sdr info", - "sensor_dump": "sdr -v", - "sensor_dump_temperature": "sdr -v type Temperature", - "sensor_dump_current": "sdr -v type Current", - "sensor_dump_fan": "sdr -v type Fan", - "sensor_dump_voltage": "sdr -v type Voltage"} - -# Requires translation of output into dict -DICT_TRANSLATE_TEMPLATE = {"translate": 1} - - -class IPMISensor(object): - """The python implementation of IPMI sensor using ipmitool - - The class implements the IPMI sensor to get various sensor data of - compute node. It uses ipmitool to execute the IPMI command and parse - the output into dict. 
- """ - _inited = False - _instance = None - - def __new__(cls, *args, **kwargs): - """Singleton to avoid duplicated initialization.""" - if not cls._instance: - cls._instance = super(IPMISensor, cls).__new__(cls, *args, - **kwargs) - return cls._instance - - def __init__(self): - if not (self._instance and self._inited): - self.ipmi_support = False - self._inited = True - - self.ipmi_support = self.check_ipmi() - - @ipmitool.execute_ipmi_cmd() - def _get_sdr_info(self): - """Get the SDR info.""" - return IPMICMD['sdr_info'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_all(self): - """Get the sensor data for type.""" - return IPMICMD['sensor_dump'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_temperature(self): - """Get the sensor data for Temperature.""" - return IPMICMD['sensor_dump_temperature'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_voltage(self): - """Get the sensor data for Voltage.""" - return IPMICMD['sensor_dump_voltage'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_current(self): - """Get the sensor data for Current.""" - return IPMICMD['sensor_dump_current'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_fan(self): - """Get the sensor data for Fan.""" - return IPMICMD['sensor_dump_fan'] - - def read_sensor_any(self, sensor_type=''): - """Get the sensor data for type.""" - if not self.ipmi_support: - return {} - - mapping = {'': self._read_sensor_all, - 'Temperature': self._read_sensor_temperature, - 'Fan': self._read_sensor_fan, - 'Voltage': self._read_sensor_voltage, - 'Current': self._read_sensor_current} - - try: - return mapping[sensor_type]() - except KeyError: - raise ipmiexcept.IPMIException(_('Wrong sensor type')) - - def check_ipmi(self): - """IPMI capability checking - - This function is used to detect if compute node is IPMI capable - platform. Just run a simple IPMI command to get SDR info for check. - """ - try: - self._get_sdr_info() - except ipmiexcept.IPMIException: - return False - return True diff --git a/ceilometer/ipmi/platform/ipmitool.py b/ceilometer/ipmi/platform/ipmitool.py deleted file mode 100644 index 7b049588..00000000 --- a/ceilometer/ipmi/platform/ipmitool.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utils to run ipmitool for data collection""" -from oslo_concurrency import processutils - -from ceilometer.i18n import _ -from ceilometer.ipmi.platform import exception as ipmiexcept -from ceilometer import utils - - -# Following 2 functions are copied from ironic project to handle ipmitool's -# sensor data output. Need code clean and sharing in future. 
-# Check ironic/drivers/modules/ipmitool.py - - -def _get_sensor_type(sensor_data_dict): - # Have only three sensor type name IDs: 'Sensor Type (Analog)' - # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)' - - for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)', - 'Sensor Type (Threshold)'): - try: - return sensor_data_dict[key].split(' ', 1)[0] - except KeyError: - continue - - raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed," - "unknown sensor type")) - - -def _process_sensor(sensor_data): - sensor_data_fields = sensor_data.split('\n') - sensor_data_dict = {} - for field in sensor_data_fields: - if not field: - continue - kv_value = field.split(':') - if len(kv_value) != 2: - continue - sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip() - - return sensor_data_dict - - -def _translate_output(output): - """Translate the return value into JSON dict - - :param output: output of the execution of IPMI command(sensor reading) - """ - sensors_data_dict = {} - - sensors_data_array = output.split('\n\n') - for sensor_data in sensors_data_array: - sensor_data_dict = _process_sensor(sensor_data) - if not sensor_data_dict: - continue - - sensor_type = _get_sensor_type(sensor_data_dict) - - # ignore the sensors which have no current 'Sensor Reading' data - sensor_id = sensor_data_dict['Sensor ID'] - if 'Sensor Reading' in sensor_data_dict: - sensors_data_dict.setdefault(sensor_type, - {})[sensor_id] = sensor_data_dict - - # get nothing, no valid sensor data - if not sensors_data_dict: - raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed," - "No data retrieved from given input")) - return sensors_data_dict - - -def _parse_output(output, template): - """Parse the return value of IPMI command into dict - - :param output: output of the execution of IPMI command - :param template: a dict that contains the expected items of - IPMI command and its length. - """ - ret = {} - index = 0 - if not (output and template): - return ret - - if "translate" in template: - ret = _translate_output(output) - else: - output_list = output.strip().replace('\n', '').split(' ') - if sum(template.values()) != len(output_list): - raise ipmiexcept.IPMIException(_("ipmitool output " - "length mismatch")) - for item in template.items(): - index_end = index + item[1] - update_value = output_list[index: index_end] - ret[item[0]] = update_value - index = index_end - return ret - - -def execute_ipmi_cmd(template=None): - """Decorator for the execution of IPMI command. - - It parses the output of IPMI command into dictionary. - """ - - template = template or [] - - def _execute_ipmi_cmd(f): - def _execute(self, **kwargs): - args = ['ipmitool'] - command = f(self, **kwargs) - args.extend(command.split(" ")) - try: - (out, __) = utils.execute(*args, run_as_root=True) - except processutils.ProcessExecutionError: - raise ipmiexcept.IPMIException(_("running ipmitool failure")) - return _parse_output(out, template) - return _execute - - return _execute_ipmi_cmd diff --git a/ceilometer/ipmi/pollsters/__init__.py b/ceilometer/ipmi/pollsters/__init__.py deleted file mode 100644 index 9ebbf230..00000000 --- a/ceilometer/ipmi/pollsters/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2014 Intel Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Pollsters for IPMI and Intel Node Manager -""" - -from oslo_config import cfg - -OPTS = [ - cfg.IntOpt('polling_retry', - default=3, - help='Tolerance of IPMI/NM polling failures ' - 'before disable this pollster. ' - 'Negative indicates retrying forever.') -] - -cfg.CONF.register_opts(OPTS, group='ipmi') diff --git a/ceilometer/ipmi/pollsters/node.py b/ceilometer/ipmi/pollsters/node.py deleted file mode 100644 index 8540cc4c..00000000 --- a/ceilometer/ipmi/pollsters/node.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_config import cfg -from oslo_log import log -import six - -from ceilometer.agent import plugin_base -from ceilometer.i18n import _ -from ceilometer.ipmi.platform import exception as nmexcept -from ceilometer.ipmi.platform import intel_node_manager as node_manager -from ceilometer import sample - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') -CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters', - group='ipmi') - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class _Base(plugin_base.PollsterBase): - - def setup_environment(self): - super(_Base, self).setup_environment() - self.nodemanager = node_manager.NodeManager() - self.polling_failures = 0 - - # Do not load this extension if no NM support - if self.nodemanager.nm_version == 0: - raise plugin_base.ExtensionLoadError() - - @property - def default_discovery(self): - return 'local_node' - - def get_value(self, stats): - """Get value from statistics.""" - return node_manager._hex(stats["Current_value"]) - - @abc.abstractmethod - def read_data(self, cache): - """Return data sample for IPMI.""" - - def get_samples(self, manager, cache, resources): - # Only one resource for Node Manager pollster - try: - stats = self.read_data(cache) - except nmexcept.IPMIException: - self.polling_failures += 1 - LOG.warning(_('Polling %(name)s failed for %(cnt)s times!') - % ({'name': self.NAME, - 'cnt': self.polling_failures})) - if 0 <= CONF.ipmi.polling_retry < self.polling_failures: - LOG.warning(_('Pollster for %s is disabled!') % self.NAME) - raise plugin_base.PollsterPermanentError(resources) - else: - return - - self.polling_failures = 0 - - metadata = { - 'node': CONF.host - } - - if stats: - data = self.get_value(stats) - - yield sample.Sample( - name=self.NAME, - type=self.TYPE, - unit=self.UNIT, - volume=data, - user_id=None, - project_id=None, - resource_id=CONF.host, - resource_metadata=metadata) - - -class InletTemperaturePollster(_Base): - # Note(ildikov): The new meter name should be - 
# "hardware.ipmi.node.inlet_temperature". As currently there - # is no meter deprecation support in the code, we should use the - # old name in order to avoid confusion. - NAME = "hardware.ipmi.node.temperature" - TYPE = sample.TYPE_GAUGE - UNIT = "C" - - def read_data(self, cache): - return self.nodemanager.read_inlet_temperature() - - -class OutletTemperaturePollster(_Base): - NAME = "hardware.ipmi.node.outlet_temperature" - TYPE = sample.TYPE_GAUGE - UNIT = "C" - - def read_data(self, cache): - return self.nodemanager.read_outlet_temperature() - - -class PowerPollster(_Base): - NAME = "hardware.ipmi.node.power" - TYPE = sample.TYPE_GAUGE - UNIT = "W" - - def read_data(self, cache): - return self.nodemanager.read_power_all() - - -class AirflowPollster(_Base): - NAME = "hardware.ipmi.node.airflow" - TYPE = sample.TYPE_GAUGE - UNIT = "CFM" - - def read_data(self, cache): - return self.nodemanager.read_airflow() - - -class CUPSIndexPollster(_Base): - NAME = "hardware.ipmi.node.cups" - TYPE = sample.TYPE_GAUGE - UNIT = "CUPS" - - def read_data(self, cache): - return self.nodemanager.read_cups_index() - - def get_value(self, stats): - return node_manager._hex(stats["CUPS_Index"]) - - -class _CUPSUtilPollsterBase(_Base): - CACHE_KEY_CUPS = 'CUPS' - - def read_data(self, cache): - i_cache = cache.setdefault(self.CACHE_KEY_CUPS, {}) - if not i_cache: - i_cache.update(self.nodemanager.read_cups_utilization()) - return i_cache - - -class CPUUtilPollster(_CUPSUtilPollsterBase): - NAME = "hardware.ipmi.node.cpu_util" - TYPE = sample.TYPE_GAUGE - UNIT = "%" - - def get_value(self, stats): - return node_manager._hex(stats["CPU_Utilization"]) - - -class MemUtilPollster(_CUPSUtilPollsterBase): - NAME = "hardware.ipmi.node.mem_util" - TYPE = sample.TYPE_GAUGE - UNIT = "%" - - def get_value(self, stats): - return node_manager._hex(stats["Mem_Utilization"]) - - -class IOUtilPollster(_CUPSUtilPollsterBase): - NAME = "hardware.ipmi.node.io_util" - TYPE = sample.TYPE_GAUGE - UNIT = "%" - - def get_value(self, stats): - return node_manager._hex(stats["IO_Utilization"]) diff --git a/ceilometer/ipmi/pollsters/sensor.py b/ceilometer/ipmi/pollsters/sensor.py deleted file mode 100644 index 249913ce..00000000 --- a/ceilometer/ipmi/pollsters/sensor.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_log import log - -from ceilometer.agent import plugin_base -from ceilometer.i18n import _ -from ceilometer.ipmi.notifications import ironic as parser -from ceilometer.ipmi.platform import exception as ipmiexcept -from ceilometer.ipmi.platform import ipmi_sensor -from ceilometer import sample - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') -CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters', - group='ipmi') - -LOG = log.getLogger(__name__) - - -class InvalidSensorData(ValueError): - pass - - -class SensorPollster(plugin_base.PollsterBase): - METRIC = None - - def setup_environment(self): - super(SensorPollster, self).setup_environment() - self.ipmi = ipmi_sensor.IPMISensor() - self.polling_failures = 0 - - # Do not load this extension if no IPMI support - if not self.ipmi.ipmi_support: - raise plugin_base.ExtensionLoadError() - - @property - def default_discovery(self): - return 'local_node' - - @staticmethod - def _get_sensor_types(data, sensor_type): - try: - return (sensor_type_data for _, sensor_type_data - in data[sensor_type].items()) - except KeyError: - return [] - - def get_samples(self, manager, cache, resources): - # Only one resource for IPMI pollster - try: - stats = self.ipmi.read_sensor_any(self.METRIC) - except ipmiexcept.IPMIException: - self.polling_failures += 1 - LOG.warning(_( - 'Polling %(mtr)s sensor failed for %(cnt)s times!') - % ({'mtr': self.METRIC, - 'cnt': self.polling_failures})) - if 0 <= CONF.ipmi.polling_retry < self.polling_failures: - LOG.warning(_('Pollster for %s is disabled!') % self.METRIC) - raise plugin_base.PollsterPermanentError(resources) - else: - return - - self.polling_failures = 0 - - sensor_type_data = self._get_sensor_types(stats, self.METRIC) - - for sensor_data in sensor_type_data: - # Continue if sensor_data is not parseable. - try: - sensor_reading = sensor_data['Sensor Reading'] - sensor_id = sensor_data['Sensor ID'] - except KeyError: - continue - - if not parser.validate_reading(sensor_reading): - continue - - try: - volume, unit = parser.parse_reading(sensor_reading) - except parser.InvalidSensorData: - continue - - resource_id = '%(host)s-%(sensor-id)s' % { - 'host': CONF.host, - 'sensor-id': parser.transform_id(sensor_id) - } - - metadata = { - 'node': CONF.host - } - - yield sample.Sample( - name='hardware.ipmi.%s' % self.METRIC.lower(), - type=sample.TYPE_GAUGE, - unit=unit, - volume=volume, - user_id=None, - project_id=None, - resource_id=resource_id, - resource_metadata=metadata) - - -class TemperatureSensorPollster(SensorPollster): - METRIC = 'Temperature' - - -class CurrentSensorPollster(SensorPollster): - METRIC = 'Current' - - -class FanSensorPollster(SensorPollster): - METRIC = 'Fan' - - -class VoltageSensorPollster(SensorPollster): - METRIC = 'Voltage' diff --git a/ceilometer/keystone_client.py b/ceilometer/keystone_client.py deleted file mode 100644 index 7731176e..00000000 --- a/ceilometer/keystone_client.py +++ /dev/null @@ -1,78 +0,0 @@ -# -# Copyright 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from keystoneauth1 import loading as ka_loading -from keystoneclient.v3 import client as ks_client_v3 -from oslo_config import cfg -from oslo_log import log - -LOG = log.getLogger(__name__) - -CFG_GROUP = "service_credentials" - - -def get_session(requests_session=None): - """Get a ceilometer service credentials auth session.""" - auth_plugin = ka_loading.load_auth_from_conf_options(cfg.CONF, CFG_GROUP) - session = ka_loading.load_session_from_conf_options( - cfg.CONF, CFG_GROUP, auth=auth_plugin, session=requests_session - ) - return session - - -def get_client(trust_id=None, requests_session=None): - """Return a client for keystone v3 endpoint, optionally using a trust.""" - session = get_session(requests_session=requests_session) - return ks_client_v3.Client(session=session, trust_id=trust_id) - - -def get_service_catalog(client): - return client.session.auth.get_access(client.session).service_catalog - - -def get_auth_token(client): - return client.session.auth.get_access(client.session).auth_token - - -CLI_OPTS = [ - cfg.StrOpt('region-name', - deprecated_group="DEFAULT", - deprecated_name="os-region-name", - default=os.environ.get('OS_REGION_NAME'), - help='Region name to use for OpenStack service endpoints.'), - cfg.StrOpt('interface', - default=os.environ.get( - 'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE', - 'public')), - deprecated_name="os-endpoint-type", - choices=('public', 'internal', 'admin', 'auth', 'publicURL', - 'internalURL', 'adminURL'), - help='Type of endpoint in Identity service catalog to use for ' - 'communication with OpenStack services.'), -] - -cfg.CONF.register_cli_opts(CLI_OPTS, group=CFG_GROUP) - - -def register_keystoneauth_opts(conf): - ka_loading.register_auth_conf_options(conf, CFG_GROUP) - ka_loading.register_session_conf_options( - conf, CFG_GROUP, - deprecated_opts={'cacert': [ - cfg.DeprecatedOpt('os-cacert', group=CFG_GROUP), - cfg.DeprecatedOpt('os-cacert', group="DEFAULT")] - }) diff --git a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po b/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po deleted file mode 100644 index 09ccf25e..00000000 --- a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po +++ /dev/null @@ -1,138 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -# Monika Wolf , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-03 03:36+0000\n" -"Last-Translator: Monika Wolf \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "Cannot load inspector %(name)s: %(err)s" -msgstr "Inspector %(name)s kann nicht geladen werden: %(err)s" - -#, python-format -msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" -msgstr "" -"Die Verwendung des residenten Speichers für %(id)s konnte nicht abgerufen " -"werden: %(e)s" - -#, python-format -msgid "Dispatcher failed to handle the %s, requeue it." -msgstr "" -"Dispatcher konnte %s nicht verarbeiten. Erneut in Warteschlange stellen." - -msgid "Error connecting to coordination backend." -msgstr "Fehler beim Herstellen einer Verbindung zum Koordinierungs-Back-End." 
- -msgid "Error getting group membership info from coordination backend." -msgstr "" -"Fehler beim Abrufen von Mitgliedschaftsinformationen vom Koordinierungs-Back-" -"End." - -#, python-format -msgid "Error joining partitioning group %s, re-trying" -msgstr "" -"Fehler beim Beitreten zur Partitionierungsgruppe %s. Operation wird " -"wiederholt." - -#, python-format -msgid "Error processing event and it will be dropped: %s" -msgstr "Fehler beim Verarbeiten des Ereignisses und es wird gelöscht: %s" - -msgid "Error sending a heartbeat to coordination backend." -msgstr "" -"Fehler beim Senden eines Überwachungssignals an das Koordinierungs-Back-End." - -msgid "Fail to process a notification" -msgstr "Eine Benachrichtigung konnte nicht verarbeitet werden." - -msgid "Fail to process notification" -msgstr "Benachrichtigung konnte nicht verarbeitet werden." - -msgid "Failed to connect to Gnocchi." -msgstr "Fehler beim Herstellen einer Verbindung zu Gnocchi." - -#, python-format -msgid "Failed to connect to Kafka service: %s" -msgstr "Fehler beim Herstellen einer Verbindung zum Kafka-Service: %s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" -msgstr "" -"Fehler beim Herstellen einer Verbindung zur Datenbank. Zweck: %(purpose)s " -"Später erneut versuchen: %(err)s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" -msgstr "" -"Fehler beim Herstellen einer Verbindung zur Datenbank. Zweck: %(purpose)s " -"Später erneut versuchen: %(err)s" - -#, python-format -msgid "Failed to load resource due to error %s" -msgstr "Fehler beim Laden der Ressource aufgrund des folgenden Fehlers %s" - -#, python-format -msgid "Failed to record event: %s" -msgstr "Das Ereignis konnte nicht aufgezeichnet werden: %s" - -#, python-format -msgid "Invalid type %s specified" -msgstr "Ungültigen Typ %s angegeben" - -#, python-format -msgid "Missing field %s" -msgstr "Fehlendes Feld %s" - -msgid "Passed resource dict must contain keys resource_id and resource_url." -msgstr "" -"Das übergebene Ressourcenwörterverzeichnis muss die Schlüssel für " -"resource_id und resource_url enthalten." - -#, python-format -msgid "Required field %(field)s should be a %(type)s" -msgstr "Erforderliches Feld %(field)s muss %(type)s sein." - -#, python-format -msgid "Required field %s not specified" -msgstr "Erforderliches Feld %s nicht angegeben." - -#, python-format -msgid "Required fields %s not specified" -msgstr "Erforderliche Felder %s nicht angegeben." - -#, python-format -msgid "Skip invalid resource %s" -msgstr "Ungültige Ressource %s überspringen" - -#, python-format -msgid "Skipping %(name)s, keystone issue: %(exc)s" -msgstr "%(name)s wird übersprungen, Keystone-Problem: %(exc)s" - -msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" -msgstr "Statuscode: %{code}s. 
Fehler beim Versenden des Ereignisses: %{event}s" - -#, python-format -msgid "Unable to load changed event pipeline: %s" -msgstr "Die geänderte Ereignispipeline konnte nicht geladen werden: %s" - -#, python-format -msgid "Unable to load changed pipeline: %s" -msgstr "Die geänderte Pipeline konnte nicht geladen werden: %s" - -#, python-format -msgid "Unrecognized type value %s" -msgstr "Nicht erkannter Typwert %s" - -#, python-format -msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" -msgstr "Inspector-Aufruf fehlgeschlagen für %(ident)s Host %(host)s: %(err)s" diff --git a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po b/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po deleted file mode 100644 index e40dedff..00000000 --- a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po +++ /dev/null @@ -1,145 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -# Frank Kloeker , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-06 06:20+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%d events are removed from database" -msgstr "%d Ereignisse aus Datenbank entfernt" - -#, python-format -msgid "%d samples removed from database" -msgstr "%d Beispiele aus Datenbank entfernt" - -msgid "Configuration:" -msgstr "Konfiguration:" - -#, python-format -msgid "Connecting to %(db)s on %(nodelist)s" -msgstr "Verbindung mit %(db)s auf %(nodelist)s wird hergestellt" - -msgid "Coordination backend started successfully." -msgstr "Das Koordinierungs-Back-End wurde erfolgreich gestartet." - -#, python-format -msgid "Definitions: %s" -msgstr "Definitionen: %s" - -msgid "Detected change in pipeline configuration." -msgstr "Es wurde eine Änderung in der Pipelinekonfiguration festgestellt." - -#, python-format -msgid "Dropping event data with TTL %d" -msgstr "Löschen von Ereignisdaten mit TTL %d" - -#, python-format -msgid "Dropping metering data with TTL %d" -msgstr "Löschen von Messdaten mit TTL %d" - -#, python-format -msgid "Duplicate event detected, skipping it: %s" -msgstr "Doppeltes Ereignis erkannt. Wird übersprungen: %s" - -msgid "Expired residual resource and meter definition data" -msgstr "Abgelaufene Daten für residente Ressource und für Messdefinition" - -#, python-format -msgid "Index %s will be recreate." -msgstr "Index %s wird erneut erstellt. " - -#, python-format -msgid "Joined partitioning group %s" -msgstr "Partitionierungsgruppe %s beigetreten." - -#, python-format -msgid "Left partitioning group %s" -msgstr "Partitionierungsgruppe %s verlassen." - -#, python-format -msgid "No limit value provided, result set will be limited to %(limit)d." -msgstr "" -"Es wurde kein Grenzwert angegeben. Der Ergebnissatz wird auf %(limit)d " -"beschränkt." - -msgid "Nothing to clean, database event time to live is disabled" -msgstr "" -"Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankereignisdaten ist " -"deaktiviert." - -msgid "Nothing to clean, database metering time to live is disabled" -msgstr "" -"Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankstichprobendaten " -"ist deaktiviert." 
- -#, python-format -msgid "" -"Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " -"%(param)s" -msgstr "" -"Pipeline %(pipeline)s: Konfiguration von Transformerinstanz %(name)s mit " -"Parameter %(param)s" - -#, python-format -msgid "Pipeline config: %s" -msgstr "Pipelinekonfiguration: %s" - -msgid "Pipeline configuration file has been updated." -msgstr "Die Pipelinekonfigurationsdatei wurde aktualisiert." - -#, python-format -msgid "Polling pollster %(poll)s in the context of %(src)s" -msgstr "Abfrage von Pollster %(poll)s im Kontext von %(src)s" - -#, python-format -msgid "Publishing policy set to %s" -msgstr "Veröffentlichungsrichtlinie auf %s gesetzt" - -msgid "Reconfiguring polling tasks." -msgstr "Polling-Tasks werden neu konfiguriert." - -msgid "Reloading notification agent and listeners." -msgstr "Benachrichtigungsagent und Listener werden erneut geladen." - -#, python-format -msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" -msgstr "" -"Pollster %(name)s überspringen, keine %(p_context)sressourcen in diesem " -"Zyklus gefunden." - -#, python-format -msgid "Starting server in PID %s" -msgstr "Starten von Server in PID %s" - -#, python-format -msgid "Swift endpoint not found: %s" -msgstr "Swift-Endpunkt konnte nicht gefunden werden: %s" - -msgid "detected decoupled pipeline config format" -msgstr "entkoppeltes Pipeline-Konfigurationsformat erkannt" - -#, python-format -msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" -msgstr "" -"Messung von Daten %(counter_name)s für %(resource_id)s: %(counter_volume)s" - -#, python-format -msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" -msgstr "" -"Bereitstellung auf 0.0.0.0:%(sport)s, Ansicht unter http://127.0.0.1:" -"%(vport)s" - -#, python-format -msgid "serving on http://%(host)s:%(port)s" -msgstr "Bereitstellung auf http://%(host)s:%(port)s" diff --git a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po b/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po deleted file mode 100644 index 5b444022..00000000 --- a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po +++ /dev/null @@ -1,125 +0,0 @@ -# Monika Wolf , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-04 10:08+0000\n" -"Last-Translator: Monika Wolf \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "" -"Cannot extract tasks because agent failed to join group properly. Rejoining " -"group." -msgstr "" -"Extrahieren der Tasks nicht möglich, da der Agent nicht ordnungsgemäß in die " -"Gruppe eingebunden werden konnte. Operation zum Wiedereinbinden in die " -"Gruppe wird durchgeführt." - -#, python-format -msgid "" -"Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: " -"%(exc)s" -msgstr "" -"Die %(pollster)s-Daten für %(instance_id)s können nicht untersucht werden. 
" -"Behebbare Ursache: %(exc)s" - -#, python-format -msgid "Dropping out of time order sample: %s" -msgstr "" -"Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s" - -#, python-format -msgid "Dropping sample with no predecessor: %s" -msgstr "Beispiel ohne Vorgänger wird gelöscht: %s" - -#, python-format -msgid "Failed to load any dispatchers for %s" -msgstr "Es konnten keine Dispatcher für %s geladen werden." - -#, python-format -msgid "Ignore unrecognized field %s" -msgstr "Nicht erkanntes Feld %s ignorieren" - -#, python-format -msgid "Invalid status, skipping IP address %s" -msgstr "Ungültiger Status. IP-Adresse %s wird übersprungen." - -msgid "Negative delta detected, dropping value" -msgstr "Negatives Delta erkannt. Wert wird verworfen." - -#, python-format -msgid "No endpoints found for service %s" -msgstr "Es wurden keine Endpunkte für den Service %s gefunden." - -msgid "" -"Non-metric meters may be collected. It is highly advisable to disable these " -"meters using ceilometer.conf or the pipeline.yaml" -msgstr "" -"Es werden möglicherweise nicht metrische Daten erfasst. Es wird dringend " -"empfohlen, diese Zähler über die Datei ceilometer.conf oder pipeline.yaml zu " -"inaktivieren." - -#, python-format -msgid "" -"Skipping %(name)s, %(service_type)s service is not registered in keystone" -msgstr "" -"%(name)s wird übersprungen. Der Service %(service_type)s ist nicht in " -"Keystone registriert." - -#, python-format -msgid "Skipping duplicate meter definition %s" -msgstr "Doppelte Messdefinition %s wird übersprungen." - -msgid "" -"ceilometer-api started with aodh enabled. Alarms URLs will be redirected to " -"aodh endpoint." -msgstr "" -"Die ceilometer-api wurde mit aktiviertem aodh gestartet. Alarm-URLs werden " -"an den aodh-Endpunkt umgeleitet. " - -msgid "" -"ceilometer-api started with gnocchi enabled. The resources/meters/samples " -"URLs are disabled." -msgstr "" -"Die ceilometer-api wurde mit aktiviertem Gnocchi gestartet. Die URLs für " -"resources/meters/samples sind inaktiviert." - -#, python-format -msgid "event signature invalid, discarding event: %s" -msgstr "Ereignissignatur ungültig. Ereignis wird verworfen: %s" - -#, python-format -msgid "" -"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no " -"volume (volume: None), the sample will be dropped" -msgstr "" -"Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s " -"enthält keinen Datenträger (volume: None). Die Stichprobe wird gelöscht." - -#, python-format -msgid "" -"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has " -"volume which is not a number (volume: %(counter_volume)s), the sample will " -"be dropped" -msgstr "" -"Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s " -"enthält einen Datenträger ohne Zahl (volume: %(counter_volume)s). Die " -"Stichprobe wird gelöscht." - -msgid "" -"pecan_debug cannot be enabled, if workers is > 1, the value is overrided " -"with False" -msgstr "" -"pecan_debug kann nicht aktiviert werden, wenn Worker > 1 ist. Der Wert wird " -"mit False überschrieben." - -#, python-format -msgid "unable to configure oslo_cache: %s" -msgstr "Konfigurieren von oslo_cache nicht möglich: %s" diff --git a/ceilometer/locale/de/LC_MESSAGES/ceilometer.po b/ceilometer/locale/de/LC_MESSAGES/ceilometer.po deleted file mode 100644 index aad2eaf4..00000000 --- a/ceilometer/locale/de/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,522 +0,0 @@ -# Translations template for ceilometer. 
-# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Carsten Duch , 2014 -# Christian Berendt , 2014 -# Ettore Atalan , 2014 -# Andreas Jaeger , 2016. #zanata -# Frank Kloeker , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-06 06:19+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language: de\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: German\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s nicht gefunden" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"Arithmetiktransformer muss mindestens eine Messgröße im Ausdruck '%s' " -"verwenden" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"Tabelle %(table_name)s kann nicht erstellt werden, da sie bereits vorhanden " -"ist. Fehler wird ignoriert" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "Fortfahren nach Fehler von %(name)s: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "" -"Es konnte keine Verbindung zum untergeordneten Host hergestellt werden: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "Es konnte keine Verbindung zu XenAPI hergestellt werden: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "Abruf von CPU-Auslastung nicht möglich für %(id)s: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "Abruf von Speicherbelegung nicht möglich für %(id)s: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "CPU-Auslastung für VM %s konnte nicht abgerufen werden" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "IP-Adresse von Instanz %s konnte nicht abgerufen werden" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "Löschen von Benachrichtigung %(type)s (UUID:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"Fehler von libvirt während Suche nach Instanz : " -"[Fehlercode %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "Fehler bei Auswertung der HTTP-Antwort %s" - -msgid "Error stopping pollster." -msgstr "Fehler beim Stoppen des Pollster." - -msgid "Event" -msgstr "Ereignis" - -msgid "Expression evaluated to a NaN value!" -msgstr "Ausdruck ergab einen NaN-Wert!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "Fehler beim Importieren der Erweiterung für %(name)s: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"Fehler beim Überprüfen von Daten der Instanz , " -"Domänenstatus ist ABGESCHALTET." 
- -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"Fehler beim Überprüfen der Speicherbelegung von %(instance_uuid)s, " -"Informationen können nicht von libvirt abgerufen werden: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"Fehler beim Überprüfen der Speicherbelegung von Instanz , Informationen können nicht von libvirt abgerufen werden." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "Es konnten keine Benachrichtigungshandler für %s geladen werden" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Zeitmarkenwert %s konnte nicht analysiert werden" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "%d Datenpunkte konnten nicht veröffentlicht werden; werden gelöscht" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "" -"%d Datenpunkte konnten nicht veröffentlicht werden; in Warteschlange " -"einreihen" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Filterausdruck nicht gültig: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "Instanz %(name)s (%(instance_id)s) wird ignoriert: %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "Instanz %(name)s wird ignoriert: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Loadbalancer %(loadbalancer_id)s wird ignoriert." - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Pool %(pool_id)s wird ignoriert." - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Ungültige YAML-Syntax in Definitionsdatei %(file)s in Zeile: %(line)s, " -"Spalte: %(column)s." - -#, python-format -msgid "Invalid aggregation function: %s" -msgstr "Ungültige Aggreation Funktion: %s" - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Ungültiger Zeitraum %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "Ungültiger Traittyp '%(type)s' für Trait %(trait)s" - -msgid "Limit must be positive" -msgstr "Grenzwert muss positiv sein" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "Mehr als ein Ereignis mit der ID %s vom Speichertreiber zurückgegeben" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Mehrere VMs %s in XenServer gefunden" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "" -"Angabe von connection_url und connection_password für die Verwendung " -"erforderlich" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "Kein Plug-in mit dem Namen %(plugin)s verfügbar für %(name)s." 
- -msgid "Node Manager init failed" -msgstr "Initialisierung von Knoten-Manager fehlgeschlagen" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Nicht berechtigt für den Zugriff auf %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylight-API hat Folgendes zurückgegeben: %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail-API hat Folgendes zurückgegeben: %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"Operator %(operator)s wird nicht unterstützt. Für das Feld %(field)s ist " -"nur der Gleichheitsoperator verfügbar." - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"Operator %(operator)s wird nicht unterstützt. Unterstützte Operatoren: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Ausdruck für 'Sortieren nach' nicht gültig: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Analysefehler in JSONPath-Spezifikation '%(jsonpath)s' für %(name)s: %(err)s" - -msgid "Period must be positive." -msgstr "Zeitraum muss positiv sein." - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: %(status)s nach Fehler von Publisher %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: Fortsetzen nach Fehler von Publisher %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "" -"Pipeline %(pipeline)s: Fehler bei Flushoperation für Transformer %(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Pipeline %(pipeline)s: Beendigung nach Fehler von Transformer %(trans)s für " -"%(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "Plug-in angegeben, aber kein Plug-in-Name für %s angegeben." - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "Polling von %(mtr)s-Sensor %(cnt)s Mal fehlgeschlagen!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "Polling von %(name)s %(cnt)s Mal fehlgeschlagen!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "Pollster für %s ist inaktiviert!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"Verhindern Sie, dass Pollster %(name)s Quelle %(source)s weiterhin abfragt!" 
- -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"Maximale Länge von local_queue für Publisher ist überschritten, die %d " -"ältesten Beispiele werden gelöscht" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "" -"Veröffentlichungsrichtlinie ist unbekannt (%s); auf Standardeinstellung " -"setzen" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "RGW-AdminOps-API hat Folgendes zurückgegeben: %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "" -"Anforderung konnte keine Verbindung mit OpenDaylight über NorthBound REST-" -"API herstellen" - -#, python-format -msgid "Required field %s not specified" -msgstr "Erforderliches Feld %s nicht angegeben" - -msgid "Resource" -msgstr "Resource" - -msgid "Sample" -msgstr "Beispiel" - -msgid "Samples should be included in request body" -msgstr "Beispiele sollten in Anforderungshauptteil enthalten sein" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Laden der Ausnahme für %s überspringen" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "Zeichenfolge %s ist kein gültiger Wert für 'isotime'" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"Die YAML-Datei mit der Definition der Zuordnung zwischen Beispielen und " -"gnocchi-Ressourcen/Metriken" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Der Datentyp %(type)s wird nicht unterstützt. Die Liste der unterstützten " -"Datentypen lautet: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "Das Feld 'fields' ist erforderlich für %s" - -msgid "The path for the file publisher is required" -msgstr "Der Pfad für den Datei-Publisher ist erforderlich" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UPD: Von %s gesendete Daten konnten nicht dekodiert werden" - -msgid "UDP: Unable to store meter" -msgstr "UDP: Messgröße kann nicht gespeichert werden" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "" -"Es kann keine Verbindung zum Datenbankserver hergestellt werden: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Wert %(value)s kann nicht in den erwarteten Datentyp %(type)s umgewandelt " -"werden." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "Ressourcen können nicht gefunden werden: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "Auswertung nicht möglich für Ausdruck %(expr)s: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "Publisher %s kann nicht geladen werden" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "Hypervisorinspector %s kann nicht geladen werden" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"Es kann keine erneute Verbindung zur primären mongodb nach %(retries)d " -"Versuchen hergestellt werden. Abbruch." - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." 
-msgstr "" -"Es kann keine erneute Verbindung zur primären mongodb hergestellt werden: " -"%(errmsg)s. Erneuter Versuch in %(retry_interval)d Sekunden." - -msgid "Unable to send sample over UDP" -msgstr "Beispiel kann nicht über UDP gesendet werden" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Unerwartete Ausnahme beim Konvertieren von %(value)s in den erwarteten " -"Datentyp %(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Unbekannte Erkennungserweiterung: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "Unbekannter Metadatentyp. Schlüssel (%s) wird nicht abfragbar sein." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Loadbalancer %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Firewall %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Listener %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Mitglied %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Pool %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für VIP %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für VPN %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VM %s in VMware vSphere nicht gefunden" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "VM %s in XenServer nicht gefunden" - -msgid "Wrong sensor type" -msgstr "Falscher Sensortyp" - -msgid "XenAPI not installed" -msgstr "XenAPI nicht installiert" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "YAML-Fehler beim Lesen von Definitionsdatei %(file)s." - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "" -"Alarm-URLs sind nicht verfügbar, wenn Aodh inaktiviert oder nicht verfügbar " -"ist." - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "Abruf von CPU-Zeit nicht möglich für %(id)s: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "" -"Wenn Gnocci aktiviert ist, kann die Option 'direct' nicht den Wert 'true' " -"haben. " - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "" -"Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "Beispiel ohne Vorgänger wird gelöscht: %s" - -msgid "ipmitool output length mismatch" -msgstr "Abweichung bei ipmitool-Ausgabelänge" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes und backup_count sollten Zahlen sein." 
- -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"Analyse von IPMI-Sensordaten fehlgeschlagen, keine Daten von angegebener " -"Eingabe abgerufen" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "Analyse von IPMI-Sensordaten fehlgeschlagen, unbekannter Sensortyp" - -msgid "running ipmitool failure" -msgstr "Fehler beim Ausführen von ipmitool" diff --git a/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po b/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po deleted file mode 100644 index fa38d329..00000000 --- a/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po +++ /dev/null @@ -1,132 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -# Eugènia Torrella , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-18 11:52+0000\n" -"Last-Translator: Eugènia Torrella \n" -"Language-Team: Spanish\n" -"Language: es\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "Cannot load inspector %(name)s: %(err)s" -msgstr "No se ha podido cargar el inspector %(name)s: %(err)s" - -#, python-format -msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" -msgstr "No se ha podido obtener el uso de memoria residente para %(id)s: %(e)s" - -#, python-format -msgid "Dispatcher failed to handle the %s, requeue it." -msgstr "El asignador no ha podido manejar el %s, vuelva a ponerlo en la cola." - -msgid "Error connecting to coordination backend." -msgstr "Error de conexión con el servidor coordinador." - -msgid "Error getting group membership info from coordination backend." -msgstr "" -"Error al obtener información de pertenencia a grupos del servidor " -"coordinador." - -#, python-format -msgid "Error joining partitioning group %s, re-trying" -msgstr "Error al unirse al grupo de partición %s, se está reintentando" - -#, python-format -msgid "Error processing event and it will be dropped: %s" -msgstr "Se ha producido un error al procesar el suceso y se descartará: %s" - -msgid "Error sending a heartbeat to coordination backend." -msgstr "Error al enviar una señal de latido al servidor coordinador." - -msgid "Fail to process a notification" -msgstr "Error al procesar una notificación" - -msgid "Fail to process notification" -msgstr "No se ha podido procesar la notificación" - -msgid "Failed to connect to Gnocchi." -msgstr "No se ha podido conectar con Gnocchi." - -#, python-format -msgid "Failed to connect to Kafka service: %s" -msgstr "No se ha podido conectar con el servicio Kafka: %s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" -msgstr "" -"No se ha podido establecer conexión con la base de datos con el propósito " -"%(purpose)s. Vuelva a intentarlo más tarde: %(err)s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" -msgstr "" -"No se ha podido establecer conexión con la base de datos con el propósito " -"%(purpose)s. 
Vuelva a intentarlo más tarde: %(err)s" - -#, python-format -msgid "Failed to load resource due to error %s" -msgstr "No se ha podido cargar el recurso debido a un error: %s" - -#, python-format -msgid "Failed to record event: %s" -msgstr "No se ha podido registrar el suceso: %s" - -msgid "Failed to retry to send sample data with max_retry times" -msgstr "" -"No se ha podido volver a intentar enviar datos de ejemplo max_retry veces" - -msgid "" -"Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " -"not part of group and cannot take tasks" -msgstr "" -"ID de grupo: %{group_id}s, Miembros: %{members}s, Yo: %{me}s: El agente " -"actual no forma parte del grupo y no puede coger tareas" - -#, python-format -msgid "Invalid type %s specified" -msgstr "Se ha especificado un tipo no válido: %s" - -#, python-format -msgid "Missing field %s" -msgstr "Falta el campo %s" - -msgid "Passed resource dict must contain keys resource_id and resource_url." -msgstr "" -"El dicionario de recursos que se pase debe contener las claves resource_id y " -"resource_url" - -#, python-format -msgid "Required field %(field)s should be a %(type)s" -msgstr "El campo obligatorio %(field)s s debería ser un %(type)s" - -#, python-format -msgid "Required field %s not specified" -msgstr "No se ha especificado el campo obligatorio %s" - -#, python-format -msgid "Required fields %s not specified" -msgstr "No se han especificado los campos obligatorios %s" - -#, python-format -msgid "Skip invalid resource %s" -msgstr "Omitir el recurso no válido %s" - -msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" -msgstr "" -"Código de estado: %{code}s. No se ha podido asignar el suceso: %{event}s" - -#, python-format -msgid "Unrecognized type value %s" -msgstr "Valor de tipo no reconocido %s" - -#, python-format -msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" -msgstr "Error en la llamada al inspector del host %(ident)s %(host)s: %(err)s" diff --git a/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po b/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po deleted file mode 100644 index 5a8e6a4d..00000000 --- a/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po +++ /dev/null @@ -1,139 +0,0 @@ -# Eugènia Torrella , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev57\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-18 02:09+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-18 02:45+0000\n" -"Last-Translator: Eugènia Torrella \n" -"Language-Team: Spanish\n" -"Language: es\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%d events are removed from database" -msgstr "Se han eliminado %d sucesos de la base de datos" - -#, python-format -msgid "%d samples removed from database" -msgstr "Se han eliminado %d ejemplos de la base de datos" - -msgid "Configuration:" -msgstr "Configuración:" - -#, python-format -msgid "Connecting to %(db)s on %(nodelist)s" -msgstr "Se está estableciendo conexión con %(db)s en %(nodelist)s" - -msgid "Coordination backend started successfully." -msgstr "El servidor coordinador se ha iniciado satisfactoriamente." - -#, python-format -msgid "Definitions: %s" -msgstr "Definiciones: %s" - -msgid "Detected change in pipeline configuration." -msgstr "Se ha detectado un cambio en la configuración de la interconexión." 
- -#, python-format -msgid "Dropping event data with TTL %d" -msgstr "Descartando datos de sucesos con TTL %d" - -#, python-format -msgid "Dropping metering data with TTL %d" -msgstr "Descartando datos de calibración con TTL %d" - -#, python-format -msgid "Duplicate event detected, skipping it: %s" -msgstr "Se ha detectado un suceso duplicado, se omitirá: %s" - -msgid "Expired residual resource and meter definition data" -msgstr "El recurso residual y los datos de definición del medidor han caducado" - -#, python-format -msgid "Index %s will be recreate." -msgstr "Se volverá a crear el índice %s." - -#, python-format -msgid "Joined partitioning group %s" -msgstr "Se ha unido al grupo de partición %s" - -#, python-format -msgid "Left partitioning group %s" -msgstr "Ha dejado el grupo de partición %s" - -#, python-format -msgid "No limit value provided, result set will be limited to %(limit)d." -msgstr "" -"No se ha proporcionado ningún valor límite, el conjunto de resultados estará " -"limitado a %(limit)d." - -msgid "Nothing to clean, database event time to live is disabled" -msgstr "" -"No hay nada que limpiar, el tiempo de vida de sucesos de base de datos está " -"inhabilitado" - -msgid "Nothing to clean, database metering time to live is disabled" -msgstr "" -"No hay nada que limpiar, el tiempo de vida de medición de base de datos está " -"inhabilitado" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " -"%(param)s" -msgstr "" -"Interconexión %(pipeline)s: Configure la instancia de transformador %(name)s " -"con el parámetro %(param)s" - -#, python-format -msgid "Pipeline config: %s" -msgstr "Configuración de interconexión: %s" - -msgid "Pipeline configuration file has been updated." -msgstr "Se ha actualizado el archivo de configuración de la interconexión." - -#, python-format -msgid "Polling pollster %(poll)s in the context of %(src)s" -msgstr "Sondeando pollster %(poll)s en el contexto de %(src)s" - -#, python-format -msgid "Publishing policy set to %s" -msgstr "Política de publicación establecida en %s" - -msgid "Reconfiguring polling tasks." -msgstr "Reconfigurando las tareas de sondeo." - -msgid "Reloading notification agent and listeners." -msgstr "Recargando la notificación, el agente y los escuchas." - -#, python-format -msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" -msgstr "" -"Omitir pollster %(name)s, ningún recurso de %(p_context)s ha encontrado " -"este ciclo" - -#, python-format -msgid "Starting server in PID %s" -msgstr "Iniciando servidor en PID %s" - -msgid "detected decoupled pipeline config format" -msgstr "" -"se ha detectado un formato de configuración de interconexión desacoplado" - -#, python-format -msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" -msgstr "" -"datos de medición %(counter_name)s para %(resource_id)s: %(counter_volume)s" - -#, python-format -msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" -msgstr "sirviendo en 0.0.0.0:%(sport)s, vista en http://127.0.0.1:%(vport)s" - -#, python-format -msgid "serving on http://%(host)s:%(port)s" -msgstr "sirviendo en http://%(host)s:%(port)s" diff --git a/ceilometer/locale/es/LC_MESSAGES/ceilometer.po b/ceilometer/locale/es/LC_MESSAGES/ceilometer.po deleted file mode 100644 index a071f451..00000000 --- a/ceilometer/locale/es/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,511 +0,0 @@ -# Translations template for ceilometer. 
-# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Rafael Rivero , 2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-24 11:03+0000\n" -"Last-Translator: Eugènia Torrella \n" -"Language: es\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Spanish\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s No encontrado" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"El transformador aritmético debe utilizar al menos un medidor en la " -"expresión '%s'" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"No se puede crear la tabla %(table_name)s, ya existe. Se ignorará el error." - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "Continuar después de error desde %(name)s: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "No se ha podido conectar con el host esclavo: %s" - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "No se puede conectar a XenAPI: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "No se ha podido obtener CPU Util para %(id)s: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "No se ha podido obtener el uso de memoria para %(id)s: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "No se puede obtener la utilización de CPU de VM %s" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "No se ha podido obtener la dirección IP de la instancia %s" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "Descartando la notificación %(type)s (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"Error de libvirt al buscar la instancia : [Código " -"de error %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "Error analizándo respuesta HTTP: %s." - -msgid "Error stopping pollster." -msgstr "Error al detener el pollster." - -msgid "Event" -msgstr "Suceso" - -msgid "Expression evaluated to a NaN value!" -msgstr "La expresión se ha evaluado en un valor NaN." - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "No se ha podido importar la extensión para %(name)s: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"No se han podido analizar los datos de la instancia , el estado del dominio es SHUTOFF." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"No se ha podido analizar el uso de memoria de %(instance_uuid)s, no se puede " -"obtener información de libvirt: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." 
-msgstr "" -"No se ha podido analizar el uso de memoria de la instancia , no se puede obtener información de libvirt." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "No se ha podido cargar ningún manejador de notificación para %s" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "No se ha podido analizar el valor de indicación de fecha y hora %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "No se han podido publicar los puntos de datos %d, descartándolos" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "No se han podido publicar los puntos de datos %d, póngalos en cola" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Expresión de filtro no válida: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "Ignorando la instancia %(name)s (%(instance_id)s) : %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "Ignorando la instancia %(name)s: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Se ignorará el equilibrador de carga %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Se ignorará la agrupación %(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Sintaxis de YAML no válida en archivo de definiciones %(file)s en la línea: " -"%(line)s, columna: %(column)s." - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Periodo no válido %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "Tipo de rasgo no válido '%(type)s' para el rasgo %(trait)s" - -msgid "Limit must be positive" -msgstr "El límite debe ser positivo" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "" -"Se ha devuelto más de un suceso con el %s del controlador de almacenamiento" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Se han encontrado varias VM %s en XenServer" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "" -"Debe especificar el url_conexión y la contraseña_conexión para utilizar" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "No hay ningún plug-in denominado %(plugin)s disponible para %(name)s" - -msgid "Node Manager init failed" -msgstr "El inicio de Gestor de nodos ha fallado" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "No está autorizado para acceder a %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "La API OpenDaylitght ha devuelto %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "La API Opencontrail ha devuelto %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"El operador %(operator)s no se admite. Solo hay disponible el operador de " -"igualdad para el campo %(field)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"El operador %(operator)s no está admitido. 
Los operadores admitidos son: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Expresión de ordenar por no válida: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Error de análisis en especificación de JSONPath '%(jsonpath)s' para " -"%(name)s: %(err)s" - -msgid "Period must be positive." -msgstr "El período debe ser positivo." - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "" -"Interconexión %(pipeline)s: %(status)s tras el error de la aplicación de " -"publicación %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "" -"Interconexión %(pipeline)s: Continúe tras el error de la aplicación de " -"publicación %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "Interconexión %(pipeline)s: Error al vaciar el transformador %(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Interconexión %(pipeline)s: Salga tras error del transformador %(trans)s " -"para %(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "" -"Se ha especificado un plug-in, pero no se ha proporcionado ningún nombre de " -"plug-in para %s" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "¡El sensor de sondeo %(mtr)s ha fallado %(cnt)s veces!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "El sondeo %(name)s ha fallado %(cnt)s veces." - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "¡El Pollster para %s está inhabilitado!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "¡Impedir pollster %(name)s para el origen de sondeo %(source)s ahora!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"Se supera la longitud máxima de aplicación de publicación local_queue, " -"descartando los ejemplos más antiguos %d" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "" -"No se conoce la política de publicación (%s) forzar para tomar el valor " -"predeterminado" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "La API de RGW AdminOps ha devuelto %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "" -"La solicitud no ha podido conectar con OpenDaylight con la API REST " -"NorthBound" - -#, python-format -msgid "Required field %s not specified" -msgstr "Campo necesario %s no especificado" - -msgid "Resource" -msgstr "Recurso" - -msgid "Sample" -msgstr "Muestra" - -msgid "Samples should be included in request body" -msgstr "Los ejemplos se deben incluir en el cuerpo de la solicitud" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Omitir la extensión de carga para %s" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "La serie %s no es una hora iso válida" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"El archivo Yaml que define la correlación entre los ejemplos y recursos/" -"métricas gnocchi" - -#, python-format -msgid "" -"The data type %(type)s is not supported. 
The supported data type list is: " -"%(supported)s" -msgstr "" -"El tipo de datos %(type)s no es compatible. La lista de tipo de datos " -"admitido es: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "El campo 'campos' es obligatorio para %s" - -msgid "The path for the file publisher is required" -msgstr "" -"La vía de acceso para la aplicación de publicación de archivos es necesaria" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: no se pueden decodificar los datos enviados por %s" - -msgid "UDP: Unable to store meter" -msgstr "UDP: no se puede almacenar el medidor" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "No se ha podido conectar con el servidor de base de datos: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"No se ha podido convertir el valor %(value)s al tipo de datos esperado " -"%(type)s." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "No se pueden descubrir recursos: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "No se puede evaluar la expresión %(expr)s: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "No se puede cargar la aplicación de publicación %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "No se puede cargar el inspector de hipervisor: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"No se ha podido volver a conectar con la mongodb primaria después de " -"%(retries)d intentos. Se va a abandonar." - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"No se ha podido volver a conectar con la mongodb primaria: %(errmsg)s. Se " -"volverá a intentar en %(retry_interval)d segundos." - -msgid "Unable to send sample over UDP" -msgstr "No se ha podido enviar una muestra sobre UDP" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Excepción inesperada al convertir %(value)s al tipo de dato esperado " -"%(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Extensión de descubrimiento desconocida: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "Tipo de metadatos desconocido. La clave (%s) no se podrá consultar." 
- -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"Se ha recibido un estado desconocido %(stat)s en el equilibrador de carga " -"%(id)s, se omitirá el ejemplo" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"Se ha recibido un estado desconocido %(stat)s en fw %(id)s, se omitirá el " -"ejemplo" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"Se ha recibido un estado desconocido %(stat)s en el escucha %(id)s, se " -"omitirá el ejemplo" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"Se ha recibido un estado desconocido %(stat)s en el miembro %(id)s, se " -"omitirá el ejemplo" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"Se ha recibido un estado desconocido %(stat)s en la agrupación %(id)s, se " -"omitirá el ejemplo" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"Se ha recibido un estado desconocido %(stat)s en vip %(id)s, se omitirá el " -"ejemplo" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"Se ha recibido un estado desconocido %(stat)s en vpn %(id)s, se omitirá el " -"ejemplo" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VM %s no se ha encontrado en VMware vSphere" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "No se han encontrado VM %s en XenServer" - -msgid "Wrong sensor type" -msgstr "Tipo de sensor incorrecto" - -msgid "XenAPI not installed" -msgstr "XenAPI no está instalado" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "Error de YAML al leer el archivo de definiciones %(file)s" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "no se ha podido obtener tiempo de CPU para %(id)s: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "" -"la opción directo no puede estar definida como true cuando Gnocchi esté " -"habilitado." - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "saliendo del ejemplo de orden de tiempo: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "eliminando la muestra sin predecesor: %s" - -msgid "ipmitool output length mismatch" -msgstr "la longitud de salida de ipmitool no coincide" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes y backup_count deben ser números." - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"ha fallado el análisis de datos de sensor IPMI,no se ha recuperado ningún " -"dato de la entrada" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "" -"ha fallado el análisis de datos de sensor IPMI,tipo de sensor desconocido" - -msgid "running ipmitool failure" -msgstr "fallo de ejecución de ipmitool" diff --git a/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po b/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po deleted file mode 100644 index deff1930..00000000 --- a/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,516 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. 
-# -# Translators: -# Corinne Verheyde , 2013 -# CHABERT Loic , 2013 -# Christophe kryskool , 2013 -# Corinne Verheyde , 2013-2014 -# EVEILLARD , 2013-2014 -# Francesco Vollero , 2015 -# Jonathan Dupart , 2014 -# CHABERT Loic , 2013 -# Maxime COQUEREL , 2014 -# Nick Barcet , 2013 -# Nick Barcet , 2013 -# Andrew Melim , 2014 -# Patrice LACHANCE , 2013 -# Patrice LACHANCE , 2013 -# Rémi Le Trocquer , 2014 -# EVEILLARD , 2013 -# Corinne Verheyde , 2013 -# Corinne Verheyde , 2013 -# Andreas Jaeger , 2016. #zanata -# Angelique Pillal , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-29 08:24+0000\n" -"Last-Translator: Angelique Pillal \n" -"Language: fr\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: French\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s n'a pas été trouvé" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"Le transformateur arithmétique doit utiliser au moins un mètre dans " -"l'expression '%s'" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"Impossible de créer la table %(table_name)s car elle existe déjà. Erreur " -"ignorée" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "Continue après l'erreur %(name)s: %(error)s " - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "Impossible de se connecter à l'hôte slave: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "Connexion impossible XenAPI: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "Ne peut pas recevoir l'utilisation CPU pour %(id)s: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "" -"Impossible de récupérer l'utilisation de la mémoire pour %(id)s : %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "Impossible d'obtenir l'utilisation CPU de la VM %s" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "Impossible d'obtenir l'adresse IP de l'instance %s" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "Suppression du %(type)s de notification (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"Erreur de libvirt lors de la recherche de l'instance : [Code d'erreur %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "Erreur lors de l'analyse syntaxique de la réponse: %s" - -msgid "Error stopping pollster." -msgstr "Erreur lors de l'arrêt du sondeur." - -msgid "Event" -msgstr "Événement" - -msgid "Expression evaluated to a NaN value!" -msgstr "Expression évaluée avec une valeur not-a-number !" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "Echec de l'importation de l'extension pour %(name)s: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"Echec de l'inspection des données de l'instance . 
" -"Le domaine est à l'état SHUTOFF (INTERRUPTION)." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"Echec de l'inspection de l'utilisation de la mémoire de %(instance_uuid)s. " -"Impossible d'obtenir des informations de libvirt : %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"Echec de l'inspection de l'utilisation de la mémoire de l'instance . Impossible d'obtenir des informations de libvirt." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "Échec du chargement de tous les gestionnaires de notification pour %s" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Echec de l'analyse syntaxique de la valeur d'horodatage %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "Echec de la publication des points de données %d. Suppression en cours" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "" -"Echec de la publication des points de données %d. Mettez-les en file " -"d'attente" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Filtre de l'expression n'est pas valide: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "L'instance %(name)s est ignorée (%(instance_id)s) : %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "instance %(name)s: %(error)s ignoré" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Loadbalancer %(loadbalancer_id)s ignoré" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Pool %(pool_id)s ignoré" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Syntaxe YAML non valide dans le fichier de définitions %(file)s à la ligne : " -"%(line)s, colonne : %(column)s." 
- -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Période %(period)s non valide : %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "Type de trait non valide '%(type)s' pour le trait %(trait)s" - -msgid "Limit must be positive" -msgstr "La limite doit être positive" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "" -"Plus d'un événement avec l'identificateur %s a été renvoyé à partir du " -"pilote de stockage" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Plusieurs machines virtuelles %s trouvées dans XenServer" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "Il faut indiquer connection_url et connection_password pour utiliser" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "Aucun plugin nommé %(plugin)s n'est disponible pour %(name)s" - -msgid "Node Manager init failed" -msgstr "Echec de l'initialisation du gestionnaire de noeud" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Non autorisé à accéder %(aspect)s %(id)s " - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "L'API OpenDaylight a renvoyé %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "L'API Opencontrail a renvoyé %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"Opérateur %(operator)s non supporté. Seul l'opérateur égalité est disponible " -"pour le champ %(field)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"L'opérateur %(operator)s n'est pas supporté. Les opérateurs supportés sont: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "L'expression de tri n'est pas valide : %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Erreur d'analyse dans la spécification JSONPath '%(jsonpath)s' pour " -"%(name)s : %(err)s" - -msgid "Period must be positive." -msgstr "La période doit être positive." - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "" -"Pipeline %(pipeline)s : statut %(status)s après erreur du diffuseur %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: Reprise après une erreur de l'éditeur %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "Pipeline %(pipeline)s: Erreur à la purge du transformateur %(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Pipeline %(pipeline)s: Sortie après erreur du transformateur %(trans)s pour " -"%(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "Plugin spécifié, mais aucun nom de plugin n'est fourni pour %s" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "L'interrogation du capteur %(mtr)s a échoué %(cnt)s fois !" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "Sondage de %(name)s %(cnt)s fois en échec!" 
- -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "Le pollster pour %s est désactivé !" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "Empêcher le pollster %(name)s d'interroger la source %(source)s !" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"La longueur maximale de local_queue du diffuseur est dépassée, suppression " -"des %d échantillons les plus anciens" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "La politique de publication est inconnue (%s) forcé le défaut" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "L'API AdminOps RGW a renvoyé %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "" -"La demande n'a pas réussi à se connecter à OpenDaylight avec l'API REST " -"NorthBound" - -#, python-format -msgid "Required field %s not specified" -msgstr "Champ requis %s non spécifiée" - -msgid "Resource" -msgstr "Ressource" - -msgid "Sample" -msgstr "Echantillon" - -msgid "Samples should be included in request body" -msgstr "Des exemples doivent être inclus dans le corps de demande" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Passer le chargement de l'extension pour %s" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "La chaine de caractère %s n'est pas valide isotime" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"Fichier Yaml qui définit le mappage entre les exemples et les ressources " -"gnocchi /les métriques" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Le type de données %(type)s n'est pas supporté. Les types de données " -"supportés sont: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "Le champ 'fields' est requis pour %s" - -msgid "The path for the file publisher is required" -msgstr "Le chemin du éditeur de fichier est obligatoire " - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: Impossible de décoder les données envoyées par %s" - -msgid "UDP: Unable to store meter" -msgstr "UDP: Impossible de stocker les mesures" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "Impossible de se connecter au serveur de base de données : %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Impossible de convertir la valeur %(value)s vers le type de données attendu " -"%(type)s." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "Impossible de découvrir les ressources: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "Impossible d'évaluer l'expression %(expr)s : %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "Impossible de charger l'éditeur %s " - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "Impossible de télécharger l'inspecteur hypervisor: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"Impossible de se reconnecter au serveur mongodb principal après %(retries)d " -"tentatives. Abandon." 
- -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"Impossible de se reconnecter au serveur mongodb principal : %(errmsg)s. " -"Nouvelle tentative dans %(retry_interval)d secondes." - -msgid "Unable to send sample over UDP" -msgstr "Impossible d'envoyer l'échantillon en UDP" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Exception inattendue lors de la conversion de %(value)s dans le type de " -"donnée attendue %(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Découverte d'une extension inconnue: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "Type de métadonnées inconnu, la clé (%s) n'est pas requêtable" - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"Statut %(stat)s inconnu reçu sur le Load Balancer %(id)s, échantillon ignoré" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "Etat %(stat)s inconnu reçu sur le pare-feu %(id)s, échantillon ignoré" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "Etat %(stat)s inconnu reçu sur le listener %(id)s, échantillon ignoré" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "Etat %(stat)s inconnu reçu sur le membre %(id)s, échantillon ignoré" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "Etat %(stat)s inconnu reçu sur le pool %(id)s, échantillon ignoré" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"Etat %(stat)s inconnu reçu sur l'IP virtuelle %(id)s, échantillon ignoré" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "Etat %(stat)s inconnu reçu sur le vpn %(id)s, échantillon ignoré" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "La machine virtuelle %s est introuvable dans VMware vSphere" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "VM %s non trouvé dans XenServer" - -msgid "Wrong sensor type" -msgstr "Type de détecteur incorrect" - -msgid "XenAPI not installed" -msgstr "XenAPI n'est pas installé" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "Erreur YAML lors de la lecture du fichier de définitions %(file)s" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "" -"Les URL d'alarmes ne sont pas disponibles lorsque Aodh est désactivé ou non " -"disponible." - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "impossible d'obtenir le temps UC pour %(id)s : %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "L'option directe ne peut pas être à vrai si Gnocchi est activé." - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "suppression de l'exemple de classement dans le temps : %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "abandon de l'échantillon sans prédécesseur: %s" - -msgid "ipmitool output length mismatch" -msgstr "Non-concordance de longueur de la sortie ipmitool" - -msgid "max_bytes and backup_count should be numbers." 
-msgstr "max_bytes et backup_count doivent etre des chiffres." - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"Echec de l'analyse des données du détecteur IPMI, aucune donnée extraite à " -"partir de l'entrée fournie" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "" -"Echec de l'analyse des données du détecteur IPMI, type de détecteur inconnu" - -msgid "running ipmitool failure" -msgstr "Echec d'exécution d'ipmitool" diff --git a/ceilometer/locale/it/LC_MESSAGES/ceilometer.po b/ceilometer/locale/it/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 6dba5a8e..00000000 --- a/ceilometer/locale/it/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,505 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Stefano Maffulli , 2013 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 02:04+0000\n" -"Last-Translator: Alessandra \n" -"Language: it\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Italian\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s non trovato" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"Il trasformatore aritmetico deve utilizzare almeno un contatore " -"nell'espressione '%s'" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"Impossibile creare la tabella %(table_name)s la tabella già esiste. " -"Ignorare l'errore" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "Continua dopo errore da %(name)s: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "Impossibile connettersi all'host slave: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "Impossibile connettersi a XenAPI: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "Impossibile ricevere CPU Util per %(id)s: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "Impossibile ricevere l'Uso della Memoria per %(id)s: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "Impossibile conoscere l'utilizzo CPU della VM %s" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "Impossibile ottenere l'indirizzo IP dell'istanza %s" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "Eliminazione della notifica %(type)s (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"Errore da libvirt durante la ricerca dell'istanza : [Codice di errore %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "Errore durante l'analisi della risposta HTTP: %s" - -msgid "Error stopping pollster." -msgstr "Errore durante l'arresto del sondaggio. " - -msgid "Event" -msgstr "Evento" - -msgid "Expression evaluated to a NaN value!" 
-msgstr "Espressione valutata a un valore NaN!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "Impossibile importare l'estensione per %(name)s: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"Impossibile ispezionare i dati dell'istanza , " -"stato dominio SHUTOFF." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"Impossibile ispezionare l'utilizzo della memoria da parte di " -"%(instance_uuid)s, impossibile ottenere informazioni da libvirt: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"Impossibile ispezionare l'utilizzo della memoria da parte dell'istanza , impossibile ottenere informazioni da libvirt." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "Impossibile caricare eventuali gestori di notifica per %s" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Impossibile analizzare il valore data/ora %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "Impossibile pubblicare %d datapoint, eliminati" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "Impossibile pubblicare %d datapoint, inseriti in coda" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Espressione del filtro non valida: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "L'istanza %(name)s (%(instance_id)s) viene ignorata: %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "Si sta ignorando l'istanza %(name)s: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Ignora loadbalancer %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Ignora pool %(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Sintassi YAML non valida nel file delle definizioni %(file)s alla riga: " -"%(line)s, colonna: %(column)s." 
- -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Periodo non valido %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "" -"Tipo di caratteristica non valido '%(type)s' per la caratteristica %(trait)s" - -msgid "Limit must be positive" -msgstr "Il limite deve essere un positivo" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "Più di un evento con id %s restituito dal driver di archiviazione" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Più VM %s trovate in XenServer" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "" -"È necessario specificare connection_url e connection_password da utilizzare" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "Nessun plug-in con nome %(plugin)s disponibile per %(name)s" - -msgid "Node Manager init failed" -msgstr "Inizializzazione gestore nodi non riuscita" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Non autorizzato ad accedere %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "L'API OpenDaylitght ha restituito %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "L'API Opencontrail ha restituito %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"Operatore %(operator)s non è supportato. Solo gli operatori di uguaglianza " -"sono disponibili per il campo %(field)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"Operatore %(operator)s non è supportato. Gli operatori supportati sono: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "L'espressione ordina per non è valida: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Errore di analisi nella specifica JSONPath '%(jsonpath)s' per %(name)s: " -"%(err)s" - -msgid "Period must be positive." -msgstr "Il periodo deve essere positivo" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: %(status)s dopo errore da publisher %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: Continuare dopo errore da publisher %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "" -"Pipeline %(pipeline)s: errore durante lo scaricamento del trasformatore " -"%(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Pipeline %(pipeline)s: Uscita dopo errore del trasformatore %(trans)s per " -"%(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "Plug-in specificato, ma nessun nome di plug-in fornito per %s" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "Polling del sensore %(mtr)s non riuscito per %(cnt)s volte!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "Polling di %(name)s non riuscito per %(cnt)s volte!" 
- -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "Pollster per %s disabilitato!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"Impedire al pollster %(name)s di eseguire il polling dell'origine %(source)s." - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"La lunghezza local_queue massima del publisher è stata superata, " -"eliminazione di esempi %d meno recenti" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "" -"La politica di pubblicazione è sconosciuta (%s), applicazione del valore " -"predefinito" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "L'API RGW AdminOps ha restituito %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "" -"Richiesta di collegamento a OpenDaylight con API NorthBound REST non riuscita" - -#, python-format -msgid "Required field %s not specified" -msgstr "Campo richiesto %s non specificato" - -msgid "Resource" -msgstr "Risorsa" - -msgid "Sample" -msgstr "Esempio" - -msgid "Samples should be included in request body" -msgstr "I campioni devono essere inclusi nel corpo della richiesta " - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Ignora caricamento dell'estensione per %s" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "La stringa %s non è un orario standard (isotime) valido" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"Il file Yaml che definisce l'associazione tra i campioni e le risorse " -"gnocchi/metriche" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Il tipo di dati %(type)s non è supportato. L'elenco dei tipi di dati " -"supportati è: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "Il campo 'fields' è obbligatorio per %s" - -msgid "The path for the file publisher is required" -msgstr "Il percorso per il publisher di file è obbligatorio" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: impossibile decodificare i dati inviati da %s" - -msgid "UDP: Unable to store meter" -msgstr "UDP: impossibile memorizzare il contatore" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "Impossibile connettersi al server di database: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Impossibile convertire il valore %(value)s nel tipo di dati previsto " -"%(type)s." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "Impossibile rilevare le risorse: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "Impossibile valutare l'espressione %(expr)s: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "Impossibile caricare il publisher %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "Impossibile caricare il programma di controllo hypervisor: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"Impossibile riconnettersi al mongodb primario dopo %(retries)d tentativi. " -"L'operazione viene interrotta." 
- -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"Impossibile connettersi al mongodb primario: %(errmsg)s. Prossimo tentativo " -"tra %(retry_interval)d secondi." - -msgid "Unable to send sample over UDP" -msgstr "Impossibile inviare l'esempio su UDP" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Eccezione non prevista durante la conversione di %(value)s per il tipo di " -"dati previsto %(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Estensione di rilevamento sconosciuta: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "" -"Tipo di metadati sconosciuto. La chiave (%s) non potrà essere sottoposta a " -"query." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su bilanciatore del carico %(id)s, " -"ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "Stato non conosciuto %(stat)s ricevuto su fw %(id)s,ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su listener %(id)s, ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su membro %(id)s, ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto sul pool %(id)s, ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su vip %(id)s, ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su vpn %(id)s, ignorare l'esempio" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VM %s non trovata in VMware vSphere" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "VM %s non trovata in XenServer" - -msgid "Wrong sensor type" -msgstr "Tipo di sensore errato" - -msgid "XenAPI not installed" -msgstr "XenAPI non installato" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "Errore YAML durante la lettura del file definizioni %(file)s" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "" -"alarm URLs non è disponibile con Aodh perché disabilitato oppure non " -"disponibile " - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "impossibile ricevere l'ora CPU per %(id)s: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "L'opzione direct non può essere true quando Gnocchi è abilitato." - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "rilascio campione ordinamento fuori tempo: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "eliminazione in corso dell'esempio senza predecessore: %s" - -msgid "ipmitool output length mismatch" -msgstr "mancata corrispondenza della lunghezza dell'output ipmitool" - -msgid "max_bytes and backup_count should be numbers." 
-msgstr "max_bytes e backup_count devono essere numeri." - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"analisi dei dati del sensore IPMI non riuscita, nessun dato recuperato " -"dall'input fornito" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "" -"analisi dei dati del sensore IPMI non riuscita, tipo di sensore sconosciuto" - -msgid "running ipmitool failure" -msgstr "errore nell'esecuzione ipmitool" diff --git a/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po b/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 1c44ae93..00000000 --- a/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,506 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Tomoyuki KATO , 2013 -# Andreas Jaeger , 2016. #zanata -# 笹原 昌美 , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-16 11:33+0000\n" -"Last-Translator: 笹原 昌美 \n" -"Language: ja\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Japanese\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s が見つかりません" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"演算変換プログラムは、式 '%s' で少なくとも 1 つのメーターを使用する必要があり" -"ます" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"テーブル %(table_name)s は既に存在するため、作成できません。エラーを無視しま" -"す" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "%(name)s からのエラーの後で続行します: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "スレーブホストに接続できませんでした: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "XenAPI に接続できませんでした: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "%(id)s の CPU 使用率を取得できませんでした: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "%(id)s のメモリー使用量を取得できませんでした: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "VM %s のCPU 使用率を取得できませんでした" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "インスタンス %s の IP アドレスを取得できませんでした" - -#, fuzzy, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "通知 %(type)s を除去しています (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"インスタンス の検索中に libvirt でエラーが発生しま" -"した: [エラーコード %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "HTTP 応答を解析している際にエラーが発生しました: %s" - -msgid "Error stopping pollster." -msgstr "pollster 停止エラー。" - -msgid "Event" -msgstr "イベント" - -msgid "Expression evaluated to a NaN value!" -msgstr "式が NaN 値に評価されました。" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "%(name)s の拡張機能のインポートに失敗しました: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." 
-msgstr "" -"インスタンス のデータを検査できませんでした。ドメ" -"イン状態は SHUTOFF です。" - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"%(instance_uuid)s のメモリー使用状況を検査できませんでした。libvirt から情報" -"を取得できません: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"インスタンス のメモリー使用状況を検査できませんで" -"した。libvirt から情報を取得できません。" - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "%s の通知ハンドラーをロードできませんでした" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "タイムスタンプ値 %s を解析できませんでした" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "%d データポイントの公開に失敗しました。これらは廃棄されます" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "%d データポイントの公開に失敗しました。これらをキューに入れてください" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "フィルター式が無効です: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "インスタンス %(name)s (%(instance_id)s) を無視しています: %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "インスタンス %(name)s を無視しています: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "ロードバランサー %(loadbalancer_id)s を無視しています" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "プール %(pool_id)s を無視しています" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"%(line)s 行目の %(column)s 列で定義ファイル %(file)s の YAML 構文 が無効で" -"す。" - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "無効な期間 %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "特性 %(trait)s の特性タイプ '%(type)s' が無効です" - -msgid "Limit must be positive" -msgstr "上限は正の値でなければなりません" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "ストレージドライバーから id %s のイベントが複数返されました" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "複数の VM %s が XenServer に見つかりました" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "" -"connection_url と、使用する connection_password を指定する必要があります" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "%(name)s に使用できる %(plugin)s という名前のプラグインがありません" - -msgid "Node Manager init failed" -msgstr "ノードマネージャーの初期化に失敗しました" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "%(aspect)s %(id)s にアクセスする権限がありません" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylitght API から %(status)s %(reason)s が返されました" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail API から %(status)s %(reason)s が返されました" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"演算子 %(operator)s はサポートされていません。フィールド %(field)s で使用でき" -"るのは等価演算子のみです。" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. 
The supported operators are: " -"%(supported)s" -msgstr "" -"演算子 %(operator)s はサポートされていません。サポートされている演算子は " -"%(supported)s です。" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "order-by 式が無効です: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"%(name)s に関する JSONPath の指定 '%(jsonpath)s' のエラーを解析します: " -"%(err)s" - -msgid "Period must be positive." -msgstr "期間は正の数でなければなりません。" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "" -"パイプライン %(pipeline)s: パブリッシャー %(pub)s からのエラーの発生後の " -"%(status)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "" -"パイプライン %(pipeline)s: パブリッシャー %(pub)s からのエラーの後で続行しま" -"す" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "" -"パイプライン %(pipeline)s: 変換プログラム %(trans)s をフラッシュするときにエ" -"ラーが発生しました" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"パイプライン %(pipeline)s: %(smp)s について変換プログラム %(trans)s からエ" -"ラーが発生した後に終了します" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "プラグインが指定されていますが、%s にプラグイン名が提供されていません" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "センサー %(mtr)s のポーリングが %(cnt)s 回失敗しました" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "ポーリング %(name)s が %(cnt)s 回失敗しました" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "%s の pollster が無効になっています" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"pollster %(name)s がこれ以上ソース %(source)s をポーリングしないようにしてく" -"ださい" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"パブリッシャー local_queue 最大長を超えました。古い方から %d 個のサンプルを除" -"去します" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "パブリッシュポリシーが不明です (%s)。強制的にデフォルトに設定されます" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "RGW AdminOps API から %(status)s %(reason)s が返されました" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "NorthBound REST API を使用した OpenDaylight への接続要求が失敗しました" - -#, python-format -msgid "Required field %s not specified" -msgstr "必須フィールド %s が指定されていません" - -msgid "Resource" -msgstr "リソース" - -msgid "Sample" -msgstr "サンプル" - -msgid "Samples should be included in request body" -msgstr "サンプルは要求本文に含まれる必要があります" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "%s の拡張機能のロードをスキップします" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "文字列 %s は無効な isotime です" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"サンプルと gnocchi のリソース/メトリクス間のマッピングを定義する Yaml ファイ" -"ル" - -#, python-format -msgid "" -"The data type %(type)s is not supported. 
The supported data type list is: " -"%(supported)s" -msgstr "" -"データ型 %(type)s はサポートされていません。サポートされているデータ型のリス" -"ト: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "%s にはフィールド 'fields' が必要です" - -msgid "The path for the file publisher is required" -msgstr "ファイルパブリッシャーのパスが必要です" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: %s から送信されたデータをデコードできません" - -msgid "UDP: Unable to store meter" -msgstr "UDP: メーターを保存できません" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "データベースサーバーに接続できません: %(errmsg)s。" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "値 %(value)s を、想定されるデータ型 %(type)s に変換できません。" - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "リソースを検出できません: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "式 %(expr)s を評価できません: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "パブリッシャー %s をロードできません" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "ハイパーバイザーインスペクターをロードできません: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"%(retries)d 回の再試行後、1 次 mongodb に再接続できません。中止します。" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"プライマリー mongodb に再接続できません: %(errmsg)s。%(retry_interval)d 秒以" -"内に再試行します。" - -msgid "Unable to send sample over UDP" -msgstr "UDP 経由でサンプルを送信できません" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"%(value)s を想定されるデータ型 %(type)s に変換する際に、想定しない例外が発生" -"しました。" - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "不明なディスカバリーエクステンション: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." 
-msgstr "不明なメタデータ種別です。キー (%s) は照会不可になります。" - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"ロードバランサー %(id)s で不明な状態 %(stat)s を受信しました。サンプルをス" -"キップします" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"ファイアウォール %(id)s で不明な状態 %(stat)s を受信しました。サンプルをス" -"キップします" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"リスナー %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップしま" -"す" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"メンバー %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップしま" -"す" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"プール %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"仮想 IP %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"vpn %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VMware vSphere で VM %s が見つかりません" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "VM %s が XenServer に見つかりません" - -msgid "Wrong sensor type" -msgstr "センサー種別が正しくありません" - -msgid "XenAPI not installed" -msgstr "XenAPI がインストールされていません" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "定義ファイル %(file)s での読み取りの YAML エラー" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "" -"Aodh が無効化されるか使用不可の場合、URL が使用できないことを警告します。" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "%(id)s の CPU 時間を取得できませんでした: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "" -"Gnocchi を有効化した場合は、direct オプションを True に設定することはできませ" -"ん。" - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "期限切れのオーダーサンプルを廃棄しています: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "サンプル (先行なし) を廃棄しています: %s" - -msgid "ipmitool output length mismatch" -msgstr "ipmitool 出力の長さが一致しません" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes と backup_count は数値でなければなりません。" - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"IPMI センサーデータの解析に失敗しました。指定された入力からデータが取得されま" -"せんでした" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "IPMI センサーデータの解析に失敗しました。不明なセンサー種別です。" - -msgid "running ipmitool failure" -msgstr "ipmitool の実行に失敗しました" diff --git a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po b/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po deleted file mode 100644 index 0d0ad486..00000000 --- a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po +++ /dev/null @@ -1,135 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -# Sungjin Kang , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-07 03:38+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language-Team: Korean (South Korea)\n" -"Language: ko-KR\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#, python-format -msgid "Cannot load inspector %(name)s: %(err)s" -msgstr "%(name)s 검사기를 로드할 수 없음: %(err)s" - -#, python-format -msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" -msgstr "%(id)s의 상주 메모리 사용을 가져올 수 없음 : %(e)s" - -#, python-format -msgid "Dispatcher failed to handle the %s, requeue it." -msgstr "디스패처에서 %s을(를) 처리하지 못하여 다시 대기열에 둡니다." - -msgid "Error connecting to coordination backend." -msgstr "조정 백엔드를 연결하는 중에 오류가 발생했습니다." - -msgid "Error getting group membership info from coordination backend." -msgstr "조정 백엔드에서 그룹 멤버십 정보를 가져오는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error joining partitioning group %s, re-trying" -msgstr "" -"파티션 지정 그룹 %s을(를) 결합하는 중에 오류가 발생하여, 다시 시도 중입니다." - -#, python-format -msgid "Error processing event and it will be dropped: %s" -msgstr "이벤트 처리 중 오류가 발생하므로 삭제됨: %s" - -msgid "Error sending a heartbeat to coordination backend." -msgstr "하트비트를 조정 백엔드에서 보내는 중에 오류가 발생했습니다." - -msgid "Fail to process a notification" -msgstr "알림을 처리하는 데 실패" - -msgid "Fail to process notification" -msgstr "알림을 처리하는 데 실패" - -msgid "Failed to connect to Gnocchi." -msgstr "Gnocchi에 연결하지 못했습니다." - -#, python-format -msgid "Failed to connect to Kafka service: %s" -msgstr "Kafka 서비스에 연결하는 데 실패: %s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" -msgstr "DB에 연결하는 데 실패, %(purpose)s 용도를 나중에 다시 시도: %(err)s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" -msgstr "DB에 연결하는 데 실패, %(purpose)s 용도를 나중에 다시 시도: %(err)s" - -#, python-format -msgid "Failed to load resource due to error %s" -msgstr "%s 오류로 인해 자원을 로드하는 데 실패" - -#, python-format -msgid "Failed to record event: %s" -msgstr "이벤트를 기록하는 데 실패: %s" - -msgid "Failed to retry to send sample data with max_retry times" -msgstr "샘플 데이터를 max_retry 횟수만큼 보내는 데 실패" - -msgid "" -"Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " -"not part of group and cannot take tasks" -msgstr "" -"그룹 ID: %{group_id}s, 멤버: %{members}s, 사용자: %{me}s: 현재 에이전트가 그" -"룹의 일부가 아니므로 작업을 수행할 수 없음" - -#, python-format -msgid "Invalid type %s specified" -msgstr "올바르지 않은 유형 %s이(가) 지정됨" - -#, python-format -msgid "Missing field %s" -msgstr "%s 필드 누락" - -msgid "Passed resource dict must contain keys resource_id and resource_url." -msgstr "전달된 자원 dict에 키 resource_id와 resource_url이 포함되어야 합니다." - -#, python-format -msgid "Required field %(field)s should be a %(type)s" -msgstr "필수 필드 %(field)s은(는) %(type)s이어야 함" - -#, python-format -msgid "Required field %s not specified" -msgstr "필수 필드 %s이(가) 지정되지 않음" - -#, python-format -msgid "Required fields %s not specified" -msgstr "필수 필드 %s이(가) 지정되지 않음" - -#, python-format -msgid "Skip invalid resource %s" -msgstr "올바르지 않은 자원 %s 건너뛰기" - -#, python-format -msgid "Skipping %(name)s, keystone issue: %(exc)s" -msgstr "%(name)s 건너뛰기, keystone 문제: %(exc)s" - -msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" -msgstr "상태 코드: %{code}s. 
이벤트를 디스패치하는 데 실패: %{event}s" - -#, python-format -msgid "Unable to load changed event pipeline: %s" -msgstr "변경된 이벤트 파이프라인을 로드할 수 없음: %s" - -#, python-format -msgid "Unable to load changed pipeline: %s" -msgstr "변경된 파이프라인을 로드할 수 없음: %s" - -#, python-format -msgid "Unrecognized type value %s" -msgstr "인식되지 않은 유형 값 %s" - -#, python-format -msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" -msgstr "%(ident)s 호스트 %(host)s의 검사기 호출에 실패: %(err)s" diff --git a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po b/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po deleted file mode 100644 index d3fe6a27..00000000 --- a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po +++ /dev/null @@ -1,128 +0,0 @@ -# Sungjin Kang , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev57\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-18 02:09+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-07 03:39+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language-Team: Korean (South Korea)\n" -"Language: ko-KR\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#, python-format -msgid "%d events are removed from database" -msgstr "데이터베이스에서 %d 이벤트가 제거됨" - -#, python-format -msgid "%d samples removed from database" -msgstr "데이터베이스에서 %d 샘플이 제거됨" - -msgid "Configuration:" -msgstr "구성:" - -#, python-format -msgid "Connecting to %(db)s on %(nodelist)s" -msgstr "%(nodelist)s에서 %(db)s에 연결 중 " - -msgid "Coordination backend started successfully." -msgstr "조정 백엔드가 성공적으로 시작되었습니다." - -#, python-format -msgid "Definitions: %s" -msgstr "정의: %s" - -msgid "Detected change in pipeline configuration." -msgstr "파이프라인 구성의 변경을 발견했습니다." - -#, python-format -msgid "Dropping event data with TTL %d" -msgstr "TTL이 %d인 이벤트 데이터 삭제" - -#, python-format -msgid "Dropping metering data with TTL %d" -msgstr "TTL이 %d인 측정 데이터 삭제" - -#, python-format -msgid "Duplicate event detected, skipping it: %s" -msgstr "중복 이벤트가 발견되어 해당 이벤트를 건너뜀: %s" - -msgid "Expired residual resource and meter definition data" -msgstr "잔여 자원 및 측정 정의 데이터 만료됨" - -#, python-format -msgid "Index %s will be recreate." -msgstr "%s 인덱스가 다시 생성됩니다." - -#, python-format -msgid "Joined partitioning group %s" -msgstr "결합된 파티션 그룹 %s" - -#, python-format -msgid "Left partitioning group %s" -msgstr "남은 파티션 그룹 %s" - -#, python-format -msgid "No limit value provided, result set will be limited to %(limit)d." -msgstr "한계 값이 제공되지 않음, 결과 세트가 %(limit)d(으)로 제한됩니다." - -msgid "Nothing to clean, database event time to live is disabled" -msgstr "정리할 사항이 없음, 데이터베이스 이벤트 지속 시간(TTL)이 사용되지 않음" - -msgid "Nothing to clean, database metering time to live is disabled" -msgstr "정리할 사항이 없음, 데이터베이스 측정 지속 시간(TTL)이 사용되지 않음" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " -"%(param)s" -msgstr "" -"파이프라인 %(pipeline)s: %(param)s 매개변수로 변환기 인스턴스 %(name)s 설정 " - -#, python-format -msgid "Pipeline config: %s" -msgstr "파이프라인 구성: %s" - -msgid "Pipeline configuration file has been updated." -msgstr "파이프라인 구성 파일이 업데이트되었습니다." - -#, python-format -msgid "Polling pollster %(poll)s in the context of %(src)s" -msgstr "%(src)s 컨텍스트의 의견조사자 %(poll)s 폴링" - -#, python-format -msgid "Publishing policy set to %s" -msgstr "공개 정책이 %s(으)로 설정됨" - -msgid "Reconfiguring polling tasks." -msgstr "폴링 작업을 재구성합니다." - -msgid "Reloading notification agent and listeners." 
-msgstr "알림 에이전트와 리스너를 다시 로드합니다." - -#, python-format -msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" -msgstr "Pollster %(name)s 건너뛰기, %(p_context)s 자원에서 이 주기를 발견함" - -#, python-format -msgid "Starting server in PID %s" -msgstr "PID %s의 서버 시작" - -msgid "detected decoupled pipeline config format" -msgstr "비결합 파이프라인 구성 형식 발견" - -#, python-format -msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" -msgstr "%(resource_id)s의 측정 데이터 %(counter_name)s: %(counter_volume)s" - -#, python-format -msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" -msgstr "0.0.0.0:%(sport)s에서 전달 중, http://127.0.0.1:%(vport)s에서 보기" - -#, python-format -msgid "serving on http://%(host)s:%(port)s" -msgstr "http://%(host)s:%(port)s에서 전달 중" diff --git a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po b/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po deleted file mode 100644 index 26ff24ac..00000000 --- a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po +++ /dev/null @@ -1,155 +0,0 @@ -# Sungjin Kang , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-07 03:34+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language-Team: Korean (South Korea)\n" -"Language: ko-KR\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -msgid "Can't connect to keystone, assuming aodh is disabled and retry later." -msgstr "" -"Keystone에 연결할 수 없습니다 . Aodh가 사용되지 않는다고 가정하여 나중에 다" -"시 시도합니다." - -msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later" -msgstr "" -"Keystone에 연결할 수 없습니다. Gnocchi가 사용되지 않는다고 가정하여 나중에 다" -"시 시도합니다." - -msgid "" -"Cannot extract tasks because agent failed to join group properly. Rejoining " -"group." -msgstr "" -"에이전트가 적절하게 그룹을 결합하지 못했으므로 작업을 추출할 수 없습니다. 그" -"룹을 다시 결합합니다." - -#, python-format -msgid "" -"Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: " -"%(exc)s" -msgstr "" -"%(instance_id)s의 %(pollster)s 데이터를 검사할 수 없습니다. 치명적이지 않은 " -"이유: %(exc)s" - -#, python-format -msgid "Dropping out of time order sample: %s" -msgstr "시간 순서 샘플에서 삭제: %s" - -#, python-format -msgid "Dropping sample with no predecessor: %s" -msgstr "선행 작업이 없는 샘플 삭제: %s" - -#, python-format -msgid "Duplicated values: %s found in CLI options, auto de-duplicated" -msgstr "중복된 값: CLI 옵션에 %s이(가) 있습니다. 자동으로 중복이 해제됩니다." - -#, python-format -msgid "Failed to load any dispatchers for %s" -msgstr "%s의 디스패처를 로드하는 데 실패" - -#, python-format -msgid "" -"Failed to parse date from set fields, both fields %(start)s and %(end)s must " -"be datetime: %(err)s" -msgstr "" -"설정 필드에서 데이터를 구문 분석하는 데 실패, 두 필드 %(start)s 와 %(end)s은" -"(는) 모두 datetime임: %(err)s" - -#, python-format -msgid "Ignore unrecognized field %s" -msgstr "인식되지 않는 필드 %s 무시" - -#, python-format -msgid "Invalid status, skipping IP address %s" -msgstr "올바르지 않은 상태, IP 주소 %s 건너뛰기" - -msgid "Negative delta detected, dropping value" -msgstr "음수의 델타가 발견되어 값을 삭제함" - -#, python-format -msgid "No endpoints found for service %s" -msgstr "%s 서비스의 엔드포인트를 찾을 수 없음" - -msgid "" -"Non-metric meters may be collected. It is highly advisable to disable these " -"meters using ceilometer.conf or the pipeline.yaml" -msgstr "" -"비측정 미터를 수집할 수 없습니다. 
celometer.conf 또는 pipeline.yaml을 사용하" -"여 이러한 미터를 사용하지 않게 설정하는 것이 좋습니다." - -#, python-format -msgid "" -"Skipping %(name)s, %(service_type)s service is not registered in keystone" -msgstr " %(name)s, %(service_type)s 서비스 건너뛰기는 keystone에 등록되지 않음" - -#, python-format -msgid "Skipping duplicate meter definition %s" -msgstr "중복 측정 정의 %s 건너뛰기" - -msgid "" -"Timedelta plugin is required two timestamp fields to create timedelta value." -msgstr "" -"Timedelta 플러그인에서 timedelta 값을 생성하려면 두 개의 시간소인 필드가 필요" -"합니다." - -msgid "" -"ceilometer-api started with aodh enabled. Alarms URLs will be redirected to " -"aodh endpoint." -msgstr "" -"Aodh가 사용된 상태로 ceilometer-api가 시작되었습니다. 알람 URL이 aodh 엔드포" -"인트로 경로가 재지정됩니다." - -msgid "" -"ceilometer-api started with gnocchi enabled. The resources/meters/samples " -"URLs are disabled." -msgstr "" -"Gnocchi를 사용한 상태로 ceilometer-api가 시작되었습니다. 자원/측정/샘플 URL" -"을 사용하지 않습니다." - -#, python-format -msgid "event signature invalid, discarding event: %s" -msgstr "이벤트 서명이 올바르지 않아 이벤트를 삭제함: %s" - -#, python-format -msgid "" -"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no " -"volume (volume: None), the sample will be dropped" -msgstr "" -"%(resource_id)s @ %(timestamp)s의 측정 데이터 %(counter_name)s에 볼륨" -"(volume: None)이 없으므로 샘플이 삭제됩니다." - -#, python-format -msgid "" -"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has " -"volume which is not a number (volume: %(counter_volume)s), the sample will " -"be dropped" -msgstr "" -"%(resource_id)s @ %(timestamp)s의 측정 데이터 %(counter_name)s에 번호" -"(volume: %(counter_volume)s)가 아닌 볼륨이 있으므로, 샘플이 삭제됩니다." - -msgid "" -"pecan_debug cannot be enabled, if workers is > 1, the value is overrided " -"with False" -msgstr "" -"pecan_debug를 사용하도록 설정할 수 없습니다. 작업자가 > 1이면 값이 False로 겹" -"쳐씁니다." - -#, python-format -msgid "" -"split plugin is deprecated, add \".`split(%(sep)s, %(segment)d, " -"%(max_split)d)`\" to your jsonpath instead" -msgstr "" -"분할 플러그인은 더 이상 사용되지 않음, 대신 \".`split(%(sep)s, %(segment)d, " -"%(max_split)d)`\"을(를) jsonpath에 추가" - -#, python-format -msgid "unable to configure oslo_cache: %s" -msgstr "oslo_cache를 구성할 수 없음: %s" diff --git a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po b/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po deleted file mode 100644 index fd8c29b2..00000000 --- a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,484 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Seong-ho Cho , 2014 -# Seunghyo Chun , 2013 -# Seunghyo Chun , 2013 -# Sungjin Kang , 2013 -# Sungjin Kang , 2013 -# Andreas Jaeger , 2016. #zanata -# Sungjin Kang , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-07 03:44+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language: ko-KR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Korean (South Korea)\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s을(를) 찾을 수 없음" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "'%s' 표현식에서 산술 변환기는 하나 이상의 미터를 사용해야 함" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "%(table_name)s 테이블을 작성할 수 없음, 이미 존재합니다. 오류 무시" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "%(name)s에서 오류 후 계속: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "슬레이브 호스트를 연결할 수 없음: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "XenAPI를 연결할 수 없음: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "%(id)s에 대해 CPU Util을 가져올 수 없음: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "%(id)s에 대한 메모리 사용을 가져올 수 없음: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "VM %s CPU 이용률을 가져올 수 없음" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "%s 인스턴스의 IP 주소를 얻을 수 없음" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "알림 %(type)s 삭제 중(uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"인스턴스 검색 중 libvirt에서 오류 발생: [오류 코" -"드 %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "HTTP 응답 구문 분석 중 오류 발생: %s" - -msgid "Error stopping pollster." -msgstr "의견조사자를 중지하는 중에 오류가 발생했습니다. " - -msgid "Event" -msgstr "이벤트" - -msgid "Expression evaluated to a NaN value!" -msgstr "표현식이 NaN 값으로 평가되었습니다!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "%(name)s 확장자를 가져오는 데 실패함: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"인스턴스 <이름=%(name)s, id=%(id)s>의 데이터 검사 실패, 도메인 상태가 SHUTOFF" -"입니다." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"%(instance_uuid)s의 메모리 사용량 검사 실패, libvirt에서 정보를 가져올 수 없" -"음: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"인스턴스 <이름=%(name)s, id=%(id)s>의 메모리 사용량 검사 실패, libvirt에서 정" -"보를 가져올 수 없습니다." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "%s의 알림 핸들러 로드 실패" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "시간소인 값 %s 구문 분석 실패" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "%d 데이터포인트 공개 실패. 이를 삭제하는 중" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "%d 데이터포인트 공개 실패. 
이를 큐에 대기시킴" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "필터 표현식이 올바르지 않음: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "인스턴스 %(name)s (%(instance_id)s) 무시 중: %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "인스턴스 %(name)s 무시 중: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "로드 밸런서 %(loadbalancer_id)s 무시" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "풀 %(pool_id)s 무시" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"다음에서 정의 파일 %(file)s의 올바르지 않은 YAML 구문: 행: %(line)s, 열: " -"%(column)s" - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "올바르지 않은 기간 %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "특성 %(trait)s에 대한 올바르지 않은 특성 유형 '%(type)s'" - -msgid "Limit must be positive" -msgstr "제한 값은 양수여야 합니다." - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "ID가 %s인 둘 이상의 이벤트가 스토리지 드라이버에서 리턴됨" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "여러 VM %s을(를) XenServer에서 찾음 " - -msgid "Must specify connection_url, and connection_password to use" -msgstr "사용할 connection_url 및 connection_password를 지정해야 함 " - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "%(name)s에 대해 %(plugin)s(이)라는 플러그인을 사용할 수 없음" - -msgid "Node Manager init failed" -msgstr "노드 관리자 초기화 실패" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "%(aspect)s %(id)s에 대한 액세스 권한이 부여되지 않음" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylitght API가 %(status)s 리턴: %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail API가 %(status)s 리턴: %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"연산자 %(operator)s이(가) 지원되지 않습니다. 필드 %(field)s에는 등호 연산자" -"만 사용할 수 있습니다." - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"연산자 %(operator)s이(가) 지원되지 않습니다. 지원되는 연산자는 %(supported)s" -"입니다. " - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Order-by 표현식이 올바르지 않음: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -" %(name)s에 대한 JSONPath 스펙 '%(jsonpath)s'의 구문 분석 오류: %(err)s" - -msgid "Period must be positive." -msgstr "기간은 양수여야 합니다. 
" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "파이프라인 %(pipeline)s: 공개자 %(pub)s에서 오류 후 %(status)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "파이프라인 %(pipeline)s: 공개자 %(pub)s에서 오류 후 계속" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "파이프라인 %(pipeline)s: 변환기 %(trans)s을(를) 비우는 중 오류 발생" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "파이프라인 %(pipeline)s: %(smp)s의 변환기 %(trans)s에서 오류 후 종료" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "플러그인이 지정되지 않았지만, %s에 플러그인 이름이 제공되지 않음" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "폴링 %(mtr)s 센서가 %(cnt)s번 실패했습니다!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "폴링 %(name)s이(가) %(cnt)s번 실패했습니다!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "%s의 의견조사자가 사용 안함으로 설정되어 있습니다!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"의견조사자 %(name)s이(가) 소스 %(source)s를 더 이상 폴링하지 않도록 하십시오!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "공개자 최대 local_queue 길이가 초과됨. %d 가장 오래된 샘플 삭제 중" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "공개 정책을 알 수 없음(%s). 기본값으로 강제 설정함" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "RGW AdminOps API가 %(status)s %(reason)s을(를) 리턴함" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "요청이 NorthBound REST API로 OpenDaylight에 연결하는 데 실패함" - -#, python-format -msgid "Required field %s not specified" -msgstr "필수 필드 %s이(가) 지정되지 않음" - -msgid "Resource" -msgstr "리소스" - -msgid "Sample" -msgstr "샘플" - -msgid "Samples should be included in request body" -msgstr "샘플이 요청 본문에 포함되어야 함" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "%s 확장자 로드 건너뛰기" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "문자열 %s이(가) 올바른 등시간이 아님" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "샘플과 gnocchi resources/ 메트릭 간 맵핑을 정의하는 Yaml 파일" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"데이터 유형 %(type)s이(가) 지원되지 않습니다. 지원되는 데이터 유형 목록은 " -"%(supported)s입니다." - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "%s에 'fields' 필드 필요" - -msgid "The path for the file publisher is required" -msgstr "파일 공개자의 경로가 필요함" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr " UDP: %s이(가) 보낸 데이터를 해독할 수 없습니다" - -msgid "UDP: Unable to store meter" -msgstr "UDP: 측정을 저장할 수 없습니다" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "데이터베이스 서버에 연결할 수 없음: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "%(value)s 값을 예상 데이터 유형 %(type)s(으)로 변환할 수 없습니다." 
- -#, python-format -msgid "Unable to discover resources: %s" -msgstr "자원을 검색할 수 없음: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "%(expr)s 표현식을 평가할 수 없음: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "공개자 %s을(를) 로드할 수 없음" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "하이퍼바이저 검사기를 로드할 수 없음: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"%(retries)d회 재시도한 이후에는 1차 mongodb에 다시 연결할 수 없습니다. 포기하" -"는 중입니다." - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"1차 mongodb에 다시 연결할 수 없음: %(errmsg)s. %(retry_interval)d초 후에 다" -"시 시도합니다." - -msgid "Unable to send sample over UDP" -msgstr "UDP를 통해 샘플을 전송할 수 없음" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"%(value)s을(를) 예상된 데이터 유형으로 변환하는 중에 예상치 않은 예외 발생 " -"%(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "알 수 없는 검색 확장자: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "알 수 없는 메타데이터 유형입니다. 키(%s)를 조회할 수 없습니다." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"로드 밸런서 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"fw %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "리스너 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"멤버 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"풀 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"vip %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "vpn%(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VM %s을(를) VMware vSphere에서 찾을 수 없음" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "VM %s을(를) XenServer에서 찾을 수 없음 " - -msgid "Wrong sensor type" -msgstr "잘못된 센서 유형" - -msgid "XenAPI not installed" -msgstr "XenAPI가 설치되지 않음" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "정의 파일 %(file)s을(를) 읽는 중에 YAML 오류 발생" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "" -"Aodh를 사용하지 않게 설정하거나 사용할 수 없는 경우 경보 URL을 사용할 수 없습" -"니다." - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "%(id)s의 CPU 시간을 가져올 수 없음: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "Gnocchi를 사용할 때 직접 옵션은 true일 수 없습니다." - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "시간 순서 샘플에서 벗어남: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "선행 작업이 없는 샘플 삭제: %s" - -msgid "ipmitool output length mismatch" -msgstr "ipmitool 출력 길이 불일치" - -msgid "max_bytes and backup_count should be numbers." 
-msgstr "max_bytes 및 backup_count는 숫자여야 합니다." - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"IPMI 센서 데이터 구문 분석에 실패했음, 제공된 입력에서 검색된 데이터가 없음" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "IPMI 센서 데이터 구문 분석에 실패했음, 알 수 없는 센서 유형" - -msgid "running ipmitool failure" -msgstr "ipmitool 실행 실패" diff --git a/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po b/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 5afe8033..00000000 --- a/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,492 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Gabriel Wainer, 2013 -# Gabriel Wainer, 2013 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-22 11:05+0000\n" -"Last-Translator: Carlos Marques \n" -"Language: pt-BR\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Portuguese (Brazil)\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s Não Encontrada" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"O transformador aritmético deve usar pelo menos um medidor na expressão '%s'" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"Não é possível criar a tabela %(table_name)s; ela já existe. Ignorando erro" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "Continuar após erro de %(name)s: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "Não foi possível conectar-se ao host escravo: %s" - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "Não foi possível conectar-se ao XenAPI: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "Não foi possível obter Uso de CPU para %(id)s: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "Não foi possível obter de Uso de Memória para %(id)s: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "Não foi possível obter a utilização de CPU da máquina virtual %s" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "Não foi possível obter o endereço IP da instância %s" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "Descartando Notificação %(type)s (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"Erro de libvirt ao consultar instância : [Código " -"de Erro %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "Erro ao analisar a resposta de HTTP: %s" - -msgid "Error stopping pollster." -msgstr "Erro ao parar pesquisador. " - -msgid "Event" -msgstr "Evento" - -msgid "Expression evaluated to a NaN value!" -msgstr "Expressão avaliada para um valor NaN!" 
- -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "Falha ao importar extensão para %(name)s: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"Falha ao inspecionar os dados da instância , " -"estado do domínio é SHUTOFF." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"Falha ao inspecionar o uso da memória de %(instance_uuid)s, não é possível " -"obter informações a partir de libvirt: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"Falha ao inspecionar o uso da memória da instância , não é possível obter informações a partir de libvirt." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "Falha ao carregar qualquer manipulador de notificações para %s" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Falha ao analisar o valor do registro de data e hora %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "Falha ao publicar %d pontos de dados, descartando-os" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "Falha ao publicar %d pontos de dados, enfileire-os" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Expressão de filtro inválida: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "Ignorando a instância %(name)s (%(instance_id)s): %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "Ignorando a instância %(name)s: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Ignorando loadbalancer %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Ignorando conjunto%(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Sintaxe YAML inválida no arquivo de definições %(file)s na linha: %(line)s, " -"coluna: %(column)s." 
- -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Período inválido %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "Tipo de traço inválido '%(type)s' para traço %(trait)s" - -msgid "Limit must be positive" -msgstr "Limite deve ser positivo" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "" -"Mais de um evento com o ID %s retornado a partir do driver de armazenamento" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Várias máquinas virtuais %s localizadas no XenServer" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "connection_url e connection_password devem ser especificados para uso" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "Nenhum plug-in nomeado %(plugin)s disponível para %(name)s" - -msgid "Node Manager init failed" -msgstr "Inicialização do gerenciador de nó com falha" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Não Autorizado a acessar %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "API OpenDaylitght retornou %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "A API Opencontrail retornou%(status)s%(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"O operador %(operator)s não é suportado. Somente operador de igualdade está " -"disponível para o campo %(field)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"O operador %(operator)s não é suportado. Os operadores suportados são: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Expressão solicitada inválida: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Erro de análise na especificação JSONPath '%(jsonpath)s' para %(name)s: " -"%(err)s" - -msgid "Period must be positive." -msgstr "Período deve ser positivo." - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: %(status)s após erro do publicador %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: Continue após erro do publicador %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "Pipeline %(pipeline)s: Erro ao limpar transformador %(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Pipeline %(pipeline)s: Saia após erro do transformador %(trans)s para %(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "Plug-in especificado, mas nenhum nome de plug-in fornecido para %s" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "O sensor de pesquisa %(mtr)s falhou para %(cnt)s vezes!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "A pesquisa %(name)s falhou para %(cnt)s vezes!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "O pesquisador para %s está desativado!" 
- -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"Evite o pesquisador %(name)s para a origem de pesquisa %(source)s atualmente!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"Comprimento máximo de local_queue do publicador foi excedido, descartando %d " -"amostras antigas" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "Publicando política desconhecida (%s) força para o padrão" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "A API AdminOps RGW retornou %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "O pedido falhou ao conectar-se ao OpenDaylight com API REST NorthBound" - -#, python-format -msgid "Required field %s not specified" -msgstr "Campo obrigatório %s não especificado" - -msgid "Resource" -msgstr "Recurso" - -msgid "Sample" -msgstr "Amostra" - -msgid "Samples should be included in request body" -msgstr "As amostras devem ser incluídas no corpo da solicitação" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Ignorar a extensão de carregamento para %s" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "Sequência %s não é um isotime válido" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"O arquivo Yaml que define o mapeamento entre amostras e recursos gnocchi/" -"métrica" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"O tipo de dados %(type)s não é suportado. A lista de tipos de dados " -"suportados é: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "O campo 'fields' é necessário para %s" - -msgid "The path for the file publisher is required" -msgstr "O caminho para o publicador do arquivo é necessário" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: Não pode decodificar dados enviados por %s" - -msgid "UDP: Unable to store meter" -msgstr "UDP: Não é possível armazenar medida" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "Não é possível conectar-se ao servidor de banco de dados: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Não é possível converter o valor %(value)s para o tipo de dados esperado " -"%(type)s." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "Não é possível descobrir recursos: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "Não é possível avaliar expressão %(expr)s:%(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "Impossível carregar publicador %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "Impossível carregar o inspetor do hypervisor: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"Não é possível se reconectar ao mongodb primário após %(retries)d novas " -"tentativas. Desistindo." - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"Não é possível se reconectar ao mongodb primário: %(errmsg)s. 
Tentando " -"novamente em %(retry_interval)d segundos." - -msgid "Unable to send sample over UDP" -msgstr "Não é possível enviar amostra sobre UDP" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Exceção inesperada convertendo %(value)s para o tipo de dado esperado " -"%(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Extensão de descoberta desconhecida: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "Tipo de metada desconhecido. Chave (%s) não será consultável." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido no Balanceador de Carga %(id)s, " -"ignorando a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido na largura da fonte %(id)s, ignorando " -"a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido no listener %(id)s, ignorando a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido no membro %(id)s, ignorando a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido no conjunto %(id)s, ignorando amostras" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido em vip %(id)s, ignorando a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido recebido no vpn %(id)s, ignorando a " -"amostra" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VM %s não localizado no VMware vSphere" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "Máquina virtual %s não localizada no XenServer" - -msgid "Wrong sensor type" -msgstr "Tipo de sensor errado" - -msgid "XenAPI not installed" -msgstr "XenAPI não instalado" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "Erro YAML ao ler o arquivo de definições %(file)s" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "não pôde obter o tempo de CPU para %(id)s: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "A opção direta não pode ser true quando o Gnocchi está ativado. " - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "eliminando amostra fora de ordem de tempo: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "descartando amostra sem predecessor: %s" - -msgid "ipmitool output length mismatch" -msgstr "incompatibilidade no comprimento da saída de ipmitool" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes e backup_count devem ser números." 
- -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"análise dos dados do sensor IPMI com falha, nenhum dado recuperado da " -"entrada fornecida" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "análise dos dados do sensor IPMI com falha,tipo de sensor desconhecido" - -msgid "running ipmitool failure" -msgstr "executando falha de ipmitool" diff --git a/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po b/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 98c2a275..00000000 --- a/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,495 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-10 08:01+0000\n" -"Last-Translator: Grigory Mokhin \n" -"Language: ru\n" -"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" -"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" -"%100>=11 && n%100<=14)? 2 : 3);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Russian\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s не найден" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"Арифметический преобразователь должен использовать хотя бы один счетчик в " -"выражении %s'" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"Не удалось создать таблицу %(table_name)s: уже существует. Игнорирование " -"ошибки" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "Продолжить после ошибки с %(name)s: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "Не удалось подключиться к подчиненному хосту: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "Не удалось подключиться к XenAPI: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "" -"Не удалось получить информацию об использовании процессора для %(id)s: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "" -"Не удалось получить информацию об использовании памяти для %(id)s: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "" -"Не удалось получить информацию об использовании CPU для виртуальной машины %s" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "Не удалось получить IP-адрес экземпляра %s" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "Удаление уведомления %(type)s (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"Возникла ошибка в libvirt при поиске экземпляра <имя=%(name)s, ИД=%(id)s>: " -"[Код ошибки: %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "Ошибка анализа ответа HTTP: %s" - -msgid "Error stopping pollster." -msgstr "Ошибка остановки опрашивающего объекта." 
- -msgid "Event" -msgstr "Событие" - -msgid "Expression evaluated to a NaN value!" -msgstr "Результат вычисления выражения - значение NaN!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "Не удалось импортировать расширение для %(name)s: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"Не удалось проверить данные экземпляра <имя=%(name)s, ИД=%(id)s>, состояние " -"домена - SHUTOFF." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"Не удалось проверить использование памяти экземпляром %(instance_uuid)s, не " -"удалось получить информацию от libvirt: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"Не удалось проверить использование памяти экземпляром <имя=%(name)s, ИД=" -"%(id)s>, не удалось получить информацию от libvirt." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "Не удалось загрузить обработчики уведомлений для %s" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Ошибка анализа значения времени %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "Не удалось опубликовать %d точек данных, выполняется их удаление" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "Не удалось опубликовать %d точек данных, создайте для них очередь" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Недопустимое выражение фильтра: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "Игнорирование экземпляра %(name)s (%(instance_id)s) : %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "Игнорирование экземпляра %(name)s: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Балансировщик нагрузки %(loadbalancer_id)s игнорируется" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Пул %(pool_id)s игнорируется" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Недопустимый синтаксис YAML в файле определений %(file)s; строка: %(line)s, " -"столбец: %(column)s." 
- -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Недопустимый интервал %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "Недопустимый тип особенности %(type)s для особенности %(trait)s" - -msgid "Limit must be positive" -msgstr "Ограничение должно быть положительным" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "Из драйвера хранилища возвращено несколько событий с ИД %s" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Найдено несколько виртуальных машин %s в XenServer" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "Необходимо указать connection_url и connection_password" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "Нет доступного модуля %(plugin)s для %(name)s" - -msgid "Node Manager init failed" -msgstr "Сбой инициализации администратора узлов" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Нет прав доступа к %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "Функция API OpenDaylight вернула %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "API Opencontrail возвратил %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"Оператор %(operator)s не поддерживается. Для поля %(field)s возможен только " -"оператор равенства" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"Оператор %(operator)s не поддерживается. Поддерживаемые операторы: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Недопустимое выражение сортировки: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Ошибка анализа спецификации JSONPath %(jsonpath)s для %(name)s: %(err)s" - -msgid "Period must be positive." -msgstr "Период должен быть положительным." - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "Конвейер %(pipeline)s: %(status)s после ошибки от публикатора %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "Конвейер %(pipeline)s: Продолжение после ошибки из публикатора %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "Конвейер %(pipeline)s: Ошибка выгрузки преобразователя %(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Конвейер %(pipeline)s: Выход после ошибки из преобразователя %(trans)s для " -"%(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "Указан модуль, но не передано имя модуля для %s" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "%(cnt)s-кратный сбой датчика опроса %(mtr)s!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "Опрос %(name)s не удалось выполнить %(cnt)s раз." - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "Опрашивающий объект для %s выключен!" 
- -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"Сделайте так, чтобы опрашивающий объект %(name)s больше не опрашивал " -"источник %(source)s!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"Превышена максимальная длина local_queue публикатора, удаление %d самых " -"старых образцов" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "Стратегия публикации неизвестна (%s). По умолчанию принудительная" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "Функция API RGW AdminOps вернула %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "Сбой запроса на подключение к OpenDaylight с API REST NorthBound" - -#, python-format -msgid "Required field %s not specified" -msgstr "Не указано обязательное поле %s" - -msgid "Resource" -msgstr "Ресурс" - -msgid "Sample" -msgstr "Образец" - -msgid "Samples should be included in request body" -msgstr "Образцы должны включаться в тело запроса" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Пропустить загрузку расширения для %s" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "Строка %s не является допустимым значением isotime" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"Файл Yaml, определяющий связи между образцами и ресурсами gnocchi " -"(показателями)" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Тип данных %(type)s не поддерживается. Список поддерживаемых типов данных: " -"%(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "Поле 'fields' является обязательным для %s" - -msgid "The path for the file publisher is required" -msgstr "Требуется путь для публикатора файлов" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: не удается декодировать данные, отправленные %s" - -msgid "UDP: Unable to store meter" -msgstr "UDP: не удалось сохранить счетчик" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "Не удалось подключиться к серверу базы данных: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Преобразовать значение %(value)s в ожидаемый тип данных %(type)s невозможно." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "Не удалось найти ресурсы: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "Вычислить выражение %(expr)s невозможно: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "Не удалось загрузить публикатор %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "Не удалось загрузить инспектор гипервизора: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"Не удалось повторно подключиться к основной базе данных mongodb после " -"%(retries)d попыток. Дальнейшие попытки прекращены." - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"Не удалось повторно подключиться к основной mongodb: %(errmsg)s. 
Повторное " -"подключение через %(retry_interval)d секунд." - -msgid "Unable to send sample over UDP" -msgstr "Не удалось отправить образец по UDP" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Возникла непредвиденная исключительная ситуация при преобразовании %(value)s " -"в ожидаемый тип данных %(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Неизвестное расширение поиска: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "Неизвестный тип метаданных. Ключ (%s) нельзя будет запрашивать." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"В балансировщике нагрузки %(id)s получено неизвестное состояние %(stat)s, " -"пример пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"В fw %(id)s получено неизвестное состояние %(stat)s,пример пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"В обработчике %(id)s получено неизвестное состояние %(stat)s, пример " -"пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"В участнике %(id)s получено неизвестное состояние %(stat)s, пример " -"пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"В пуле %(id)s получено неизвестное состояние %(stat)s,пример пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"В vip %(id)s получено неизвестное состояние %(stat)s,пример пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"В VPN %(id)s получено неизвестное состояние %(stat)s, пример пропускается" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "Виртуальная машина %s не найдена в VMware vSphere" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "Не найдена виртуальная машина %s в XenServer" - -msgid "Wrong sensor type" -msgstr "Неверный тип датчика" - -msgid "XenAPI not installed" -msgstr "XenAPI не установлен" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "Ошибка YAML при чтении файла определений %(file)s" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "не удалось получить процессорное время для %(id)s: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "Параметр direct не может быть равен true, если включен Gnocchi." - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "удаление образца, выпадающего из хронологического порядка: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "удаление образца без предшественника: %s" - -msgid "ipmitool output length mismatch" -msgstr "несоответствие длины вывода ipmitool" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes и backup_count должны быть числами." 
- -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"сбой анализа данных датчика IPMI, не получены данные из переданного ввода" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "сбой анализа данных датчика IPMI, неизвестный тип датчика" - -msgid "running ipmitool failure" -msgstr "сбой выполнения ipmitool" diff --git a/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po b/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 93311b3e..00000000 --- a/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,465 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# aji.zqfan , 2015 -# yelu , 2013 -# Tom Fifield , 2013 -# 颜海峰 , 2014 -# yelu , 2013 -# Yu Zhang, 2013 -# Yu Zhang, 2013 -# 颜海峰 , 2014 -# English translations for ceilometer. -# Andreas Jaeger , 2016. #zanata -# Linda , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-31 03:32+0000\n" -"Last-Translator: Linda \n" -"Language: zh-CN\n" -"Language-Team: Chinese (China)\n" -"Plural-Forms: nplurals=1; plural=0\n" -"Generated-By: Babel 2.2.0\n" -"X-Generator: Zanata 3.7.3\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "无法找到%(entity)s %(id)s " - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "算术变形器在表达式'%s'中必须至少使用一个指标" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "表%(table_name)s已经存在,无法创建。忽略此错误继续执行。" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "遇到错误%(name)s:%(error)s,继续执行" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "无法连接伺服主机:%s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "无法连接到XenAPI:%s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "无法为虚拟机%(id)s获取CPU使用率:%(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "无法为%(id)s获取内存使用信息:%(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "无法获取虚拟机%s的CPU使用率" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "无法为实例%s获取IP地址" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "正在丢弃通知%(type)s (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"查找实例 <名称为 %(name)s,标识为 %(id)s> 时,libvirt 中出错:[错误代码 " -"%(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "解析HTTP响应失败: %s" - -msgid "Error stopping pollster." -msgstr "停止轮询程序时出错。" - -msgid "Event" -msgstr "事件" - -msgid "Expression evaluated to a NaN value!" -msgstr "表达式计算结果为NaN!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "无法为%(name)s引入扩展:%(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." 
-msgstr "" -"为虚拟机获取监控数据失败了,虚拟机状态为SHUTOFF" - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"为虚拟机%(instance_uuid)s采集内存使用指标失败了,无法从libvirt获取信息:" -"%(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"为虚拟机采集内存使用指标失败了,无法从libvirt获取信" -"息。" - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "无法为%s加载任何通知处理器" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "解析时间戳%s失败" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "发布%d个数据点时失败,正在将其丢弃" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "发布%d个数据点时失败,将其入队" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "过滤表达式不合法:%s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "忽略虚拟机%(name)s (%(instance_id)s) : %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "忽略虚拟机%(name)s:%(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "正在忽略负载均衡器 %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "正在忽略池 %(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "定义文件%(file)s中有非法YAML语法,行:%(line)s,列%(column)s。" - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "非法的间隔%(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "特征%(trait)s包含了不合法的特征类型'%(type)s' " - -msgid "Limit must be positive" -msgstr "limit必须是正数" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "从数据库返回了多个id为%s的事件" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "多个虚拟机%s在XenServer中被找到" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "使用时必须指定connection_url和connection_password" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "未对 %(name)s 提供名为 %(plugin)s 的插件" - -msgid "Node Manager init failed" -msgstr "节点管理器初始化失败" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "权限不足以访问%(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylight接口返回状态%(status)s,原因%(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail接口返回状态%(status)s,原因%(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "运算符 %(operator)s 不受支持。对于字段 %(field)s,只能使用等号运算符" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "运算符 %(operator)s 不受支持。受支持的运算符为:%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "orderby表达式不合法:%s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "对 %(name)s 指定的 JSONPath(即“%(jsonpath)s”)存在解析错误:%(err)s" - -msgid "Period must be positive." 
-msgstr "period 参数必须是正数" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "" -"管道 %(pipeline)s:在发布程序 %(pub)s 中发生错误之后,处于 %(status)s 状态" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "流水线%(pipeline)s:发布器%(pub)s报错,继续执行" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "流水线%(pipeline)s:变形器%(trans)s清空数据时出错" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "流水线%(pipeline)s:数据%(smp)s的变形器%(trans)s遇到错误,退出" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "指定了插件,但未对 %s 提供插件名" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "拉取%(mtr)s传感器失败了%(cnt)s次!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "轮询 %(name)s 已失败 %(cnt)s 次!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "%s的采集器被禁用" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "请阻止轮询程序 %(name)s 再轮询源 %(source)s!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "发布的数据量超过本地队列最大长度,正在丢弃最老的%d个数据" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "未知的发布策略(%s),强制使用默认策略" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "RGW AdminOps接口返回%(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "请求无法连接到OpenDaylight的北向REST接口" - -#, python-format -msgid "Required field %s not specified" -msgstr "必填项%s没有填写" - -msgid "Resource" -msgstr "资源" - -msgid "Sample" -msgstr "数据" - -msgid "Samples should be included in request body" -msgstr "样本应包括在请求主体中" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "跳过为%s加载扩展" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "字符串%s不是个合法的标准时间格式" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "用于定义样本与 gnocchi 资源/度量值之间的映射的Yaml 文件" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "数据类型%(type)s不被支持。支持的数据类型列表:%(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "%s 需要字段“fields”" - -msgid "The path for the file publisher is required" -msgstr "文件发布器必须设置文件路径" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: 无法解码由 %s 发送的数据" - -msgid "UDP: Unable to store meter" -msgstr "UDP: 无法存储计量器" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "无法连接到数据库服务器:%(errmsg)s。" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "无法转换%(value)s到预期的数据类型%(type)s。" - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "无法发现资源:%s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "无法计算表达式%(expr)s:%(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "无法加载发布器%s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "无法加载管理程序的探测器:%s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." 
-msgstr "在%(retries)d次尝试后仍无法重连到MongoDB主节点。放弃重连。" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"无法重连到MongoDB主节点:%(errmsg)s。在%(retry_interval)d秒后进行重试。" - -msgid "Unable to send sample over UDP" -msgstr "无法通过UDP发送采样" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "在转换%(value)s到预期的数据类型%(type)s时发生了未预料的异常。" - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "未知的发现器插件:%s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "未知的元数据类型。键(%s)将无法进行查询。" - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "在负载均衡器 %(id)s 上接收到未知状态 %(stat)s,正在跳过样本" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "从fw %(id)s收到未知的状态%(stat)s,跳过该采样数据" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "在侦听器 %(id)s 上接收到未知状态 %(stat)s,正在跳过样本" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "在成员 %(id)s 上接收到未知状态 %(stat)s,正在跳过样本" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "从pool %(id)s收到未知的状态%(stat)s,跳过该采样数据" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "从vip %(id)s收到未知的状态%(stat)s,跳过该采样数据" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "在 VPN %(id)s 上接收到未知状态 %(stat)s,正在跳过样本" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "在 VMware vSphere 中,找不到 VM %s" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "无法在XenServer中找到虚拟机%s" - -msgid "Wrong sensor type" -msgstr "错误的传感器类型" - -msgid "XenAPI not installed" -msgstr "XenAPI没有安装" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "读取定义文件%(file)s时遇到YAML错误" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "Aodh 被禁用或不可用时,警报 URL 不可用。" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "无法为虚拟机%(id)s获取CPU时间:%(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "启用 Gnocchi 后,direct 选项不能为 true。" - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "正在退出时间顺序样本:%s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "因为之前没有数据(用来计算差值)因而丢弃数据:%s" - -msgid "ipmitool output length mismatch" -msgstr "ipmi输出长度不匹配" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes和backup_count必须是整数。" - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "解析IPMI传感器数据失败,从给定的输入中无法检索到数据" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "解析IPMI传感器数据失败,未知的传感器类型" - -msgid "running ipmitool failure" -msgstr "运行ipmitool时失败了" diff --git a/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po b/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po deleted file mode 100644 index edfa9b08..00000000 --- a/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,455 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Stefano Maffulli , 2013 -# Andreas Jaeger , 2016. #zanata -# Jennifer , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-15 06:03+0000\n" -"Last-Translator: Jennifer \n" -"Language: zh-TW\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Chinese (Taiwan)\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "找不到 %(entity)s %(id)s" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "在表示式 '%s' 中,算術轉換器必須至少使用一種計量" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "無法建立表格 %(table_name)s,該表格已經存在。將忽略錯誤" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "在 %(name)s 傳回錯誤 %(error)s 後繼續" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "無法連接附屬主機:%s" - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "無法連接 XenAPI:%s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "無法取得 %(id)s 的 CPU 使用率:%(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "無法取得 %(id)s 的記憶體用量:%(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "無法取得 VM %s CPU 使用率" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "無法取得實例 %s 的 IP 位址" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "正在捨棄通知 %(type)s(UUID:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"查閱實例 <名稱=%(name)s,ID=%(id)s> 時,libvirt 中發生錯誤:[錯誤碼 " -"%(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "剖析 HTTP 回應時發生錯誤:%s" - -msgid "Error stopping pollster." -msgstr "停止 pollster 時發生錯誤。" - -msgid "Event" -msgstr "事件" - -msgid "Expression evaluated to a NaN value!" -msgstr "表示式已求值為非數字值!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "無法匯入 %(name)s 的延伸:%(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "無法檢查實例 <名稱=%(name)s,ID=%(id)s> 的資料,網域狀態為 SHUTOFF。" - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"無法檢查 %(instance_uuid)s 的記憶體用量,無法從 libVirt 取得資訊:%(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." 
-msgstr "" -"無法檢查實例 <名稱=%(name)s,ID=%(id)s> 的記憶體用量,無法從 libVirt 取得資" -"訊。" - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "無法載入 %s 的任何通知處理程式" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "無法剖析時間戳記值 %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "無法發佈 %d 個資料點,正在捨棄它們" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "無法發佈 %d 個資料點,正在將它們排入佇列" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "過濾表示式無效:%s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "正在忽略實例 %(name)s (%(instance_id)s):%(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "正在忽略實例 %(name)s:%(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "正在忽略負載平衡器 %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "正在忽略儲存區 %(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "定義檔 %(file)s 第 %(line)s 行第 %(column)s 列中的 YAML 語法無效。" - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "期間 %(period)s 無效:%(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "特徵 %(trait)s 的特徵類型 '%(type)s' 無效" - -msgid "Limit must be positive" -msgstr "限制值必須是正數" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "從儲存體驅動程式傳回了多個 ID 為 %s 的事件" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "在 XenServer 中找到多個 VM %s" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "必須指定 connection_url 和 connection_password,才能使用" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "沒有名為 %(plugin)s 的外掛程式可供 %(name)s 使用" - -msgid "Node Manager init failed" -msgstr "節點管理程式起始設定失敗" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "未獲授權來存取 %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylight API 傳回了 %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail API 傳回了 %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "運算子 %(operator)s 不受支援。只有等式運算子才可供欄位 %(field)s 使用" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "運算子 %(operator)s 不受支援。受支援的運算子為:%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "排序方式表示式無效:%s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "%(name)s 的 JSONPath 規格 '%(jsonpath)s' 中發生剖析錯誤:%(err)s" - -msgid "Period must be positive." 
-msgstr "期間必須是正數。" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "管線 %(pipeline)s:在發佈者 %(pub)s 傳回錯誤後處於%(status)s狀態" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "管線 %(pipeline)s:在發佈者 %(pub)s 傳回錯誤後繼續" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "管線 %(pipeline)s:清除轉換器 %(trans)s 時發生錯誤" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "管線 %(pipeline)s:%(smp)s 的轉換器 %(trans)s傳回錯誤後結束" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "已指定外掛程式,但卻未向 %s 提供外掛程式名稱" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "輪詢 %(mtr)s 感應器已失敗 %(cnt)s 次!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "輪詢 %(name)s 失敗了 %(cnt)s 次!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "已停用 %s 的 Pollster!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "阻止 pollster %(name)s 再次輪詢資源 %(source)s!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "已超出發佈者 local_queue 長度上限,正在捨棄 %d 個最舊的樣本" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "發佈原則不明 (%s),強制設為預設值" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "RGW AdminOps API 傳回了 %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "要求無法使用 NorthBound REST API 來連接至 OpenDaylight" - -#, python-format -msgid "Required field %s not specified" -msgstr "未指定必要欄位 %s" - -msgid "Resource" -msgstr "資源" - -msgid "Sample" -msgstr "樣本" - -msgid "Samples should be included in request body" -msgstr "要求內文中應該包括範例" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "跳過載入 %s 的延伸" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "字串 %s 不是有效的 ISO 時間" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "用來在範例與 gnocchi 資源/度量之間定義對映的Yaml 檔案" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "不支援資料類型 %(type)s。支援的資料類型清單為:%(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "%s 需要欄位「欄位」" - -msgid "The path for the file publisher is required" -msgstr "需要檔案發佈者的路徑" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP:無法解碼由 %s 傳送的資料" - -msgid "UDP: Unable to store meter" -msgstr "UDP:無法儲存計量" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "無法連接至資料庫伺服器:%(errmsg)s。" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "無法將值 %(value)s 轉換成預期的資料類型 %(type)s。" - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "無法探索資源:%s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "無法對表示式 %(expr)s 進行求值:%(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "無法載入發佈者 %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "無法載入 Hypervisor 檢查程式:%s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." 
-msgstr "在 %(retries)d 次重試之後仍無法重新連接至主要 MongoDB。正在放棄。" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"無法重新連接至主要 MongoDB:%(errmsg)s。請在%(retry_interval)d 秒之後再次嘗" -"試。" - -msgid "Unable to send sample over UDP" -msgstr "無法透過 UDP 來傳送樣本" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "將 %(value)s 轉換為預期的資料類型%(type)s 時發生非預期的異常狀況。" - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "不明的探索延伸:%s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "不明的 meta 資料類型。索引鍵 (%s) 將不可查詢。" - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "在負載平衡器 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "在防火牆 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "在接聽器 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "在成員 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "在儲存區 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "在 VIP %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "在 VPN %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "在 VMware vSphere 中找不到 VM %s" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "在 XenServer 中找不到 VM %s" - -msgid "Wrong sensor type" -msgstr "感應器類型錯誤" - -msgid "XenAPI not installed" -msgstr "未安裝 XenAPI" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "讀取定義檔 %(file)s 時發生 YAML 錯誤" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "當已停用或無法使用 Aodh 時,無法使用警示 URL" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "無法取得 %(id)s 的 CPU 時間:%(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "已啟用 Gnocchi 時,直接選項不能為 true。" - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "正在刪除不在時間順序內的範例:%s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "正在捨棄不含前一版本的樣本:%s" - -msgid "ipmitool output length mismatch" -msgstr "ipmitool 輸出長度不符" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes 及 backup_count 應該是數字。" - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "剖析 IPMI 感應器資料失敗,未從給定的輸入擷取任何資料" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "剖析 IPMI 感應器資料失敗,感應器類型不明" - -msgid "running ipmitool failure" -msgstr "執行 ipmitool 失敗" diff --git a/ceilometer/messaging.py b/ceilometer/messaging.py deleted file mode 100644 index 7b325077..00000000 --- a/ceilometer/messaging.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2013-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import oslo_messaging -from oslo_messaging import serializer as oslo_serializer - -DEFAULT_URL = "__default__" -TRANSPORTS = {} - - -def setup(): - oslo_messaging.set_transport_defaults('ceilometer') - - -def get_transport(url=None, optional=False, cache=True): - """Initialise the oslo_messaging layer.""" - global TRANSPORTS, DEFAULT_URL - cache_key = url or DEFAULT_URL - transport = TRANSPORTS.get(cache_key) - if not transport or not cache: - try: - transport = oslo_messaging.get_transport(cfg.CONF, url) - except (oslo_messaging.InvalidTransportURL, - oslo_messaging.DriverLoadFailure): - if not optional or url: - # NOTE(sileht): oslo_messaging is configured but unloadable - # so reraise the exception - raise - return None - else: - if cache: - TRANSPORTS[cache_key] = transport - return transport - - -def cleanup(): - """Cleanup the oslo_messaging layer.""" - global TRANSPORTS, NOTIFIERS - NOTIFIERS = {} - for url in TRANSPORTS: - TRANSPORTS[url].cleanup() - del TRANSPORTS[url] - - -_SERIALIZER = oslo_serializer.JsonPayloadSerializer() - - -def get_batch_notification_listener(transport, targets, endpoints, - allow_requeue=False, - batch_size=1, batch_timeout=None): - """Return a configured oslo_messaging notification listener.""" - return oslo_messaging.get_batch_notification_listener( - transport, targets, endpoints, executor='threading', - allow_requeue=allow_requeue, - batch_size=batch_size, batch_timeout=batch_timeout) - - -def get_notifier(transport, publisher_id): - """Return a configured oslo_messaging notifier.""" - notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER) - return notifier.prepare(publisher_id=publisher_id) - - -def convert_to_old_notification_format(priority, notification): - # FIXME(sileht): temporary convert notification to old format - # to focus on oslo_messaging migration before refactoring the code to - # use the new oslo_messaging facilities - notification = notification.copy() - notification['priority'] = priority - notification.update(notification["metadata"]) - for k in notification['ctxt']: - notification['_context_' + k] = notification['ctxt'][k] - del notification['ctxt'] - del notification['metadata'] - return notification diff --git a/ceilometer/meter/__init__.py b/ceilometer/meter/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/meter/data/meters.yaml b/ceilometer/meter/data/meters.yaml deleted file mode 100644 index 650b0309..00000000 --- a/ceilometer/meter/data/meters.yaml +++ /dev/null @@ -1,815 +0,0 @@ ---- - -metric: - # Image - - name: "image.size" - event_type: - - "image.upload" - - "image.delete" - - "image.update" - type: "gauge" - unit: B - volume: $.payload.size - resource_id: $.payload.id - project_id: $.payload.owner - - - name: "image.download" - event_type: "image.send" - type: "delta" - unit: "B" - volume: $.payload.bytes_sent - resource_id: $.payload.image_id - user_id: $.payload.receiver_user_id - project_id: $.payload.receiver_tenant_id - - - name: "image.serve" - event_type: "image.send" - type: "delta" - unit: "B" - volume: 
$.payload.bytes_sent - resource_id: $.payload.image_id - project_id: $.payload.owner_id - - - name: 'volume.size' - event_type: - - 'volume.exists' - - 'volume.create.*' - - 'volume.delete.*' - - 'volume.resize.*' - - 'volume.attach.*' - - 'volume.detach.*' - - 'volume.update.*' - type: 'gauge' - unit: 'GB' - volume: $.payload.size - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.volume_id - - - name: 'snapshot.size' - event_type: - - 'snapshot.exists' - - 'snapshot.create.*' - - 'snapshot.delete.*' - type: 'gauge' - unit: 'GB' - volume: $.payload.volume_size - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.snapshot_id - - # Magnum - - name: $.payload.metrics.[*].name - event_type: 'magnum.bay.metrics.*' - type: 'gauge' - unit: $.payload.metrics.[*].unit - volume: $.payload.metrics.[*].value - user_id: $.payload.user_id - project_id: $.payload.project_id - resource_id: $.payload.resource_id - lookup: ['name', 'unit', 'volume'] - - # Swift - - name: $.payload.measurements.[*].metric.[*].name - event_type: 'objectstore.http.request' - type: 'delta' - unit: $.payload.measurements.[*].metric.[*].unit - volume: $.payload.measurements.[*].result - resource_id: $.payload.target.id - user_id: $.payload.initiator.id - project_id: $.payload.initiator.project_id - lookup: ['name', 'unit', 'volume'] - - - name: 'memory' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'MB' - volume: $.payload.memory_mb - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'vcpus' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'vcpu' - volume: $.payload.vcpus - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'compute.instance.booting.time' - event_type: 'compute.instance.create.end' - type: 'gauge' - unit: 'sec' - volume: - fields: [$.payload.created_at, $.payload.launched_at] - plugin: 'timedelta' - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'disk.root.size' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'GB' - volume: $.payload.root_gb - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'disk.ephemeral.size' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'GB' - volume: $.payload.ephemeral_gb - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'bandwidth' - event_type: 'l3.meter' - type: 'delta' - unit: 'B' - volume: $.payload.bytes - project_id: $.payload.tenant_id - resource_id: $.payload.label_id - - - name: 'compute.node.cpu.frequency' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'MHz' - volume: $.payload.metrics[?(@.name='cpu.frequency')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.frequency')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.frequency')].source - - - name: 'compute.node.cpu.user.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' - volume: $.payload.metrics[?(@.name='cpu.user.time')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: 
$.payload.metrics[?(@.name='cpu.user.time')].source - - - name: 'compute.node.cpu.kernel.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' - volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.kernel.time')].source - - - name: 'compute.node.cpu.idle.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' - volume: $.payload.metrics[?(@.name='cpu.idle.time')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.idle.time')].source - - - name: 'compute.node.cpu.iowait.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' - volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.iowait.time')].source - - - name: 'compute.node.cpu.kernel.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source - - - name: 'compute.node.cpu.idle.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.idle.percent')].source - - - name: 'compute.node.cpu.user.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.user.percent')].source - - - name: 'compute.node.cpu.iowait.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.iowait.percent')].source - - - name: 'compute.node.cpu.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.percent')].source - - # 
DNS - - name: 'dns.domain.exists' - event_type: 'dns.domain.exists' - type: 'cumulative' - unit: 's' - volume: - fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] - plugin: 'timedelta' - project_id: $.payload.tenant_id - resource_id: $.payload.id - user_id: $._context_user - metadata: - status: $.payload.status - pool_id: $.payload.pool_id - host: $.publisher_id - - # Trove - - name: 'trove.instance.exists' - event_type: 'trove.instance.exists' - type: 'cumulative' - unit: 's' - volume: - fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] - plugin: 'timedelta' - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - user_id: $.payload.user_id - metadata: - nova_instance_id: $.payload.nova_instance_id - state: $.payload.state - service_id: $.payload.service_id - instance_type: $.payload.instance_type - instance_type_id: $.payload.instance_type_id - - -# NOTE: non-metric meters are generally events/existence meters -# These are DEPRECATED in current release and expected to be -# REMOVED in the next upcoming release. -# - # Image - - name: "image" - event_type: - - "image.upload" - - "image.update" - - "image.delete" - type: "gauge" - unit: 'image' - volume: 1 - resource_id: $.payload.id - project_id: $.payload.owner - - - name: "image.upload" - event_type: - - "image.upload" - type: "gauge" - unit: 'image' - volume: 1 - resource_id: $.payload.id - project_id: $.payload.owner - - - name: "image.delete" - event_type: - - "image.delete" - type: "gauge" - unit: 'image' - volume: 1 - resource_id: $.payload.id - project_id: $.payload.owner - - - name: "image.update" - event_type: - - "image.update" - type: "gauge" - unit: 'image' - volume: 1 - resource_id: $.payload.id - project_id: $.payload.owner - - # Orchestration - - name: 'stack.create' - event_type: - - 'orchestration.stack.create.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - - name: 'stack.update' - event_type: - - 'orchestration.stack.update.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - - name: 'stack.delete' - event_type: - - 'orchestration.stack.delete.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - - name: 'stack.resume' - event_type: - - 'orchestration.stack.resume.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - - name: 'stack.suspend' - event_type: - - 'orchestration.stack.suspend.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - # Volume - - name: 'volume' - type: 'gauge' - unit: 'volume' - volume: 1 - event_type: - - 'volume.exists' - - 'volume.create.*' - - 'volume.delete.*' - - 'volume.resize.*' - - 'volume.attach.*' - - 'volume.detach.*' - - 'volume.update.*' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.exists' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.exists' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.create.start' - type: 
'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.create.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.create.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.create.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.delete.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.delete.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.delete.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.delete.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.update.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.update.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.update.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.update.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.resize.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.resize.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.resize.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.resize.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - - name: 'volume.attach.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.attach.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.attach.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.attach.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.detach.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.detach.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.detach.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.detach.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - # Volume Snapshot - - name: 'snapshot' - type: 'gauge' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.exists' - - 'snapshot.create.*' - - 'snapshot.delete.*' - - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.exists' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.exists' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.create.start' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.create.start' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.create.end' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.create.end' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.delete.start' - type: 'delta' - unit: 'snapshot' - 
volume: 1 - event_type: - - 'snapshot.delete.start' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.delete.end' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.delete.end' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - # Sahara - - name: 'cluster.create' - type: 'delta' - unit: 'cluster' - volume: 1 - event_type: - - 'sahara.cluster.create' - resource_id: $.payload.cluster_id - project_id: $.payload.project_id - - - name: 'cluster.update' - type: 'delta' - unit: 'cluster' - volume: 1 - event_type: - - 'sahara.cluster.update' - resource_id: $.payload.cluster_id - project_id: $.payload.project_id - - - name: 'cluster.delete' - type: 'delta' - unit: 'cluster' - volume: 1 - event_type: - - 'sahara.cluster.delete' - resource_id: $.payload.cluster_id - project_id: $.payload.project_id - - # Identity - - name: 'identity.user.created' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.user.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.user.updated' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.user.updated' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.user.deleted' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.user.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.group.created' - type: 'delta' - unit: 'group' - volume: 1 - event_type: - - 'identity.group.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.group.updated' - type: 'delta' - unit: 'group' - volume: 1 - event_type: - - 'identity.group.updated' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.group.deleted' - type: 'delta' - unit: 'group' - volume: 1 - event_type: - - 'identity.group.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.project.created' - type: 'delta' - unit: 'project' - volume: 1 - event_type: - - 'identity.project.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.project.updated' - type: 'delta' - unit: 'project' - volume: 1 - event_type: - - 'identity.project.updated' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.project.deleted' - type: 'delta' - unit: 'project' - volume: 1 - event_type: - - 'identity.project.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.role.created' - type: 'delta' - unit: 'role' - volume: 1 - event_type: - - 'identity.role.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.role.updated' - type: 'delta' - unit: 'role' - volume: 1 - event_type: - - 'identity.role.updated' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.role.deleted' - type: 'delta' - unit: 'role' - volume: 1 - event_type: - - 'identity.role.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.role_assignment.created' - type: 'delta' - unit: 'role_assignment' - volume: 1 - event_type: - - 'identity.role_assignment.created' - resource_id: $.payload.role - user_id: $.payload.initiator.id - - - name: 
'identity.role_assignment.deleted' - type: 'delta' - unit: 'role_assignment' - volume: 1 - event_type: - - 'identity.role_assignment.deleted' - resource_id: $.payload.role - user_id: $.payload.initiator.id - - - name: 'identity.authenticate.success' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.authenticate' - resource_id: $.payload.initiator.id - user_id: $.payload.initiator.id - - - name: 'identity.authenticate.pending' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.authenticate' - resource_id: $.payload.initiator.id - user_id: $.payload.initiator.id - - - name: 'identity.authenticate.failure' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.authenticate' - resource_id: $.payload.initiator.id - user_id: $.payload.initiator.id - - - name: 'identity.trust.created' - type: 'delta' - unit: 'trust' - volume: 1 - event_type: - - 'identity.OS-TRUST:trust.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.trust.deleted' - type: 'delta' - unit: 'trust' - volume: 1 - event_type: - - 'identity.OS-TRUST:trust.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'storage.api.request' - type: 'delta' - unit: 'request' - volume: 1 - event_type: - - 'objectstore.http.request' - resource_id: $.payload.target.id - user_id: $.payload.initiator.id - project_id: $.payload.initiator.project_id - - - name: '$.payload.name' - event_type: 'profiler.*' - type: 'gauge' - unit: 'trace' - volume: 1 - user_id: $.payload.user_id - project_id: $.payload.project_id - resource_id: '"profiler-" + $.payload.base_id' diff --git a/ceilometer/meter/notifications.py b/ceilometer/meter/notifications.py deleted file mode 100644 index efb57213..00000000 --- a/ceilometer/meter/notifications.py +++ /dev/null @@ -1,230 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools -import pkg_resources -import six - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import fnmatch -from stevedore import extension - -from ceilometer.agent import plugin_base -from ceilometer import declarative -from ceilometer.i18n import _LE, _LW -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('meter_definitions_cfg_file', - default="meters.yaml", - help="Configuration file for defining meter notifications." 
- ), -] - -cfg.CONF.register_opts(OPTS, group='meter') -cfg.CONF.import_opt('disable_non_metric_meters', 'ceilometer.notification', - group='notification') - -LOG = log.getLogger(__name__) - - -class MeterDefinition(object): - - SAMPLE_ATTRIBUTES = ["name", "type", "volume", "unit", "timestamp", - "user_id", "project_id", "resource_id"] - - REQUIRED_FIELDS = ['name', 'type', 'event_type', 'unit', 'volume', - 'resource_id'] - - def __init__(self, definition_cfg, plugin_manager): - self.cfg = definition_cfg - missing = [field for field in self.REQUIRED_FIELDS - if not self.cfg.get(field)] - if missing: - raise declarative.MeterDefinitionException( - _LE("Required fields %s not specified") % missing, self.cfg) - - self._event_type = self.cfg.get('event_type') - if isinstance(self._event_type, six.string_types): - self._event_type = [self._event_type] - - if ('type' not in self.cfg.get('lookup', []) and - self.cfg['type'] not in sample.TYPES): - raise declarative.MeterDefinitionException( - _LE("Invalid type %s specified") % self.cfg['type'], self.cfg) - - self._fallback_user_id = declarative.Definition( - 'user_id', "_context_user_id|_context_user", plugin_manager) - self._fallback_project_id = declarative.Definition( - 'project_id', "_context_tenant_id|_context_tenant", plugin_manager) - self._attributes = {} - self._metadata_attributes = {} - - for name in self.SAMPLE_ATTRIBUTES: - attr_cfg = self.cfg.get(name) - if attr_cfg: - self._attributes[name] = declarative.Definition( - name, attr_cfg, plugin_manager) - metadata = self.cfg.get('metadata', {}) - for name in metadata: - self._metadata_attributes[name] = declarative.Definition( - name, metadata[name], plugin_manager) - - # List of fields we expected when multiple meter are in the payload - self.lookup = self.cfg.get('lookup') - if isinstance(self.lookup, six.string_types): - self.lookup = [self.lookup] - - def match_type(self, meter_name): - for t in self._event_type: - if fnmatch.fnmatch(meter_name, t): - return True - - def to_samples(self, message, all_values=False): - # Sample defaults - sample = { - 'name': self.cfg["name"], 'type': self.cfg["type"], - 'unit': self.cfg["unit"], 'volume': None, 'timestamp': None, - 'user_id': self._fallback_user_id.parse(message), - 'project_id': self._fallback_project_id.parse(message), - 'resource_id': None, 'message': message, 'metadata': {}, - } - for name, parser in self._metadata_attributes.items(): - value = parser.parse(message) - if value: - sample['metadata'][name] = value - - # NOTE(sileht): We expect multiple samples in the payload - # so put each attribute into a list - if self.lookup: - for name in sample: - sample[name] = [sample[name]] - - for name in self.SAMPLE_ATTRIBUTES: - parser = self._attributes.get(name) - if parser is not None: - value = parser.parse(message, bool(self.lookup)) - # NOTE(sileht): If we expect multiple samples - # some attributes are overridden even we don't get any - # result. 
Also note in this case value is always a list - if ((not self.lookup and value is not None) or - (self.lookup and ((name in self.lookup + ["name"]) - or value))): - sample[name] = value - - if self.lookup: - nb_samples = len(sample['name']) - # skip if no meters in payload - if nb_samples <= 0: - raise StopIteration - - attributes = self.SAMPLE_ATTRIBUTES + ["message", "metadata"] - - samples_values = [] - for name in attributes: - values = sample.get(name) - nb_values = len(values) - if nb_values == nb_samples: - samples_values.append(values) - elif nb_values == 1 and name not in self.lookup: - samples_values.append(itertools.cycle(values)) - else: - nb = (0 if nb_values == 1 and values[0] is None - else nb_values) - LOG.warning('Only %(nb)d fetched meters contain ' - '"%(name)s" field instead of %(total)d.' % - dict(name=name, nb=nb, - total=nb_samples)) - raise StopIteration - - # NOTE(sileht): Transform the sample with multiple values per - # attribute into multiple samples with one value per attribute. - for values in zip(*samples_values): - yield dict((attributes[idx], value) - for idx, value in enumerate(values)) - else: - yield sample - - -class ProcessMeterNotifications(plugin_base.NotificationBase): - - event_types = [] - - def __init__(self, manager): - super(ProcessMeterNotifications, self).__init__(manager) - self.definitions = self._load_definitions() - - @staticmethod - def _load_definitions(): - plugin_manager = extension.ExtensionManager( - namespace='ceilometer.event.trait_plugin') - meters_cfg = declarative.load_definitions( - {}, cfg.CONF.meter.meter_definitions_cfg_file, - pkg_resources.resource_filename(__name__, "data/meters.yaml")) - - definitions = {} - for meter_cfg in reversed(meters_cfg['metric']): - if meter_cfg.get('name') in definitions: - # skip duplicate meters - LOG.warning(_LW("Skipping duplicate meter definition %s") - % meter_cfg) - continue - if (meter_cfg.get('volume') != 1 - or not cfg.CONF.notification.disable_non_metric_meters): - try: - md = MeterDefinition(meter_cfg, plugin_manager) - except declarative.DefinitionException as e: - errmsg = _LE("Error loading meter definition: %s") - LOG.error(errmsg, six.text_type(e)) - else: - definitions[meter_cfg['name']] = md - return definitions.values() - - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - It is defining the exchange and topics to be connected for this plugin. - :param conf: Configuration. 
- #TODO(prad): This should be defined in the notification agent - """ - targets = [] - exchanges = [ - conf.nova_control_exchange, - conf.cinder_control_exchange, - conf.glance_control_exchange, - conf.neutron_control_exchange, - conf.heat_control_exchange, - conf.keystone_control_exchange, - conf.sahara_control_exchange, - conf.trove_control_exchange, - conf.zaqar_control_exchange, - conf.swift_control_exchange, - conf.ceilometer_control_exchange, - conf.magnum_control_exchange, - conf.dns_control_exchange, - ] - - for exchange in exchanges: - targets.extend(oslo_messaging.Target(topic=topic, - exchange=exchange) - for topic in - self.get_notification_topics(conf)) - return targets - - def process_notification(self, notification_body): - for d in self.definitions: - if d.match_type(notification_body['event_type']): - for s in d.to_samples(notification_body): - yield sample.Sample.from_notification(**s) diff --git a/ceilometer/middleware.py b/ceilometer/middleware.py deleted file mode 100644 index 3901c3e2..00000000 --- a/ceilometer/middleware.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -cfg.CONF.import_opt('nova_control_exchange', - 'ceilometer.compute.notifications') -cfg.CONF.import_opt('glance_control_exchange', - 'ceilometer.notification') -cfg.CONF.import_opt('neutron_control_exchange', - 'ceilometer.network.notifications') -cfg.CONF.import_opt('cinder_control_exchange', - 'ceilometer.notification') - -OPTS = [ - cfg.MultiStrOpt('http_control_exchanges', - default=[cfg.CONF.nova_control_exchange, - cfg.CONF.glance_control_exchange, - cfg.CONF.neutron_control_exchange, - cfg.CONF.cinder_control_exchange], - help="Exchanges name to listen for notifications."), -] - -cfg.CONF.register_opts(OPTS) - - -class HTTPRequest(plugin_base.NotificationBase, - plugin_base.NonMetricNotificationBase): - event_types = ['http.request'] - - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - This sequence is defining the exchange and topics to be connected for - this plugin. 
- """ - return [oslo_messaging.Target(topic=topic, exchange=exchange) - for topic in self.get_notification_topics(conf) - for exchange in conf.http_control_exchanges] - - def process_notification(self, message): - yield sample.Sample.from_notification( - name=message['event_type'], - type=sample.TYPE_DELTA, - volume=1, - unit=message['event_type'].split('.')[1], - user_id=message['payload']['request'].get('HTTP_X_USER_ID'), - project_id=message['payload']['request'].get('HTTP_X_PROJECT_ID'), - resource_id=message['payload']['request'].get( - 'HTTP_X_SERVICE_NAME'), - message=message) - - -class HTTPResponse(HTTPRequest): - event_types = ['http.response'] diff --git a/ceilometer/network/__init__.py b/ceilometer/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/network/floatingip.py b/ceilometer/network/floatingip.py deleted file mode 100644 index ce178704..00000000 --- a/ceilometer/network/floatingip.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2016 Sungard Availability Services -# Copyright 2016 Red Hat -# Copyright 2012 eNovance -# Copyright 2013 IBM Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log - -from ceilometer.i18n import _LW -from ceilometer.network.services import base -from ceilometer import sample - -LOG = log.getLogger(__name__) - -cfg.CONF.import_group('service_types', 'ceilometer.neutron_client') - - -class FloatingIPPollster(base.BaseServicesPollster): - - FIELDS = ['router_id', - 'status', - 'floating_network_id', - 'fixed_ip_address', - 'port_id', - 'floating_ip_address', - ] - - @property - def default_discovery(self): - return 'fip_services' - - def get_samples(self, manager, cache, resources): - - for fip in resources or []: - if fip['status'] is None: - LOG.warning(_LW("Invalid status, skipping IP address %s") % - fip['floating_ip_address']) - continue - status = self.get_status_id(fip['status']) - yield sample.Sample( - name='ip.floating', - type=sample.TYPE_GAUGE, - unit='ip', - volume=status, - user_id=fip.get('user_id'), - project_id=fip['tenant_id'], - resource_id=fip['id'], - resource_metadata=self.extract_metadata(fip) - ) diff --git a/ceilometer/network/notifications.py b/ceilometer/network/notifications.py deleted file mode 100644 index 6a196abe..00000000 --- a/ceilometer/network/notifications.py +++ /dev/null @@ -1,258 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Handler for producing network counter messages from Neutron notification - events. - -""" - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('neutron_control_exchange', - default='neutron', - help="Exchange name for Neutron notifications."), -] - -cfg.CONF.register_opts(OPTS) - - -class NetworkNotificationBase(plugin_base.NotificationBase): - - resource_name = None - - @property - def event_types(self): - return [ - # NOTE(flwang): When the *.create.start notification sending, - # there is no resource id assigned by Neutron yet. So we ignore - # the *.create.start notification for now and only listen the - # *.create.end to make sure the resource id is existed. - '%s.create.end' % self.resource_name, - '%s.update.*' % self.resource_name, - '%s.exists' % self.resource_name, - # FIXME(dhellmann): Neutron delete notifications do - # not include the same metadata as the other messages, - # so we ignore them for now. This isn't ideal, since - # it may mean we miss charging for some amount of time, - # but it is better than throwing away the existing - # metadata for a resource when it is deleted. - # '%s.delete.start' % (self.resource_name), - ] - - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - This sequence is defining the exchange and topics to be connected for - this plugin. - """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.neutron_control_exchange) - for topic in self.get_notification_topics(conf)] - - def process_notification(self, message): - counter_name = getattr(self, 'counter_name', self.resource_name) - unit_value = getattr(self, 'unit', self.resource_name) - - resource = message['payload'].get(self.resource_name) - if resource: - # NOTE(liusheng): In %s.update.start notifications, the id is in - # message['payload'] instead of resource itself. - if message['event_type'].endswith('update.start'): - resource['id'] = message['payload']['id'] - resources = [resource] - else: - resources = message['payload'].get(self.resource_name + 's', []) - - resource_message = message.copy() - for resource in resources: - resource_message['payload'] = resource - yield sample.Sample.from_notification( - name=counter_name, - type=sample.TYPE_GAUGE, - unit=unit_value, - volume=1, - user_id=resource_message['_context_user_id'], - project_id=resource_message['_context_tenant_id'], - resource_id=resource['id'], - message=resource_message) - event_type_split = resource_message['event_type'].split('.') - if len(event_type_split) > 2: - yield sample.Sample.from_notification( - name=counter_name - + "." + event_type_split[1], - type=sample.TYPE_DELTA, - unit=unit_value, - volume=1, - user_id=resource_message['_context_user_id'], - project_id=resource_message['_context_tenant_id'], - resource_id=resource['id'], - message=resource_message) - - -class Network(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron network notifications. - - Handle network.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'network' - - -class Subnet(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle subnet.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'subnet' - - -class Port(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. 
- - Handle port.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'port' - - -class Router(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle router.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'router' - - -class FloatingIP(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle floatingip.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'floatingip' - counter_name = 'ip.floating' - unit = 'ip' - - -class Pool(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle pool.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'pool' - counter_name = 'network.services.lb.pool' - - -class Vip(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle vip.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'vip' - counter_name = 'network.services.lb.vip' - - -class Member(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle member.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'member' - counter_name = 'network.services.lb.member' - - -class HealthMonitor(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle health_monitor.{create.end|update.*|exists} notifications - from neutron. - """ - resource_name = 'health_monitor' - counter_name = 'network.services.lb.health_monitor' - - -class Firewall(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle firewall.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'firewall' - counter_name = 'network.services.firewall' - - -class FirewallPolicy(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle firewall_policy.{create.end|update.*|exists} notifications - from neutron. - """ - resource_name = 'firewall_policy' - counter_name = 'network.services.firewall.policy' - - -class FirewallRule(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle firewall_rule.{create.end|update.*|exists} notifications - from neutron. - """ - resource_name = 'firewall_rule' - counter_name = 'network.services.firewall.rule' - - -class VPNService(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle vpnservice.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'vpnservice' - counter_name = 'network.services.vpn' - - -class IPSecPolicy(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle pool.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'ipsecpolicy' - counter_name = 'network.services.vpn.ipsecpolicy' - - -class IKEPolicy(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle ikepolicy.{create.end|update.*|exists} notifications from neutron. 
- """ - resource_name = 'ikepolicy' - counter_name = 'network.services.vpn.ikepolicy' - - -class IPSecSiteConnection(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle ipsec_site_connection.{create.end|update.*|exists} - notifications from neutron. - """ - resource_name = 'ipsec_site_connection' - counter_name = 'network.services.vpn.connections' diff --git a/ceilometer/network/services/__init__.py b/ceilometer/network/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/network/services/base.py b/ceilometer/network/services/base.py deleted file mode 100644 index 4aa666bd..00000000 --- a/ceilometer/network/services/base.py +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.agent import plugin_base - - -# status map for converting metric status to volume int -STATUS = { - 'inactive': 0, - 'active': 1, - 'pending_create': 2, - 'down': 3, - 'created': 4, - 'pending_update': 5, - 'pending_delete': 6, - 'error': 7, -} - - -class BaseServicesPollster(plugin_base.PollsterBase): - - FIELDS = [] - - @staticmethod - def _iter_cache(cache, meter_name, method): - if meter_name not in cache: - cache[meter_name] = list(method()) - return iter(cache[meter_name]) - - def extract_metadata(self, metric): - return dict((k, metric[k]) for k in self.FIELDS) - - @staticmethod - def get_status_id(value): - status = value.lower() - return STATUS.get(status, -1) diff --git a/ceilometer/network/services/discovery.py b/ceilometer/network/services/discovery.py deleted file mode 100644 index f20af9d4..00000000 --- a/ceilometer/network/services/discovery.py +++ /dev/null @@ -1,118 +0,0 @@ -# -# Copyright (c) 2014 Cisco Systems, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from ceilometer.agent import plugin_base -from ceilometer import neutron_client - - -class _BaseServicesDiscovery(plugin_base.DiscoveryBase): - KEYSTONE_REQUIRED_FOR_SERVICE = 'neutron' - - def __init__(self): - super(_BaseServicesDiscovery, self).__init__() - self.neutron_cli = neutron_client.Client() - - -class LBPoolsDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - pools = self.neutron_cli.pool_get_all() - return [i for i in pools - if i.get('status') != 'error'] - - -class LBVipsDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - vips = self.neutron_cli.vip_get_all() - return [i for i in vips - if i.get('status', None) != 'error'] - - -class LBMembersDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - members = self.neutron_cli.member_get_all() - return [i for i in members - if i.get('status', None) != 'error'] - - -class LBListenersDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover load balancer listener resources to monitor.""" - - listeners = self.neutron_cli.list_listener() - return [i for i in listeners - if i.get('operating_status', None) != 'error'] - - -class LBLoadBalancersDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover load balancer resources to monitor.""" - - loadbalancers = self.neutron_cli.list_loadbalancer() - return [i for i in loadbalancers - if i.get('operating_status', None) != 'error'] - - -class LBHealthMonitorsDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - probes = self.neutron_cli.health_monitor_get_all() - return probes - - -class VPNServicesDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - vpnservices = self.neutron_cli.vpn_get_all() - return [i for i in vpnservices - if i.get('status', None) != 'error'] - - -class IPSecConnectionsDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - conns = self.neutron_cli.ipsec_site_connections_get_all() - return conns - - -class FirewallDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - fw = self.neutron_cli.firewall_get_all() - return [i for i in fw - if i.get('status', None) != 'error'] - - -class FirewallPolicyDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - return self.neutron_cli.fw_policy_get_all() - - -class FloatingIPDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover floating IP resources to monitor.""" - - return self.neutron_cli.fip_get_all() diff --git a/ceilometer/network/services/fwaas.py b/ceilometer/network/services/fwaas.py deleted file mode 100644 index 7b827613..00000000 --- a/ceilometer/network/services/fwaas.py +++ /dev/null @@ -1,94 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -from ceilometer.i18n import _ -from ceilometer.network.services import base -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class FirewallPollster(base.BaseServicesPollster): - """Pollster to capture firewalls status samples.""" - - FIELDS = ['admin_state_up', - 'description', - 'name', - 'status', - 'firewall_policy_id', - ] - - @property - def default_discovery(self): - return 'fw_services' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for fw in resources: - LOG.debug("Firewall : %s" % fw) - status = self.get_status_id(fw['status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on fw %(id)s," - "skipping sample") % {'stat': fw['status'], - 'id': fw['id']}) - continue - - yield sample.Sample( - name='network.services.firewall', - type=sample.TYPE_GAUGE, - unit='firewall', - volume=status, - user_id=None, - project_id=fw['tenant_id'], - resource_id=fw['id'], - resource_metadata=self.extract_metadata(fw) - ) - - -class FirewallPolicyPollster(base.BaseServicesPollster): - """Pollster to capture firewall policy samples.""" - - FIELDS = ['name', - 'description', - 'name', - 'firewall_rules', - 'shared', - 'audited', - ] - - @property - def default_discovery(self): - return 'fw_policy' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for fw in resources: - LOG.debug("Firewall Policy: %s" % fw) - - yield sample.Sample( - name='network.services.firewall.policy', - type=sample.TYPE_GAUGE, - unit='firewall_policy', - volume=1, - user_id=None, - project_id=fw['tenant_id'], - resource_id=fw['id'], - resource_metadata=self.extract_metadata(fw) - ) diff --git a/ceilometer/network/services/lbaas.py b/ceilometer/network/services/lbaas.py deleted file mode 100644 index 5f33d22c..00000000 --- a/ceilometer/network/services/lbaas.py +++ /dev/null @@ -1,464 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import abc -import collections - -from oslo_config import cfg -from oslo_log import log -import six - -from ceilometer.i18n import _ -from ceilometer.network.services import base -from ceilometer import neutron_client -from ceilometer import sample - -LOG = log.getLogger(__name__) - -LBStatsData = collections.namedtuple( - 'LBStats', - ['active_connections', 'total_connections', 'bytes_in', 'bytes_out'] -) - -LOAD_BALANCER_STATUS_V2 = { - 'offline': 0, - 'online': 1, - 'no_monitor': 3, - 'error': 4, - 'degraded': 5 -} - - -class BaseLBPollster(base.BaseServicesPollster): - """Base Class for Load Balancer pollster""" - - def __init__(self): - super(BaseLBPollster, self).__init__() - self.lb_version = cfg.CONF.service_types.neutron_lbaas_version - - def get_load_balancer_status_id(self, value): - if self.lb_version == 'v1': - resource_status = self.get_status_id(value) - elif self.lb_version == 'v2': - status = value.lower() - resource_status = LOAD_BALANCER_STATUS_V2.get(status, -1) - return resource_status - - -class LBPoolPollster(BaseLBPollster): - """Pollster to capture Load Balancer pool status samples.""" - - FIELDS = ['admin_state_up', - 'description', - 'lb_method', - 'name', - 'protocol', - 'provider', - 'status', - 'status_description', - 'subnet_id', - 'vip_id' - ] - - @property - def default_discovery(self): - return 'lb_pools' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for pool in resources: - LOG.debug("Load Balancer Pool : %s" % pool) - status = self.get_load_balancer_status_id(pool['status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on pool " - "%(id)s, skipping sample") - % {'stat': pool['status'], 'id': pool['id']}) - continue - - yield sample.Sample( - name='network.services.lb.pool', - type=sample.TYPE_GAUGE, - unit='pool', - volume=status, - user_id=None, - project_id=pool['tenant_id'], - resource_id=pool['id'], - resource_metadata=self.extract_metadata(pool) - ) - - -class LBVipPollster(base.BaseServicesPollster): - """Pollster to capture Load Balancer Vip status samples.""" - - FIELDS = ['admin_state_up', - 'address', - 'connection_limit', - 'description', - 'name', - 'pool_id', - 'port_id', - 'protocol', - 'protocol_port', - 'status', - 'status_description', - 'subnet_id', - 'session_persistence', - ] - - @property - def default_discovery(self): - return 'lb_vips' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for vip in resources: - LOG.debug("Load Balancer Vip : %s" % vip) - status = self.get_status_id(vip['status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on vip " - "%(id)s, skipping sample") - % {'stat': vip['status'], 'id': vip['id']}) - continue - - yield sample.Sample( - name='network.services.lb.vip', - type=sample.TYPE_GAUGE, - unit='vip', - volume=status, - user_id=None, - project_id=vip['tenant_id'], - resource_id=vip['id'], - resource_metadata=self.extract_metadata(vip) - ) - - -class LBMemberPollster(BaseLBPollster): - """Pollster to capture Load Balancer Member status samples.""" - - FIELDS = ['admin_state_up', - 'address', - 'pool_id', - 'protocol_port', - 'status', - 'status_description', - 'weight', - ] - - @property - def default_discovery(self): - return 'lb_members' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for member in resources: - LOG.debug("Load Balancer Member : %s" % member) - 
status = self.get_load_balancer_status_id(member['status']) - if status == -1: - LOG.warning(_("Unknown status %(stat)s received on member " - "%(id)s, skipping sample") - % {'stat': member['status'], 'id': member['id']}) - continue - yield sample.Sample( - name='network.services.lb.member', - type=sample.TYPE_GAUGE, - unit='member', - volume=status, - user_id=None, - project_id=member['tenant_id'], - resource_id=member['id'], - resource_metadata=self.extract_metadata(member) - ) - - -class LBHealthMonitorPollster(base.BaseServicesPollster): - """Pollster to capture Load Balancer Health probes status samples.""" - - FIELDS = ['admin_state_up', - 'delay', - 'max_retries', - 'pools', - 'timeout', - 'type' - ] - - @property - def default_discovery(self): - return 'lb_health_probes' - - def get_samples(self, manager, cache, resources): - for probe in resources: - LOG.debug("Load Balancer Health probe : %s" % probe) - yield sample.Sample( - name='network.services.lb.health_monitor', - type=sample.TYPE_GAUGE, - unit='health_monitor', - volume=1, - user_id=None, - project_id=probe['tenant_id'], - resource_id=probe['id'], - resource_metadata=self.extract_metadata(probe) - ) - - -@six.add_metaclass(abc.ABCMeta) -class _LBStatsPollster(base.BaseServicesPollster): - """Base Statistics pollster. - - It is capturing the statistics info and yielding samples for connections - and bandwidth. - """ - - def __init__(self): - super(_LBStatsPollster, self).__init__() - self.client = neutron_client.Client() - self.lb_version = cfg.CONF.service_types.neutron_lbaas_version - - @staticmethod - def make_sample_from_pool(pool, name, type, unit, volume, - resource_metadata=None): - if not resource_metadata: - resource_metadata = {} - return sample.Sample( - name=name, - type=type, - unit=unit, - volume=volume, - user_id=None, - project_id=pool['tenant_id'], - resource_id=pool['id'], - resource_metadata=resource_metadata, - ) - - def _populate_stats_cache(self, pool_id, cache): - i_cache = cache.setdefault("lbstats", {}) - if pool_id not in i_cache: - stats = self.client.pool_stats(pool_id)['stats'] - i_cache[pool_id] = LBStatsData( - active_connections=stats['active_connections'], - total_connections=stats['total_connections'], - bytes_in=stats['bytes_in'], - bytes_out=stats['bytes_out'], - ) - return i_cache[pool_id] - - def _populate_stats_cache_v2(self, loadbalancer_id, cache): - i_cache = cache.setdefault("lbstats", {}) - if loadbalancer_id not in i_cache: - stats = self.client.get_loadbalancer_stats(loadbalancer_id) - i_cache[loadbalancer_id] = LBStatsData( - active_connections=stats['active_connections'], - total_connections=stats['total_connections'], - bytes_in=stats['bytes_in'], - bytes_out=stats['bytes_out'], - ) - return i_cache[loadbalancer_id] - - @property - def default_discovery(self): - discovery_resource = 'lb_pools' - if self.lb_version == 'v2': - discovery_resource = 'lb_loadbalancers' - return discovery_resource - - @abc.abstractmethod - def _get_sample(pool, c_data): - """Return one Sample.""" - - def get_samples(self, manager, cache, resources): - if self.lb_version == 'v1': - for pool in resources: - try: - c_data = self._populate_stats_cache(pool['id'], cache) - yield self._get_sample(pool, c_data) - except Exception: - LOG.exception(_('Ignoring pool %(pool_id)s'), - {'pool_id': pool['id']}) - elif self.lb_version == 'v2': - for loadbalancer in resources: - try: - c_data = self._populate_stats_cache_v2(loadbalancer['id'], - cache) - yield self._get_sample(loadbalancer, c_data) - except 
Exception: - LOG.exception( - _('Ignoring ' - 'loadbalancer %(loadbalancer_id)s'), - {'loadbalancer_id': loadbalancer['id']}) - - -class LBActiveConnectionsPollster(_LBStatsPollster): - """Pollster to capture Active Load Balancer connections.""" - - @staticmethod - def _get_sample(pool, data): - return make_sample_from_pool( - pool, - name='network.services.lb.active.connections', - type=sample.TYPE_GAUGE, - unit='connection', - volume=data.active_connections, - ) - - -class LBTotalConnectionsPollster(_LBStatsPollster): - """Pollster to capture Total Load Balancer connections.""" - - @staticmethod - def _get_sample(pool, data): - return make_sample_from_pool( - pool, - name='network.services.lb.total.connections', - type=sample.TYPE_CUMULATIVE, - unit='connection', - volume=data.total_connections, - ) - - -class LBBytesInPollster(_LBStatsPollster): - """Pollster to capture incoming bytes.""" - - @staticmethod - def _get_sample(pool, data): - return make_sample_from_pool( - pool, - name='network.services.lb.incoming.bytes', - type=sample.TYPE_GAUGE, - unit='B', - volume=data.bytes_in, - ) - - -class LBBytesOutPollster(_LBStatsPollster): - """Pollster to capture outgoing bytes.""" - - @staticmethod - def _get_sample(pool, data): - return make_sample_from_pool( - pool, - name='network.services.lb.outgoing.bytes', - type=sample.TYPE_GAUGE, - unit='B', - volume=data.bytes_out, - ) - - -def make_sample_from_pool(pool, name, type, unit, volume, - resource_metadata=None): - resource_metadata = resource_metadata or {} - - return sample.Sample( - name=name, - type=type, - unit=unit, - volume=volume, - user_id=None, - project_id=pool['tenant_id'], - resource_id=pool['id'], - resource_metadata=resource_metadata, - ) - - -class LBListenerPollster(BaseLBPollster): - """Pollster to capture Load Balancer Listener status samples.""" - - FIELDS = ['admin_state_up', - 'connection_limit', - 'description', - 'name', - 'default_pool_id', - 'protocol', - 'protocol_port', - 'operating_status', - 'loadbalancers' - ] - - @property - def default_discovery(self): - return 'lb_listeners' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for listener in resources: - LOG.debug("Load Balancer Listener : %s" % listener) - status = self.get_load_balancer_status_id( - listener['operating_status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on listener " - "%(id)s, skipping sample") - % {'stat': listener['operating_status'], - 'id': listener['id']}) - continue - - yield sample.Sample( - name='network.services.lb.listener', - type=sample.TYPE_GAUGE, - unit='listener', - volume=status, - user_id=None, - project_id=listener['tenant_id'], - resource_id=listener['id'], - resource_metadata=self.extract_metadata(listener) - ) - - -class LBLoadBalancerPollster(BaseLBPollster): - """Pollster to capture Load Balancer status samples.""" - - FIELDS = ['admin_state_up', - 'description', - 'vip_address', - 'listeners', - 'name', - 'vip_subnet_id', - 'operating_status', - ] - - @property - def default_discovery(self): - return 'lb_loadbalancers' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for loadbalancer in resources: - LOG.debug("Load Balancer: %s" % loadbalancer) - status = self.get_load_balancer_status_id( - loadbalancer['operating_status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received " - "on Load Balancer " - "%(id)s, skipping 
sample") - % {'stat': loadbalancer['operating_status'], - 'id': loadbalancer['id']}) - continue - - yield sample.Sample( - name='network.services.lb.loadbalancer', - type=sample.TYPE_GAUGE, - unit='loadbalancer', - volume=status, - user_id=None, - project_id=loadbalancer['tenant_id'], - resource_id=loadbalancer['id'], - resource_metadata=self.extract_metadata(loadbalancer) - ) diff --git a/ceilometer/network/services/vpnaas.py b/ceilometer/network/services/vpnaas.py deleted file mode 100644 index 948613d4..00000000 --- a/ceilometer/network/services/vpnaas.py +++ /dev/null @@ -1,104 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -from ceilometer.i18n import _ -from ceilometer.network.services import base -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class VPNServicesPollster(base.BaseServicesPollster): - """Pollster to capture VPN status samples.""" - - FIELDS = ['admin_state_up', - 'description', - 'name', - 'status', - 'subnet_id', - 'router_id' - ] - - @property - def default_discovery(self): - return 'vpn_services' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for vpn in resources: - LOG.debug("VPN : %s" % vpn) - status = self.get_status_id(vpn['status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on vpn " - "%(id)s, skipping sample") - % {'stat': vpn['status'], 'id': vpn['id']}) - continue - - yield sample.Sample( - name='network.services.vpn', - type=sample.TYPE_GAUGE, - unit='vpnservice', - volume=status, - user_id=None, - project_id=vpn['tenant_id'], - resource_id=vpn['id'], - resource_metadata=self.extract_metadata(vpn) - ) - - -class IPSecConnectionsPollster(base.BaseServicesPollster): - """Pollster to capture vpn ipsec connections status samples.""" - - FIELDS = ['name', - 'description', - 'peer_address', - 'peer_id', - 'peer_cidrs', - 'psk', - 'initiator', - 'ikepolicy_id', - 'dpd', - 'ipsecpolicy_id', - 'vpnservice_id', - 'mtu', - 'admin_state_up', - 'status', - 'tenant_id' - ] - - @property - def default_discovery(self): - return 'ipsec_connections' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for conn in resources: - LOG.debug("IPSec Connection Info: %s" % conn) - - yield sample.Sample( - name='network.services.vpn.connections', - type=sample.TYPE_GAUGE, - unit='ipsec_site_connection', - volume=1, - user_id=None, - project_id=conn['tenant_id'], - resource_id=conn['id'], - resource_metadata=self.extract_metadata(conn) - ) diff --git a/ceilometer/network/statistics/__init__.py b/ceilometer/network/statistics/__init__.py deleted file mode 100644 index f45ed710..00000000 --- a/ceilometer/network/statistics/__init__.py +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_utils import netutils -import six -from six.moves.urllib import parse as urlparse -from stevedore import driver as _driver - -from ceilometer.agent import plugin_base -from ceilometer import sample - - -@six.add_metaclass(abc.ABCMeta) -class _Base(plugin_base.PollsterBase): - - NAMESPACE = 'network.statistics.drivers' - drivers = {} - - @property - def default_discovery(self): - # this signifies that the pollster gets its resources from - # elsewhere, in this case they're manually listed in the - # pipeline configuration - return None - - @abc.abstractproperty - def meter_name(self): - """Return a Meter Name.""" - - @abc.abstractproperty - def meter_type(self): - """Return a Meter Type.""" - - @abc.abstractproperty - def meter_unit(self): - """Return a Meter Unit.""" - - @staticmethod - def _parse_my_resource(resource): - - parse_url = netutils.urlsplit(resource) - - params = urlparse.parse_qs(parse_url.query) - parts = urlparse.ParseResult(parse_url.scheme, - parse_url.netloc, - parse_url.path, - None, - None, - None) - return parts, params - - @staticmethod - def get_driver(scheme): - if scheme not in _Base.drivers: - _Base.drivers[scheme] = _driver.DriverManager(_Base.NAMESPACE, - scheme).driver() - return _Base.drivers[scheme] - - def get_samples(self, manager, cache, resources): - resources = resources or [] - for resource in resources: - parse_url, params = self._parse_my_resource(resource) - ext = self.get_driver(parse_url.scheme) - sample_data = ext.get_sample_data(self.meter_name, - parse_url, - params, - cache) - - for data in sample_data or []: - if data is None: - continue - if not isinstance(data, list): - data = [data] - for (volume, resource_id, - resource_metadata) in data: - - yield sample.Sample( - name=self.meter_name, - type=self.meter_type, - unit=self.meter_unit, - volume=volume, - user_id=None, - project_id=None, - resource_id=resource_id, - resource_metadata=resource_metadata - ) diff --git a/ceilometer/network/statistics/driver.py b/ceilometer/network/statistics/driver.py deleted file mode 100644 index 0eb82550..00000000 --- a/ceilometer/network/statistics/driver.py +++ /dev/null @@ -1,29 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
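The statistics._Base pollster above gets its resources from URLs listed manually in the pipeline configuration: the URL scheme selects the driver (loaded from the 'network.statistics.drivers' stevedore namespace) and the query string carries driver options. A small Python 3 sketch of that split using only the standard library (the removed code goes through oslo_utils.netutils and six.moves.urllib; host, port and parameters here are placeholders):

    from urllib import parse as urlparse

    # Example resource URL of the kind listed in pipeline.yaml.
    resource = ('opendaylight://127.0.0.1:8080/controller/nb/v2'
                '?container_name=default&auth=basic')

    split = urlparse.urlsplit(resource)
    params = urlparse.parse_qs(split.query)

    assert split.scheme == 'opendaylight'            # selects the driver
    assert params['container_name'] == ['default']   # values are lists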
- -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class Driver(object): - - @abc.abstractmethod - def get_sample_data(self, meter_name, parse_url, params, cache): - """Return volume, resource_id, resource_metadata, timestamp in tuple. - - If not implemented for meter_name, returns None - """ diff --git a/ceilometer/network/statistics/flow.py b/ceilometer/network/statistics/flow.py deleted file mode 100644 index b23b6424..00000000 --- a/ceilometer/network/statistics/flow.py +++ /dev/null @@ -1,53 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from ceilometer.network import statistics -from ceilometer import sample - - -class FlowPollster(statistics._Base): - - meter_name = 'switch.flow' - meter_type = sample.TYPE_GAUGE - meter_unit = 'flow' - - -class FlowPollsterDurationSeconds(statistics._Base): - - meter_name = 'switch.flow.duration_seconds' - meter_type = sample.TYPE_GAUGE - meter_unit = 's' - - -class FlowPollsterDurationNanoseconds(statistics._Base): - - meter_name = 'switch.flow.duration_nanoseconds' - meter_type = sample.TYPE_GAUGE - meter_unit = 'ns' - - -class FlowPollsterPackets(statistics._Base): - - meter_name = 'switch.flow.packets' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class FlowPollsterBytes(statistics._Base): - - meter_name = 'switch.flow.bytes' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'B' diff --git a/ceilometer/network/statistics/opencontrail/__init__.py b/ceilometer/network/statistics/opencontrail/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/network/statistics/opencontrail/client.py b/ceilometer/network/statistics/opencontrail/client.py deleted file mode 100644 index a85512ff..00000000 --- a/ceilometer/network/statistics/opencontrail/client.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
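The Driver base class above only fixes a contract: get_sample_data() returns an iterable of (volume, resource_id, resource_metadata) tuples, as unpacked by statistics._Base.get_samples(), or None when the driver cannot produce the requested meter. A hypothetical minimal driver, purely to make that contract concrete (class name and metadata are invented):

    class EchoDriver(object):
        # Toy driver honouring the statistics driver contract.

        def get_sample_data(self, meter_name, parse_url, params, cache):
            if meter_name != 'switch':
                # meter not supported by this driver
                return None
            # one switch, volume 1, identified by the URL's host:port
            return [(1, parse_url.netloc, {'controller': 'example'})]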
- -import copy - -from oslo_config import cfg -from oslo_log import log -import requests -import six -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _ - - -CONF = cfg.CONF -CONF.import_opt('http_timeout', 'ceilometer.service') - - -LOG = log.getLogger(__name__) - - -class OpencontrailAPIFailed(Exception): - pass - - -class AnalyticsAPIBaseClient(object): - """Opencontrail Base Statistics REST API Client.""" - - def __init__(self, endpoint, data): - self.endpoint = endpoint - self.data = data or {} - - def request(self, path, fqdn_uuid, data=None): - req_data = copy.copy(self.data) - if data: - req_data.update(data) - - req_params = self._get_req_params(data=req_data) - - url = urlparse.urljoin(self.endpoint, path + fqdn_uuid) - self._log_req(url, req_params) - resp = requests.get(url, **req_params) - self._log_res(resp) - - if resp.status_code != 200: - raise OpencontrailAPIFailed( - _('Opencontrail API returned %(status)s %(reason)s') % - {'status': resp.status_code, 'reason': resp.reason}) - - return resp - - def _get_req_params(self, data=None): - req_params = { - 'headers': { - 'Accept': 'application/json' - }, - 'data': data, - 'allow_redirects': False, - 'timeout': CONF.http_timeout, - } - - return req_params - - @staticmethod - def _log_req(url, req_params): - if not CONF.debug: - return - - curl_command = ['REQ: curl -i -X GET '] - - params = [] - for name, value in six.iteritems(req_params['data']): - params.append("%s=%s" % (name, value)) - - curl_command.append('"%s?%s" ' % (url, '&'.join(params))) - - for name, value in six.iteritems(req_params['headers']): - curl_command.append('-H "%s: %s" ' % (name, value)) - - LOG.debug(''.join(curl_command)) - - @staticmethod - def _log_res(resp): - if not CONF.debug: - return - - dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, - resp.status_code, - resp.reason)] - dump.extend('%s: %s\n' % (k, v) - for k, v in six.iteritems(resp.headers)) - dump.append('\n') - if resp.content: - dump.extend([resp.content, '\n']) - - LOG.debug(''.join(dump)) - - -class NetworksAPIClient(AnalyticsAPIBaseClient): - """Opencontrail Statistics REST API Client.""" - - def get_vm_statistics(self, fqdn_uuid, data=None): - """Get statistics of a virtual-machines. - - URL: - {endpoint}/analytics/uves/virtual-machine/{fqdn_uuid} - """ - - path = '/analytics/uves/virtual-machine/' - resp = self.request(path, fqdn_uuid, data) - - return resp.json() - - -class Client(object): - - def __init__(self, endpoint, data=None): - self.networks = NetworksAPIClient(endpoint, data) diff --git a/ceilometer/network/statistics/opencontrail/driver.py b/ceilometer/network/statistics/opencontrail/driver.py deleted file mode 100644 index 5cb32dee..00000000 --- a/ceilometer/network/statistics/opencontrail/driver.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
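For context on the Opencontrail client removed below: it is a thin REST wrapper, and the only entry point the driver uses is the virtual-machine UVE statistics call. A usage sketch as the module stood before this change (the endpoint is a placeholder; '*' is the wildcard fqdn_uuid the driver defaults to):

    from ceilometer.network.statistics.opencontrail import client

    # GET {endpoint}/analytics/uves/virtual-machine/{fqdn_uuid}
    c = client.Client('http://127.0.0.1:8081')
    stats = c.networks.get_vm_statistics('*')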
- -import re - -from six.moves.urllib import parse as urlparse - -from ceilometer.network.statistics import driver -from ceilometer.network.statistics.opencontrail import client -from ceilometer import neutron_client - - -class OpencontrailDriver(driver.Driver): - """Driver of network analytics of Opencontrail. - - This driver uses resources in "pipeline.yaml". - - Resource requires below conditions: - - * resource is url - * scheme is "opencontrail" - - This driver can be configured via query parameters. - Supported parameters: - - * scheme: - The scheme of request url to Opencontrail Analytics endpoint. - (default "http") - * virtual_network - Specify the virtual network. - (default None) - * fqdn_uuid: - Specify the VM fqdn UUID. - (default "*") - * resource: - The resource on which the counters are retrieved. - (default "if_stats_list") - - * fip_stats_list: - Traffic on floating ips - * if_stats_list: - Traffic on VM interfaces - - e.g.:: - - opencontrail://localhost:8081/?resource=fip_stats_list& - virtual_network=default-domain:openstack:public - """ - @staticmethod - def _prepare_cache(endpoint, params, cache): - - if 'network.statistics.opencontrail' in cache: - return cache['network.statistics.opencontrail'] - - data = { - 'o_client': client.Client(endpoint), - 'n_client': neutron_client.Client() - } - - cache['network.statistics.opencontrail'] = data - - return data - - def get_sample_data(self, meter_name, parse_url, params, cache): - - parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], - parse_url.netloc, - parse_url.path, - None, - None, - None) - endpoint = urlparse.urlunparse(parts) - - iter = self._get_iter(meter_name) - if iter is None: - # The extractor for this meter is not implemented or the API - # doesn't have method to get this meter. - return - - extractor = self._get_extractor(meter_name) - if extractor is None: - # The extractor for this meter is not implemented or the API - # doesn't have method to get this meter. 
- return - - data = self._prepare_cache(endpoint, params, cache) - - ports = data['n_client'].port_get_all() - ports_map = dict((port['id'], port) for port in ports) - - resource = params.get('resource', ['if_stats_list'])[0] - fqdn_uuid = params.get('fqdn_uuid', ['*'])[0] - virtual_network = params.get('virtual_network', [None])[0] - - statistics = data['o_client'].networks.get_vm_statistics(fqdn_uuid) - if not statistics: - return - - for value in statistics['value']: - for sample in iter(extractor, value, ports_map, - resource, virtual_network): - if sample is not None: - yield sample - - def _get_iter(self, meter_name): - if meter_name.startswith('switch.port'): - return self._iter_port - - def _get_extractor(self, meter_name): - method_name = '_' + meter_name.replace('.', '_') - return getattr(self, method_name, None) - - @staticmethod - def _explode_name(fq_name): - m = re.match( - "(?P[^:]+):(?P.+):(?P[^:]+)", - fq_name) - if not m: - return - return m.group('domain'), m.group('project'), m.group('port_id') - - @staticmethod - def _get_resource_meta(ports_map, stat, resource, network): - if resource == 'fip_stats_list': - if network and (network != stat['virtual_network']): - return - name = stat['iface_name'] - else: - name = stat['name'] - - domain, project, port_id = OpencontrailDriver._explode_name(name) - port = ports_map.get(port_id) - - tenant_id = None - network_id = None - device_owner_id = None - - if port: - tenant_id = port['tenant_id'] - network_id = port['network_id'] - device_owner_id = port['device_id'] - - resource_meta = {'device_owner_id': device_owner_id, - 'network_id': network_id, - 'project_id': tenant_id, - 'project': project, - 'resource': resource, - 'domain': domain} - - return port_id, resource_meta - - @staticmethod - def _iter_port(extractor, value, ports_map, resource, - virtual_network=None): - stats = value['value']['UveVirtualMachineAgent'].get(resource, []) - for stat in stats: - if type(stat) is list: - for sub_stats, node in zip(*[iter(stat)] * 2): - for sub_stat in sub_stats: - result = OpencontrailDriver._get_resource_meta( - ports_map, sub_stat, resource, virtual_network) - if not result: - continue - port_id, resource_meta = result - yield extractor(sub_stat, port_id, resource_meta) - else: - result = OpencontrailDriver._get_resource_meta( - ports_map, stat, resource, virtual_network) - if not result: - continue - port_id, resource_meta = result - yield extractor(stat, port_id, resource_meta) - - @staticmethod - def _switch_port_receive_packets(statistic, resource_id, resource_meta): - return int(statistic['in_pkts']), resource_id, resource_meta - - @staticmethod - def _switch_port_transmit_packets(statistic, resource_id, resource_meta): - return int(statistic['out_pkts']), resource_id, resource_meta - - @staticmethod - def _switch_port_receive_bytes(statistic, resource_id, resource_meta): - return int(statistic['in_bytes']), resource_id, resource_meta - - @staticmethod - def _switch_port_transmit_bytes(statistic, resource_id, resource_meta): - return int(statistic['out_bytes']), resource_id, resource_meta diff --git a/ceilometer/network/statistics/opendaylight/__init__.py b/ceilometer/network/statistics/opendaylight/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/network/statistics/opendaylight/client.py b/ceilometer/network/statistics/opendaylight/client.py deleted file mode 100644 index 21c9298b..00000000 --- a/ceilometer/network/statistics/opendaylight/client.py +++ /dev/null @@ -1,240 +0,0 @@ -# -# 
Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_config import cfg -from oslo_log import log -import requests -from requests import auth -import six - -from ceilometer.i18n import _ - - -CONF = cfg.CONF -CONF.import_opt('http_timeout', 'ceilometer.service') - - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class _Base(object): - """Base class of OpenDaylight REST APIs Clients.""" - - @abc.abstractproperty - def base_url(self): - """Returns base url for each REST API.""" - - def __init__(self, client): - self.client = client - - def request(self, path, container_name): - return self.client.request(self.base_url + path, container_name) - - -class OpenDaylightRESTAPIFailed(Exception): - pass - - -class StatisticsAPIClient(_Base): - """OpenDaylight Statistics REST API Client - - Base URL: - {endpoint}/statistics/{containerName} - """ - - base_url = '/statistics/%(container_name)s' - - def get_port_statistics(self, container_name): - """Get port statistics - - URL: - {Base URL}/port - """ - return self.request('/port', container_name) - - def get_flow_statistics(self, container_name): - """Get flow statistics - - URL: - {Base URL}/flow - """ - return self.request('/flow', container_name) - - def get_table_statistics(self, container_name): - """Get table statistics - - URL: - {Base URL}/table - """ - return self.request('/table', container_name) - - -class TopologyAPIClient(_Base): - """OpenDaylight Topology REST API Client - - Base URL: - {endpoint}/topology/{containerName} - """ - - base_url = '/topology/%(container_name)s' - - def get_topology(self, container_name): - """Get topology - - URL: - {Base URL} - """ - return self.request('', container_name) - - def get_user_links(self, container_name): - """Get user links - - URL: - {Base URL}/userLinks - """ - return self.request('/userLinks', container_name) - - -class SwitchManagerAPIClient(_Base): - """OpenDaylight Switch Manager REST API Client - - Base URL: - {endpoint}/switchmanager/{containerName} - """ - - base_url = '/switchmanager/%(container_name)s' - - def get_nodes(self, container_name): - """Get node information - - URL: - {Base URL}/nodes - """ - return self.request('/nodes', container_name) - - -class HostTrackerAPIClient(_Base): - """OpenDaylight Host Tracker REST API Client - - Base URL: - {endpoint}/hosttracker/{containerName} - """ - - base_url = '/hosttracker/%(container_name)s' - - def get_active_hosts(self, container_name): - """Get active hosts information - - URL: - {Base URL}/hosts/active - """ - return self.request('/hosts/active', container_name) - - def get_inactive_hosts(self, container_name): - """Get inactive hosts information - - URL: - {Base URL}/hosts/inactive - """ - return self.request('/hosts/inactive', container_name) - - -class Client(object): - - def __init__(self, endpoint, params): - self.statistics = StatisticsAPIClient(self) - self.topology = TopologyAPIClient(self) - self.switch_manager = 
SwitchManagerAPIClient(self) - self.host_tracker = HostTrackerAPIClient(self) - - self._endpoint = endpoint - - self._req_params = self._get_req_params(params) - - @staticmethod - def _get_req_params(params): - req_params = { - 'headers': { - 'Accept': 'application/json' - }, - 'timeout': CONF.http_timeout, - } - - auth_way = params.get('auth') - if auth_way in ['basic', 'digest']: - user = params.get('user') - password = params.get('password') - - if auth_way == 'basic': - auth_class = auth.HTTPBasicAuth - else: - auth_class = auth.HTTPDigestAuth - - req_params['auth'] = auth_class(user, password) - return req_params - - def _log_req(self, url): - - curl_command = ['REQ: curl -i -X GET ', '"%s" ' % (url)] - - if 'auth' in self._req_params: - auth_class = self._req_params['auth'] - if isinstance(auth_class, auth.HTTPBasicAuth): - curl_command.append('--basic ') - else: - curl_command.append('--digest ') - - curl_command.append('--user "%s":"%s" ' % (auth_class.username, - auth_class.password)) - - for name, value in six.iteritems(self._req_params['headers']): - curl_command.append('-H "%s: %s" ' % (name, value)) - - LOG.debug(''.join(curl_command)) - - @staticmethod - def _log_res(resp): - - dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, - resp.status_code, - resp.reason)] - dump.extend('%s: %s\n' % (k, v) - for k, v in six.iteritems(resp.headers)) - dump.append('\n') - if resp.content: - dump.extend([resp.content, '\n']) - - LOG.debug(''.join(dump)) - - def _http_request(self, url): - if CONF.debug: - self._log_req(url) - resp = requests.get(url, **self._req_params) - if CONF.debug: - self._log_res(resp) - if resp.status_code // 100 != 2: - raise OpenDaylightRESTAPIFailed( - _('OpenDaylitght API returned %(status)s %(reason)s') % - {'status': resp.status_code, 'reason': resp.reason}) - - return resp.json() - - def request(self, path, container_name): - - url = self._endpoint + path % {'container_name': container_name} - return self._http_request(url) diff --git a/ceilometer/network/statistics/opendaylight/driver.py b/ceilometer/network/statistics/opendaylight/driver.py deleted file mode 100644 index bc78dbe5..00000000 --- a/ceilometer/network/statistics/opendaylight/driver.py +++ /dev/null @@ -1,448 +0,0 @@ -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
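The OpenDaylight driver that follows documents its query parameters (scheme, auth, user, password, container_name) in its docstring; the client above is what turns them into authenticated REST calls. A usage sketch matching that configuration, as the module stood before this removal (endpoint and credentials are placeholders):

    from ceilometer.network.statistics.opendaylight import client

    c = client.Client('http://127.0.0.1:8080/controller/nb/v2',
                      {'auth': 'basic', 'user': 'admin', 'password': 'admin'})
    # GET {endpoint}/statistics/default/flow with HTTP basic auth
    flows = c.statistics.get_flow_statistics('default')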
- -from oslo_log import log -import six -from six import moves -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _ -from ceilometer.network.statistics import driver -from ceilometer.network.statistics.opendaylight import client -from ceilometer import utils - - -LOG = log.getLogger(__name__) - - -def _get_properties(properties, prefix='properties'): - resource_meta = {} - if properties is not None: - for k, v in six.iteritems(properties): - value = v['value'] - key = prefix + '_' + k - if 'name' in v: - key += '_' + v['name'] - resource_meta[key] = value - return resource_meta - - -def _get_int_sample(key, statistic, resource_id, resource_meta): - if key not in statistic: - return None - return int(statistic[key]), resource_id, resource_meta - - -class OpenDayLightDriver(driver.Driver): - """Driver of network info collector from OpenDaylight. - - This driver uses resources in "pipeline.yaml". - Resource requires below conditions: - - * resource is url - * scheme is "opendaylight" - - This driver can be configured via query parameters. - Supported parameters: - - * scheme: - The scheme of request url to OpenDaylight REST API endpoint. - (default http) - * auth: - Auth strategy of http. - This parameter can be set basic and digest.(default None) - * user: - This is username that is used by auth.(default None) - * password: - This is password that is used by auth.(default None) - * container_name: - Name of container of OpenDaylight.(default "default") - This parameter allows multi values. - - e.g.:: - - opendaylight://127.0.0.1:8080/controller/nb/v2?container_name=default& - container_name=egg&auth=basic&user=admin&password=admin&scheme=http - - In this case, the driver send request to below URLs: - - http://127.0.0.1:8080/controller/nb/v2/statistics/default/flow - http://127.0.0.1:8080/controller/nb/v2/statistics/egg/flow - """ - @staticmethod - def _prepare_cache(endpoint, params, cache): - - if 'network.statistics.opendaylight' in cache: - return cache['network.statistics.opendaylight'] - - data = {} - - container_names = params.get('container_name', ['default']) - - odl_params = {} - if 'auth' in params: - odl_params['auth'] = params['auth'][0] - if 'user' in params: - odl_params['user'] = params['user'][0] - if 'password' in params: - odl_params['password'] = params['password'][0] - cs = client.Client(endpoint, odl_params) - - for container_name in container_names: - try: - container_data = {} - - # get flow statistics - container_data['flow'] = cs.statistics.get_flow_statistics( - container_name) - - # get port statistics - container_data['port'] = cs.statistics.get_port_statistics( - container_name) - - # get table statistics - container_data['table'] = cs.statistics.get_table_statistics( - container_name) - - # get topology - container_data['topology'] = cs.topology.get_topology( - container_name) - - # get switch information - container_data['switch'] = cs.switch_manager.get_nodes( - container_name) - - # get and optimize user links - # e.g. 
- # before: - # "OF|2@OF|00:00:00:00:00:00:00:02" - # after: - # { - # 'port': { - # 'type': 'OF', - # 'id': '2'}, - # 'node': { - # 'type': 'OF', - # 'id': '00:00:00:00:00:00:00:02' - # } - # } - user_links_raw = cs.topology.get_user_links(container_name) - user_links = [] - container_data['user_links'] = user_links - for user_link_row in user_links_raw['userLinks']: - user_link = {} - for k, v in six.iteritems(user_link_row): - if (k == "dstNodeConnector" or - k == "srcNodeConnector"): - port_raw, node_raw = v.split('@') - port = {} - port['type'], port['id'] = port_raw.split('|') - node = {} - node['type'], node['id'] = node_raw.split('|') - v = {'port': port, 'node': node} - user_link[k] = v - user_links.append(user_link) - - # get link status to hosts - container_data['active_hosts'] = ( - cs.host_tracker.get_active_hosts(container_name)) - container_data['inactive_hosts'] = ( - cs.host_tracker.get_inactive_hosts(container_name)) - data[container_name] = container_data - except Exception: - LOG.exception(_('Request failed to connect to OpenDaylight' - ' with NorthBound REST API')) - - cache['network.statistics.opendaylight'] = data - - return data - - def get_sample_data(self, meter_name, parse_url, params, cache): - - extractor = self._get_extractor(meter_name) - if extractor is None: - # The way to getting meter is not implemented in this driver or - # OpenDaylight REST API has not api to getting meter. - return None - - iter = self._get_iter(meter_name) - if iter is None: - # The way to getting meter is not implemented in this driver or - # OpenDaylight REST API has not api to getting meter. - return None - - parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], - parse_url.netloc, - parse_url.path, - None, - None, - None) - endpoint = urlparse.urlunparse(parts) - - data = self._prepare_cache(endpoint, params, cache) - - samples = [] - for name, value in six.iteritems(data): - for sample in iter(extractor, value): - if sample is not None: - # set controller name and container name - # to resource_metadata - sample[2]['controller'] = 'OpenDaylight' - sample[2]['container'] = name - - samples.append(sample) - - return samples - - def _get_iter(self, meter_name): - if meter_name == 'switch': - return self._iter_switch - elif meter_name.startswith('switch.flow'): - return self._iter_flow - elif meter_name.startswith('switch.table'): - return self._iter_table - elif meter_name.startswith('switch.port'): - return self._iter_port - - def _get_extractor(self, meter_name): - method_name = '_' + meter_name.replace('.', '_') - return getattr(self, method_name, None) - - @staticmethod - def _iter_switch(extractor, data): - for switch in data['switch']['nodeProperties']: - yield extractor(switch, switch['node']['id'], {}) - - @staticmethod - def _switch(statistic, resource_id, resource_meta): - - resource_meta.update(_get_properties(statistic.get('properties'))) - - return 1, resource_id, resource_meta - - @staticmethod - def _iter_port(extractor, data): - for port_statistic in data['port']['portStatistics']: - for statistic in port_statistic['portStatistic']: - resource_meta = {'port': statistic['nodeConnector']['id']} - yield extractor(statistic, port_statistic['node']['id'], - resource_meta, data) - - @staticmethod - def _switch_port(statistic, resource_id, resource_meta, data): - my_node_id = resource_id - my_port_id = statistic['nodeConnector']['id'] - - # link status from topology - edge_properties = data['topology']['edgeProperties'] - for edge_property in edge_properties: - 
edge = edge_property['edge'] - - if (edge['headNodeConnector']['node']['id'] == my_node_id and - edge['headNodeConnector']['id'] == my_port_id): - target_node = edge['tailNodeConnector'] - elif (edge['tailNodeConnector']['node']['id'] == my_node_id and - edge['tailNodeConnector']['id'] == my_port_id): - target_node = edge['headNodeConnector'] - else: - continue - - resource_meta['topology_node_id'] = target_node['node']['id'] - resource_meta['topology_node_port'] = target_node['id'] - - resource_meta.update(_get_properties( - edge_property.get('properties'), - prefix='topology')) - - break - - # link status from user links - for user_link in data['user_links']: - if (user_link['dstNodeConnector']['node']['id'] == my_node_id and - user_link['dstNodeConnector']['port']['id'] == my_port_id): - target_node = user_link['srcNodeConnector'] - elif (user_link['srcNodeConnector']['node']['id'] == my_node_id and - user_link['srcNodeConnector']['port']['id'] == my_port_id): - target_node = user_link['dstNodeConnector'] - else: - continue - - resource_meta['user_link_node_id'] = target_node['node']['id'] - resource_meta['user_link_node_port'] = target_node['port']['id'] - resource_meta['user_link_status'] = user_link['status'] - resource_meta['user_link_name'] = user_link['name'] - - break - - # link status to hosts - for hosts, status in moves.zip( - [data['active_hosts'], data['inactive_hosts']], - ['active', 'inactive']): - for host_config in hosts['hostConfig']: - if (host_config['nodeId'] != my_node_id or - host_config['nodeConnectorId'] != my_port_id): - continue - - resource_meta['host_status'] = status - for key in ['dataLayerAddress', 'vlan', 'staticHost', - 'networkAddress']: - if key in host_config: - resource_meta['host_' + key] = host_config[key] - - break - - return 1, resource_id, resource_meta - - @staticmethod - def _switch_port_receive_packets(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receivePackets', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_transmit_packets(statistic, resource_id, - resource_meta, data): - return _get_int_sample('transmitPackets', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_bytes(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveBytes', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_transmit_bytes(statistic, resource_id, - resource_meta, data): - return _get_int_sample('transmitBytes', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_drops(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveDrops', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_transmit_drops(statistic, resource_id, - resource_meta, data): - return _get_int_sample('transmitDrops', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_errors(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveErrors', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_transmit_errors(statistic, resource_id, - resource_meta, data): - return _get_int_sample('transmitErrors', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_frame_error(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveFrameError', statistic, resource_id, - resource_meta) - - @staticmethod - def 
_switch_port_receive_overrun_error(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveOverRunError', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_crc_error(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveCrcError', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_collision_count(statistic, resource_id, - resource_meta, data): - return _get_int_sample('collisionCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _iter_table(extractor, data): - for table_statistic in data['table']['tableStatistics']: - for statistic in table_statistic['tableStatistic']: - resource_meta = {'table_id': statistic['nodeTable']['id']} - yield extractor(statistic, - table_statistic['node']['id'], - resource_meta) - - @staticmethod - def _switch_table(statistic, resource_id, resource_meta): - return 1, resource_id, resource_meta - - @staticmethod - def _switch_table_active_entries(statistic, resource_id, - resource_meta): - return _get_int_sample('activeCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_table_lookup_packets(statistic, resource_id, - resource_meta): - return _get_int_sample('lookupCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_table_matched_packets(statistic, resource_id, - resource_meta): - return _get_int_sample('matchedCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _iter_flow(extractor, data): - for flow_statistic in data['flow']['flowStatistics']: - for statistic in flow_statistic['flowStatistic']: - resource_meta = {'flow_id': statistic['flow']['id'], - 'table_id': statistic['tableId']} - for key, value in utils.dict_to_keyval(statistic['flow'], - 'flow'): - resource_meta[key.replace('.', '_')] = value - yield extractor(statistic, - flow_statistic['node']['id'], - resource_meta) - - @staticmethod - def _switch_flow(statistic, resource_id, resource_meta): - return 1, resource_id, resource_meta - - @staticmethod - def _switch_flow_duration_seconds(statistic, resource_id, - resource_meta): - return _get_int_sample('durationSeconds', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_flow_duration_nanoseconds(statistic, resource_id, - resource_meta): - return _get_int_sample('durationNanoseconds', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_flow_packets(statistic, resource_id, resource_meta): - return _get_int_sample('packetCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_flow_bytes(statistic, resource_id, resource_meta): - return _get_int_sample('byteCount', statistic, resource_id, - resource_meta) diff --git a/ceilometer/network/statistics/port.py b/ceilometer/network/statistics/port.py deleted file mode 100644 index d9039022..00000000 --- a/ceilometer/network/statistics/port.py +++ /dev/null @@ -1,109 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -from ceilometer.network import statistics -from ceilometer import sample - - -class PortPollster(statistics._Base): - - meter_name = 'switch.port' - meter_type = sample.TYPE_GAUGE - meter_unit = 'port' - - -class PortPollsterReceivePackets(statistics._Base): - - meter_name = 'switch.port.receive.packets' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterTransmitPackets(statistics._Base): - - meter_name = 'switch.port.transmit.packets' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveBytes(statistics._Base): - - meter_name = 'switch.port.receive.bytes' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'B' - - -class PortPollsterTransmitBytes(statistics._Base): - - meter_name = 'switch.port.transmit.bytes' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'B' - - -class PortPollsterReceiveDrops(statistics._Base): - - meter_name = 'switch.port.receive.drops' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterTransmitDrops(statistics._Base): - - meter_name = 'switch.port.transmit.drops' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveErrors(statistics._Base): - - meter_name = 'switch.port.receive.errors' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterTransmitErrors(statistics._Base): - - meter_name = 'switch.port.transmit.errors' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveFrameErrors(statistics._Base): - - meter_name = 'switch.port.receive.frame_error' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveOverrunErrors(statistics._Base): - - meter_name = 'switch.port.receive.overrun_error' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveCRCErrors(statistics._Base): - - meter_name = 'switch.port.receive.crc_error' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterCollisionCount(statistics._Base): - - meter_name = 'switch.port.collision.count' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' diff --git a/ceilometer/network/statistics/switch.py b/ceilometer/network/statistics/switch.py deleted file mode 100644 index 268b2589..00000000 --- a/ceilometer/network/statistics/switch.py +++ /dev/null @@ -1,25 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
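As port.py above and switch.py/table.py below show, each switch meter is nothing more than a subclass declaring its name, type and unit; statistics._Base.get_samples() does the driver call and Sample construction. A hypothetical extra meter, only to illustrate the pattern (this meter does not exist in ceilometer):

    from ceilometer.network import statistics
    from ceilometer import sample

    class PortPollsterReceiveBroadcast(statistics._Base):

        meter_name = 'switch.port.receive.broadcast'   # hypothetical meter
        meter_type = sample.TYPE_CUMULATIVE
        meter_unit = 'packet'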
- - -from ceilometer.network import statistics -from ceilometer import sample - - -class SWPollster(statistics._Base): - - meter_name = 'switch' - meter_type = sample.TYPE_GAUGE - meter_unit = 'switch' diff --git a/ceilometer/network/statistics/table.py b/ceilometer/network/statistics/table.py deleted file mode 100644 index 2571cd6a..00000000 --- a/ceilometer/network/statistics/table.py +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from ceilometer.network import statistics -from ceilometer import sample - - -class TablePollster(statistics._Base): - - meter_name = 'switch.table' - meter_type = sample.TYPE_GAUGE - meter_unit = 'table' - - -class TablePollsterActiveEntries(statistics._Base): - - meter_name = 'switch.table.active.entries' - meter_type = sample.TYPE_GAUGE - meter_unit = 'entry' - - -class TablePollsterLookupPackets(statistics._Base): - - meter_name = 'switch.table.lookup.packets' - meter_type = sample.TYPE_GAUGE - meter_unit = 'packet' - - -class TablePollsterMatchedPackets(statistics._Base): - - meter_name = 'switch.table.matched.packets' - meter_type = sample.TYPE_GAUGE - meter_unit = 'packet' diff --git a/ceilometer/neutron_client.py b/ceilometer/neutron_client.py deleted file mode 100644 index 5299ef45..00000000 --- a/ceilometer/neutron_client.py +++ /dev/null @@ -1,418 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
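neutron_client.py below wraps every listing call in a small decorator that turns a "not found" answer from Neutron into an empty list, so the pollsters simply see no resources instead of an exception. A self-contained sketch of that pattern (the exception class here is a stand-in for neutronclient's NeutronClientException):

    import functools

    class NotFound(Exception):
        status_code = 404

    def swallow_not_found(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except NotFound:
                return []
        return wrapper

    @swallow_not_found
    def list_firewalls():
        raise NotFound()

    assert list_firewalls() == []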
- -import functools - -from neutronclient.common import exceptions -from neutronclient.v2_0 import client as clientv20 -from oslo_config import cfg -from oslo_log import log - -from ceilometer import keystone_client - -SERVICE_OPTS = [ - cfg.StrOpt('neutron', - default='network', - help='Neutron service type.'), - cfg.StrOpt('neutron_lbaas_version', - default='v2', - choices=('v1', 'v2'), - help='Neutron load balancer version.') -] - -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') -cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') - -LOG = log.getLogger(__name__) - - -def logged(func): - - @functools.wraps(func) - def with_logging(*args, **kwargs): - try: - return func(*args, **kwargs) - except exceptions.NeutronClientException as e: - if e.status_code == 404: - LOG.warning("The resource could not be found.") - else: - LOG.warning(e) - return [] - except Exception as e: - LOG.exception(e) - raise - - return with_logging - - -class Client(object): - """A client which gets information via python-neutronclient.""" - - def __init__(self): - conf = cfg.CONF.service_credentials - params = { - 'session': keystone_client.get_session(), - 'endpoint_type': conf.interface, - 'region_name': conf.region_name, - 'service_type': cfg.CONF.service_types.neutron, - } - self.client = clientv20.Client(**params) - self.lb_version = cfg.CONF.service_types.neutron_lbaas_version - - @logged - def port_get_all(self): - resp = self.client.list_ports() - return resp.get('ports') - - @logged - def vip_get_all(self): - resp = self.client.list_vips() - return resp.get('vips') - - @logged - def pool_get_all(self): - resources = [] - if self.lb_version == 'v1': - resp = self.client.list_pools() - resources = resp.get('pools') - elif self.lb_version == 'v2': - resources = self.list_pools_v2() - return resources - - @logged - def member_get_all(self): - resources = [] - if self.lb_version == 'v1': - resp = self.client.list_members() - resources = resp.get('members') - elif self.lb_version == 'v2': - resources = self.list_members_v2() - return resources - - @logged - def health_monitor_get_all(self): - resources = [] - if self.lb_version == 'v1': - resp = self.client.list_health_monitors() - resources = resp.get('health_monitors') - elif self.lb_version == 'v2': - resources = self.list_health_monitors_v2() - return resources - - @logged - def pool_stats(self, pool): - return self.client.retrieve_pool_stats(pool) - - @logged - def vpn_get_all(self): - resp = self.client.list_vpnservices() - return resp.get('vpnservices') - - @logged - def ipsec_site_connections_get_all(self): - resp = self.client.list_ipsec_site_connections() - return resp.get('ipsec_site_connections') - - @logged - def firewall_get_all(self): - resp = self.client.list_firewalls() - return resp.get('firewalls') - - @logged - def fw_policy_get_all(self): - resp = self.client.list_firewall_policies() - return resp.get('firewall_policies') - - @logged - def fip_get_all(self): - fips = self.client.list_floatingips()['floatingips'] - return fips - - @logged - def list_pools_v2(self): - """This method is used to get the pools list. - - This method uses Load Balancer v2_0 API to achieve - the detailed list of the pools. 
- - :returns: The list of the pool resources - """ - pool_status = dict() - resp = self.client.list_lbaas_pools() - temp_pools = resp.get('pools') - resources = [] - pool_listener_dict = self._get_pool_and_listener_ids(temp_pools) - for k, v in pool_listener_dict.items(): - loadbalancer_id = self._get_loadbalancer_id_with_listener_id(v) - status = self._get_pool_status(loadbalancer_id, v) - for k, v in status.items(): - pool_status[k] = v - - for pool in temp_pools: - pool_id = pool.get('id') - pool['status'] = pool_status[pool_id] - pool['lb_method'] = pool.get('lb_algorithm') - pool['status_description'] = pool['status'] - # Based on the LBaaSv2 design, the properties 'vip_id' - # and 'subnet_id' should belong to the loadbalancer resource and - # not to the pool resource. However, because we don't want to - # change the metadata of the pool resource this release, - # we set them to empty values manually. - pool['provider'] = '' - pool['vip_id'] = '' - pool['subnet_id'] = '' - resources.append(pool) - - return resources - - @logged - def list_members_v2(self): - """Method is used to list the members info. - - This method is used to get the detailed list of the members - with Load Balancer v2_0 API - - :returns: The list of the member resources - """ - resources = [] - pools = self.client.list_lbaas_pools().get('pools') - for pool in pools: - pool_id = pool.get('id') - listener_id = pool.get('listeners')[0].get('id') - lb_id = self._get_loadbalancer_id_with_listener_id(listener_id) - status = self._get_member_status(lb_id, [listener_id, pool_id]) - resp = self.client.list_lbaas_members(pool_id) - temp_members = resp.get('members') - for member in temp_members: - member['status'] = status[member.get('id')] - member['pool_id'] = pool_id - member['status_description'] = member['status'] - resources.append(member) - return resources - - @logged - def list_health_monitors_v2(self): - """Method is used to list the health monitors - - This method is used to get the detailed list of the health - monitors with Load Balancer v2_0 - - :returns: The list of the health monitor resources - """ - resp = self.client.list_lbaas_healthmonitors() - resources = resp.get('healthmonitors') - return resources - - def _get_pool_and_listener_ids(self, pools): - """Method is used to get the mapping between pool and listener - - This method is used to get the pool ids and listener ids - from the pool list. - - :param pools: The list of the polls - :returns: The relationship between pool and listener. - It's a dictionary type. The key of this dict is - the id of pool and the value of it is the id of the first - listener which the pool belongs to - """ - pool_listener_dict = dict() - for pool in pools: - key = pool.get("id") - value = pool.get('listeners')[0].get('id') - pool_listener_dict[key] = value - return pool_listener_dict - - def _retrieve_loadbalancer_status_tree(self, loadbalancer_id): - """Method is used to get the status of a LB. - - This method is used to get the status tree of a specific - Load Balancer. - - :param loadbalancer_id: The ID of the specific Load - Balancer. - :returns: The status of the specific Load Balancer. - It consists of the load balancer and all of its - children's provisioning and operating statuses - """ - lb_status_tree = self.client.retrieve_loadbalancer_status( - loadbalancer_id) - return lb_status_tree - - def _get_loadbalancer_id_with_listener_id(self, listener_id): - """This method is used to get the loadbalancer id. 
- - :param listener_id: The ID of the listener - :returns: The ID of the Loadbalancer - """ - listener = self.client.show_listener(listener_id) - listener_lbs = listener.get('listener').get('loadbalancers') - loadbalancer_id = listener_lbs[0].get('id') - return loadbalancer_id - - def _get_member_status(self, loadbalancer_id, parent_id): - """Method used to get the status of member resource. - - This method is used to get the status of member - resource belonged to the specific Load Balancer. - - :param loadbalancer_id: The ID of the Load Balancer. - :param parent_id: The parent ID list of the member resource. - For the member resource, the parent_id should be [listener_id, - pool_id]. - :returns: The status dictionary of the member - resource. The key is the ID of the member. The value is - the operating status of the member resource. - """ - # FIXME(liamji) the following meters are experimental and - # may generate a large load against neutron api. The future - # enhancements can be tracked against: - # https://review.openstack.org/#/c/218560. - # After it has been merged and the neutron client supports - # with the corresponding apis, will change to use the new - # method to get the status of the members. - resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id) - status_tree = resp.get('statuses').get('loadbalancer') - status_dict = dict() - - listeners_status = status_tree.get('listeners') - for listener_status in listeners_status: - listener_id = listener_status.get('id') - if listener_id == parent_id[0]: - pools_status = listener_status.get('pools') - for pool_status in pools_status: - if pool_status.get('id') == parent_id[1]: - members_status = pool_status.get('members') - for member_status in members_status: - key = member_status.get('id') - # If the item has no the property 'id', skip - # it. - if key is None: - continue - # The situation that the property - # 'operating_status' is none is handled in - # the method get_sample() in lbaas.py. - value = member_status.get('operating_status') - status_dict[key] = value - break - break - - return status_dict - - def _get_listener_status(self, loadbalancer_id): - """Method used to get the status of the listener resource. - - This method is used to get the status of the listener - resources belonged to the specific Load Balancer. - - :param loadbalancer_id: The ID of the Load Balancer. - :returns: The status dictionary of the listener - resource. The key is the ID of the listener resource. The - value is the operating status of the listener resource. - """ - # FIXME(liamji) the following meters are experimental and - # may generate a large load against neutron api. The future - # enhancements can be tracked against: - # https://review.openstack.org/#/c/218560. - # After it has been merged and the neutron client supports - # with the corresponding apis, will change to use the new - # method to get the status of the listeners. - resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id) - status_tree = resp.get('statuses').get('loadbalancer') - status_dict = dict() - - listeners_status = status_tree.get('listeners') - for listener_status in listeners_status: - key = listener_status.get('id') - # If the item has no the property 'id', skip - # it. - if key is None: - continue - # The situation that the property - # 'operating_status' is none is handled in - # the method get_sample() in lbaas.py. 
- value = listener_status.get('operating_status') - status_dict[key] = value - - return status_dict - - def _get_pool_status(self, loadbalancer_id, parent_id): - """Method used to get the status of pool resource. - - This method is used to get the status of the pool - resources belonged to the specific Load Balancer. - - :param loadbalancer_id: The ID of the Load Balancer. - :param parent_id: The parent ID of the pool resource. - :returns: The status dictionary of the pool resource. - The key is the ID of the pool resource. The value is - the operating status of the pool resource. - """ - # FIXME(liamji) the following meters are experimental and - # may generate a large load against neutron api. The future - # enhancements can be tracked against: - # https://review.openstack.org/#/c/218560. - # After it has been merged and the neutron client supports - # with the corresponding apis, will change to use the new - # method to get the status of the pools. - resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id) - status_tree = resp.get('statuses').get('loadbalancer') - status_dict = dict() - - listeners_status = status_tree.get('listeners') - for listener_status in listeners_status: - listener_id = listener_status.get('id') - if listener_id == parent_id: - pools_status = listener_status.get('pools') - for pool_status in pools_status: - key = pool_status.get('id') - # If the item has no the property 'id', skip - # it. - if key is None: - continue - # The situation that the property - # 'operating_status' is none is handled in - # the method get_sample() in lbaas.py. - value = pool_status.get('operating_status') - status_dict[key] = value - break - - return status_dict - - @logged - def list_listener(self): - """This method is used to get the list of the listeners.""" - resp = self.client.list_listeners() - resources = resp.get('listeners') - for listener in resources: - loadbalancer_id = listener.get('loadbalancers')[0].get('id') - status = self._get_listener_status(loadbalancer_id) - listener['operating_status'] = status[listener.get('id')] - return resources - - @logged - def list_loadbalancer(self): - """This method is used to get the list of the loadbalancers.""" - resp = self.client.list_loadbalancers() - resources = resp.get('loadbalancers') - return resources - - @logged - def get_loadbalancer_stats(self, loadbalancer_id): - """This method is used to get the statistics of the loadbalancer. - - :param loadbalancer_id: the ID of the specified loadbalancer - """ - resp = self.client.retrieve_loadbalancer_stats(loadbalancer_id) - resource = resp.get('stats') - return resource diff --git a/ceilometer/notification.py b/ceilometer/notification.py deleted file mode 100644 index c33e536b..00000000 --- a/ceilometer/notification.py +++ /dev/null @@ -1,340 +0,0 @@ -# -# Copyright 2012-2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
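The notification agent removed below fans samples out over a fixed set of IPC queues when workload_partitioning is enabled: each pipeline gets pipeline_processing_queues topics named 'ceilometer-pipe-<pipeline>-<n>', and a sample is assigned to one of them by hashing its grouping-key values (resource_id by default), as the pipeline transport manager further down does. A small standalone sketch of that routing; the function names here are illustrative, not the agent's API.

NOTIFICATION_IPC = 'ceilometer-pipe'


def ipc_topics(pipe_name, processing_queues):
    # One IPC topic per processing queue for a given pipeline.
    return ['%s-%s-%d' % (NOTIFICATION_IPC, pipe_name, n)
            for n in range(processing_queues)]


def pick_topic(sample, topics, grouping_keys=('resource_id',)):
    # Concatenate the grouping-key values and bucket by hash, mirroring
    # the hash_grouping helper in the removed pipeline code.
    value = ''.join(str(sample.get(key) or '') for key in grouping_keys)
    return topics[hash(value) % len(topics)]


topics = ipc_topics('meter_sink', processing_queues=10)
print(pick_topic({'resource_id': 'instance-0001'}, topics))
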
-import itertools -import threading - -from concurrent import futures -from futurist import periodics -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from stevedore import extension - -from ceilometer.agent import plugin_base as base -from ceilometer import coordination -from ceilometer.event import endpoint as event_endpoint -from ceilometer import exchange_control -from ceilometer.i18n import _, _LI, _LW -from ceilometer import messaging -from ceilometer import pipeline -from ceilometer import service_base -from ceilometer import utils - - -LOG = log.getLogger(__name__) - - -OPTS = [ - cfg.IntOpt('pipeline_processing_queues', - default=10, - min=1, - help='Number of queues to parallelize workload across. This ' - 'value should be larger than the number of active ' - 'notification agents for optimal results.'), - cfg.BoolOpt('ack_on_event_error', - default=True, - deprecated_group='collector', - help='Acknowledge message when event persistence fails.'), - cfg.BoolOpt('store_events', - deprecated_group='collector', - default=False, - help='Save event details.'), - cfg.BoolOpt('disable_non_metric_meters', - default=True, - help='WARNING: Ceilometer historically offered the ability to ' - 'store events as meters. This usage is NOT advised as it ' - 'can flood the metering database and cause performance ' - 'degradation.'), - cfg.BoolOpt('workload_partitioning', - default=False, - help='Enable workload partitioning, allowing multiple ' - 'notification agents to be run simultaneously.'), - cfg.MultiStrOpt('messaging_urls', - default=[], - secret=True, - help="Messaging URLs to listen for notifications. " - "Example: rabbit://user:pass@host1:port1" - "[,user:pass@hostN:portN]/virtual_host " - "(DEFAULT/transport_url is used if empty). This " - "is useful when you have dedicate messaging nodes " - "for each service, for example, all nova " - "notifications go to rabbit-nova:5672, while all " - "cinder notifications go to rabbit-cinder:5672."), - cfg.IntOpt('batch_size', - default=1, - help='Number of notification messages to wait before ' - 'publishing them'), - cfg.IntOpt('batch_timeout', - default=None, - help='Number of seconds to wait before publishing samples' - 'when batch_size is not reached (None means indefinitely)'), -] - -cfg.CONF.register_opts(exchange_control.EXCHANGE_OPTS) -cfg.CONF.register_opts(OPTS, group="notification") -cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', - group='publisher_notifier') - - -class NotificationService(service_base.PipelineBasedService): - """Notification service. - - When running multiple agents, additional queuing sequence is required for - inter process communication. Each agent has two listeners: one to listen - to the main OpenStack queue and another listener(and notifier) for IPC to - divide pipeline sink endpoints. Coordination should be enabled to have - proper active/active HA. 
- """ - - NOTIFICATION_NAMESPACE = 'ceilometer.notification' - NOTIFICATION_IPC = 'ceilometer-pipe' - - @classmethod - def _get_notifications_manager(cls, pm): - return extension.ExtensionManager( - namespace=cls.NOTIFICATION_NAMESPACE, - invoke_on_load=True, - invoke_args=(pm, ) - ) - - def _get_notifiers(self, transport, pipe): - notifiers = [] - for x in range(cfg.CONF.notification.pipeline_processing_queues): - notifiers.append(oslo_messaging.Notifier( - transport, - driver=cfg.CONF.publisher_notifier.telemetry_driver, - publisher_id=pipe.name, - topics=['%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name, x)])) - return notifiers - - def _get_pipe_manager(self, transport, pipeline_manager): - - if cfg.CONF.notification.workload_partitioning: - pipe_manager = pipeline.SamplePipelineTransportManager() - for pipe in pipeline_manager.pipelines: - key = pipeline.get_pipeline_grouping_key(pipe) - pipe_manager.add_transporter( - (pipe.source.support_meter, key or ['resource_id'], - self._get_notifiers(transport, pipe))) - else: - pipe_manager = pipeline_manager - - return pipe_manager - - def _get_event_pipeline_manager(self, transport): - - if cfg.CONF.notification.store_events: - if cfg.CONF.notification.workload_partitioning: - event_pipe_manager = pipeline.EventPipelineTransportManager() - for pipe in self.event_pipeline_manager.pipelines: - event_pipe_manager.add_transporter( - (pipe.source.support_event, ['event_type'], - self._get_notifiers(transport, pipe))) - else: - event_pipe_manager = self.event_pipeline_manager - - return event_pipe_manager - - def start(self): - super(NotificationService, self).start() - self.periodic = None - self.partition_coordinator = None - self.coord_lock = threading.Lock() - - self.listeners = [] - - # NOTE(kbespalov): for the pipeline queues used a single amqp host - # hence only one listener is required - self.pipeline_listener = None - - self.pipeline_manager = pipeline.setup_pipeline() - - if cfg.CONF.notification.store_events: - self.event_pipeline_manager = pipeline.setup_event_pipeline() - - self.transport = messaging.get_transport() - - if cfg.CONF.notification.workload_partitioning: - self.group_id = self.NOTIFICATION_NAMESPACE - self.partition_coordinator = coordination.PartitionCoordinator() - self.partition_coordinator.start() - else: - # FIXME(sileht): endpoint uses the notification_topics option - # and it should not because this is an oslo_messaging option - # not a ceilometer. Until we have something to get the - # notification_topics in another way, we must create a transport - # to ensure the option has been registered by oslo_messaging. 
- messaging.get_notifier(self.transport, '') - self.group_id = None - - self.pipe_manager = self._get_pipe_manager(self.transport, - self.pipeline_manager) - self.event_pipe_manager = self._get_event_pipeline_manager( - self.transport) - - self._configure_main_queue_listeners(self.pipe_manager, - self.event_pipe_manager) - - if cfg.CONF.notification.workload_partitioning: - # join group after all manager set up is configured - self.partition_coordinator.join_group(self.group_id) - self.partition_coordinator.watch_group(self.group_id, - self._refresh_agent) - - @periodics.periodic(spacing=cfg.CONF.coordination.heartbeat, - run_immediately=True) - def heartbeat(): - self.partition_coordinator.heartbeat() - - @periodics.periodic(spacing=cfg.CONF.coordination.check_watchers, - run_immediately=True) - def run_watchers(): - self.partition_coordinator.run_watchers() - - self.periodic = periodics.PeriodicWorker.create( - [], executor_factory=lambda: - futures.ThreadPoolExecutor(max_workers=10)) - self.periodic.add(heartbeat) - self.periodic.add(run_watchers) - - utils.spawn_thread(self.periodic.start) - - # configure pipelines after all coordination is configured. - self._configure_pipeline_listener() - - if not cfg.CONF.notification.disable_non_metric_meters: - LOG.warning(_LW('Non-metric meters may be collected. It is highly ' - 'advisable to disable these meters using ' - 'ceilometer.conf or the pipeline.yaml')) - - self.init_pipeline_refresh() - - def _configure_main_queue_listeners(self, pipe_manager, - event_pipe_manager): - notification_manager = self._get_notifications_manager(pipe_manager) - if not list(notification_manager): - LOG.warning(_('Failed to load any notification handlers for %s'), - self.NOTIFICATION_NAMESPACE) - - ack_on_error = cfg.CONF.notification.ack_on_event_error - - endpoints = [] - if cfg.CONF.notification.store_events: - endpoints.append( - event_endpoint.EventsNotificationEndpoint(event_pipe_manager)) - - targets = [] - for ext in notification_manager: - handler = ext.obj - if (cfg.CONF.notification.disable_non_metric_meters and - isinstance(handler, base.NonMetricNotificationBase)): - continue - LOG.debug('Event types from %(name)s: %(type)s' - ' (ack_on_error=%(error)s)', - {'name': ext.name, - 'type': ', '.join(handler.event_types), - 'error': ack_on_error}) - # NOTE(gordc): this could be a set check but oslo_messaging issue - # https://bugs.launchpad.net/oslo.messaging/+bug/1398511 - # This ensures we don't create multiple duplicate consumers. 
- for new_tar in handler.get_targets(cfg.CONF): - if new_tar not in targets: - targets.append(new_tar) - endpoints.append(handler) - - urls = cfg.CONF.notification.messaging_urls or [None] - for url in urls: - transport = messaging.get_transport(url) - listener = messaging.get_batch_notification_listener( - transport, targets, endpoints, - batch_size=cfg.CONF.notification.batch_size, - batch_timeout=cfg.CONF.notification.batch_timeout) - listener.start() - self.listeners.append(listener) - - def _refresh_agent(self, event): - self._configure_pipeline_listener() - - def _configure_pipeline_listener(self): - with self.coord_lock: - ev_pipes = [] - if cfg.CONF.notification.store_events: - ev_pipes = self.event_pipeline_manager.pipelines - pipelines = self.pipeline_manager.pipelines + ev_pipes - transport = messaging.get_transport() - partitioned = self.partition_coordinator.extract_my_subset( - self.group_id, - range(cfg.CONF.notification.pipeline_processing_queues)) - - endpoints = [] - targets = [] - - for pipe in pipelines: - if isinstance(pipe, pipeline.EventPipeline): - endpoints.append(pipeline.EventPipelineEndpoint(pipe)) - else: - endpoints.append(pipeline.SamplePipelineEndpoint(pipe)) - - for pipe_set, pipe in itertools.product(partitioned, pipelines): - LOG.debug('Pipeline endpoint: %s from set: %s', - pipe.name, pipe_set) - topic = '%s-%s-%s' % (self.NOTIFICATION_IPC, - pipe.name, pipe_set) - targets.append(oslo_messaging.Target(topic=topic)) - - if self.pipeline_listener: - self.pipeline_listener.stop() - self.pipeline_listener.wait() - - self.pipeline_listener = messaging.get_batch_notification_listener( - transport, - targets, - endpoints, - batch_size=cfg.CONF.notification.batch_size, - batch_timeout=cfg.CONF.notification.batch_timeout) - self.pipeline_listener.start() - - def stop(self): - if self.started: - if self.periodic: - self.periodic.stop() - self.periodic.wait() - if self.partition_coordinator: - self.partition_coordinator.stop() - if self.pipeline_listener: - utils.kill_listeners([self.pipeline_listener]) - utils.kill_listeners(self.listeners) - super(NotificationService, self).stop() - - def reload_pipeline(self): - LOG.info(_LI("Reloading notification agent and listeners.")) - - if self.pipeline_validated: - self.pipe_manager = self._get_pipe_manager( - self.transport, self.pipeline_manager) - - if self.event_pipeline_validated: - self.event_pipe_manager = self._get_event_pipeline_manager( - self.transport) - - # restart the main queue listeners. - utils.kill_listeners(self.listeners) - self._configure_main_queue_listeners( - self.pipe_manager, self.event_pipe_manager) - - # restart the pipeline listeners if workload partitioning - # is enabled. - if cfg.CONF.notification.workload_partitioning: - self._configure_pipeline_listener() diff --git a/ceilometer/nova_client.py b/ceilometer/nova_client.py deleted file mode 100644 index b5578e63..00000000 --- a/ceilometer/nova_client.py +++ /dev/null @@ -1,171 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
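ceilometer/nova_client.py, removed below, decorates each instance returned by Nova with flavor and image details and caches those lookups for the duration of a listing, so a flavor shared by many instances is fetched only once. A dependency-free sketch of that caching step; cached_lookup and fetch_flavor are stand-ins for illustration, not the removed module's names.

def cached_lookup(key, cache, fetch):
    # Return cache[key], calling fetch(key) only on the first miss.
    if key not in cache:
        try:
            cache[key] = fetch(key)
        except LookupError:        # stand-in for novaclient's NotFound
            cache[key] = None
    return cache[key]


calls = []


def fetch_flavor(flavor_id):
    calls.append(flavor_id)
    return {'id': flavor_id, 'vcpus': 2, 'ram': 4096, 'disk': 20}


flavor_cache = {}
for fid in ('m1.small', 'm1.small', 'm1.large'):
    cached_lookup(fid, flavor_cache, fetch_flavor)

print(calls)   # ['m1.small', 'm1.large']: each flavor hits the API once
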
- -import functools - -import novaclient -from novaclient import api_versions -from novaclient import client as nova_client -from oslo_config import cfg -from oslo_log import log - -from ceilometer import keystone_client - -OPTS = [ - cfg.BoolOpt('nova_http_log_debug', - default=False, - # Added in Mitaka - deprecated_for_removal=True, - help=('Allow novaclient\'s debug log output. ' - '(Use default_log_levels instead)')), -] - -SERVICE_OPTS = [ - cfg.StrOpt('nova', - default='compute', - help='Nova service type.'), -] - -cfg.CONF.register_opts(OPTS) -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') -cfg.CONF.import_opt('http_timeout', 'ceilometer.service') -cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') - -LOG = log.getLogger(__name__) - - -def logged(func): - - @functools.wraps(func) - def with_logging(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as e: - LOG.exception(e) - raise - - return with_logging - - -class Client(object): - """A client which gets information via python-novaclient.""" - - def __init__(self, endpoint_override=None, auth=None): - """Initialize a nova client object.""" - conf = cfg.CONF.service_credentials - - logger = None - if cfg.CONF.nova_http_log_debug: - logger = log.getLogger("novaclient-debug") - logger.logger.setLevel(log.DEBUG) - - self.nova_client = nova_client.Client( - version=api_versions.APIVersion('2.1'), - session=keystone_client.get_session(), - - # nova adapter options - region_name=conf.region_name, - interface=conf.interface, - service_type=cfg.CONF.service_types.nova, - - # keystone adapter options - endpoint_override=endpoint_override, - auth=auth, - logger=logger) - - def _with_flavor_and_image(self, instances): - flavor_cache = {} - image_cache = {} - for instance in instances: - self._with_flavor(instance, flavor_cache) - self._with_image(instance, image_cache) - - return instances - - def _with_flavor(self, instance, cache): - fid = instance.flavor['id'] - if fid in cache: - flavor = cache.get(fid) - else: - try: - flavor = self.nova_client.flavors.get(fid) - except novaclient.exceptions.NotFound: - flavor = None - cache[fid] = flavor - - attr_defaults = [('name', 'unknown-id-%s' % fid), - ('vcpus', 0), ('ram', 0), ('disk', 0), - ('ephemeral', 0)] - - for attr, default in attr_defaults: - if not flavor: - instance.flavor[attr] = default - continue - instance.flavor[attr] = getattr(flavor, attr, default) - - def _with_image(self, instance, cache): - try: - iid = instance.image['id'] - except TypeError: - instance.image = None - instance.kernel_id = None - instance.ramdisk_id = None - return - - if iid in cache: - image = cache.get(iid) - else: - try: - image = self.nova_client.images.get(iid) - except novaclient.exceptions.NotFound: - image = None - cache[iid] = image - - attr_defaults = [('kernel_id', None), - ('ramdisk_id', None)] - - instance.image['name'] = ( - getattr(image, 'name') if image else 'unknown-id-%s' % iid) - image_metadata = getattr(image, 'metadata', None) - - for attr, default in attr_defaults: - ameta = image_metadata.get(attr) if image_metadata else default - setattr(instance, attr, ameta) - - @logged - def instance_get_all_by_host(self, hostname, since=None): - """Returns list of instances on particular host. - - If since is supplied, it will return the instances changed since that - datetime. 
since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' - """ - search_opts = {'host': hostname, 'all_tenants': True} - if since: - search_opts['changes-since'] = since - return self._with_flavor_and_image(self.nova_client.servers.list( - detailed=True, - search_opts=search_opts)) - - @logged - def instance_get_all(self, since=None): - """Returns list of all instances. - - If since is supplied, it will return the instances changes since that - datetime. since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' - """ - search_opts = {'all_tenants': True} - if since: - search_opts['changes-since'] = since - return self.nova_client.servers.list( - detailed=True, - search_opts=search_opts) diff --git a/ceilometer/objectstore/__init__.py b/ceilometer/objectstore/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/objectstore/rgw.py b/ceilometer/objectstore/rgw.py deleted file mode 100644 index 581df4c2..00000000 --- a/ceilometer/objectstore/rgw.py +++ /dev/null @@ -1,210 +0,0 @@ -# -# Copyright 2015 Reliance Jio Infocomm Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Common code for working with ceph object stores -""" - -from keystoneauth1 import exceptions -from oslo_config import cfg -from oslo_log import log -import six.moves.urllib.parse as urlparse - -from ceilometer.agent import plugin_base -from ceilometer import keystone_client -from ceilometer import sample - -LOG = log.getLogger(__name__) - -SERVICE_OPTS = [ - cfg.StrOpt('radosgw', - default='object-store', - help='Radosgw service type.'), -] - -CREDENTIAL_OPTS = [ - cfg.StrOpt('access_key', - secret=True, - help='Access key for Radosgw Admin.'), - cfg.StrOpt('secret_key', - secret=True, - help='Secret key for Radosgw Admin.') -] - -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') -cfg.CONF.register_opts(CREDENTIAL_OPTS, group='rgw_admin_credentials') -cfg.CONF.import_group('rgw_admin_credentials', 'ceilometer.service') - - -class _Base(plugin_base.PollsterBase): - METHOD = 'bucket' - _ENDPOINT = None - - def __init__(self): - self.access_key = cfg.CONF.rgw_admin_credentials.access_key - self.secret = cfg.CONF.rgw_admin_credentials.secret_key - - @property - def default_discovery(self): - return 'tenant' - - @property - def CACHE_KEY_METHOD(self): - return 'rgw.get_%s' % self.METHOD - - @staticmethod - def _get_endpoint(ksclient): - # we store the endpoint as a base class attribute, so keystone is - # only ever called once, also we assume that in a single deployment - # we may be only deploying `radosgw` or `swift` as the object-store - if _Base._ENDPOINT is None: - try: - conf = cfg.CONF.service_credentials - rgw_url = keystone_client.get_service_catalog( - ksclient).url_for( - service_type=cfg.CONF.service_types.radosgw, - interface=conf.interface) - _Base._ENDPOINT = urlparse.urljoin(rgw_url, '/admin') - except exceptions.EndpointNotFound: - LOG.debug("Radosgw endpoint not found") - return _Base._ENDPOINT - - def _iter_accounts(self, ksclient, cache, tenants): - if self.CACHE_KEY_METHOD not 
in cache: - cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( - ksclient, tenants)) - return iter(cache[self.CACHE_KEY_METHOD]) - - def _get_account_info(self, ksclient, tenants): - endpoint = self._get_endpoint(ksclient) - if not endpoint: - raise StopIteration() - - try: - from ceilometer.objectstore.rgw_client import RGWAdminClient - rgw_client = RGWAdminClient(endpoint, self.access_key, self.secret) - except ImportError: - raise plugin_base.PollsterPermanentError(tenants) - - for t in tenants: - api_method = 'get_%s' % self.METHOD - yield t.id, getattr(rgw_client, api_method)(t.id) - - -class ContainersObjectsPollster(_Base): - """Get info about object counts in a container using RGW Admin APIs.""" - - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - for it in bucket_info['buckets']: - yield sample.Sample( - name='radosgw.containers.objects', - type=sample.TYPE_GAUGE, - volume=int(it.num_objects), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant + '/' + it.name, - resource_metadata=None, - ) - - -class ContainersSizePollster(_Base): - """Get info about object sizes in a container using RGW Admin APIs.""" - - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - for it in bucket_info['buckets']: - yield sample.Sample( - name='radosgw.containers.objects.size', - type=sample.TYPE_GAUGE, - volume=int(it.size * 1024), - unit='B', - user_id=None, - project_id=tenant, - resource_id=tenant + '/' + it.name, - resource_metadata=None, - ) - - -class ObjectsSizePollster(_Base): - """Iterate over all accounts, using keystone.""" - - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - yield sample.Sample( - name='radosgw.objects.size', - type=sample.TYPE_GAUGE, - volume=int(bucket_info['size'] * 1024), - unit='B', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ObjectsPollster(_Base): - """Iterate over all accounts, using keystone.""" - - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - yield sample.Sample( - name='radosgw.objects', - type=sample.TYPE_GAUGE, - volume=int(bucket_info['num_objects']), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ObjectsContainersPollster(_Base): - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - yield sample.Sample( - name='radosgw.objects.containers', - type=sample.TYPE_GAUGE, - volume=int(bucket_info['num_buckets']), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class UsagePollster(_Base): - - METHOD = 'usage' - - def get_samples(self, manager, cache, resources): - for tenant, usage in self._iter_accounts(manager.keystone, - cache, resources): - yield sample.Sample( - name='radosgw.api.request', - type=sample.TYPE_GAUGE, - volume=int(usage), - unit='request', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) diff --git a/ceilometer/objectstore/rgw_client.py b/ceilometer/objectstore/rgw_client.py deleted file mode 100644 index 2a3d1d7f..00000000 --- 
a/ceilometer/objectstore/rgw_client.py +++ /dev/null @@ -1,72 +0,0 @@ -# -# Copyright 2015 Reliance Jio Infocomm Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from collections import namedtuple - -from awsauth import S3Auth -import requests -import six.moves.urllib.parse as urlparse - -from ceilometer.i18n import _ - - -class RGWAdminAPIFailed(Exception): - pass - - -class RGWAdminClient(object): - Bucket = namedtuple('Bucket', 'name, num_objects, size') - - def __init__(self, endpoint, access_key, secret_key): - self.access_key = access_key - self.secret = secret_key - self.endpoint = endpoint - self.hostname = urlparse.urlparse(endpoint).netloc - - def _make_request(self, path, req_params): - uri = "{0}/{1}".format(self.endpoint, path) - r = requests.get(uri, params=req_params, - auth=S3Auth(self.access_key, self.secret, - self.hostname) - ) - - if r.status_code != 200: - raise RGWAdminAPIFailed( - _('RGW AdminOps API returned %(status)s %(reason)s') % - {'status': r.status_code, 'reason': r.reason}) - - return r.json() - - def get_bucket(self, tenant_id): - path = "bucket" - req_params = {"uid": tenant_id, "stats": "true"} - json_data = self._make_request(path, req_params) - stats = {'num_buckets': 0, 'buckets': [], 'size': 0, 'num_objects': 0} - stats['num_buckets'] = len(json_data) - for it in json_data: - for k, v in it["usage"].items(): - stats['num_objects'] += v["num_objects"] - stats['size'] += v["size_kb"] - stats['buckets'].append(self.Bucket(it["bucket"], - v["num_objects"], v["size_kb"])) - return stats - - def get_usage(self, tenant_id): - path = "usage" - req_params = {"uid": tenant_id} - json_data = self._make_request(path, req_params) - usage_data = json_data["summary"] - return sum((it["total"]["ops"] for it in usage_data)) diff --git a/ceilometer/objectstore/swift.py b/ceilometer/objectstore/swift.py deleted file mode 100644 index d57ff68b..00000000 --- a/ceilometer/objectstore/swift.py +++ /dev/null @@ -1,202 +0,0 @@ -# -# Copyright 2012 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
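The swift pollsters removed below rebuild each per-tenant account URL from the catalog endpoint by keeping everything before '/v1' and appending the configured reseller prefix (default 'AUTH_') plus the tenant id. A standalone restatement of that normalisation, with the prefix passed in rather than read from oslo.config and a made-up endpoint for the example.

from urllib import parse as urlparse   # six.moves.urllib.parse in the original


def neaten_url(endpoint, tenant_id, reseller_prefix='AUTH_'):
    # Turn whatever endpoint the catalog registered into the per-tenant URL.
    root = endpoint.split('/v1')[0].rstrip('/') + '/'
    return urlparse.urljoin(root, 'v1/' + reseller_prefix + tenant_id)


print(neaten_url('http://swift.example.org:8080/v1/AUTH_admin', 'abc123'))
# http://swift.example.org:8080/v1/AUTH_abc123
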
-"""Common code for working with object stores -""" - -from __future__ import absolute_import - -from keystoneauth1 import exceptions -from oslo_config import cfg -from oslo_log import log -import six.moves.urllib.parse as urlparse -from swiftclient import client as swift - -from ceilometer.agent import plugin_base -from ceilometer.i18n import _LI -from ceilometer import keystone_client -from ceilometer import sample - - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('reseller_prefix', - default='AUTH_', - help="Swift reseller prefix. Must be on par with " - "reseller_prefix in proxy-server.conf."), -] - -SERVICE_OPTS = [ - cfg.StrOpt('swift', - default='object-store', - help='Swift service type.'), -] - -cfg.CONF.register_opts(OPTS) -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') -cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') - - -class _Base(plugin_base.PollsterBase): - - METHOD = 'head' - _ENDPOINT = None - - @property - def default_discovery(self): - return 'tenant' - - @property - def CACHE_KEY_METHOD(self): - return 'swift.%s_account' % self.METHOD - - @staticmethod - def _get_endpoint(ksclient): - # we store the endpoint as a base class attribute, so keystone is - # only ever called once - if _Base._ENDPOINT is None: - try: - conf = cfg.CONF.service_credentials - _Base._ENDPOINT = keystone_client.get_service_catalog( - ksclient).url_for( - service_type=cfg.CONF.service_types.swift, - interface=conf.interface, - region_name=conf.region_name) - except exceptions.EndpointNotFound as e: - LOG.info(_LI("Swift endpoint not found: %s"), e) - return _Base._ENDPOINT - - def _iter_accounts(self, ksclient, cache, tenants): - if self.CACHE_KEY_METHOD not in cache: - cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( - ksclient, tenants)) - return iter(cache[self.CACHE_KEY_METHOD]) - - def _get_account_info(self, ksclient, tenants): - endpoint = self._get_endpoint(ksclient) - if not endpoint: - raise StopIteration() - - for t in tenants: - api_method = '%s_account' % self.METHOD - yield (t.id, getattr(swift, api_method) - (self._neaten_url(endpoint, t.id), - keystone_client.get_auth_token(ksclient))) - - @staticmethod - def _neaten_url(endpoint, tenant_id): - """Transform the registered url to standard and valid format.""" - return urlparse.urljoin(endpoint.split('/v1')[0].rstrip('/') + '/', - 'v1/' + cfg.CONF.reseller_prefix + tenant_id) - - -class ObjectsPollster(_Base): - """Collect the total objects count for each project.""" - def get_samples(self, manager, cache, resources): - tenants = resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - yield sample.Sample( - name='storage.objects', - type=sample.TYPE_GAUGE, - volume=int(account['x-account-object-count']), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ObjectsSizePollster(_Base): - """Collect the total objects size of each project.""" - def get_samples(self, manager, cache, resources): - tenants = resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - yield sample.Sample( - name='storage.objects.size', - type=sample.TYPE_GAUGE, - volume=int(account['x-account-bytes-used']), - unit='B', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ObjectsContainersPollster(_Base): - """Collect the container count for each project.""" - def get_samples(self, manager, cache, resources): - tenants 
= resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - yield sample.Sample( - name='storage.objects.containers', - type=sample.TYPE_GAUGE, - volume=int(account['x-account-container-count']), - unit='container', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ContainersObjectsPollster(_Base): - """Collect the objects count per container for each project.""" - - METHOD = 'get' - - def get_samples(self, manager, cache, resources): - tenants = resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - containers_info = account[1] - for container in containers_info: - yield sample.Sample( - name='storage.containers.objects', - type=sample.TYPE_GAUGE, - volume=int(container['count']), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant + '/' + container['name'], - resource_metadata=None, - ) - - -class ContainersSizePollster(_Base): - """Collect the total objects size per container for each project.""" - - METHOD = 'get' - - def get_samples(self, manager, cache, resources): - tenants = resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - containers_info = account[1] - for container in containers_info: - yield sample.Sample( - name='storage.containers.objects.size', - type=sample.TYPE_GAUGE, - volume=int(container['bytes']), - unit='B', - user_id=None, - project_id=tenant, - resource_id=tenant + '/' + container['name'], - resource_metadata=None, - ) diff --git a/ceilometer/opts.py b/ceilometer/opts.py index 8f9f93a1..46fbb5db 100644 --- a/ceilometer/opts.py +++ b/ceilometer/opts.py @@ -13,112 +13,19 @@ # under the License. import itertools -from keystoneauth1 import loading - -import ceilometer.agent.manager import ceilometer.api import ceilometer.api.app -import ceilometer.cmd.polling -import ceilometer.collector -import ceilometer.compute.discovery -import ceilometer.compute.notifications -import ceilometer.compute.util -import ceilometer.compute.virt.inspector -import ceilometer.compute.virt.libvirt.inspector -import ceilometer.compute.virt.vmware.inspector -import ceilometer.compute.virt.xenapi.inspector -import ceilometer.coordination import ceilometer.dispatcher -import ceilometer.dispatcher.file -import ceilometer.dispatcher.gnocchi -import ceilometer.energy.kwapi -import ceilometer.event.converter -import ceilometer.hardware.discovery -import ceilometer.image.glance -import ceilometer.ipmi.notifications.ironic -import ceilometer.ipmi.platform.intel_node_manager -import ceilometer.ipmi.pollsters -import ceilometer.keystone_client -import ceilometer.meter.notifications -import ceilometer.middleware -import ceilometer.network.notifications -import ceilometer.neutron_client -import ceilometer.notification -import ceilometer.nova_client -import ceilometer.objectstore.rgw -import ceilometer.objectstore.swift -import ceilometer.pipeline -import ceilometer.publisher.messaging -import ceilometer.publisher.utils -import ceilometer.sample -import ceilometer.service import ceilometer.storage import ceilometer.utils def list_opts(): return [ - ('DEFAULT', - itertools.chain(ceilometer.agent.manager.OPTS, - ceilometer.api.app.OPTS, - ceilometer.cmd.polling.CLI_OPTS, - ceilometer.compute.notifications.OPTS, - ceilometer.compute.util.OPTS, - ceilometer.compute.virt.inspector.OPTS, - ceilometer.compute.virt.libvirt.inspector.OPTS, - ceilometer.dispatcher.OPTS, - ceilometer.image.glance.OPTS, - 
ceilometer.ipmi.notifications.ironic.OPTS, - ceilometer.middleware.OPTS, - ceilometer.network.notifications.OPTS, - ceilometer.nova_client.OPTS, - ceilometer.objectstore.swift.OPTS, - ceilometer.pipeline.OPTS, - ceilometer.sample.OPTS, - ceilometer.service.OPTS, - ceilometer.storage.CLI_OPTS, - ceilometer.utils.OPTS,)), + ('DEFAULT', ceilometer.api.app.OPTS), ('api', itertools.chain(ceilometer.api.OPTS, - ceilometer.api.app.API_OPTS, - [ceilometer.service.API_OPT])), - ('collector', - itertools.chain(ceilometer.collector.OPTS, - [ceilometer.service.COLL_OPT])), - ('compute', ceilometer.compute.discovery.OPTS), - ('coordination', ceilometer.coordination.OPTS), + ceilometer.api.app.API_OPTS)), ('database', ceilometer.storage.OPTS), - ('dispatcher_file', ceilometer.dispatcher.file.OPTS), - ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi.dispatcher_opts), - ('event', ceilometer.event.converter.OPTS), - ('exchange_control', ceilometer.exchange_control.EXCHANGE_OPTS), - ('hardware', ceilometer.hardware.discovery.OPTS), - ('ipmi', - itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, - ceilometer.ipmi.pollsters.OPTS)), - ('meter', ceilometer.meter.notifications.OPTS), - ('notification', - itertools.chain(ceilometer.notification.OPTS, - [ceilometer.service.NOTI_OPT])), - ('polling', ceilometer.agent.manager.POLLING_OPTS), - ('publisher', ceilometer.publisher.utils.OPTS), - ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), - ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), - # NOTE(sileht): the configuration file contains only the options - # for the password plugin that handles keystone v2 and v3 API - # with discovery. But other options are possible. - ('service_credentials', ( - ceilometer.keystone_client.CLI_OPTS + - loading.get_auth_common_conf_options() + - loading.get_auth_plugin_conf_options('password'))), - ('service_types', - itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, - ceilometer.image.glance.SERVICE_OPTS, - ceilometer.neutron_client.SERVICE_OPTS, - ceilometer.nova_client.SERVICE_OPTS, - ceilometer.objectstore.rgw.SERVICE_OPTS, - ceilometer.objectstore.swift.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), - ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), - ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] diff --git a/ceilometer/pipeline.py b/ceilometer/pipeline.py deleted file mode 100644 index 13bc6c5a..00000000 --- a/ceilometer/pipeline.py +++ /dev/null @@ -1,866 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# Copyright 2014 Red Hat, Inc -# -# Authors: Yunhong Jiang -# Eoghan Glynn -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
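ceilometer/pipeline.py, removed below, matches meter and event names against a source's list using shell-style wildcards: a leading '!' excludes, exclusions are checked first, and a list containing only exclusions lets everything else through (mixing explicit includes with excludes in one source is rejected when the pipeline file is loaded). A self-contained restatement of that matching rule; the example meter lists are illustrative.

from fnmatch import fnmatch


def is_supported(dataset, name):
    # True if *name* passes the include/exclude patterns in *dataset*.
    if any(fnmatch(name, pat[1:]) for pat in dataset if pat.startswith('!')):
        return False                   # explicit exclusion wins
    if any(fnmatch(name, pat) for pat in dataset if not pat.startswith('!')):
        return True                    # explicit inclusion
    # Only exclusions listed: everything not excluded is allowed.
    return all(pat.startswith('!') for pat in dataset)


print(is_supported(['disk.*'], 'disk.read.bytes'))    # True
print(is_supported(['!disk.*'], 'disk.read.bytes'))   # False
print(is_supported(['!image.*'], 'cpu'))              # True
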
- -import abc -import hashlib -from itertools import chain -import os - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import fnmatch -from oslo_utils import timeutils -import six -from stevedore import extension -import yaml - - -from ceilometer.event.storage import models -from ceilometer.i18n import _, _LI, _LW -from ceilometer import publisher -from ceilometer.publisher import utils as publisher_utils -from ceilometer import sample as sample_util - - -OPTS = [ - cfg.StrOpt('pipeline_cfg_file', - default="pipeline.yaml", - help="Configuration file for pipeline definition." - ), - cfg.StrOpt('event_pipeline_cfg_file', - default="event_pipeline.yaml", - help="Configuration file for event pipeline definition." - ), - cfg.BoolOpt('refresh_pipeline_cfg', - default=False, - help="Refresh Pipeline configuration on-the-fly." - ), - cfg.BoolOpt('refresh_event_pipeline_cfg', - default=False, - help="Refresh Event Pipeline configuration on-the-fly." - ), - cfg.IntOpt('pipeline_polling_interval', - default=20, - help="Polling interval for pipeline file configuration" - " in seconds." - ), -] - -cfg.CONF.register_opts(OPTS) - -LOG = log.getLogger(__name__) - - -class PipelineException(Exception): - def __init__(self, message, pipeline_cfg): - self.msg = message - self.pipeline_cfg = pipeline_cfg - - def __str__(self): - return 'Pipeline %s: %s' % (self.pipeline_cfg, self.msg) - - -@six.add_metaclass(abc.ABCMeta) -class PipelineEndpoint(object): - - def __init__(self, pipeline): - self.filter_rule = oslo_messaging.NotificationFilter( - publisher_id=pipeline.name) - self.publish_context = PublishContext([pipeline]) - - @abc.abstractmethod - def sample(self, messages): - pass - - -class SamplePipelineEndpoint(PipelineEndpoint): - def sample(self, messages): - samples = chain.from_iterable(m["payload"] for m in messages) - samples = [ - sample_util.Sample(name=s['counter_name'], - type=s['counter_type'], - unit=s['counter_unit'], - volume=s['counter_volume'], - user_id=s['user_id'], - project_id=s['project_id'], - resource_id=s['resource_id'], - timestamp=s['timestamp'], - resource_metadata=s['resource_metadata'], - source=s.get('source')) - for s in samples if publisher_utils.verify_signature( - s, cfg.CONF.publisher.telemetry_secret) - ] - with self.publish_context as p: - p(samples) - - -class EventPipelineEndpoint(PipelineEndpoint): - def sample(self, messages): - events = chain.from_iterable(m["payload"] for m in messages) - events = [ - models.Event( - message_id=ev['message_id'], - event_type=ev['event_type'], - generated=timeutils.normalize_time( - timeutils.parse_isotime(ev['generated'])), - traits=[models.Trait(name, dtype, - models.Trait.convert_value(dtype, value)) - for name, dtype, value in ev['traits']], - raw=ev.get('raw', {})) - for ev in events if publisher_utils.verify_signature( - ev, cfg.CONF.publisher.telemetry_secret) - ] - try: - with self.publish_context as p: - p(events) - except Exception: - if not cfg.CONF.notification.ack_on_event_error: - return oslo_messaging.NotificationResult.REQUEUE - raise - return oslo_messaging.NotificationResult.HANDLED - - -class _PipelineTransportManager(object): - def __init__(self): - self.transporters = [] - - @staticmethod - def hash_grouping(datapoint, grouping_keys): - value = '' - for key in grouping_keys or []: - value += datapoint.get(key) if datapoint.get(key) else '' - return hash(value) - - def add_transporter(self, transporter): - self.transporters.append(transporter) - - def 
publisher(self): - serializer = self.serializer - hash_grouping = self.hash_grouping - transporters = self.transporters - filter_attr = self.filter_attr - event_type = self.event_type - - class PipelinePublishContext(object): - def __enter__(self): - def p(data): - # TODO(gordc): cleanup so payload is always single - # datapoint. we can't correctly bucketise - # datapoints if batched. - data = [data] if not isinstance(data, list) else data - for datapoint in data: - serialized_data = serializer(datapoint) - for d_filter, grouping_keys, notifiers in transporters: - if d_filter(serialized_data[filter_attr]): - key = (hash_grouping(serialized_data, - grouping_keys) - % len(notifiers)) - notifier = notifiers[key] - notifier.sample({}, - event_type=event_type, - payload=[serialized_data]) - return p - - def __exit__(self, exc_type, exc_value, traceback): - pass - - return PipelinePublishContext() - - -class SamplePipelineTransportManager(_PipelineTransportManager): - filter_attr = 'counter_name' - event_type = 'ceilometer.pipeline' - - @staticmethod - def serializer(data): - return publisher_utils.meter_message_from_counter( - data, cfg.CONF.publisher.telemetry_secret) - - -class EventPipelineTransportManager(_PipelineTransportManager): - filter_attr = 'event_type' - event_type = 'pipeline.event' - - @staticmethod - def serializer(data): - return publisher_utils.message_from_event( - data, cfg.CONF.publisher.telemetry_secret) - - -class PublishContext(object): - - def __init__(self, pipelines=None): - pipelines = pipelines or [] - self.pipelines = set(pipelines) - - def add_pipelines(self, pipelines): - self.pipelines.update(pipelines) - - def __enter__(self): - def p(data): - for p in self.pipelines: - p.publish_data(data) - return p - - def __exit__(self, exc_type, exc_value, traceback): - for p in self.pipelines: - p.flush() - - -class Source(object): - """Represents a source of samples or events.""" - - def __init__(self, cfg): - self.cfg = cfg - - try: - self.name = cfg['name'] - self.sinks = cfg.get('sinks') - except KeyError as err: - raise PipelineException( - "Required field %s not specified" % err.args[0], cfg) - - def __str__(self): - return self.name - - def check_sinks(self, sinks): - if not self.sinks: - raise PipelineException( - "No sink defined in source %s" % self, - self.cfg) - for sink in self.sinks: - if sink not in sinks: - raise PipelineException( - "Dangling sink %s from source %s" % (sink, self), - self.cfg) - - def check_source_filtering(self, data, d_type): - """Source data rules checking - - - At least one meaningful datapoint exist - - Included type and excluded type can't co-exist on the same pipeline - - Included type meter and wildcard can't co-exist at same pipeline - """ - if not data: - raise PipelineException('No %s specified' % d_type, self.cfg) - - if ([x for x in data if x[0] not in '!*'] and - [x for x in data if x[0] == '!']): - raise PipelineException( - 'Both included and excluded %s specified' % d_type, - cfg) - - if '*' in data and [x for x in data if x[0] not in '!*']: - raise PipelineException( - 'Included %s specified with wildcard' % d_type, - self.cfg) - - @staticmethod - def is_supported(dataset, data_name): - # Support wildcard like storage.* and !disk.* - # Start with negation, we consider that the order is deny, allow - if any(fnmatch.fnmatch(data_name, datapoint[1:]) - for datapoint in dataset if datapoint[0] == '!'): - return False - - if any(fnmatch.fnmatch(data_name, datapoint) - for datapoint in dataset if datapoint[0] != '!'): - return 
True - - # if we only have negation, we suppose the default is allow - return all(datapoint.startswith('!') for datapoint in dataset) - - -class EventSource(Source): - """Represents a source of events. - - In effect it is a set of notification handlers capturing events for a set - of matching notifications. - """ - - def __init__(self, cfg): - super(EventSource, self).__init__(cfg) - self.events = cfg.get('events') - self.check_source_filtering(self.events, 'events') - - def support_event(self, event_name): - return self.is_supported(self.events, event_name) - - -class SampleSource(Source): - """Represents a source of samples. - - In effect it is a set of pollsters and/or notification handlers emitting - samples for a set of matching meters. Each source encapsulates meter name - matching, polling interval determination, optional resource enumeration or - discovery, and mapping to one or more sinks for publication. - """ - - def __init__(self, cfg): - super(SampleSource, self).__init__(cfg) - # Support 'counters' for backward compatibility - self.meters = cfg.get('meters', cfg.get('counters')) - try: - self.interval = int(cfg.get('interval', 600)) - except ValueError: - raise PipelineException("Invalid interval value", cfg) - if self.interval <= 0: - raise PipelineException("Interval value should > 0", cfg) - - self.resources = cfg.get('resources') or [] - if not isinstance(self.resources, list): - raise PipelineException("Resources should be a list", cfg) - - self.discovery = cfg.get('discovery') or [] - if not isinstance(self.discovery, list): - raise PipelineException("Discovery should be a list", cfg) - self.check_source_filtering(self.meters, 'meters') - - def get_interval(self): - return self.interval - - def support_meter(self, meter_name): - return self.is_supported(self.meters, meter_name) - - -class Sink(object): - """Represents a sink for the transformation and publication of data. - - Each sink config is concerned *only* with the transformation rules - and publication conduits for data. - - In effect, a sink describes a chain of handlers. The chain starts - with zero or more transformers and ends with one or more publishers. - - The first transformer in the chain is passed data from the - corresponding source, takes some action such as deriving rate of - change, performing unit conversion, or aggregating, before passing - the modified data to next step. - - The subsequent transformers, if any, handle the data similarly. - - At the end of the chain, publishers publish the data. The exact - publishing method depends on publisher type, for example, pushing - into data storage via the message bus providing guaranteed delivery, - or for loss-tolerant data UDP may be used. - - If no transformers are included in the chain, the publishers are - passed data directly from the sink which are published unchanged. 
- """ - - def __init__(self, cfg, transformer_manager): - self.cfg = cfg - - try: - self.name = cfg['name'] - # It's legal to have no transformer specified - self.transformer_cfg = cfg.get('transformers') or [] - except KeyError as err: - raise PipelineException( - "Required field %s not specified" % err.args[0], cfg) - - if not cfg.get('publishers'): - raise PipelineException("No publisher specified", cfg) - - self.publishers = [] - for p in cfg['publishers']: - if '://' not in p: - # Support old format without URL - p = p + "://" - try: - self.publishers.append(publisher.get_publisher(p, - self.NAMESPACE)) - except Exception: - LOG.exception(_("Unable to load publisher %s"), p) - - self.multi_publish = True if len(self.publishers) > 1 else False - self.transformers = self._setup_transformers(cfg, transformer_manager) - - def __str__(self): - return self.name - - def _setup_transformers(self, cfg, transformer_manager): - transformers = [] - for transformer in self.transformer_cfg: - parameter = transformer['parameters'] or {} - try: - ext = transformer_manager[transformer['name']] - except KeyError: - raise PipelineException( - "No transformer named %s loaded" % transformer['name'], - cfg) - transformers.append(ext.plugin(**parameter)) - LOG.info(_LI( - "Pipeline %(pipeline)s: Setup transformer instance %(name)s " - "with parameter %(param)s") % ({'pipeline': self, - 'name': transformer['name'], - 'param': parameter})) - - return transformers - - -class EventSink(Sink): - - NAMESPACE = 'ceilometer.event.publisher' - - def publish_events(self, events): - if events: - for p in self.publishers: - try: - p.publish_events(events) - except Exception: - LOG.exception(_("Pipeline %(pipeline)s: %(status)s" - " after error from publisher %(pub)s") % - ({'pipeline': self, 'status': 'Continue' if - self.multi_publish else 'Exit', 'pub': p} - )) - if not self.multi_publish: - raise - - @staticmethod - def flush(): - """Flush data after all events have been injected to pipeline.""" - - -class SampleSink(Sink): - - NAMESPACE = 'ceilometer.publisher' - - def _transform_sample(self, start, sample): - try: - for transformer in self.transformers[start:]: - sample = transformer.handle_sample(sample) - if not sample: - LOG.debug( - "Pipeline %(pipeline)s: Sample dropped by " - "transformer %(trans)s", {'pipeline': self, - 'trans': transformer}) - return - return sample - except Exception as err: - # TODO(gordc): only use one log level. - LOG.warning(_("Pipeline %(pipeline)s: " - "Exit after error from transformer " - "%(trans)s for %(smp)s") % ({'pipeline': self, - 'trans': transformer, - 'smp': sample})) - LOG.exception(err) - - def _publish_samples(self, start, samples): - """Push samples into pipeline for publishing. - - :param start: The first transformer that the sample will be injected. - This is mainly for flush() invocation that transformer - may emit samples. - :param samples: Sample list. 
- - """ - - transformed_samples = [] - if not self.transformers: - transformed_samples = samples - else: - for sample in samples: - LOG.debug( - "Pipeline %(pipeline)s: Transform sample " - "%(smp)s from %(trans)s transformer", {'pipeline': self, - 'smp': sample, - 'trans': start}) - sample = self._transform_sample(start, sample) - if sample: - transformed_samples.append(sample) - - if transformed_samples: - for p in self.publishers: - try: - p.publish_samples(transformed_samples) - except Exception: - LOG.exception(_( - "Pipeline %(pipeline)s: Continue after error " - "from publisher %(pub)s") % ({'pipeline': self, - 'pub': p})) - - def publish_samples(self, samples): - self._publish_samples(0, samples) - - def flush(self): - """Flush data after all samples have been injected to pipeline.""" - - for (i, transformer) in enumerate(self.transformers): - try: - self._publish_samples(i + 1, - list(transformer.flush())) - except Exception as err: - LOG.warning(_( - "Pipeline %(pipeline)s: Error flushing " - "transformer %(trans)s") % ({'pipeline': self, - 'trans': transformer})) - LOG.exception(err) - - -@six.add_metaclass(abc.ABCMeta) -class Pipeline(object): - """Represents a coupling between a sink and a corresponding source.""" - - def __init__(self, source, sink): - self.source = source - self.sink = sink - self.name = str(self) - - def __str__(self): - return (self.source.name if self.source.name == self.sink.name - else '%s:%s' % (self.source.name, self.sink.name)) - - def flush(self): - self.sink.flush() - - @property - def publishers(self): - return self.sink.publishers - - @abc.abstractmethod - def publish_data(self, data): - """Publish data from pipeline.""" - - -class EventPipeline(Pipeline): - """Represents a pipeline for Events.""" - - def __str__(self): - # NOTE(gordc): prepend a namespace so we ensure event and sample - # pipelines do not have the same name. 
- return 'event:%s' % super(EventPipeline, self).__str__() - - def support_event(self, event_type): - return self.source.support_event(event_type) - - def publish_data(self, events): - if not isinstance(events, list): - events = [events] - supported = [e for e in events - if self.source.support_event(e.event_type)] - self.sink.publish_events(supported) - - -class SamplePipeline(Pipeline): - """Represents a pipeline for Samples.""" - - def get_interval(self): - return self.source.interval - - @property - def resources(self): - return self.source.resources - - @property - def discovery(self): - return self.source.discovery - - def support_meter(self, meter_name): - return self.source.support_meter(meter_name) - - def _validate_volume(self, s): - volume = s.volume - if volume is None: - LOG.warning(_LW( - 'metering data %(counter_name)s for %(resource_id)s ' - '@ %(timestamp)s has no volume (volume: None), the sample will' - ' be dropped') - % {'counter_name': s.name, - 'resource_id': s.resource_id, - 'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'} - ) - return False - if not isinstance(volume, (int, float)): - try: - volume = float(volume) - except ValueError: - LOG.warning(_LW( - 'metering data %(counter_name)s for %(resource_id)s ' - '@ %(timestamp)s has volume which is not a number ' - '(volume: %(counter_volume)s), the sample will be dropped') - % {'counter_name': s.name, - 'resource_id': s.resource_id, - 'timestamp': ( - s.timestamp if s.timestamp else 'NO TIMESTAMP'), - 'counter_volume': volume} - ) - return False - return True - - def publish_data(self, samples): - if not isinstance(samples, list): - samples = [samples] - supported = [s for s in samples if self.source.support_meter(s.name) - and self._validate_volume(s)] - self.sink.publish_samples(supported) - - -SAMPLE_TYPE = {'pipeline': SamplePipeline, - 'source': SampleSource, - 'sink': SampleSink} - -EVENT_TYPE = {'pipeline': EventPipeline, - 'source': EventSource, - 'sink': EventSink} - - -class PipelineManager(object): - """Pipeline Manager - - Pipeline manager sets up pipelines according to config file - - Usually only one pipeline manager exists in the system. - - """ - - def __init__(self, cfg, transformer_manager, p_type=SAMPLE_TYPE): - """Setup the pipelines according to config. - - The configuration is supported as follows: - - Decoupled: the source and sink configuration are separately - specified before being linked together. This allows source- - specific configuration, such as resource discovery, to be - kept focused only on the fine-grained source while avoiding - the necessity for wide duplication of sink-related config. - - The configuration is provided in the form of separate lists - of dictionaries defining sources and sinks, for example: - - {"sources": [{"name": source_1, - "interval": interval_time, - "meters" : ["meter_1", "meter_2"], - "resources": ["resource_uri1", "resource_uri2"], - "sinks" : ["sink_1", "sink_2"] - }, - {"name": source_2, - "interval": interval_time, - "meters" : ["meter_3"], - "sinks" : ["sink_2"] - }, - ], - "sinks": [{"name": sink_1, - "transformers": [ - {"name": "Transformer_1", - "parameters": {"p1": "value"}}, - - {"name": "Transformer_2", - "parameters": {"p1": "value"}}, - ], - "publishers": ["publisher_1", "publisher_2"] - }, - {"name": sink_2, - "publishers": ["publisher_3"] - }, - ] - } - - The interval determines the cadence of sample injection into - the pipeline where samples are produced under the direct control - of an agent, i.e. 
via a polling cycle as opposed to incoming - notifications. - - Valid meter format is '*', '!meter_name', or 'meter_name'. - '*' is wildcard symbol means any meters; '!meter_name' means - "meter_name" will be excluded; 'meter_name' means 'meter_name' - will be included. - - The 'meter_name" is Sample name field. - - Valid meters definition is all "included meter names", all - "excluded meter names", wildcard and "excluded meter names", or - only wildcard. - - The resources is list of URI indicating the resources from where - the meters should be polled. It's optional and it's up to the - specific pollster to decide how to use it. - - Transformer's name is plugin name in setup.cfg. - - Publisher's name is plugin name in setup.cfg - - """ - self.pipelines = [] - if not ('sources' in cfg and 'sinks' in cfg): - raise PipelineException("Both sources & sinks are required", - cfg) - LOG.info(_LI('detected decoupled pipeline config format')) - - unique_names = set() - sources = [] - for s in cfg.get('sources', []): - name = s.get('name') - if name in unique_names: - raise PipelineException("Duplicated source names: %s" % - name, self) - else: - unique_names.add(name) - sources.append(p_type['source'](s)) - unique_names.clear() - - sinks = {} - for s in cfg.get('sinks', []): - name = s.get('name') - if name in unique_names: - raise PipelineException("Duplicated sink names: %s" % - name, self) - else: - unique_names.add(name) - sinks[s['name']] = p_type['sink'](s, transformer_manager) - unique_names.clear() - - for source in sources: - source.check_sinks(sinks) - for target in source.sinks: - pipe = p_type['pipeline'](source, sinks[target]) - if pipe.name in unique_names: - raise PipelineException( - "Duplicate pipeline name: %s. Ensure pipeline" - " names are unique. (name is the source and sink" - " names combined)" % pipe.name, cfg) - else: - unique_names.add(pipe.name) - self.pipelines.append(pipe) - unique_names.clear() - - def publisher(self): - """Build a new Publisher for these manager pipelines. - - :param context: The context. - """ - return PublishContext(self.pipelines) - - -class PollingManager(object): - """Polling Manager - - Polling manager sets up polling according to config file. - """ - - def __init__(self, cfg): - """Setup the polling according to config. - - The configuration is the sources half of the Pipeline Config. 
- """ - self.sources = [] - if not ('sources' in cfg and 'sinks' in cfg): - raise PipelineException("Both sources & sinks are required", - cfg) - LOG.info(_LI('detected decoupled pipeline config format')) - - unique_names = set() - for s in cfg.get('sources', []): - name = s.get('name') - if name in unique_names: - raise PipelineException("Duplicated source names: %s" % - name, self) - else: - unique_names.add(name) - self.sources.append(SampleSource(s)) - unique_names.clear() - - -def _setup_pipeline_manager(cfg_file, transformer_manager, p_type=SAMPLE_TYPE): - if not os.path.exists(cfg_file): - cfg_file = cfg.CONF.find_file(cfg_file) - - LOG.debug("Pipeline config file: %s", cfg_file) - - with open(cfg_file) as fap: - data = fap.read() - - pipeline_cfg = yaml.safe_load(data) - LOG.info(_LI("Pipeline config: %s"), pipeline_cfg) - - return PipelineManager(pipeline_cfg, - transformer_manager or - extension.ExtensionManager( - 'ceilometer.transformer', - ), p_type) - - -def _setup_polling_manager(cfg_file): - if not os.path.exists(cfg_file): - cfg_file = cfg.CONF.find_file(cfg_file) - - LOG.debug("Polling config file: %s", cfg_file) - - with open(cfg_file) as fap: - data = fap.read() - - pipeline_cfg = yaml.safe_load(data) - LOG.info(_LI("Pipeline config: %s"), pipeline_cfg) - - return PollingManager(pipeline_cfg) - - -def setup_event_pipeline(transformer_manager=None): - """Setup event pipeline manager according to yaml config file.""" - cfg_file = cfg.CONF.event_pipeline_cfg_file - return _setup_pipeline_manager(cfg_file, transformer_manager, EVENT_TYPE) - - -def setup_pipeline(transformer_manager=None): - """Setup pipeline manager according to yaml config file.""" - cfg_file = cfg.CONF.pipeline_cfg_file - return _setup_pipeline_manager(cfg_file, transformer_manager) - - -def _get_pipeline_cfg_file(p_type=SAMPLE_TYPE): - if p_type == EVENT_TYPE: - cfg_file = cfg.CONF.event_pipeline_cfg_file - else: - cfg_file = cfg.CONF.pipeline_cfg_file - - if not os.path.exists(cfg_file): - cfg_file = cfg.CONF.find_file(cfg_file) - - return cfg_file - - -def get_pipeline_mtime(p_type=SAMPLE_TYPE): - cfg_file = _get_pipeline_cfg_file(p_type) - return os.path.getmtime(cfg_file) - - -def get_pipeline_hash(p_type=SAMPLE_TYPE): - - cfg_file = _get_pipeline_cfg_file(p_type) - with open(cfg_file) as fap: - data = fap.read() - if six.PY3: - data = data.encode('utf-8') - - file_hash = hashlib.md5(data).hexdigest() - return file_hash - - -def setup_polling(): - """Setup polling manager according to yaml config file.""" - cfg_file = cfg.CONF.pipeline_cfg_file - return _setup_polling_manager(cfg_file) - - -def get_pipeline_grouping_key(pipe): - keys = [] - for transformer in pipe.sink.transformers: - keys += transformer.grouping_keys - return list(set(keys)) diff --git a/ceilometer/publisher/__init__.py b/ceilometer/publisher/__init__.py index 2966124b..e69de29b 100644 --- a/ceilometer/publisher/__init__.py +++ b/ceilometer/publisher/__init__.py @@ -1,48 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_utils import netutils -import six -from stevedore import driver - - -def get_publisher(url, namespace='ceilometer.publisher'): - """Get publisher driver and load it. - - :param URL: URL for the publisher - :param namespace: Namespace to use to look for drivers. - """ - parse_result = netutils.urlsplit(url) - loaded_driver = driver.DriverManager(namespace, parse_result.scheme) - return loaded_driver.driver(parse_result) - - -@six.add_metaclass(abc.ABCMeta) -class PublisherBase(object): - """Base class for plugins that publish data.""" - - def __init__(self, parsed_url): - pass - - @abc.abstractmethod - def publish_samples(self, samples): - """Publish samples into final conduit.""" - - @abc.abstractmethod - def publish_events(self, events): - """Publish events into final conduit.""" diff --git a/ceilometer/publisher/direct.py b/ceilometer/publisher/direct.py deleted file mode 100644 index 6a52350b..00000000 --- a/ceilometer/publisher/direct.py +++ /dev/null @@ -1,59 +0,0 @@ -# -# Copyright 2015 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_utils import timeutils - -from ceilometer.dispatcher import database -from ceilometer import publisher -from ceilometer.publisher import utils - - -class DirectPublisher(publisher.PublisherBase): - """A publisher that allows saving directly from the pipeline. - - Samples are saved to the currently configured database by hitching - a ride on the DatabaseDispatcher. This is useful where it is desirable - to limit the number of external services that are required. - """ - - def __init__(self, parsed_url): - super(DirectPublisher, self).__init__(parsed_url) - dispatcher = database.DatabaseDispatcher(cfg.CONF) - self.meter_conn = dispatcher.meter_conn - self.event_conn = dispatcher.event_conn - - def publish_samples(self, samples): - if not isinstance(samples, list): - samples = [samples] - - # Transform the Sample objects into a list of dicts - meters = [ - utils.meter_message_from_counter( - sample, cfg.CONF.publisher.telemetry_secret) - for sample in samples - ] - - for meter in meters: - if meter.get('timestamp'): - ts = timeutils.parse_isotime(meter['timestamp']) - meter['timestamp'] = timeutils.normalize_time(ts) - self.meter_conn.record_metering_data(meter) - - def publish_events(self, events): - if not isinstance(events, list): - events = [events] - - self.event_conn.record_events(events) diff --git a/ceilometer/publisher/file.py b/ceilometer/publisher/file.py deleted file mode 100644 index fd3714f8..00000000 --- a/ceilometer/publisher/file.py +++ /dev/null @@ -1,104 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import logging.handlers - -from oslo_log import log -from six.moves.urllib import parse as urlparse - -import ceilometer -from ceilometer.i18n import _ -from ceilometer import publisher - -LOG = log.getLogger(__name__) - - -class FilePublisher(publisher.PublisherBase): - """Publisher metering data to file. - - The publisher which records metering data into a file. The file name and - location should be configured in ceilometer pipeline configuration file. - If a file name and location is not specified, this File Publisher will not - log any meters other than log a warning in Ceilometer log file. - - To enable this publisher, add the following section to the - /etc/ceilometer/publisher.yaml file or simply add it to an existing - pipeline:: - - - - name: meter_file - interval: 600 - counters: - - "*" - transformers: - publishers: - - file:///var/test?max_bytes=10000000&backup_count=5 - - File path is required for this publisher to work properly. If max_bytes - or backup_count is missing, FileHandler will be used to save the metering - data. If max_bytes and backup_count are present, RotatingFileHandler will - be used to save the metering data. - """ - - def __init__(self, parsed_url): - super(FilePublisher, self).__init__(parsed_url) - - self.publisher_logger = None - path = parsed_url.path - if not path or path.lower() == 'file': - LOG.error(_('The path for the file publisher is required')) - return - - rfh = None - max_bytes = 0 - backup_count = 0 - # Handling other configuration options in the query string - if parsed_url.query: - params = urlparse.parse_qs(parsed_url.query) - if params.get('max_bytes') and params.get('backup_count'): - try: - max_bytes = int(params.get('max_bytes')[0]) - backup_count = int(params.get('backup_count')[0]) - except ValueError: - LOG.error(_('max_bytes and backup_count should be ' - 'numbers.')) - return - # create rotating file handler - rfh = logging.handlers.RotatingFileHandler( - path, encoding='utf8', maxBytes=max_bytes, - backupCount=backup_count) - - self.publisher_logger = logging.Logger('publisher.file') - self.publisher_logger.propagate = False - self.publisher_logger.setLevel(logging.INFO) - rfh.setLevel(logging.INFO) - self.publisher_logger.addHandler(rfh) - - def publish_samples(self, samples): - """Send a metering message for publishing - - :param samples: Samples from pipeline after transformation - """ - if self.publisher_logger: - for sample in samples: - self.publisher_logger.info(sample.as_dict()) - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - raise ceilometer.NotImplementedError diff --git a/ceilometer/publisher/http.py b/ceilometer/publisher/http.py deleted file mode 100644 index 97e942c8..00000000 --- a/ceilometer/publisher/http.py +++ /dev/null @@ -1,137 +0,0 @@ -# -# Copyright 2016 IBM -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log -from oslo_serialization import jsonutils -import requests -from requests import adapters -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _LE -from ceilometer import publisher - -LOG = log.getLogger(__name__) - - -class HttpPublisher(publisher.PublisherBase): - """Publisher metering data to a http endpoint - - The publisher which records metering data into a http endpoint. The - endpoint should be configured in ceilometer pipeline configuration file. - If the timeout and/or retry_count are not specified, the default timeout - and retry_count will be set to 1000 and 2 respectively. - - To use this publisher for samples, add the following section to the - /etc/ceilometer/publisher.yaml file or simply add it to an existing - pipeline:: - - - name: meter_file - interval: 600 - counters: - - "*" - transformers: - publishers: - - http://host:80/path?timeout=1&max_retries=2 - - To use this publisher for events, the raw message needs to be present in - the event. To enable that, ceilometer.conf file will need to have a - section like the following: - - [event] - store_raw = info - - Then in the event_pipeline.yaml file, you can use the publisher in one of - the sinks like the following: - - - name: event_sink - transformers: - publishers: - - http://host:80/path?timeout=1&max_retries=2 - - Http end point is required for this publisher to work properly. - """ - - def __init__(self, parsed_url): - super(HttpPublisher, self).__init__(parsed_url) - self.target = parsed_url.geturl() - - if not parsed_url.hostname: - raise ValueError('The hostname of an endpoint for ' - 'HttpPublisher is required') - - # non-numeric port from the url string will cause a ValueError - # exception when the port is read. Do a read to make sure the port - # is valid, if not, ValueError will be thrown. - parsed_url.port - - self.headers = {'Content-type': 'application/json'} - - # Handling other configuration options in the query string - if parsed_url.query: - params = urlparse.parse_qs(parsed_url.query) - self.timeout = self._get_param(params, 'timeout', 1) - self.max_retries = self._get_param(params, 'max_retries', 2) - else: - self.timeout = 1 - self.max_retries = 2 - - LOG.debug('HttpPublisher for endpoint %s is initialized!' 
% - self.target) - - @staticmethod - def _get_param(params, name, default_value): - try: - return int(params.get(name)[-1]) - except (ValueError, TypeError): - LOG.debug('Default value %(value)s is used for %(name)s' % - {'value': default_value, 'name': name}) - return default_value - - def _do_post(self, data): - if not data: - LOG.debug('Data set is empty!') - return - - session = requests.Session() - session.mount(self.target, - adapters.HTTPAdapter(max_retries=self.max_retries)) - - content = ','.join([jsonutils.dumps(item) for item in data]) - content = '[' + content + ']' - - LOG.debug('Data to be posted by HttpPublisher: %s' % content) - - res = session.post(self.target, data=content, headers=self.headers, - timeout=self.timeout) - if res.status_code >= 300: - LOG.error(_LE('Data post failed with status code %s') % - res.status_code) - - def publish_samples(self, samples): - """Send a metering message for publishing - - :param samples: Samples from pipeline after transformation - """ - data = [sample.as_dict() for sample in samples] - self._do_post(data) - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - data = [evt.as_dict()['raw']['payload'] for evt in events - if evt.as_dict().get('raw', {}).get('payload')] - self._do_post(data) diff --git a/ceilometer/publisher/kafka_broker.py b/ceilometer/publisher/kafka_broker.py deleted file mode 100644 index 2ecae2d6..00000000 --- a/ceilometer/publisher/kafka_broker.py +++ /dev/null @@ -1,96 +0,0 @@ -# -# Copyright 2015 Cisco Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import kafka -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import netutils -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _LE -from ceilometer.publisher import messaging - -LOG = log.getLogger(__name__) - - -class KafkaBrokerPublisher(messaging.MessagingPublisher): - """Publish metering data to kafka broker. - - The ip address and port number of kafka broker should be configured in - ceilometer pipeline configuration file. If an ip address is not specified, - this kafka publisher will not publish any meters. - - To enable this publisher, add the following section to the - /etc/ceilometer/pipeline.yaml file or simply add it to an existing - pipeline:: - - meter: - - name: meter_kafka - interval: 600 - counters: - - "*" - transformers: - sinks: - - kafka_sink - sinks: - - name: kafka_sink - transformers: - publishers: - - kafka://[kafka_broker_ip]:[kafka_broker_port]?topic=[topic] - - Kafka topic name and broker's port are required for this publisher to work - properly. If topic parameter is missing, this kafka publisher publish - metering data under a topic name, 'ceilometer'. If the port number is not - specified, this Kafka Publisher will use 9092 as the broker's port. - This publisher has transmit options such as queue, drop, and retry. 
These - options are specified using policy field of URL parameter. When queue - option could be selected, local queue length can be determined using - max_queue_length field as well. When the transfer fails with retry - option, try to resend the data as many times as specified in max_retry - field. If max_retry is not specified, default the number of retry is 100. - """ - - def __init__(self, parsed_url): - super(KafkaBrokerPublisher, self).__init__(parsed_url) - options = urlparse.parse_qs(parsed_url.query) - - self._producer = None - self._host, self._port = netutils.parse_host_port( - parsed_url.netloc, default_port=9092) - self._topic = options.get('topic', ['ceilometer'])[-1] - self.max_retry = int(options.get('max_retry', [100])[-1]) - - def _ensure_connection(self): - if self._producer: - return - - try: - client = kafka.KafkaClient("%s:%s" % (self._host, self._port)) - self._producer = kafka.SimpleProducer(client) - except Exception as e: - LOG.exception(_LE("Failed to connect to Kafka service: %s"), e) - raise messaging.DeliveryFailure('Kafka Client is not available, ' - 'please restart Kafka client') - - def _send(self, event_type, data): - self._ensure_connection() - # TODO(sileht): don't split the payload into multiple network - # message ... but how to do that without breaking consuming - # application... - try: - for d in data: - self._producer.send_messages(self._topic, jsonutils.dumps(d)) - except Exception as e: - messaging.raise_delivery_failure(e) diff --git a/ceilometer/publisher/messaging.py b/ceilometer/publisher/messaging.py deleted file mode 100644 index 3a12690f..00000000 --- a/ceilometer/publisher/messaging.py +++ /dev/null @@ -1,221 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Publish a sample using the preferred RPC mechanism. 
-""" - -import abc -import itertools -import operator - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import encodeutils -from oslo_utils import excutils -import six -import six.moves.urllib.parse as urlparse - -from ceilometer.i18n import _, _LE, _LI -from ceilometer import messaging -from ceilometer import publisher -from ceilometer.publisher import utils - - -LOG = log.getLogger(__name__) - -NOTIFIER_OPTS = [ - cfg.StrOpt('metering_topic', - default='metering', - help='The topic that ceilometer uses for metering ' - 'notifications.', - ), - cfg.StrOpt('event_topic', - default='event', - help='The topic that ceilometer uses for event ' - 'notifications.', - ), - cfg.StrOpt('telemetry_driver', - default='messagingv2', - help='The driver that ceilometer uses for metering ' - 'notifications.', - deprecated_name='metering_driver', - ) -] - -cfg.CONF.register_opts(NOTIFIER_OPTS, - group="publisher_notifier") -cfg.CONF.import_opt('host', 'ceilometer.service') - - -class DeliveryFailure(Exception): - def __init__(self, message=None, cause=None): - super(DeliveryFailure, self).__init__(message) - self.cause = cause - - -def raise_delivery_failure(exc): - excutils.raise_with_cause(DeliveryFailure, - encodeutils.exception_to_unicode(exc), - cause=exc) - - -@six.add_metaclass(abc.ABCMeta) -class MessagingPublisher(publisher.PublisherBase): - - def __init__(self, parsed_url): - options = urlparse.parse_qs(parsed_url.query) - # the value of options is a list of url param values - # only take care of the latest one if the option - # is provided more than once - self.per_meter_topic = bool(int( - options.get('per_meter_topic', [0])[-1])) - - self.policy = options.get('policy', ['default'])[-1] - self.max_queue_length = int(options.get( - 'max_queue_length', [1024])[-1]) - self.max_retry = 0 - - self.local_queue = [] - - if self.policy in ['default', 'queue', 'drop']: - LOG.info(_LI('Publishing policy set to %s') % self.policy) - else: - LOG.warning(_('Publishing policy is unknown (%s) force to ' - 'default') % self.policy) - self.policy = 'default' - - self.retry = 1 if self.policy in ['queue', 'drop'] else None - - def publish_samples(self, samples): - """Publish samples on RPC. - - :param samples: Samples from pipeline after transformation. - - """ - - meters = [ - utils.meter_message_from_counter( - sample, cfg.CONF.publisher.telemetry_secret) - for sample in samples - ] - topic = cfg.CONF.publisher_notifier.metering_topic - self.local_queue.append((topic, meters)) - - if self.per_meter_topic: - for meter_name, meter_list in itertools.groupby( - sorted(meters, key=operator.itemgetter('counter_name')), - operator.itemgetter('counter_name')): - meter_list = list(meter_list) - topic_name = topic + '.' 
+ meter_name - LOG.debug('Publishing %(m)d samples on %(n)s', - {'m': len(meter_list), 'n': topic_name}) - self.local_queue.append((topic_name, meter_list)) - - self.flush() - - def flush(self): - # NOTE(sileht): - # this is why the self.local_queue is emptied before processing the - # queue and the remaining messages in the queue are added to - # self.local_queue after in case of another call having already added - # something in the self.local_queue - queue = self.local_queue - self.local_queue = [] - self.local_queue = (self._process_queue(queue, self.policy) + - self.local_queue) - if self.policy == 'queue': - self._check_queue_length() - - def _check_queue_length(self): - queue_length = len(self.local_queue) - if queue_length > self.max_queue_length > 0: - count = queue_length - self.max_queue_length - self.local_queue = self.local_queue[count:] - LOG.warning(_("Publisher max local_queue length is exceeded, " - "dropping %d oldest samples") % count) - - def _process_queue(self, queue, policy): - current_retry = 0 - while queue: - topic, data = queue[0] - try: - self._send(topic, data) - except DeliveryFailure: - data = sum([len(m) for __, m in queue]) - if policy == 'queue': - LOG.warning(_("Failed to publish %d datapoints, queue " - "them"), data) - return queue - elif policy == 'drop': - LOG.warning(_("Failed to publish %d datapoints, " - "dropping them"), data) - return [] - current_retry += 1 - if current_retry >= self.max_retry: - LOG.exception(_LE("Failed to retry to send sample data " - "with max_retry times")) - raise - else: - queue.pop(0) - return [] - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - ev_list = [utils.message_from_event( - event, cfg.CONF.publisher.telemetry_secret) for event in events] - - topic = cfg.CONF.publisher_notifier.event_topic - self.local_queue.append((topic, ev_list)) - self.flush() - - @abc.abstractmethod - def _send(self, topic, meters): - """Send the meters to the messaging topic.""" - - -class NotifierPublisher(MessagingPublisher): - def __init__(self, parsed_url, default_topic): - super(NotifierPublisher, self).__init__(parsed_url) - options = urlparse.parse_qs(parsed_url.query) - topic = options.get('topic', [default_topic]) - self.notifier = oslo_messaging.Notifier( - messaging.get_transport(), - driver=cfg.CONF.publisher_notifier.telemetry_driver, - publisher_id='telemetry.publisher.%s' % cfg.CONF.host, - topics=topic, - retry=self.retry - ) - - def _send(self, event_type, data): - try: - self.notifier.sample({}, event_type=event_type, - payload=data) - except oslo_messaging.MessageDeliveryFailure as e: - raise_delivery_failure(e) - - -class SampleNotifierPublisher(NotifierPublisher): - def __init__(self, parsed_url): - super(SampleNotifierPublisher, self).__init__( - parsed_url, cfg.CONF.publisher_notifier.metering_topic) - - -class EventNotifierPublisher(NotifierPublisher): - def __init__(self, parsed_url): - super(EventNotifierPublisher, self).__init__( - parsed_url, cfg.CONF.publisher_notifier.event_topic) diff --git a/ceilometer/publisher/test.py b/ceilometer/publisher/test.py deleted file mode 100644 index 8ae3b1e7..00000000 --- a/ceilometer/publisher/test.py +++ /dev/null @@ -1,43 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Publish a sample in memory, useful for testing -""" - -from ceilometer import publisher - - -class TestPublisher(publisher.PublisherBase): - """Publisher used in unit testing.""" - - def __init__(self, parsed_url): - self.samples = [] - self.events = [] - self.calls = 0 - - def publish_samples(self, samples): - """Send a metering message for publishing - - :param samples: Samples from pipeline after transformation - """ - self.samples.extend(samples) - self.calls += 1 - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - self.events.extend(events) - self.calls += 1 diff --git a/ceilometer/publisher/udp.py b/ceilometer/publisher/udp.py deleted file mode 100644 index dd677b4b..00000000 --- a/ceilometer/publisher/udp.py +++ /dev/null @@ -1,74 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Publish a sample using an UDP mechanism -""" - -import socket - -import msgpack -from oslo_config import cfg -from oslo_log import log -from oslo_utils import netutils - -import ceilometer -from ceilometer.i18n import _ -from ceilometer import publisher -from ceilometer.publisher import utils - -cfg.CONF.import_opt('udp_port', 'ceilometer.collector', - group='collector') - -LOG = log.getLogger(__name__) - - -class UDPPublisher(publisher.PublisherBase): - def __init__(self, parsed_url): - self.host, self.port = netutils.parse_host_port( - parsed_url.netloc, - default_port=cfg.CONF.collector.udp_port) - if netutils.is_valid_ipv6(self.host): - addr_family = socket.AF_INET6 - else: - addr_family = socket.AF_INET - self.socket = socket.socket(addr_family, - socket.SOCK_DGRAM) - - def publish_samples(self, samples): - """Send a metering message for publishing - - :param samples: Samples from pipeline after transformation - """ - - for sample in samples: - msg = utils.meter_message_from_counter( - sample, cfg.CONF.publisher.telemetry_secret) - host = self.host - port = self.port - LOG.debug("Publishing sample %(msg)s over UDP to " - "%(host)s:%(port)d", {'msg': msg, 'host': host, - 'port': port}) - try: - self.socket.sendto(msgpack.dumps(msg), - (self.host, self.port)) - except Exception as e: - LOG.warning(_("Unable to send sample over UDP")) - LOG.exception(e) - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - raise ceilometer.NotImplementedError diff --git a/ceilometer/publisher/utils.py b/ceilometer/publisher/utils.py deleted file mode 100644 index 6f377312..00000000 --- a/ceilometer/publisher/utils.py +++ /dev/null @@ -1,143 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Utils for publishers -""" - -import hashlib -import hmac - -from oslo_config import cfg -import six - -from ceilometer import utils - -OPTS = [ - cfg.StrOpt('telemetry_secret', - secret=True, - default='change this for valid signing', - help='Secret value for signing messages. Set value empty if ' - 'signing is not required to avoid computational overhead.', - deprecated_opts=[cfg.DeprecatedOpt("metering_secret", - "DEFAULT"), - cfg.DeprecatedOpt("metering_secret", - "publisher_rpc"), - cfg.DeprecatedOpt("metering_secret", - "publisher")] - ), -] -cfg.CONF.register_opts(OPTS, group="publisher") - - -def compute_signature(message, secret): - """Return the signature for a message dictionary.""" - if not secret: - return '' - - if isinstance(secret, six.text_type): - secret = secret.encode('utf-8') - digest_maker = hmac.new(secret, b'', hashlib.sha256) - for name, value in utils.recursive_keypairs(message): - if name == 'message_signature': - # Skip any existing signature value, which would not have - # been part of the original message. 
- continue - digest_maker.update(six.text_type(name).encode('utf-8')) - digest_maker.update(six.text_type(value).encode('utf-8')) - return digest_maker.hexdigest() - - -def besteffort_compare_digest(first, second): - """Returns True if both string inputs are equal, otherwise False. - - This function should take a constant amount of time regardless of - how many characters in the strings match. - - """ - # NOTE(sileht): compare_digest method protected for timing-attacks - # exists since python >= 2.7.7 and python >= 3.3 - # this a bit less-secure python fallback version - # taken from https://github.com/openstack/python-keystoneclient/blob/ - # master/keystoneclient/middleware/memcache_crypt.py#L88 - if len(first) != len(second): - return False - result = 0 - if six.PY3 and isinstance(first, bytes) and isinstance(second, bytes): - for x, y in zip(first, second): - result |= x ^ y - else: - for x, y in zip(first, second): - result |= ord(x) ^ ord(y) - return result == 0 - - -if hasattr(hmac, 'compare_digest'): - compare_digest = hmac.compare_digest -else: - compare_digest = besteffort_compare_digest - - -def verify_signature(message, secret): - """Check the signature in the message. - - Message is verified against the value computed from the rest of the - contents. - """ - if not secret: - return True - - old_sig = message.get('message_signature', '') - new_sig = compute_signature(message, secret) - - if isinstance(old_sig, six.text_type): - try: - old_sig = old_sig.encode('ascii') - except UnicodeDecodeError: - return False - if six.PY3: - new_sig = new_sig.encode('ascii') - - return compare_digest(new_sig, old_sig) - - -def meter_message_from_counter(sample, secret): - """Make a metering message ready to be published or stored. - - Returns a dictionary containing a metering message - for a notification message and a Sample instance. - """ - msg = {'source': sample.source, - 'counter_name': sample.name, - 'counter_type': sample.type, - 'counter_unit': sample.unit, - 'counter_volume': sample.volume, - 'user_id': sample.user_id, - 'project_id': sample.project_id, - 'resource_id': sample.resource_id, - 'timestamp': sample.timestamp, - 'resource_metadata': sample.resource_metadata, - 'message_id': sample.id, - } - msg['message_signature'] = compute_signature(msg, secret) - return msg - - -def message_from_event(event, secret): - """Make an event message ready to be published or stored. - - Returns a serialized model of Event containing an event message - """ - msg = event.serialize() - msg['message_signature'] = compute_signature(msg, secret) - return msg diff --git a/ceilometer/sample.py b/ceilometer/sample.py deleted file mode 100644 index 8237626c..00000000 --- a/ceilometer/sample.py +++ /dev/null @@ -1,109 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# -# Authors: Doug Hellmann -# Julien Danjou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Sample class for holding data about a metering event. 
- -A Sample doesn't really do anything, but we need a way to -ensure that all of the appropriate fields have been filled -in by the plugins that create them. -""" - -import copy -import uuid - -from oslo_config import cfg - - -OPTS = [ - cfg.StrOpt('sample_source', - default='openstack', - help='Source for samples emitted on this instance.'), -] - -cfg.CONF.register_opts(OPTS) - - -# Fields explanation: -# -# Source: the source of this sample -# Name: the name of the meter, must be unique -# Type: the type of the meter, must be either: -# - cumulative: the value is incremented and never reset to 0 -# - delta: the value is reset to 0 each time it is sent -# - gauge: the value is an absolute value and is not a counter -# Unit: the unit of the meter -# Volume: the sample value -# User ID: the user ID -# Project ID: the project ID -# Resource ID: the resource ID -# Timestamp: when the sample has been read -# Resource metadata: various metadata -# id: an uuid of a sample, can be taken from API when post sample via API -class Sample(object): - - def __init__(self, name, type, unit, volume, user_id, project_id, - resource_id, timestamp=None, resource_metadata=None, - source=None, id=None): - self.name = name - self.type = type - self.unit = unit - self.volume = volume - self.user_id = user_id - self.project_id = project_id - self.resource_id = resource_id - self.timestamp = timestamp - self.resource_metadata = resource_metadata or {} - self.source = source or cfg.CONF.sample_source - self.id = id or str(uuid.uuid1()) - - def as_dict(self): - return copy.copy(self.__dict__) - - def __repr__(self): - return '' % ( - self.name, self.volume, self.resource_id, self.timestamp) - - @classmethod - def from_notification(cls, name, type, volume, unit, - user_id, project_id, resource_id, - message, timestamp=None, metadata=None, source=None): - if not metadata: - metadata = (copy.copy(message['payload']) - if isinstance(message['payload'], dict) else {}) - metadata['event_type'] = message['event_type'] - metadata['host'] = message['publisher_id'] - ts = timestamp if timestamp else message['timestamp'] - return cls(name=name, - type=type, - volume=volume, - unit=unit, - user_id=user_id, - project_id=project_id, - resource_id=resource_id, - timestamp=ts, - resource_metadata=metadata, - source=source) - - def set_timestamp(self, timestamp): - self.timestamp = timestamp - -TYPE_GAUGE = 'gauge' -TYPE_DELTA = 'delta' -TYPE_CUMULATIVE = 'cumulative' - -TYPES = (TYPE_GAUGE, TYPE_DELTA, TYPE_CUMULATIVE) diff --git a/ceilometer/service.py b/ceilometer/service.py index a770a8f5..8ac23e3b 100644 --- a/ceilometer/service.py +++ b/ceilometer/service.py @@ -12,69 +12,20 @@ # License for the specific language governing permissions and limitations # under the License. -import socket import sys -from keystoneauth1 import loading as ka_loading from oslo_config import cfg import oslo_i18n from oslo_log import log from oslo_reports import guru_meditation_report as gmr from ceilometer.conf import defaults -from ceilometer import keystone_client -from ceilometer import messaging from ceilometer import version -OPTS = [ - cfg.StrOpt('host', - default=socket.gethostname(), - help='Name of this node, which must be valid in an AMQP ' - 'key. Can be an opaque identifier. For ZeroMQ only, must ' - 'be a valid host name, FQDN, or IP address.'), - cfg.IntOpt('http_timeout', - default=600, - help='Timeout seconds for HTTP requests. 
Set it to None to ' - 'disable timeout.'), -] -cfg.CONF.register_opts(OPTS) - -API_OPT = cfg.IntOpt('workers', - default=1, - min=1, - deprecated_group='DEFAULT', - deprecated_name='api_workers', - help='Number of workers for api, default value is 1.') -cfg.CONF.register_opt(API_OPT, 'api') - -NOTI_OPT = cfg.IntOpt('workers', - default=1, - min=1, - deprecated_group='DEFAULT', - deprecated_name='notification_workers', - help='Number of workers for notification service, ' - 'default value is 1.') -cfg.CONF.register_opt(NOTI_OPT, 'notification') - -COLL_OPT = cfg.IntOpt('workers', - default=1, - min=1, - deprecated_group='DEFAULT', - deprecated_name='collector_workers', - help='Number of workers for collector service. ' - 'default value is 1.') -cfg.CONF.register_opt(COLL_OPT, 'collector') - -keystone_client.register_keystoneauth_opts(cfg.CONF) - def prepare_service(argv=None, config_files=None): oslo_i18n.enable_lazy() log.register_options(cfg.CONF) - log_levels = (cfg.CONF.default_log_levels + - ['futurist=INFO', 'neutronclient=INFO', - 'keystoneclient=INFO']) - log.set_defaults(default_log_levels=log_levels) defaults.set_cors_middleware_defaults() if argv is None: @@ -83,12 +34,9 @@ def prepare_service(argv=None, config_files=None): version=version.version_info.version_string(), default_config_files=config_files) - ka_loading.load_auth_from_conf_options(cfg.CONF, "service_credentials") - log.setup(cfg.CONF, 'ceilometer') # NOTE(liusheng): guru cannot run with service under apache daemon, so when # ceilometer-api running with mod_wsgi, the argv is [], we don't start # guru. if argv: gmr.TextGuruMeditation.setup_autorun(version) - messaging.setup() diff --git a/ceilometer/service_base.py b/ceilometer/service_base.py deleted file mode 100644 index 9fcbe11b..00000000 --- a/ceilometer/service_base.py +++ /dev/null @@ -1,153 +0,0 @@ -# -# Copyright 2015 Hewlett Packard -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import abc - -from oslo_config import cfg -from oslo_log import log -from oslo_service import service as os_service -import six - -from ceilometer.i18n import _LE, _LI -from ceilometer import pipeline - -LOG = log.getLogger(__name__) - - -class ServiceBase(os_service.Service): - def __init__(self): - self.started = False - super(ServiceBase, self).__init__() - - def start(self): - self.started = True - super(ServiceBase, self).start() - - -@six.add_metaclass(abc.ABCMeta) -class PipelineBasedService(ServiceBase): - def clear_pipeline_validation_status(self): - """Clears pipeline validation status flags.""" - self.pipeline_validated = False - self.event_pipeline_validated = False - - def init_pipeline_refresh(self): - """Initializes pipeline refresh state.""" - self.clear_pipeline_validation_status() - if cfg.CONF.refresh_pipeline_cfg: - self.set_pipeline_mtime(pipeline.get_pipeline_mtime()) - self.set_pipeline_hash(pipeline.get_pipeline_hash()) - - if cfg.CONF.refresh_event_pipeline_cfg: - self.set_pipeline_mtime(pipeline.get_pipeline_mtime( - pipeline.EVENT_TYPE), pipeline.EVENT_TYPE) - self.set_pipeline_hash(pipeline.get_pipeline_hash( - pipeline.EVENT_TYPE), pipeline.EVENT_TYPE) - - if (cfg.CONF.refresh_pipeline_cfg or - cfg.CONF.refresh_event_pipeline_cfg): - self.tg.add_timer(cfg.CONF.pipeline_polling_interval, - self.refresh_pipeline) - - def get_pipeline_mtime(self, p_type=pipeline.SAMPLE_TYPE): - return (self.event_pipeline_mtime if p_type == pipeline.EVENT_TYPE else - self.pipeline_mtime) - - def set_pipeline_mtime(self, mtime, p_type=pipeline.SAMPLE_TYPE): - if p_type == pipeline.EVENT_TYPE: - self.event_pipeline_mtime = mtime - else: - self.pipeline_mtime = mtime - - def get_pipeline_hash(self, p_type=pipeline.SAMPLE_TYPE): - return (self.event_pipeline_hash if p_type == pipeline.EVENT_TYPE else - self.pipeline_hash) - - def set_pipeline_hash(self, _hash, p_type=pipeline.SAMPLE_TYPE): - if p_type == pipeline.EVENT_TYPE: - self.event_pipeline_hash = _hash - else: - self.pipeline_hash = _hash - - @abc.abstractmethod - def reload_pipeline(self): - """Reload pipeline in the agents.""" - - def pipeline_changed(self, p_type=pipeline.SAMPLE_TYPE): - """Returns hash of changed pipeline else False.""" - - pipeline_mtime = self.get_pipeline_mtime(p_type) - mtime = pipeline.get_pipeline_mtime(p_type) - if mtime > pipeline_mtime: - LOG.info(_LI('Pipeline configuration file has been updated.')) - - self.set_pipeline_mtime(mtime, p_type) - _hash = pipeline.get_pipeline_hash(p_type) - pipeline_hash = self.get_pipeline_hash(p_type) - if _hash != pipeline_hash: - LOG.info(_LI("Detected change in pipeline configuration.")) - return _hash - return False - - def refresh_pipeline(self): - """Refreshes appropriate pipeline, then delegates to agent.""" - - if cfg.CONF.refresh_pipeline_cfg: - pipeline_hash = self.pipeline_changed() - if pipeline_hash: - try: - # Pipeline in the notification agent. - if hasattr(self, 'pipeline_manager'): - self.pipeline_manager = pipeline.setup_pipeline() - # Polling in the polling agent. - elif hasattr(self, 'polling_manager'): - self.polling_manager = pipeline.setup_polling() - LOG.debug("Pipeline has been refreshed. 
" - "old hash: %(old)s, new hash: %(new)s", - {'old': self.pipeline_hash, - 'new': pipeline_hash}) - self.set_pipeline_hash(pipeline_hash) - self.pipeline_validated = True - except Exception as err: - LOG.debug("Active pipeline config's hash is %s", - self.pipeline_hash) - LOG.exception(_LE('Unable to load changed pipeline: %s') - % err) - - if cfg.CONF.refresh_event_pipeline_cfg: - ev_pipeline_hash = self.pipeline_changed(pipeline.EVENT_TYPE) - if ev_pipeline_hash: - try: - # Pipeline in the notification agent. - if hasattr(self, 'event_pipeline_manager'): - self.event_pipeline_manager = (pipeline. - setup_event_pipeline()) - - LOG.debug("Event Pipeline has been refreshed. " - "old hash: %(old)s, new hash: %(new)s", - {'old': self.event_pipeline_hash, - 'new': ev_pipeline_hash}) - self.set_pipeline_hash(ev_pipeline_hash, - pipeline.EVENT_TYPE) - self.event_pipeline_validated = True - except Exception as err: - LOG.debug("Active event pipeline config's hash is %s", - self.event_pipeline_hash) - LOG.exception(_LE('Unable to load changed event pipeline:' - ' %s') % err) - - if self.pipeline_validated or self.event_pipeline_validated: - self.reload_pipeline() - self.clear_pipeline_validation_status() diff --git a/ceilometer/storage/__init__.py b/ceilometer/storage/__init__.py index 97431a53..14f66db2 100644 --- a/ceilometer/storage/__init__.py +++ b/ceilometer/storage/__init__.py @@ -22,27 +22,15 @@ import retrying import six.moves.urllib.parse as urlparse from stevedore import driver -from ceilometer import utils - LOG = log.getLogger(__name__) OPTS = [ - cfg.IntOpt('metering_time_to_live', - default=-1, - help="Number of seconds that samples are kept " - "in the database for (<= 0 means forever).", - deprecated_opts=[cfg.DeprecatedOpt('time_to_live', - 'database')]), cfg.IntOpt('event_time_to_live', default=-1, help=("Number of seconds that events are kept " "in the database for (<= 0 means forever).")), - cfg.StrOpt('metering_connection', - secret=True, - help='The connection string used to connect to the metering ' - 'database. (if unset, connection is used)'), cfg.StrOpt('event_connection', secret=True, help='The connection string used to connect to the event ' @@ -51,17 +39,6 @@ OPTS = [ cfg.CONF.register_opts(OPTS, group='database') -CLI_OPTS = [ - cfg.BoolOpt('sql-expire-samples-only', - default=False, - help="Indicates if expirer expires only samples. If set true," - " expired samples will be deleted, but residual" - " resource and meter definition data will remain.", - ), -] - -cfg.CONF.register_cli_opts(CLI_OPTS) - db_options.set_defaults(cfg.CONF) @@ -78,87 +55,28 @@ class StorageBadAggregate(Exception): code = 400 -def get_connection_from_config(conf, purpose='metering'): +def get_connection_from_config(conf): retries = conf.database.max_retries # Convert retry_interval secs to msecs for retry decorator @retrying.retry(wait_fixed=conf.database.retry_interval * 1000, stop_max_attempt_number=retries if retries >= 0 else None) def _inner(): - namespace = 'ceilometer.%s.storage' % purpose - url = (getattr(conf.database, '%s_connection' % purpose) or + url = (getattr(conf.database, 'event_connection') or conf.database.connection) - return get_connection(url, namespace) + return get_connection(url) return _inner() -def get_connection(url, namespace): +def get_connection(url): """Return an open connection to the database.""" connection_scheme = urlparse.urlparse(url).scheme # SqlAlchemy connections specify may specify a 'dialect' or # 'dialect+driver'. 
Handle the case where driver is specified. engine_name = connection_scheme.split('+')[0] # NOTE: translation not applied bug #1446983 - LOG.debug('looking for %(name)r driver in %(namespace)r', - {'name': engine_name, 'namespace': namespace}) - mgr = driver.DriverManager(namespace, engine_name) + LOG.debug('looking for %(name)r driver in ceilometer.event.storage', + {'name': engine_name}) + mgr = driver.DriverManager('ceilometer.event.storage', engine_name) return mgr.driver(url) - - -class SampleFilter(object): - """Holds the properties for building a query from a meter/sample filter. - - :param user: The sample owner. - :param project: The sample project. - :param start_timestamp: Earliest time point in the request. - :param start_timestamp_op: Earliest timestamp operation in the request. - :param end_timestamp: Latest time point in the request. - :param end_timestamp_op: Latest timestamp operation in the request. - :param resource: Optional filter for resource id. - :param meter: Optional filter for meter type using the meter name. - :param source: Optional source filter. - :param message_id: Optional sample_id filter. - :param metaquery: Optional filter on the metadata - """ - def __init__(self, user=None, project=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - resource=None, meter=None, - source=None, message_id=None, - metaquery=None): - self.user = user - self.project = project - self.start_timestamp = utils.sanitize_timestamp(start_timestamp) - self.start_timestamp_op = start_timestamp_op - self.end_timestamp = utils.sanitize_timestamp(end_timestamp) - self.end_timestamp_op = end_timestamp_op - self.resource = resource - self.meter = meter - self.source = source - self.metaquery = metaquery or {} - self.message_id = message_id - - def __repr__(self): - return ("" % - (self.user, - self.project, - self.start_timestamp, - self.start_timestamp_op, - self.end_timestamp, - self.end_timestamp_op, - self.resource, - self.meter, - self.source, - self.metaquery, - self.message_id)) diff --git a/ceilometer/storage/base.py b/ceilometer/storage/base.py index e8c4e97e..a6444739 100644 --- a/ceilometer/storage/base.py +++ b/ceilometer/storage/base.py @@ -15,60 +15,7 @@ """Base classes for storage engines """ -import datetime -import inspect -import math - -from oslo_utils import timeutils import six -from six import moves - -import ceilometer - - -def iter_period(start, end, period): - """Split a time from start to end in periods of a number of seconds. - - This function yields the (start, end) time for each period composing the - time passed as argument. - - :param start: When the period set start. - :param end: When the period end starts. - :param period: The duration of the period. - """ - period_start = start - increment = datetime.timedelta(seconds=period) - for i in moves.xrange(int(math.ceil( - timeutils.delta_seconds(start, end) - / float(period)))): - next_start = period_start + increment - yield (period_start, next_start) - period_start = next_start - - -def _handle_sort_key(model_name, sort_key=None): - """Generate sort keys according to the passed in sort key from user. - - :param model_name: Database model name be query.(meter, etc.) - :param sort_key: sort key passed from user. 
- return: sort keys list - """ - sort_keys_extra = {'meter': ['user_id', 'project_id'], - 'resource': ['user_id', 'project_id', 'timestamp'], - } - - sort_keys = sort_keys_extra[model_name] - if not sort_key: - return sort_keys - # NOTE(Fengqian): We need to put the sort key from user - # in the first place of sort keys list. - try: - sort_keys.remove(sort_key) - except ValueError: - pass - finally: - sort_keys.insert(0, sort_key) - return sort_keys class Model(object): @@ -92,154 +39,3 @@ class Model(object): def __eq__(self, other): return self.as_dict() == other.as_dict() - - @classmethod - def get_field_names(cls): - fields = inspect.getargspec(cls.__init__)[0] - return set(fields) - set(["self"]) - - -class Connection(object): - """Base class for storage system connections.""" - - # A dictionary representing the capabilities of this driver. - CAPABILITIES = { - 'meters': {'query': {'simple': False, - 'metadata': False}}, - 'resources': {'query': {'simple': False, - 'metadata': False}}, - 'samples': {'query': {'simple': False, - 'metadata': False, - 'complex': False}}, - 'statistics': {'groupby': False, - 'query': {'simple': False, - 'metadata': False}, - 'aggregation': {'standard': False, - 'selectable': { - 'max': False, - 'min': False, - 'sum': False, - 'avg': False, - 'count': False, - 'stddev': False, - 'cardinality': False}} - }, - } - - STORAGE_CAPABILITIES = { - 'storage': {'production_ready': False}, - } - - def __init__(self, url): - pass - - @staticmethod - def upgrade(): - """Migrate the database to `version` or the most recent version.""" - - @staticmethod - def record_metering_data(data): - """Write the data to the backend storage system. - - :param data: a dictionary such as returned by - ceilometer.publisher.utils.meter_message_from_counter - - All timestamps must be naive utc datetime object. - """ - raise ceilometer.NotImplementedError( - 'Recording metering data is not implemented') - - @staticmethod - def clear_expired_metering_data(ttl): - """Clear expired data from the backend storage system. - - Clearing occurs according to the time-to-live. - - :param ttl: Number of seconds to keep records for. - """ - raise ceilometer.NotImplementedError( - 'Clearing samples not implemented') - - @staticmethod - def get_resources(user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of models.Resource instances. - - Iterable items containing resource information. - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional timestamp start range operation. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional timestamp end range operation. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. - """ - raise ceilometer.NotImplementedError('Resources not implemented') - - @staticmethod - def get_meters(user=None, project=None, resource=None, source=None, - metaquery=None, limit=None, unique=False): - """Return an iterable of model.Meter instances. - - Iterable items containing meter information. - :param user: Optional ID for user that owns the resource. 
- :param project: Optional ID for project that owns the resource. - :param resource: Optional resource filter. - :param source: Optional source filter. - :param metaquery: Optional dict with metadata to match on. - :param limit: Maximum number of results to return. - :param unique: If set to true, return only unique meter information. - """ - raise ceilometer.NotImplementedError('Meters not implemented') - - @staticmethod - def get_samples(sample_filter, limit=None): - """Return an iterable of model.Sample instances. - - :param sample_filter: Filter. - :param limit: Maximum number of results to return. - """ - raise ceilometer.NotImplementedError('Samples not implemented') - - @staticmethod - def get_meter_statistics(sample_filter, period=None, groupby=None, - aggregate=None): - """Return an iterable of model.Statistics instances. - - The filter must have a meter value set. - """ - raise ceilometer.NotImplementedError('Statistics not implemented') - - @staticmethod - def clear(): - """Clear database.""" - - @staticmethod - def query_samples(filter_expr=None, orderby=None, limit=None): - """Return an iterable of model.Sample objects. - - :param filter_expr: Filter expression for query. - :param orderby: List of field name and direction pairs for order by. - :param limit: Maximum number of results to return. - """ - - raise ceilometer.NotImplementedError('Complex query for samples ' - 'is not implemented.') - - @classmethod - def get_capabilities(cls): - """Return an dictionary with the capabilities of each driver.""" - return cls.CAPABILITIES - - @classmethod - def get_storage_capabilities(cls): - """Return a dictionary representing the performance capabilities. - - This is needed to evaluate the performance of each driver. - """ - return cls.STORAGE_CAPABILITIES diff --git a/ceilometer/storage/hbase/migration.py b/ceilometer/storage/hbase/migration.py deleted file mode 100644 index 9cc3df93..00000000 --- a/ceilometer/storage/hbase/migration.py +++ /dev/null @@ -1,103 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""HBase storage backend migrations -""" - -import re - -from ceilometer.storage.hbase import utils as hbase_utils - - -def migrate_resource_table(conn, table): - """Migrate table 'resource' in HBase. 
- - Change qualifiers format from "%s+%s+%s!%s!%s" % - (rts, source, counter_name, counter_type,counter_unit) - in columns with meters f:m_* - to new separator format "%s:%s:%s:%s:%s" % - (rts, source, counter_name, counter_type,counter_unit) - """ - resource_table = conn.table(table) - resource_filter = ("QualifierFilter(=, " - "'regexstring:m_\\d{19}\\+" - "[\\w-\\._]*\\+[\\w-\\._!]')") - gen = resource_table.scan(filter=resource_filter) - for row, data in gen: - columns = [] - updated_columns = dict() - column_prefix = "f:" - for column, value in data.items(): - if column.startswith('f:m_'): - columns.append(column) - parts = column[2:].split("+", 2) - parts.extend(parts.pop(2).split("!")) - column = hbase_utils.prepare_key(*parts) - updated_columns[column_prefix + column] = value - resource_table.put(row, updated_columns) - resource_table.delete(row, columns) - - -def migrate_meter_table(conn, table): - """Migrate table 'meter' in HBase. - - Change row format from "%s_%d_%s" % (counter_name, rts, message_signature) - to new separator format "%s:%s:%s" % (counter_name, rts, message_signature) - """ - meter_table = conn.table(table) - meter_filter = ("RowFilter(=, " - "'regexstring:[\\w\\._-]*_\\d{19}_\\w*')") - gen = meter_table.scan(filter=meter_filter) - for row, data in gen: - parts = row.rsplit('_', 2) - new_row = hbase_utils.prepare_key(*parts) - meter_table.put(new_row, data) - meter_table.delete(row) - - -def migrate_event_table(conn, table): - """Migrate table 'event' in HBase. - - Change row format from ""%d_%s" % timestamp, event_id, - to new separator format "%s:%s" % timestamp, event_id - Also change trait columns from %s+%s % trait.name, trait.dtype - to %s:%s % trait.name, trait.dtype - """ - event_table = conn.table(table) - event_filter = "RowFilter(=, 'regexstring:\\d*_\\w*')" - gen = event_table.scan(filter=event_filter) - trait_pattern = re.compile("f:[\w\-_]*\+\w") - column_prefix = "f:" - for row, data in gen: - row_parts = row.split("_", 1) - update_data = {} - for column, value in data.items(): - if trait_pattern.match(column): - trait_parts = column[2:].rsplit('+', 1) - column = hbase_utils.prepare_key(*trait_parts) - update_data[column_prefix + column] = value - new_row = hbase_utils.prepare_key(*row_parts) - event_table.put(new_row, update_data) - event_table.delete(row) - - -TABLE_MIGRATION_FUNCS = {'resource': migrate_resource_table, - 'meter': migrate_meter_table, - 'event': migrate_event_table} - - -def migrate_tables(conn, tables): - if type(tables) is not list: - tables = [tables] - for table in tables: - if table in TABLE_MIGRATION_FUNCS: - TABLE_MIGRATION_FUNCS.get(table)(conn, table) diff --git a/ceilometer/storage/hbase/utils.py b/ceilometer/storage/hbase/utils.py index f151558e..c348684b 100644 --- a/ceilometer/storage/hbase/utils.py +++ b/ceilometer/storage/hbase/utils.py @@ -191,139 +191,6 @@ def make_query(metaquery=None, trait_query=None, **kwargs): return res_q -def get_meter_columns(metaquery=None, need_timestamp=False, **kwargs): - """Return a list of required columns in meter table to be scanned. - - SingleColumnFilter has 'columns' filter that should be used to determine - what columns we are interested in. But if we want to use 'filter' and - 'columns' together we have to include columns we are filtering by - to columns list. 
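A pure-Python sketch of the meter row-key rewrite performed by migrate_meter_table above (the key value is made up, and the real code also URL-quotes each part via prepare_key); rsplit is used because counter names can themselves contain underscores::

    def migrate_meter_row_key(old_row):
        # 'cpu_util_9222030811134775808_abc' -> 'cpu_util:9222030811134775808:abc'
        return ':'.join(old_row.rsplit('_', 2))

    assert (migrate_meter_row_key('cpu_util_9222030811134775808_abc') ==
            'cpu_util:9222030811134775808:abc')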
- - Please see an example: If we make scan with filter - "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')" - and columns ['f:rts'], the output will be always empty - because only 'rts' will be returned and filter will be applied - to this data so 's_test-1' cannot be find. - To make this request correct it should be fixed as follows: - filter = "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')", - columns = ['f:rts','f:s_test-1']} - - :param metaquery: optional metaquery dict - :param need_timestamp: flag, which defines the need for timestamp columns - :param kwargs: key-value pairs to filter on. Key should be a real - column name in db - """ - columns = ['f:message', 'f:recorded_at'] - columns.extend("f:%s" % k for k, v in kwargs.items() - if v is not None) - if metaquery: - columns.extend("f:r_%s" % k for k, v in metaquery.items() - if v is not None) - source = kwargs.get('source') - if source: - columns.append("f:s_%s" % source) - if need_timestamp: - columns.extend(['f:rts', 'f:timestamp']) - return columns - - -def make_sample_query_from_filter(sample_filter, require_meter=True): - """Return a query dictionary based on the settings in the filter. - - :param sample_filter: SampleFilter instance - :param require_meter: If true and the filter does not have a meter, - raise an error. - """ - - meter = sample_filter.meter - if not meter and require_meter: - raise RuntimeError('Missing required meter specifier') - start_row, end_row, ts_query = make_timestamp_query( - make_general_rowkey_scan, - start=sample_filter.start_timestamp, - start_op=sample_filter.start_timestamp_op, - end=sample_filter.end_timestamp, - end_op=sample_filter.end_timestamp_op, - some_id=meter) - kwargs = dict(user_id=sample_filter.user, - project_id=sample_filter.project, - counter_name=meter, - resource_id=sample_filter.resource, - source=sample_filter.source, - message_id=sample_filter.message_id) - - q = make_query(metaquery=sample_filter.metaquery, **kwargs) - - if q: - res_q = q + " AND " + ts_query if ts_query else q - else: - res_q = ts_query if ts_query else None - - need_timestamp = (sample_filter.start_timestamp or - sample_filter.end_timestamp) is not None - columns = get_meter_columns(metaquery=sample_filter.metaquery, - need_timestamp=need_timestamp, **kwargs) - return res_q, start_row, end_row, columns - - -def make_meter_query_for_resource(start_timestamp, start_timestamp_op, - end_timestamp, end_timestamp_op, source, - query=None): - """This method is used when Resource table should be filtered by meters. - - In this method we are looking into all qualifiers with m_ prefix. - :param start_timestamp: meter's timestamp start range. - :param start_timestamp_op: meter's start time operator, like ge, gt. - :param end_timestamp: meter's timestamp end range. - :param end_timestamp_op: meter's end time operator, like lt, le. - :param source: source filter. - :param query: a query string to concatenate with. 
- """ - start_rts, end_rts = get_start_end_rts(start_timestamp, end_timestamp) - mq = [] - start_op = start_timestamp_op or 'ge' - end_op = end_timestamp_op or 'lt' - - if start_rts: - filter_value = (start_rts + ':' + quote(source) if source - else start_rts) - mq.append(_QualifierFilter(OP_SIGN_REV[start_op], filter_value)) - - if end_rts: - filter_value = (end_rts + ':' + quote(source) if source - else end_rts) - mq.append(_QualifierFilter(OP_SIGN_REV[end_op], filter_value)) - - if mq: - meter_q = " AND ".join(mq) - # If there is a filtering on time_range we need to point that - # qualifiers should start with m_. Otherwise in case e.g. - # QualifierFilter (>=, 'binaryprefix:m_9222030811134775808') - # qualifier 's_test' satisfies the filter and will be returned. - meter_q = _QualifierFilter("=", '') + " AND " + meter_q - query = meter_q if not query else query + " AND " + meter_q - return query - - -def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None): - """If it's filter on some_id without start and end. - - start_row = some_id while end_row = some_id + MAX_BYTE. - """ - if some_id is None: - return None, None - if not rts_start: - # NOTE(idegtiarov): Here we could not use chr > 122 because chr >= 123 - # will be quoted and character will be turn in a composition that is - # started with '%' (chr(37)) that lexicographically is less than chr - # of number - rts_start = chr(122) - end_row = prepare_key(some_id, rts_start) - start_row = prepare_key(some_id, rts_end) - - return start_row, end_row - - def prepare_key(*args): """Prepares names for rows and columns with correct separator. @@ -338,16 +205,6 @@ def prepare_key(*args): return ":".join(key_quote) -def timestamp_from_record_tuple(record): - """Extract timestamp from HBase tuple record.""" - return record[0]['timestamp'] - - -def resource_id_from_record_tuple(record): - """Extract resource_id from HBase tuple record.""" - return record[0]['resource_id'] - - def deserialize_entry(entry, get_raw_meta=True): """Return a list of flatten_result, sources, meters and metadata. diff --git a/ceilometer/storage/impl_hbase.py b/ceilometer/storage/impl_hbase.py deleted file mode 100644 index eb3423e2..00000000 --- a/ceilometer/storage/impl_hbase.py +++ /dev/null @@ -1,439 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
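The 'rts' values used in these row keys are reversed timestamps; the helper that builds them is not part of this hunk, but the general technique is roughly the following sketch (max_ts is an assumed constant, not necessarily the project's actual value)::

    def reversed_ts(millis, max_ts=0x7fffffffffffffff):
        # newer samples get lexicographically smaller keys,
        # so a plain scan returns newest-first
        return '%019d' % (max_ts - millis)

    assert reversed_ts(2000) < reversed_ts(1000)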
- -import datetime -import operator -import time - -from oslo_log import log -from oslo_utils import timeutils - -import ceilometer -from ceilometer.storage import base -from ceilometer.storage.hbase import base as hbase_base -from ceilometer.storage.hbase import migration as hbase_migration -from ceilometer.storage.hbase import utils as hbase_utils -from ceilometer.storage import models -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -AVAILABLE_CAPABILITIES = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True}}, - 'statistics': {'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True}}, -} - - -AVAILABLE_STORAGE_CAPABILITIES = { - 'storage': {'production_ready': True}, -} - - -class Connection(hbase_base.Connection, base.Connection): - """Put the metering data into a HBase database - - Collections: - - - meter (describes sample actually): - - - row-key: consists of reversed timestamp, meter and a message uuid - for purposes of uniqueness - - Column Families: - - f: contains the following qualifiers: - - - counter_name: - - counter_type: - - counter_unit: - - counter_volume: - - message: - - message_id: - - message_signature: - - resource_metadata: raw metadata for corresponding resource - of the meter - - project_id: - - resource_id: - - user_id: - - recorded_at: - - flattened metadata with prefix r_metadata. e.g.:: - - f:r_metadata.display_name or f:r_metadata.tag - - - rts: - - timestamp: - - source for meter with prefix 's' - - - resource: - - - row_key: uuid of resource - - Column Families: - - f: contains the following qualifiers: - - - resource_metadata: raw metadata for corresponding resource - - project_id: - - resource_id: - - user_id: - - flattened metadata with prefix r_metadata. e.g.:: - - f:r_metadata.display_name or f:r_metadata.tag - - - sources for all corresponding meters with prefix 's' - - all meters with prefix 'm' for this resource in format: - - .. code-block:: python - - "%s:%s:%s:%s:%s" % (rts, source, counter_name, counter_type, - counter_unit) - """ - - CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, - AVAILABLE_CAPABILITIES) - STORAGE_CAPABILITIES = utils.update_nested( - base.Connection.STORAGE_CAPABILITIES, - AVAILABLE_STORAGE_CAPABILITIES, - ) - _memory_instance = None - - RESOURCE_TABLE = "resource" - METER_TABLE = "meter" - - def __init__(self, url): - super(Connection, self).__init__(url) - - def upgrade(self): - tables = [self.RESOURCE_TABLE, self.METER_TABLE] - column_families = {'f': dict(max_versions=1)} - with self.conn_pool.connection() as conn: - hbase_utils.create_tables(conn, tables, column_families) - hbase_migration.migrate_tables(conn, tables) - - def clear(self): - LOG.debug('Dropping HBase schema...') - with self.conn_pool.connection() as conn: - for table in [self.RESOURCE_TABLE, - self.METER_TABLE]: - try: - conn.disable_table(table) - except Exception: - LOG.debug('Cannot disable table but ignoring error') - try: - conn.delete_table(table) - except Exception: - LOG.debug('Cannot delete table but ignoring error') - - def record_metering_data(self, data): - """Write the data to the backend storage system. 
- - :param data: a dictionary such as returned by - ceilometer.publisher.utils.meter_message_from_counter - """ - with self.conn_pool.connection() as conn: - resource_table = conn.table(self.RESOURCE_TABLE) - meter_table = conn.table(self.METER_TABLE) - - resource_metadata = data.get('resource_metadata', {}) - # Determine the name of new meter - rts = hbase_utils.timestamp(data['timestamp']) - new_meter = hbase_utils.prepare_key( - rts, data['source'], data['counter_name'], - data['counter_type'], data['counter_unit']) - - # TODO(nprivalova): try not to store resource_id - resource = hbase_utils.serialize_entry(**{ - 'source': data['source'], - 'meter': {new_meter: data['timestamp']}, - 'resource_metadata': resource_metadata, - 'resource_id': data['resource_id'], - 'project_id': data['project_id'], 'user_id': data['user_id']}) - # Here we put entry in HBase with our own timestamp. This is needed - # when samples arrive out-of-order - # If we use timestamp=data['timestamp'] the newest data will be - # automatically 'on the top'. It is needed to keep metadata - # up-to-date: metadata from newest samples is considered as actual. - ts = int(time.mktime(data['timestamp'].timetuple()) * 1000) - resource_table.put(hbase_utils.encode_unicode(data['resource_id']), - resource, ts) - - # Rowkey consists of reversed timestamp, meter and a - # message uuid for purposes of uniqueness - row = hbase_utils.prepare_key(data['counter_name'], rts, - data['message_id']) - record = hbase_utils.serialize_entry( - data, **{'source': data['source'], 'rts': rts, - 'message': data, 'recorded_at': timeutils.utcnow()}) - meter_table.put(row, record) - - def get_resources(self, user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of models.Resource instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional start time operator, like ge, gt. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional end time operator, like lt, le. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. - """ - if limit == 0: - return - q = hbase_utils.make_query(metaquery=metaquery, user_id=user, - project_id=project, - resource_id=resource, source=source) - q = hbase_utils.make_meter_query_for_resource(start_timestamp, - start_timestamp_op, - end_timestamp, - end_timestamp_op, - source, q) - with self.conn_pool.connection() as conn: - resource_table = conn.table(self.RESOURCE_TABLE) - LOG.debug("Query Resource table: %s", q) - for resource_id, data in resource_table.scan(filter=q, - limit=limit): - f_res, meters, md = hbase_utils.deserialize_entry( - data) - resource_id = hbase_utils.encode_unicode(resource_id) - # Unfortunately happybase doesn't keep ordered result from - # HBase. So that's why it's needed to find min and max - # manually - first_ts = min(meters, key=operator.itemgetter(1))[1] - last_ts = max(meters, key=operator.itemgetter(1))[1] - source = meters[0][0][1] - # If we use QualifierFilter then HBase returns only - # qualifiers filtered by. It will not return the whole entry. 
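Since happybase does not return rows in order (as noted earlier in get_resources), the first and last sample timestamps are taken with plain min/max over the (meter, timestamp) pairs; a small example with made-up data::

    import datetime
    import operator

    meters = [(('m1',), datetime.datetime(2016, 1, 2)),
              (('m2',), datetime.datetime(2016, 1, 1)),
              (('m3',), datetime.datetime(2016, 1, 3))]
    first_ts = min(meters, key=operator.itemgetter(1))[1]
    last_ts = max(meters, key=operator.itemgetter(1))[1]
    assert (first_ts, last_ts) == (datetime.datetime(2016, 1, 1),
                                   datetime.datetime(2016, 1, 3))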
- # That's why if we need to ask additional qualifiers manually. - if 'project_id' not in f_res and 'user_id' not in f_res: - row = resource_table.row( - resource_id, columns=['f:project_id', 'f:user_id', - 'f:resource_metadata']) - f_res, _m, md = hbase_utils.deserialize_entry(row) - yield models.Resource( - resource_id=resource_id, - first_sample_timestamp=first_ts, - last_sample_timestamp=last_ts, - project_id=f_res['project_id'], - source=source, - user_id=f_res['user_id'], - metadata=md) - - def get_meters(self, user=None, project=None, resource=None, source=None, - metaquery=None, limit=None, unique=False): - """Return an iterable of models.Meter instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param resource: Optional resource filter. - :param source: Optional source filter. - :param metaquery: Optional dict with metadata to match on. - :param limit: Maximum number of results to return. - :param unique: If set to true, return only unique meter information. - """ - if limit == 0: - return - - metaquery = metaquery or {} - - with self.conn_pool.connection() as conn: - resource_table = conn.table(self.RESOURCE_TABLE) - q = hbase_utils.make_query(metaquery=metaquery, user_id=user, - project_id=project, - resource_id=resource, - source=source) - LOG.debug("Query Resource table: %s", q) - - gen = resource_table.scan(filter=q) - # We need result set to be sure that user doesn't receive several - # same meters. Please see bug - # https://bugs.launchpad.net/ceilometer/+bug/1301371 - result = set() - for ignored, data in gen: - flatten_result, meters, md = hbase_utils.deserialize_entry( - data) - for m in meters: - if limit and len(result) >= limit: - return - _m_rts, m_source, name, m_type, unit = m[0] - if unique: - meter_dict = {'name': name, - 'type': m_type, - 'unit': unit, - 'resource_id': None, - 'project_id': None, - 'user_id': None, - 'source': None} - else: - meter_dict = {'name': name, - 'type': m_type, - 'unit': unit, - 'resource_id': - flatten_result['resource_id'], - 'project_id': - flatten_result['project_id'], - 'user_id': - flatten_result['user_id']} - - frozen_meter = frozenset(meter_dict.items()) - if frozen_meter in result: - continue - result.add(frozen_meter) - if not unique: - meter_dict.update({'source': m_source - if m_source else None}) - - yield models.Meter(**meter_dict) - - def get_samples(self, sample_filter, limit=None): - """Return an iterable of models.Sample instances. - - :param sample_filter: Filter. - :param limit: Maximum number of results to return. - """ - if limit == 0: - return - with self.conn_pool.connection() as conn: - meter_table = conn.table(self.METER_TABLE) - q, start, stop, columns = (hbase_utils. 
- make_sample_query_from_filter - (sample_filter, require_meter=False)) - LOG.debug("Query Meter Table: %s", q) - gen = meter_table.scan(filter=q, row_start=start, row_stop=stop, - limit=limit, columns=columns) - for ignored, meter in gen: - d_meter = hbase_utils.deserialize_entry(meter)[0] - d_meter['message']['counter_volume'] = ( - float(d_meter['message']['counter_volume'])) - d_meter['message']['recorded_at'] = d_meter['recorded_at'] - yield models.Sample(**d_meter['message']) - - @staticmethod - def _update_meter_stats(stat, meter): - """Do the stats calculation on a requested time bucket in stats dict - - :param stats: dict where aggregated stats are kept - :param index: time bucket index in stats - :param meter: meter record as returned from HBase - :param start_time: query start time - :param period: length of the time bucket - """ - vol = meter['counter_volume'] - ts = meter['timestamp'] - stat.unit = meter['counter_unit'] - stat.min = min(vol, stat.min or vol) - stat.max = max(vol, stat.max) - stat.sum = vol + (stat.sum or 0) - stat.count += 1 - stat.avg = (stat.sum / float(stat.count)) - stat.duration_start = min(ts, stat.duration_start or ts) - stat.duration_end = max(ts, stat.duration_end or ts) - stat.duration = (timeutils.delta_seconds(stat.duration_start, - stat.duration_end)) - - def get_meter_statistics(self, sample_filter, period=None, groupby=None, - aggregate=None): - """Return an iterable of models.Statistics instances. - - Items are containing meter statistics described by the query - parameters. The filter must have a meter value set. - - .. note:: - - Due to HBase limitations the aggregations are implemented - in the driver itself, therefore this method will be quite slow - because of all the Thrift traffic it is going to create. - """ - if groupby: - raise ceilometer.NotImplementedError("Group by not implemented.") - - if aggregate: - raise ceilometer.NotImplementedError( - 'Selectable aggregates not implemented') - - with self.conn_pool.connection() as conn: - meter_table = conn.table(self.METER_TABLE) - q, start, stop, columns = (hbase_utils. 
- make_sample_query_from_filter - (sample_filter)) - # These fields are used in statistics' calculating - columns.extend(['f:timestamp', 'f:counter_volume', - 'f:counter_unit']) - meters = map(hbase_utils.deserialize_entry, - list(meter for (ignored, meter) in - meter_table.scan( - filter=q, row_start=start, - row_stop=stop, columns=columns))) - - if sample_filter.start_timestamp: - start_time = sample_filter.start_timestamp - elif meters: - start_time = meters[-1][0]['timestamp'] - else: - start_time = None - - if sample_filter.end_timestamp: - end_time = sample_filter.end_timestamp - elif meters: - end_time = meters[0][0]['timestamp'] - else: - end_time = None - - results = [] - - if not period: - period = 0 - period_start = start_time - period_end = end_time - - # As our HBase meters are stored as newest-first, we need to iterate - # in the reverse order - for meter in meters[::-1]: - ts = meter[0]['timestamp'] - if period: - offset = int(timeutils.delta_seconds( - start_time, ts) / period) * period - period_start = start_time + datetime.timedelta(0, offset) - - if not results or not results[-1].period_start == period_start: - if period: - period_end = period_start + datetime.timedelta( - 0, period) - results.append( - models.Statistics(unit='', - count=0, - min=0, - max=0, - avg=0, - sum=0, - period=period, - period_start=period_start, - period_end=period_end, - duration=None, - duration_start=None, - duration_end=None, - groupby=None) - ) - self._update_meter_stats(results[-1], meter[0]) - return results diff --git a/ceilometer/storage/impl_log.py b/ceilometer/storage/impl_log.py deleted file mode 100644 index ff52862e..00000000 --- a/ceilometer/storage/impl_log.py +++ /dev/null @@ -1,131 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Simple logging storage backend. -""" - -from oslo_log import log - -from ceilometer.i18n import _LI -from ceilometer.storage import base - -LOG = log.getLogger(__name__) - - -class Connection(base.Connection): - """Log the data.""" - - def upgrade(self): - pass - - def clear(self): - pass - - def record_metering_data(self, data): - """Write the data to the backend storage system. - - :param data: a dictionary such as returned by - ceilometer.meter.meter_message_from_counter. - """ - LOG.info(_LI('metering data %(counter_name)s for %(resource_id)s: ' - '%(counter_volume)s') - % ({'counter_name': data['counter_name'], - 'resource_id': data['resource_id'], - 'counter_volume': data['counter_volume']})) - - def clear_expired_metering_data(self, ttl): - """Clear expired data from the backend storage system. - - Clearing occurs according to the time-to-live. - :param ttl: Number of seconds to keep records for. 
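A dependency-free restatement of the period bucketing used in get_meter_statistics above (timeutils.delta_seconds is just the difference between two datetimes in seconds; the dates below are made up)::

    import datetime

    def period_start(start_time, ts, period):
        # map a sample timestamp onto the start of its statistics bucket
        offset = int((ts - start_time).total_seconds() / period) * period
        return start_time + datetime.timedelta(seconds=offset)

    start = datetime.datetime(2016, 1, 1)
    sample = start + datetime.timedelta(seconds=95)
    assert period_start(start, sample, 60) == start + datetime.timedelta(seconds=60)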
- """ - LOG.info(_LI("Dropping metering data with TTL %d"), ttl) - - def get_resources(self, user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of dictionaries containing resource information. - - { 'resource_id': UUID of the resource, - 'project_id': UUID of project owning the resource, - 'user_id': UUID of user owning the resource, - 'timestamp': UTC datetime of last update to the resource, - 'metadata': most current metadata for the resource, - 'meter': list of the meters reporting data for the resource, - } - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional start time operator, like gt, ge. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional end time operator, like lt, le. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. - """ - return [] - - def get_meters(self, user=None, project=None, resource=None, source=None, - limit=None, metaquery=None, unique=False): - """Return an iterable of dictionaries containing meter information. - - { 'name': name of the meter, - 'type': type of the meter (gauge, delta, cumulative), - 'resource_id': UUID of the resource, - 'project_id': UUID of project owning the resource, - 'user_id': UUID of user owning the resource, - } - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param resource: Optional resource filter. - :param source: Optional source filter. - :param limit: Maximum number of results to return. - :param metaquery: Optional dict with metadata to match on. - :param unique: If set to true, return only unique meter information. - """ - return [] - - def get_samples(self, sample_filter, limit=None): - """Return an iterable of samples. - - Items are created by - ceilometer.publisher.utils.meter_message_from_counter. - """ - return [] - - def get_meter_statistics(self, sample_filter, period=None, groupby=None, - aggregate=None): - """Return a dictionary containing meter statistics. - - Meter statistics is described by the query parameters. - The filter must have a meter value set. - - { 'min': - 'max': - 'avg': - 'sum': - 'count': - 'period': - 'period_start': - 'period_end': - 'duration': - 'duration_start': - 'duration_end': - } - """ - return [] diff --git a/ceilometer/storage/impl_mongodb.py b/ceilometer/storage/impl_mongodb.py deleted file mode 100644 index afe07df1..00000000 --- a/ceilometer/storage/impl_mongodb.py +++ /dev/null @@ -1,679 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# Copyright 2014 Red Hat, Inc -# -# Authors: Doug Hellmann -# Julien Danjou -# Eoghan Glynn -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""MongoDB storage backend""" - -import copy -import datetime -import uuid - -import bson.code -import bson.objectid -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -import pymongo -import six - -import ceilometer -from ceilometer.i18n import _ -from ceilometer import storage -from ceilometer.storage import base -from ceilometer.storage import models -from ceilometer.storage.mongo import utils as pymongo_utils -from ceilometer.storage import pymongo_base -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -AVAILABLE_CAPABILITIES = { - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': {'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}}} -} - - -class Connection(pymongo_base.Connection): - """Put the data into a MongoDB database - - Collections:: - - - meter - - the raw incoming data - - resource - - the metadata for resources - - { _id: uuid of resource, - metadata: metadata dictionaries - user_id: uuid - project_id: uuid - meter: [ array of {counter_name: string, counter_type: string, - counter_unit: string} ] - } - """ - - CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES, - AVAILABLE_CAPABILITIES) - CONNECTION_POOL = pymongo_utils.ConnectionPool() - - STANDARD_AGGREGATES = dict([(a.name, a) for a in [ - pymongo_utils.SUM_AGGREGATION, pymongo_utils.AVG_AGGREGATION, - pymongo_utils.MIN_AGGREGATION, pymongo_utils.MAX_AGGREGATION, - pymongo_utils.COUNT_AGGREGATION, - ]]) - - AGGREGATES = dict([(a.name, a) for a in [ - pymongo_utils.SUM_AGGREGATION, - pymongo_utils.AVG_AGGREGATION, - pymongo_utils.MIN_AGGREGATION, - pymongo_utils.MAX_AGGREGATION, - pymongo_utils.COUNT_AGGREGATION, - pymongo_utils.STDDEV_AGGREGATION, - pymongo_utils.CARDINALITY_AGGREGATION, - ]]) - - SORT_OPERATION_MAPPING = {'desc': (pymongo.DESCENDING, '$lt'), - 'asc': (pymongo.ASCENDING, '$gt')} - - MAP_RESOURCES = bson.code.Code(""" - function () { - emit(this.resource_id, - {user_id: this.user_id, - project_id: this.project_id, - source: this.source, - first_timestamp: this.timestamp, - last_timestamp: this.timestamp, - metadata: this.resource_metadata}) - }""") - - REDUCE_RESOURCES = bson.code.Code(""" - function (key, values) { - var merge = {user_id: values[0].user_id, - project_id: values[0].project_id, - source: values[0].source, - first_timestamp: values[0].first_timestamp, - last_timestamp: values[0].last_timestamp, - metadata: values[0].metadata} - values.forEach(function(value) { - if (merge.first_timestamp - value.first_timestamp > 0) { - merge.first_timestamp = value.first_timestamp; - merge.user_id = value.user_id; - merge.project_id = value.project_id; - merge.source = value.source; - } else if (merge.last_timestamp - value.last_timestamp <= 0) { - merge.last_timestamp = value.last_timestamp; - merge.metadata = value.metadata; - } - }); - return merge; - }""") - - _GENESIS = datetime.datetime(year=datetime.MINYEAR, 
month=1, day=1) - _APOCALYPSE = datetime.datetime(year=datetime.MAXYEAR, month=12, day=31, - hour=23, minute=59, second=59) - - def __init__(self, url): - - # NOTE(jd) Use our own connection pooling on top of the Pymongo one. - # We need that otherwise we overflow the MongoDB instance with new - # connection since we instantiate a Pymongo client each time someone - # requires a new storage connection. - self.conn = self.CONNECTION_POOL.connect(url) - self.version = self.conn.server_info()['versionArray'] - # Require MongoDB 2.4 to use $setOnInsert - if self.version < pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION: - raise storage.StorageBadVersion( - "Need at least MongoDB %s" % - pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION) - - connection_options = pymongo.uri_parser.parse_uri(url) - self.db = getattr(self.conn, connection_options['database']) - if connection_options.get('username'): - self.db.authenticate(connection_options['username'], - connection_options['password']) - - # NOTE(jd) Upgrading is just about creating index, so let's do this - # on connection to be sure at least the TTL is correctly updated if - # needed. - self.upgrade() - - @staticmethod - def update_ttl(ttl, ttl_index_name, index_field, coll): - """Update or create time_to_live indexes. - - :param ttl: time to live in seconds. - :param ttl_index_name: name of the index we want to update or create. - :param index_field: field with the index that we need to update. - :param coll: collection which indexes need to be updated. - """ - indexes = coll.index_information() - if ttl <= 0: - if ttl_index_name in indexes: - coll.drop_index(ttl_index_name) - return - - if ttl_index_name in indexes: - return coll.database.command( - 'collMod', coll.name, - index={'keyPattern': {index_field: pymongo.ASCENDING}, - 'expireAfterSeconds': ttl}) - - coll.create_index([(index_field, pymongo.ASCENDING)], - expireAfterSeconds=ttl, - name=ttl_index_name) - - def upgrade(self): - # Establish indexes - # - # We need variations for user_id vs. project_id because of the - # way the indexes are stored in b-trees. The user_id and - # project_id values are usually mutually exclusive in the - # queries, so the database won't take advantage of an index - # including both. 
- - # create collection if not present - if 'resource' not in self.db.conn.collection_names(): - self.db.conn.create_collection('resource') - if 'meter' not in self.db.conn.collection_names(): - self.db.conn.create_collection('meter') - - name_qualifier = dict(user_id='', project_id='project_') - background = dict(user_id=False, project_id=True) - for primary in ['user_id', 'project_id']: - name = 'meter_%sidx' % name_qualifier[primary] - self.db.meter.create_index([ - ('resource_id', pymongo.ASCENDING), - (primary, pymongo.ASCENDING), - ('counter_name', pymongo.ASCENDING), - ('timestamp', pymongo.ASCENDING), - ], name=name, background=background[primary]) - - self.db.meter.create_index([('timestamp', pymongo.DESCENDING)], - name='timestamp_idx') - - # NOTE(ityaptin) This index covers get_resource requests sorting - # and MongoDB uses part of this compound index for different - # queries based on any of user_id, project_id, last_sample_timestamp - # fields - self.db.resource.create_index([('user_id', pymongo.DESCENDING), - ('project_id', pymongo.DESCENDING), - ('last_sample_timestamp', - pymongo.DESCENDING)], - name='resource_user_project_timestamp',) - self.db.resource.create_index([('last_sample_timestamp', - pymongo.DESCENDING)], - name='last_sample_timestamp_idx') - - # update or create time_to_live index - ttl = cfg.CONF.database.metering_time_to_live - self.update_ttl(ttl, 'meter_ttl', 'timestamp', self.db.meter) - self.update_ttl(ttl, 'resource_ttl', 'last_sample_timestamp', - self.db.resource) - - def clear(self): - self.conn.drop_database(self.db.name) - # Connection will be reopened automatically if needed - self.conn.close() - - def record_metering_data(self, data): - """Write the data to the backend storage system. - - :param data: a dictionary such as returned by - ceilometer.publisher.utils.meter_message_from_counter - """ - # Record the updated resource metadata - we use $setOnInsert to - # unconditionally insert sample timestamps and resource metadata - # (in the update case, this must be conditional on the sample not - # being out-of-order) - data = copy.deepcopy(data) - data['resource_metadata'] = pymongo_utils.improve_keys( - data.pop('resource_metadata')) - resource = self.db.resource.find_one_and_update( - {'_id': data['resource_id']}, - {'$set': {'project_id': data['project_id'], - 'user_id': data['user_id'], - 'source': data['source'], - }, - '$setOnInsert': {'metadata': data['resource_metadata'], - 'first_sample_timestamp': data['timestamp'], - 'last_sample_timestamp': data['timestamp'], - }, - '$addToSet': {'meter': {'counter_name': data['counter_name'], - 'counter_type': data['counter_type'], - 'counter_unit': data['counter_unit'], - }, - }, - }, - upsert=True, - return_document=pymongo.ReturnDocument.AFTER, - ) - - # only update last sample timestamp if actually later (the usual - # in-order case) - last_sample_timestamp = resource.get('last_sample_timestamp') - if (last_sample_timestamp is None or - last_sample_timestamp <= data['timestamp']): - self.db.resource.update_one( - {'_id': data['resource_id']}, - {'$set': {'metadata': data['resource_metadata'], - 'last_sample_timestamp': data['timestamp']}} - ) - - # only update first sample timestamp if actually earlier (the unusual - # out-of-order case) - # NOTE: a null first sample timestamp is not updated as this indicates - # a pre-existing resource document dating from before we started - # recording these timestamps in the resource collection - first_sample_timestamp = resource.get('first_sample_timestamp') - 
if (first_sample_timestamp is not None and - first_sample_timestamp > data['timestamp']): - self.db.resource.update_one( - {'_id': data['resource_id']}, - {'$set': {'first_sample_timestamp': data['timestamp']}} - ) - - # Record the raw data for the meter. Use a copy so we do not - # modify a data structure owned by our caller (the driver adds - # a new key '_id'). - record = copy.copy(data) - record['recorded_at'] = timeutils.utcnow() - - self.db.meter.insert_one(record) - - def clear_expired_metering_data(self, ttl): - """Clear expired data from the backend storage system. - - Clearing occurs with native MongoDB time-to-live feature. - """ - LOG.debug("Clearing expired metering data is based on native " - "MongoDB time to live feature and going in background.") - - @classmethod - def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'): - """Returns a sort_instruction and paging operator. - - Sort instructions are used in the query to determine what attributes - to sort on and what direction to use. - :param q: The query dict passed in. - :param sort_keys: array of attributes by which results be sorted. - :param sort_dir: direction in which results be sorted (asc, desc). - :return: sort instructions and paging operator - """ - sort_keys = sort_keys or [] - sort_instructions = [] - _sort_dir, operation = cls.SORT_OPERATION_MAPPING.get( - sort_dir, cls.SORT_OPERATION_MAPPING['desc']) - - for _sort_key in sort_keys: - _instruction = (_sort_key, _sort_dir) - sort_instructions.append(_instruction) - - return sort_instructions, operation - - def _get_time_constrained_resources(self, query, - start_timestamp, start_timestamp_op, - end_timestamp, end_timestamp_op, - metaquery, resource, limit): - """Return an iterable of models.Resource instances - - Items are constrained by sample timestamp. - :param query: project/user/source query - :param start_timestamp: modified timestamp start range. - :param start_timestamp_op: start time operator, like gt, ge. - :param end_timestamp: modified timestamp end range. - :param end_timestamp_op: end time operator, like lt, le. - :param metaquery: dict with metadata to match on. - :param resource: resource filter. - """ - if resource is not None: - query['resource_id'] = resource - - # Add resource_ prefix so it matches the field in the db - query.update(dict(('resource_' + k, v) - for (k, v) in six.iteritems(metaquery))) - - # FIXME(dhellmann): This may not perform very well, - # but doing any better will require changing the database - # schema and that will need more thought than I have time - # to put into it today. - # Look for resources matching the above criteria and with - # samples in the time range we care about, then change the - # resource query to return just those resources by id. 
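To illustrate the 'resource_' prefixing above (the field name is hypothetical): metaquery keys arrive as 'metadata.<field>' and have to match the 'resource_metadata' field stored with each sample document::

    metaquery = {'metadata.display_name': 'vm-1'}
    query = {'project_id': 'p-1'}
    query.update(dict(('resource_' + k, v) for k, v in metaquery.items()))
    assert query == {'project_id': 'p-1',
                     'resource_metadata.display_name': 'vm-1'}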
- ts_range = pymongo_utils.make_timestamp_range(start_timestamp, - end_timestamp, - start_timestamp_op, - end_timestamp_op) - if ts_range: - query['timestamp'] = ts_range - - sort_keys = base._handle_sort_key('resource') - sort_instructions = self._build_sort_instructions(sort_keys)[0] - - # use a unique collection name for the results collection, - # as result post-sorting (as oppposed to reduce pre-sorting) - # is not possible on an inline M-R - out = 'resource_list_%s' % uuid.uuid4() - self.db.meter.map_reduce(self.MAP_RESOURCES, - self.REDUCE_RESOURCES, - out=out, - sort={'resource_id': 1}, - query=query) - - try: - if limit is not None: - results = self.db[out].find(sort=sort_instructions, - limit=limit) - else: - results = self.db[out].find(sort=sort_instructions) - for r in results: - resource = r['value'] - yield models.Resource( - resource_id=r['_id'], - user_id=resource['user_id'], - project_id=resource['project_id'], - first_sample_timestamp=resource['first_timestamp'], - last_sample_timestamp=resource['last_timestamp'], - source=resource['source'], - metadata=pymongo_utils.unquote_keys(resource['metadata'])) - finally: - self.db[out].drop() - - def _get_floating_resources(self, query, metaquery, resource, limit): - """Return an iterable of models.Resource instances - - Items are unconstrained by timestamp. - :param query: project/user/source query - :param metaquery: dict with metadata to match on. - :param resource: resource filter. - """ - if resource is not None: - query['_id'] = resource - - query.update(dict((k, v) - for (k, v) in six.iteritems(metaquery))) - - keys = base._handle_sort_key('resource') - sort_keys = ['last_sample_timestamp' if i == 'timestamp' else i - for i in keys] - sort_instructions = self._build_sort_instructions(sort_keys)[0] - - if limit is not None: - results = self.db.resource.find(query, sort=sort_instructions, - limit=limit) - else: - results = self.db.resource.find(query, sort=sort_instructions) - - for r in results: - yield models.Resource( - resource_id=r['_id'], - user_id=r['user_id'], - project_id=r['project_id'], - first_sample_timestamp=r.get('first_sample_timestamp', - self._GENESIS), - last_sample_timestamp=r.get('last_sample_timestamp', - self._APOCALYPSE), - source=r['source'], - metadata=pymongo_utils.unquote_keys(r['metadata'])) - - def get_resources(self, user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of models.Resource instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional start time operator, like gt, ge. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional end time operator, like lt, le. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. 
- """ - if limit == 0: - return - metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} - - query = {} - if user is not None: - query['user_id'] = user - if project is not None: - query['project_id'] = project - if source is not None: - query['source'] = source - - if start_timestamp or end_timestamp: - return self._get_time_constrained_resources(query, - start_timestamp, - start_timestamp_op, - end_timestamp, - end_timestamp_op, - metaquery, resource, - limit) - else: - return self._get_floating_resources(query, metaquery, resource, - limit) - - @staticmethod - def _make_period_dict(period, first_ts): - """Create a period field for _id of grouped fields. - - :param period: Period duration in seconds - :param first_ts: First timestamp for first period - :return: - """ - if period >= 0: - period_unique_dict = { - "period_start": - { - "$divide": [ - {"$subtract": [ - {"$subtract": ["$timestamp", - first_ts]}, - {"$mod": [{"$subtract": ["$timestamp", - first_ts]}, - period * 1000] - } - ]}, - period * 1000 - ] - } - - } - else: - # Note(ityaptin) Hack for older MongoDB versions (2.4.+ and older). - # Since 2.6+ we could use $literal operator - period_unique_dict = {"$period_start": {"$add": [0, 0]}} - return period_unique_dict - - def get_meter_statistics(self, sample_filter, period=None, groupby=None, - aggregate=None): - """Return an iterable of models.Statistics instance. - - Items are containing meter statistics described by the query - parameters. The filter must have a meter value set. - """ - # NOTE(zqfan): We already have checked at API level, but - # still leave it here in case of directly storage calls. - if aggregate: - for a in aggregate: - if a.func not in self.AGGREGATES: - msg = _('Invalid aggregation function: %s') % a.func - raise storage.StorageBadAggregate(msg) - - if (groupby and set(groupby) - - set(['user_id', 'project_id', 'resource_id', 'source', - 'resource_metadata.instance_type'])): - raise ceilometer.NotImplementedError( - "Unable to group by these fields") - q = pymongo_utils.make_query_from_filter(sample_filter) - - group_stage = {} - project_stage = { - "unit": "$_id.unit", - "name": "$_id.name", - "first_timestamp": "$first_timestamp", - "last_timestamp": "$last_timestamp", - "period_start": "$_id.period_start", - } - - # Add timestamps to $group stage - group_stage.update({"first_timestamp": {"$min": "$timestamp"}, - "last_timestamp": {"$max": "$timestamp"}}) - - # Define a _id field for grouped documents - unique_group_field = {"name": "$counter_name", - "unit": "$counter_unit"} - - # Define a first timestamp for periods - if sample_filter.start_timestamp: - first_timestamp = sample_filter.start_timestamp - else: - first_timestamp_cursor = self.db.meter.find( - limit=1, sort=[('timestamp', - pymongo.ASCENDING)]) - if first_timestamp_cursor.count(): - first_timestamp = first_timestamp_cursor[0]['timestamp'] - else: - first_timestamp = utils.EPOCH_TIME - - # Add a start_period field to unique identifier of grouped documents - if period: - period_dict = self._make_period_dict(period, - first_timestamp) - unique_group_field.update(period_dict) - - # Add a groupby fields to unique identifier of grouped documents - if groupby: - unique_group_field.update(dict((field.replace(".", "/"), - "$%s" % field) - for field in groupby)) - - group_stage.update({"_id": unique_group_field}) - - self._compile_aggregate_stages(aggregate, group_stage, project_stage) - - # Aggregation stages list. It's work one by one and uses documents - # from previous stages. 
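The $subtract/$mod/$divide expression built by _make_period_dict above computes, per document, the index of the statistics bucket the sample falls into; the same arithmetic in plain Python (millisecond inputs, made-up values)::

    def period_index(ts_ms, first_ts_ms, period):
        delta = ts_ms - first_ts_ms
        return (delta - delta % (period * 1000)) // (period * 1000)

    assert period_index(95000, 0, 60) == 1   # second 60-second bucket
    assert period_index(59999, 0, 60) == 0   # still in the first bucket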
- aggregation_query = [{'$match': q}, - {"$sort": {"timestamp": 1}}, - {"$group": group_stage}, - {"$sort": {"_id.period_start": 1}}, - {"$project": project_stage}] - - # results is dict in pymongo<=2.6.3 and CommandCursor in >=3.0 - results = self.db.meter.aggregate(aggregation_query, - **self._make_aggregation_params()) - return [self._stats_result_to_model(point, groupby, aggregate, - period, first_timestamp) - for point in self._get_results(results)] - - def _stats_result_aggregates(self, result, aggregate): - stats_args = {} - for attr, func in Connection.STANDARD_AGGREGATES.items(): - if attr in result: - stats_args.update(func.finalize(result, - version_array=self.version)) - - if aggregate: - stats_args['aggregate'] = {} - for agr in aggregate: - stats_args['aggregate'].update( - Connection.AGGREGATES[agr.func].finalize( - result, agr.param, self.version)) - return stats_args - - def _stats_result_to_model(self, result, groupby, aggregate, period, - first_timestamp): - if period is None: - period = 0 - first_timestamp = pymongo_utils.from_unix_timestamp(first_timestamp) - stats_args = self._stats_result_aggregates(result, aggregate) - - stats_args['unit'] = result['unit'] - stats_args['duration'] = (result["last_timestamp"] - - result["first_timestamp"]).total_seconds() - stats_args['duration_start'] = result['first_timestamp'] - stats_args['duration_end'] = result['last_timestamp'] - stats_args['period'] = period - start = result.get("period_start", 0) * period - - stats_args['period_start'] = (first_timestamp + - datetime.timedelta(seconds=start)) - stats_args['period_end'] = (first_timestamp + - datetime.timedelta(seconds=start + period) - if period else result['last_timestamp']) - - stats_args['groupby'] = ( - dict((g, result['_id'].get(g.replace(".", "/"))) - for g in groupby) if groupby else None) - return models.Statistics(**stats_args) - - def _compile_aggregate_stages(self, aggregate, group_stage, project_stage): - if not aggregate: - for aggregation in Connection.STANDARD_AGGREGATES.values(): - group_stage.update( - aggregation.group(version_array=self.version) - ) - project_stage.update( - aggregation.project( - version_array=self.version - ) - ) - else: - for description in aggregate: - aggregation = Connection.AGGREGATES.get(description.func) - if aggregation: - if not aggregation.validate(description.param): - raise storage.StorageBadAggregate( - 'Bad aggregate: %s.%s' % (description.func, - description.param)) - group_stage.update( - aggregation.group(description.param, - version_array=self.version) - ) - project_stage.update( - aggregation.project(description.param, - version_array=self.version) - ) - - @staticmethod - def _get_results(results): - if isinstance(results, dict): - return results.get('result', []) - else: - return results - - def _make_aggregation_params(self): - if self.version >= pymongo_utils.COMPLETE_AGGREGATE_COMPATIBLE_VERSION: - return {"allowDiskUse": True} - return {} diff --git a/ceilometer/storage/impl_sqlalchemy.py b/ceilometer/storage/impl_sqlalchemy.py deleted file mode 100644 index 2b6c9af9..00000000 --- a/ceilometer/storage/impl_sqlalchemy.py +++ /dev/null @@ -1,822 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""SQLAlchemy storage backend.""" - -from __future__ import absolute_import -import datetime -import hashlib -import os - -from oslo_config import cfg -from oslo_db import api -from oslo_db import exception as dbexc -from oslo_db.sqlalchemy import session as db_session -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import timeutils -import six -import sqlalchemy as sa -from sqlalchemy import and_ -from sqlalchemy import distinct -from sqlalchemy import func -from sqlalchemy.orm import aliased -from sqlalchemy.sql.expression import cast - -import ceilometer -from ceilometer.i18n import _, _LI -from ceilometer import storage -from ceilometer.storage import base -from ceilometer.storage import models as api_models -from ceilometer.storage.sqlalchemy import models -from ceilometer.storage.sqlalchemy import utils as sql_utils -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -STANDARD_AGGREGATES = dict( - avg=func.avg(models.Sample.volume).label('avg'), - sum=func.sum(models.Sample.volume).label('sum'), - min=func.min(models.Sample.volume).label('min'), - max=func.max(models.Sample.volume).label('max'), - count=func.count(models.Sample.volume).label('count') -) - -UNPARAMETERIZED_AGGREGATES = dict( - stddev=func.stddev_pop(models.Sample.volume).label('stddev') -) - -PARAMETERIZED_AGGREGATES = dict( - validate=dict( - cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id'] - ), - compute=dict( - cardinality=lambda p: func.count( - distinct(getattr(models.Resource, p)) - ).label('cardinality/%s' % p) - ) -) - -AVAILABLE_CAPABILITIES = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}} - }, -} - - -AVAILABLE_STORAGE_CAPABILITIES = { - 'storage': {'production_ready': True}, -} - - -def apply_metaquery_filter(session, query, metaquery): - """Apply provided metaquery filter to existing query. - - :param session: session used for original query - :param query: Query instance - :param metaquery: dict with metadata to match on. - """ - for k, value in six.iteritems(metaquery): - key = k[9:] # strip out 'metadata.' prefix - try: - _model = sql_utils.META_TYPE_MAP[type(value)] - except KeyError: - raise ceilometer.NotImplementedError( - 'Query on %(key)s is of %(value)s ' - 'type and is not supported' % - {"key": k, "value": type(value)}) - else: - meta_alias = aliased(_model) - on_clause = and_(models.Resource.internal_id == meta_alias.id, - meta_alias.meta_key == key) - # outer join is needed to support metaquery - # with or operator on non existent metadata field - # see: test_query_non_existing_metadata_with_result - # test case. 
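A short note on the slicing in apply_metaquery_filter above: the hard-coded 9 is simply the length of the 'metadata.' prefix that metaquery keys carry (the example key is illustrative)::

    assert len('metadata.') == 9
    assert 'metadata.display_name'[9:] == 'display_name'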
- query = query.outerjoin(meta_alias, on_clause) - query = query.filter(meta_alias.value == value) - - return query - - -def make_query_from_filter(session, query, sample_filter, require_meter=True): - """Return a query dictionary based on the settings in the filter. - - :param session: session used for original query - :param query: Query instance - :param sample_filter: SampleFilter instance - :param require_meter: If true and the filter does not have a meter, - raise an error. - """ - - if sample_filter.meter: - query = query.filter(models.Meter.name == sample_filter.meter) - elif require_meter: - raise RuntimeError('Missing required meter specifier') - if sample_filter.source: - query = query.filter( - models.Resource.source_id == sample_filter.source) - if sample_filter.start_timestamp: - ts_start = sample_filter.start_timestamp - if sample_filter.start_timestamp_op == 'gt': - query = query.filter(models.Sample.timestamp > ts_start) - else: - query = query.filter(models.Sample.timestamp >= ts_start) - if sample_filter.end_timestamp: - ts_end = sample_filter.end_timestamp - if sample_filter.end_timestamp_op == 'le': - query = query.filter(models.Sample.timestamp <= ts_end) - else: - query = query.filter(models.Sample.timestamp < ts_end) - if sample_filter.user: - if sample_filter.user == 'None': - sample_filter.user = None - query = query.filter(models.Resource.user_id == sample_filter.user) - if sample_filter.project: - if sample_filter.project == 'None': - sample_filter.project = None - query = query.filter( - models.Resource.project_id == sample_filter.project) - if sample_filter.resource: - query = query.filter( - models.Resource.resource_id == sample_filter.resource) - if sample_filter.message_id: - query = query.filter( - models.Sample.message_id == sample_filter.message_id) - - if sample_filter.metaquery: - query = apply_metaquery_filter(session, query, - sample_filter.metaquery) - - return query - - -class Connection(base.Connection): - """Put the data into a SQLAlchemy database. 
- - Tables:: - - - meter - - meter definition - - { id: meter id - name: meter name - type: meter type - unit: meter unit - } - - resource - - resource definition - - { internal_id: resource id - resource_id: resource uuid - user_id: user uuid - project_id: project uuid - source_id: source id - resource_metadata: metadata dictionary - metadata_hash: metadata dictionary hash - } - - sample - - the raw incoming data - - { id: sample id - meter_id: meter id (->meter.id) - resource_id: resource id (->resource.internal_id) - volume: sample volume - timestamp: datetime - recorded_at: datetime - message_signature: message signature - message_id: message uuid - } - """ - CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, - AVAILABLE_CAPABILITIES) - STORAGE_CAPABILITIES = utils.update_nested( - base.Connection.STORAGE_CAPABILITIES, - AVAILABLE_STORAGE_CAPABILITIES, - ) - - def __init__(self, url): - # Set max_retries to 0, since oslo.db in certain cases may attempt - # to retry making the db connection retried max_retries ^ 2 times - # in failure case and db reconnection has already been implemented - # in storage.__init__.get_connection_from_config function - options = dict(cfg.CONF.database.items()) - options['max_retries'] = 0 - # oslo.db doesn't support options defined by Ceilometer - for opt in storage.OPTS: - options.pop(opt.name, None) - self._engine_facade = db_session.EngineFacade(url, **options) - - def upgrade(self): - # NOTE(gordc): to minimise memory, only import migration when needed - from oslo_db.sqlalchemy import migration - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - 'sqlalchemy', 'migrate_repo') - migration.db_sync(self._engine_facade.get_engine(), path) - - def clear(self): - engine = self._engine_facade.get_engine() - for table in reversed(models.Base.metadata.sorted_tables): - engine.execute(table.delete()) - engine.dispose() - - @staticmethod - def _create_meter(conn, name, type, unit): - # TODO(gordc): implement lru_cache to improve performance - try: - meter = models.Meter.__table__ - trans = conn.begin_nested() - if conn.dialect.name == 'sqlite': - trans = conn.begin() - with trans: - meter_row = conn.execute( - sa.select([meter.c.id]) - .where(sa.and_(meter.c.name == name, - meter.c.type == type, - meter.c.unit == unit))).first() - meter_id = meter_row[0] if meter_row else None - if meter_id is None: - result = conn.execute(meter.insert(), name=name, - type=type, unit=unit) - meter_id = result.inserted_primary_key[0] - except dbexc.DBDuplicateEntry: - # retry function to pick up duplicate committed object - meter_id = Connection._create_meter(conn, name, type, unit) - - return meter_id - - @staticmethod - def _create_resource(conn, res_id, user_id, project_id, source_id, - rmeta): - # TODO(gordc): implement lru_cache to improve performance - try: - res = models.Resource.__table__ - m_hash = jsonutils.dumps(rmeta, sort_keys=True) - if six.PY3: - m_hash = m_hash.encode('utf-8') - m_hash = hashlib.md5(m_hash).hexdigest() - trans = conn.begin_nested() - if conn.dialect.name == 'sqlite': - trans = conn.begin() - with trans: - res_row = conn.execute( - sa.select([res.c.internal_id]) - .where(sa.and_(res.c.resource_id == res_id, - res.c.user_id == user_id, - res.c.project_id == project_id, - res.c.source_id == source_id, - res.c.metadata_hash == m_hash))).first() - internal_id = res_row[0] if res_row else None - if internal_id is None: - result = conn.execute(res.insert(), resource_id=res_id, - user_id=user_id, - project_id=project_id, - 
source_id=source_id, - resource_metadata=rmeta, - metadata_hash=m_hash) - internal_id = result.inserted_primary_key[0] - if rmeta and isinstance(rmeta, dict): - meta_map = {} - for key, v in utils.dict_to_keyval(rmeta): - try: - _model = sql_utils.META_TYPE_MAP[type(v)] - if meta_map.get(_model) is None: - meta_map[_model] = [] - meta_map[_model].append( - {'id': internal_id, 'meta_key': key, - 'value': v}) - except KeyError: - LOG.warning(_("Unknown metadata type. Key " - "(%s) will not be queryable."), - key) - for _model in meta_map.keys(): - conn.execute(_model.__table__.insert(), - meta_map[_model]) - - except dbexc.DBDuplicateEntry: - # retry function to pick up duplicate committed object - internal_id = Connection._create_resource( - conn, res_id, user_id, project_id, source_id, rmeta) - - return internal_id - - @api.wrap_db_retry(retry_interval=cfg.CONF.database.retry_interval, - max_retries=cfg.CONF.database.max_retries, - retry_on_deadlock=True) - def record_metering_data(self, data): - """Write the data to the backend storage system. - - :param data: a dictionary such as returned by - ceilometer.publisher.utils.meter_message_from_counter - """ - engine = self._engine_facade.get_engine() - with engine.begin() as conn: - # Record the raw data for the sample. - m_id = self._create_meter(conn, - data['counter_name'], - data['counter_type'], - data['counter_unit']) - res_id = self._create_resource(conn, - data['resource_id'], - data['user_id'], - data['project_id'], - data['source'], - data['resource_metadata']) - sample = models.Sample.__table__ - conn.execute(sample.insert(), meter_id=m_id, - resource_id=res_id, - timestamp=data['timestamp'], - volume=data['counter_volume'], - message_signature=data['message_signature'], - message_id=data['message_id']) - - def clear_expired_metering_data(self, ttl): - """Clear expired data from the backend storage system. - - Clearing occurs according to the time-to-live. - :param ttl: Number of seconds to keep records for. 
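# Illustrative sketch of the TTL cut-off used by clear_expired_metering_data()
# below: samples older than utcnow() - ttl seconds are removed, assuming
# timestamps are stored as naive UTC datetimes.
import datetime

def ttl_cutoff(ttl_seconds, now=None):
    """Return the oldest timestamp that is still kept for a given TTL."""
    now = now or datetime.datetime.utcnow()
    return now - datetime.timedelta(seconds=ttl_seconds)

# e.g. session.query(Sample).filter(Sample.timestamp < ttl_cutoff(86400))
# matches the "expire everything older than one day" case.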
- """ - # Prevent database deadlocks from occurring by - # using separate transaction for each delete - session = self._engine_facade.get_session() - with session.begin(): - end = timeutils.utcnow() - datetime.timedelta(seconds=ttl) - sample_q = (session.query(models.Sample) - .filter(models.Sample.timestamp < end)) - rows = sample_q.delete() - LOG.info(_LI("%d samples removed from database"), rows) - - if not cfg.CONF.sql_expire_samples_only: - with session.begin(): - # remove Meter definitions with no matching samples - (session.query(models.Meter) - .filter(~models.Meter.samples.any()) - .delete(synchronize_session=False)) - - with session.begin(): - resource_q = (session.query(models.Resource.internal_id) - .filter(~models.Resource.samples.any())) - # mark resource with no matching samples for delete - resource_q.update({models.Resource.metadata_hash: "delete_" - + cast(models.Resource.internal_id, - sa.String)}, - synchronize_session=False) - - # remove metadata of resources marked for delete - for table in [models.MetaText, models.MetaBigInt, - models.MetaFloat, models.MetaBool]: - with session.begin(): - resource_q = (session.query(models.Resource.internal_id) - .filter(models.Resource.metadata_hash - .like('delete_%'))) - resource_subq = resource_q.subquery() - (session.query(table) - .filter(table.id.in_(resource_subq)) - .delete(synchronize_session=False)) - - # remove resource marked for delete - with session.begin(): - resource_q = (session.query(models.Resource.internal_id) - .filter(models.Resource.metadata_hash - .like('delete_%'))) - resource_q.delete(synchronize_session=False) - LOG.info(_LI("Expired residual resource and" - " meter definition data")) - - def get_resources(self, user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of api_models.Resource instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional start time operator, like gt, ge. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional end time operator, like lt, le. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. - """ - if limit == 0: - return - s_filter = storage.SampleFilter(user=user, - project=project, - source=source, - start_timestamp=start_timestamp, - start_timestamp_op=start_timestamp_op, - end_timestamp=end_timestamp, - end_timestamp_op=end_timestamp_op, - metaquery=metaquery, - resource=resource) - - session = self._engine_facade.get_session() - # get list of resource_ids - has_timestamp = start_timestamp or end_timestamp - # NOTE: When sql_expire_samples_only is enabled, there will be some - # resources without any sample, in such case we should use inner - # join on sample table to avoid wrong result. 
- if cfg.CONF.sql_expire_samples_only or has_timestamp: - res_q = session.query(distinct(models.Resource.resource_id)).join( - models.Sample, - models.Sample.resource_id == models.Resource.internal_id) - else: - res_q = session.query(distinct(models.Resource.resource_id)) - res_q = make_query_from_filter(session, res_q, s_filter, - require_meter=False) - res_q = res_q.limit(limit) if limit else res_q - for res_id in res_q.all(): - - # get max and min sample timestamp value - min_max_q = (session.query(func.max(models.Sample.timestamp) - .label('max_timestamp'), - func.min(models.Sample.timestamp) - .label('min_timestamp')) - .join(models.Resource, - models.Resource.internal_id == - models.Sample.resource_id) - .filter(models.Resource.resource_id == - res_id[0])) - - min_max_q = make_query_from_filter(session, min_max_q, s_filter, - require_meter=False) - - min_max = min_max_q.first() - - # get resource details for latest sample - res_q = (session.query(models.Resource.resource_id, - models.Resource.user_id, - models.Resource.project_id, - models.Resource.source_id, - models.Resource.resource_metadata) - .join(models.Sample, - models.Sample.resource_id == - models.Resource.internal_id) - .filter(models.Sample.timestamp == - min_max.max_timestamp) - .filter(models.Resource.resource_id == - res_id[0]) - .order_by(models.Sample.id.desc()).limit(1)) - - res = res_q.first() - - yield api_models.Resource( - resource_id=res.resource_id, - project_id=res.project_id, - first_sample_timestamp=min_max.min_timestamp, - last_sample_timestamp=min_max.max_timestamp, - source=res.source_id, - user_id=res.user_id, - metadata=res.resource_metadata - ) - - def get_meters(self, user=None, project=None, resource=None, source=None, - metaquery=None, limit=None, unique=False): - """Return an iterable of api_models.Meter instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param resource: Optional ID of the resource. - :param source: Optional source filter. - :param metaquery: Optional dict with metadata to match on. - :param limit: Maximum number of results to return. - :param unique: If set to true, return only unique meter information. - """ - if limit == 0: - return - s_filter = storage.SampleFilter(user=user, - project=project, - source=source, - metaquery=metaquery, - resource=resource) - - # NOTE(gordc): get latest sample of each meter/resource. we do not - # filter here as we want to filter only on latest record. - session = self._engine_facade.get_session() - - subq = session.query(func.max(models.Sample.id).label('id')).join( - models.Resource, - models.Resource.internal_id == models.Sample.resource_id) - - if unique: - subq = subq.group_by(models.Sample.meter_id) - else: - subq = subq.group_by(models.Sample.meter_id, - models.Resource.resource_id) - - if resource: - subq = subq.filter(models.Resource.resource_id == resource) - subq = subq.subquery() - - # get meter details for samples. 
- query_sample = (session.query(models.Sample.meter_id, - models.Meter.name, models.Meter.type, - models.Meter.unit, - models.Resource.resource_id, - models.Resource.project_id, - models.Resource.source_id, - models.Resource.user_id).join( - subq, subq.c.id == models.Sample.id) - .join(models.Meter, models.Meter.id == models.Sample.meter_id) - .join(models.Resource, - models.Resource.internal_id == models.Sample.resource_id)) - query_sample = make_query_from_filter(session, query_sample, s_filter, - require_meter=False) - - query_sample = query_sample.limit(limit) if limit else query_sample - - if unique: - for row in query_sample.all(): - yield api_models.Meter( - name=row.name, - type=row.type, - unit=row.unit, - resource_id=None, - project_id=None, - source=None, - user_id=None) - else: - for row in query_sample.all(): - yield api_models.Meter( - name=row.name, - type=row.type, - unit=row.unit, - resource_id=row.resource_id, - project_id=row.project_id, - source=row.source_id, - user_id=row.user_id) - - @staticmethod - def _retrieve_samples(query): - samples = query.all() - - for s in samples: - # Remove the id generated by the database when - # the sample was inserted. It is an implementation - # detail that should not leak outside of the driver. - yield api_models.Sample( - source=s.source_id, - counter_name=s.counter_name, - counter_type=s.counter_type, - counter_unit=s.counter_unit, - counter_volume=s.counter_volume, - user_id=s.user_id, - project_id=s.project_id, - resource_id=s.resource_id, - timestamp=s.timestamp, - recorded_at=s.recorded_at, - resource_metadata=s.resource_metadata, - message_id=s.message_id, - message_signature=s.message_signature, - ) - - def get_samples(self, sample_filter, limit=None): - """Return an iterable of api_models.Samples. - - :param sample_filter: Filter. - :param limit: Maximum number of results to return. 
- """ - if limit == 0: - return [] - - session = self._engine_facade.get_session() - query = session.query(models.Sample.timestamp, - models.Sample.recorded_at, - models.Sample.message_id, - models.Sample.message_signature, - models.Sample.volume.label('counter_volume'), - models.Meter.name.label('counter_name'), - models.Meter.type.label('counter_type'), - models.Meter.unit.label('counter_unit'), - models.Resource.source_id, - models.Resource.user_id, - models.Resource.project_id, - models.Resource.resource_metadata, - models.Resource.resource_id).join( - models.Meter, models.Meter.id == models.Sample.meter_id).join( - models.Resource, - models.Resource.internal_id == models.Sample.resource_id).order_by( - models.Sample.timestamp.desc()) - query = make_query_from_filter(session, query, sample_filter, - require_meter=False) - if limit: - query = query.limit(limit) - return self._retrieve_samples(query) - - def query_samples(self, filter_expr=None, orderby=None, limit=None): - if limit == 0: - return [] - - session = self._engine_facade.get_session() - engine = self._engine_facade.get_engine() - query = session.query(models.Sample.timestamp, - models.Sample.recorded_at, - models.Sample.message_id, - models.Sample.message_signature, - models.Sample.volume.label('counter_volume'), - models.Meter.name.label('counter_name'), - models.Meter.type.label('counter_type'), - models.Meter.unit.label('counter_unit'), - models.Resource.source_id, - models.Resource.user_id, - models.Resource.project_id, - models.Resource.resource_metadata, - models.Resource.resource_id).join( - models.Meter, models.Meter.id == models.Sample.meter_id).join( - models.Resource, - models.Resource.internal_id == models.Sample.resource_id) - transformer = sql_utils.QueryTransformer(models.FullSample, query, - dialect=engine.dialect.name) - if filter_expr is not None: - transformer.apply_filter(filter_expr) - - transformer.apply_options(orderby, limit) - return self._retrieve_samples(transformer.get_query()) - - @staticmethod - def _get_aggregate_functions(aggregate): - if not aggregate: - return [f for f in STANDARD_AGGREGATES.values()] - - functions = [] - - for a in aggregate: - if a.func in STANDARD_AGGREGATES: - functions.append(STANDARD_AGGREGATES[a.func]) - elif a.func in UNPARAMETERIZED_AGGREGATES: - functions.append(UNPARAMETERIZED_AGGREGATES[a.func]) - elif a.func in PARAMETERIZED_AGGREGATES['compute']: - validate = PARAMETERIZED_AGGREGATES['validate'].get(a.func) - if not (validate and validate(a.param)): - raise storage.StorageBadAggregate('Bad aggregate: %s.%s' - % (a.func, a.param)) - compute = PARAMETERIZED_AGGREGATES['compute'][a.func] - functions.append(compute(a.param)) - else: - # NOTE(zqfan): We already have checked at API level, but - # still leave it here in case of directly storage calls. 
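# Illustrative sketch of how selectable aggregates are keyed in this driver's
# results: standard functions keep their plain name, while the parameterized
# 'cardinality' aggregate becomes '<func>/<param>', with the parameter checked
# against the same whitelist as PARAMETERIZED_AGGREGATES above.
ALLOWED_CARDINALITY_PARAMS = ('resource_id', 'user_id', 'project_id')

def aggregate_key(func, param=None):
    if func == 'cardinality' and param not in ALLOWED_CARDINALITY_PARAMS:
        raise ValueError('Bad aggregate: %s.%s' % (func, param))
    return '%s%s' % (func, '/%s' % param if param else '')

# aggregate_key('avg')                        -> 'avg'
# aggregate_key('cardinality', 'resource_id') -> 'cardinality/resource_id'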
- msg = _('Invalid aggregation function: %s') % a.func - raise storage.StorageBadAggregate(msg) - - return functions - - def _make_stats_query(self, sample_filter, groupby, aggregate): - - select = [ - func.min(models.Sample.timestamp).label('tsmin'), - func.max(models.Sample.timestamp).label('tsmax'), - models.Meter.unit - ] - select.extend(self._get_aggregate_functions(aggregate)) - - session = self._engine_facade.get_session() - - if groupby: - group_attributes = [] - for g in groupby: - if g != 'resource_metadata.instance_type': - group_attributes.append(getattr(models.Resource, g)) - else: - group_attributes.append( - getattr(models.MetaText, 'value') - .label('resource_metadata.instance_type')) - - select.extend(group_attributes) - - query = ( - session.query(*select) - .join(models.Meter, - models.Meter.id == models.Sample.meter_id) - .join(models.Resource, - models.Resource.internal_id == models.Sample.resource_id) - .group_by(models.Meter.unit)) - - if groupby: - for g in groupby: - if g == 'resource_metadata.instance_type': - query = query.join( - models.MetaText, - models.Resource.internal_id == models.MetaText.id) - query = query.filter( - models.MetaText.meta_key == 'instance_type') - query = query.group_by(*group_attributes) - - return make_query_from_filter(session, query, sample_filter) - - @staticmethod - def _stats_result_aggregates(result, aggregate): - stats_args = {} - if isinstance(result.count, six.integer_types): - stats_args['count'] = result.count - for attr in ['min', 'max', 'sum', 'avg']: - if hasattr(result, attr): - stats_args[attr] = getattr(result, attr) - if aggregate: - stats_args['aggregate'] = {} - for a in aggregate: - key = '%s%s' % (a.func, '/%s' % a.param if a.param else '') - stats_args['aggregate'][key] = getattr(result, key) - return stats_args - - @staticmethod - def _stats_result_to_model(result, period, period_start, - period_end, groupby, aggregate): - stats_args = Connection._stats_result_aggregates(result, aggregate) - stats_args['unit'] = result.unit - duration = (timeutils.delta_seconds(result.tsmin, result.tsmax) - if result.tsmin is not None and result.tsmax is not None - else None) - stats_args['duration'] = duration - stats_args['duration_start'] = result.tsmin - stats_args['duration_end'] = result.tsmax - stats_args['period'] = period - stats_args['period_start'] = period_start - stats_args['period_end'] = period_end - stats_args['groupby'] = (dict( - (g, getattr(result, g)) for g in groupby) if groupby else None) - return api_models.Statistics(**stats_args) - - def get_meter_statistics(self, sample_filter, period=None, groupby=None, - aggregate=None): - """Return an iterable of api_models.Statistics instances. - - Items are containing meter statistics described by the query - parameters. The filter must have a meter value set. - """ - if groupby: - for group in groupby: - if group not in ['user_id', 'project_id', 'resource_id', - 'resource_metadata.instance_type']: - raise ceilometer.NotImplementedError('Unable to group by ' - 'these fields') - - if not period: - for res in self._make_stats_query(sample_filter, - groupby, - aggregate): - if res.count: - yield self._stats_result_to_model(res, 0, - res.tsmin, res.tsmax, - groupby, - aggregate) - return - - if not (sample_filter.start_timestamp and sample_filter.end_timestamp): - res = self._make_stats_query(sample_filter, - None, - aggregate).first() - if not res: - # NOTE(liusheng):The 'res' may be NoneType, because no - # sample has found with sample filter(s). 
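# Illustrative sketch of the fixed-size period windows the statistics loop
# just below iterates over; base.iter_period is assumed to behave like this
# stand-in, yielding half-open [start, end) buckets of `period` seconds.
import datetime

def iter_period_sketch(start, end, period):
    while start < end:
        bucket_end = start + datetime.timedelta(seconds=period)
        yield start, bucket_end
        start = bucket_end

# Each (period_start, period_end) pair then filters
# Sample.timestamp >= period_start and Sample.timestamp < period_end.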
- return - - query = self._make_stats_query(sample_filter, groupby, aggregate) - # HACK(jd) This is an awful method to compute stats by period, but - # since we're trying to be SQL agnostic we have to write portable - # code, so here it is, admire! We're going to do one request to get - # stats by period. We would like to use GROUP BY, but there's no - # portable way to manipulate timestamp in SQL, so we can't. - for period_start, period_end in base.iter_period( - sample_filter.start_timestamp or res.tsmin, - sample_filter.end_timestamp or res.tsmax, - period): - q = query.filter(models.Sample.timestamp >= period_start) - q = q.filter(models.Sample.timestamp < period_end) - for r in q.all(): - if r.count: - yield self._stats_result_to_model( - result=r, - period=int(timeutils.delta_seconds(period_start, - period_end)), - period_start=period_start, - period_end=period_end, - groupby=groupby, - aggregate=aggregate - ) diff --git a/ceilometer/storage/models.py b/ceilometer/storage/models.py deleted file mode 100644 index 816a4c5d..00000000 --- a/ceilometer/storage/models.py +++ /dev/null @@ -1,148 +0,0 @@ -# -# Copyright 2013 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Model classes for use in the storage API. -""" -from ceilometer.storage import base - - -class Resource(base.Model): - """Something for which sample data has been collected.""" - - def __init__(self, resource_id, project_id, - first_sample_timestamp, - last_sample_timestamp, - source, user_id, metadata): - """Create a new resource. - - :param resource_id: UUID of the resource - :param project_id: UUID of project owning the resource - :param first_sample_timestamp: first sample timestamp captured - :param last_sample_timestamp: last sample timestamp captured - :param source: the identifier for the user/project id definition - :param user_id: UUID of user owning the resource - :param metadata: most current metadata for the resource (a dict) - """ - base.Model.__init__(self, - resource_id=resource_id, - first_sample_timestamp=first_sample_timestamp, - last_sample_timestamp=last_sample_timestamp, - project_id=project_id, - source=source, - user_id=user_id, - metadata=metadata, - ) - - -class Meter(base.Model): - """Definition of a meter for which sample data has been collected.""" - - def __init__(self, name, type, unit, resource_id, project_id, source, - user_id): - """Create a new meter. 
- - :param name: name of the meter - :param type: type of the meter (gauge, delta, cumulative) - :param unit: unit of the meter - :param resource_id: UUID of the resource - :param project_id: UUID of project owning the resource - :param source: the identifier for the user/project id definition - :param user_id: UUID of user owning the resource - """ - base.Model.__init__(self, - name=name, - type=type, - unit=unit, - resource_id=resource_id, - project_id=project_id, - source=source, - user_id=user_id, - ) - - -class Sample(base.Model): - """One collected data point.""" - def __init__(self, - source, - counter_name, counter_type, counter_unit, counter_volume, - user_id, project_id, resource_id, - timestamp, resource_metadata, - message_id, - message_signature, - recorded_at, - ): - """Create a new sample. - - :param source: the identifier for the user/project id definition - :param counter_name: the name of the measurement being taken - :param counter_type: the type of the measurement - :param counter_unit: the units for the measurement - :param counter_volume: the measured value - :param user_id: the user that triggered the measurement - :param project_id: the project that owns the resource - :param resource_id: the thing on which the measurement was taken - :param timestamp: the time of the measurement - :param resource_metadata: extra details about the resource - :param message_id: a message identifier - :param recorded_at: sample record timestamp - :param message_signature: a hash created from the rest of the - message data - """ - base.Model.__init__(self, - source=source, - counter_name=counter_name, - counter_type=counter_type, - counter_unit=counter_unit, - counter_volume=counter_volume, - user_id=user_id, - project_id=project_id, - resource_id=resource_id, - timestamp=timestamp, - resource_metadata=resource_metadata, - message_id=message_id, - message_signature=message_signature, - recorded_at=recorded_at) - - -class Statistics(base.Model): - """Computed statistics based on a set of sample data.""" - def __init__(self, unit, - period, period_start, period_end, - duration, duration_start, duration_end, - groupby, **data): - """Create a new statistics object. - - :param unit: The unit type of the data set - :param period: The length of the time range covered by these stats - :param period_start: The timestamp for the start of the period - :param period_end: The timestamp for the end of the period - :param duration: The total time for the matching samples - :param duration_start: The earliest time for the matching samples - :param duration_end: The latest time for the matching samples - :param groupby: The fields used to group the samples. 
- :param data: some or all of the following aggregates - min: The smallest volume found - max: The largest volume found - avg: The average of all volumes found - sum: The total of all volumes found - count: The number of samples found - aggregate: name-value pairs for selectable aggregates - """ - base.Model.__init__(self, unit=unit, - period=period, period_start=period_start, - period_end=period_end, duration=duration, - duration_start=duration_start, - duration_end=duration_end, - groupby=groupby, - **data) diff --git a/ceilometer/storage/mongo/utils.py b/ceilometer/storage/mongo/utils.py index 3c1195a2..2b41872e 100644 --- a/ceilometer/storage/mongo/utils.py +++ b/ceilometer/storage/mongo/utils.py @@ -18,7 +18,6 @@ """Common functions for MongoDB backend """ -import datetime import time import weakref @@ -28,7 +27,6 @@ from oslo_utils import netutils import pymongo import pymongo.errors import six -from six.moves.urllib import parse from ceilometer.i18n import _, _LI @@ -43,13 +41,6 @@ OP_SIGN = {'lt': '$lt', 'le': '$lte', 'ne': '$ne', 'gt': '$gt', 'ge': '$gte'} MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4] COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6] -FINALIZE_FLOAT_LAMBDA = lambda result, param=None: float(result) -FINALIZE_INT_LAMBDA = lambda result, param=None: int(result) -CARDINALITY_VALIDATION = (lambda name, param: param in ['resource_id', - 'user_id', - 'project_id', - 'source']) - def make_timestamp_range(start, end, start_timestamp_op=None, end_timestamp_op=None): @@ -125,118 +116,6 @@ def make_events_query_from_filter(event_filter): return query -def make_query_from_filter(sample_filter, require_meter=True): - """Return a query dictionary based on the settings in the filter. - - :param sample_filter: SampleFilter instance - :param require_meter: If true and the filter does not have a meter, - raise an error. - """ - q = {} - - if sample_filter.user: - q['user_id'] = sample_filter.user - if sample_filter.project: - q['project_id'] = sample_filter.project - - if sample_filter.meter: - q['counter_name'] = sample_filter.meter - elif require_meter: - raise RuntimeError('Missing required meter specifier') - - ts_range = make_timestamp_range(sample_filter.start_timestamp, - sample_filter.end_timestamp, - sample_filter.start_timestamp_op, - sample_filter.end_timestamp_op) - - if ts_range: - q['timestamp'] = ts_range - - if sample_filter.resource: - q['resource_id'] = sample_filter.resource - if sample_filter.source: - q['source'] = sample_filter.source - if sample_filter.message_id: - q['message_id'] = sample_filter.message_id - - # so the samples call metadata resource_metadata, so we convert - # to that. - q.update(dict( - ('resource_%s' % k, v) for (k, v) in six.iteritems( - improve_keys(sample_filter.metaquery, metaquery=True)))) - return q - - -def quote_key(key, reverse=False): - """Prepare key for storage data in MongoDB. - - :param key: key that should be quoted - :param reverse: boolean, True --- if we need a reverse order of the keys - parts - :return: iter of quoted part of the key - """ - r = -1 if reverse else 1 - - for k in key.split('.')[::r]: - if k.startswith('$'): - k = parse.quote(k) - yield k - - -def improve_keys(data, metaquery=False): - """Improves keys in dict if they contained '.' or started with '$'. 
- - :param data: is a dictionary where keys need to be checked and improved - :param metaquery: boolean, if True dots are not escaped from the keys - :return: improved dictionary if keys contained dots or started with '$': - {'a.b': 'v'} -> {'a': {'b': 'v'}} - {'$ab': 'v'} -> {'%24ab': 'v'} - """ - if not isinstance(data, dict): - return data - - if metaquery: - for key in six.iterkeys(data): - if '.$' in key: - key_list = [] - for k in quote_key(key): - key_list.append(k) - new_key = '.'.join(key_list) - data[new_key] = data.pop(key) - else: - for key, value in data.items(): - if isinstance(value, dict): - improve_keys(value) - if '.' in key: - new_dict = {} - for k in quote_key(key, reverse=True): - new = {} - new[k] = new_dict if new_dict else data.pop(key) - new_dict = new - data.update(new_dict) - else: - if key.startswith('$'): - new_key = parse.quote(key) - data[new_key] = data.pop(key) - return data - - -def unquote_keys(data): - """Restores initial view of 'quoted' keys in dictionary data - - :param data: is a dictionary - :return: data with restored keys if they were 'quoted'. - """ - if isinstance(data, dict): - for key, value in data.items(): - if isinstance(value, dict): - unquote_keys(value) - if key.startswith('%24'): - k = parse.unquote(key) - data[k] = data.pop(key) - return data - - class ConnectionPool(object): def __init__(self): @@ -272,130 +151,6 @@ class ConnectionPool(object): raise -class QueryTransformer(object): - - operators = {"<": "$lt", - ">": "$gt", - "<=": "$lte", - "=<": "$lte", - ">=": "$gte", - "=>": "$gte", - "!=": "$ne", - "in": "$in", - "=~": "$regex"} - - complex_operators = {"or": "$or", - "and": "$and"} - - ordering_functions = {"asc": pymongo.ASCENDING, - "desc": pymongo.DESCENDING} - - def transform_orderby(self, orderby): - orderby_filter = [] - - for field in orderby: - field_name = list(field.keys())[0] - ordering = self.ordering_functions[list(field.values())[0]] - orderby_filter.append((field_name, ordering)) - return orderby_filter - - @staticmethod - def _move_negation_to_leaf(condition): - """Moves every not operator to the leafs. - - Moving is going by applying the De Morgan rules and annihilating - double negations. 
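# Illustrative sketch (a compact restatement, not the in-place transform used
# by _move_negation_to_leaf): pushing 'not' down to the leaves with the
# De Morgan rules and cancelling double negation.
def negate(node):
    op = next(iter(node))
    if op in ('and', 'or'):
        other = 'or' if op == 'and' else 'and'
        return {other: [negate(child) for child in node[op]]}
    if op == 'not':
        return node[op]            # not(not(x)) -> x
    return {'not': node}           # simple terms stay negated at the leaf

# negate({'and': [{'=': {'state': 'ok'}}, {'>': {'volume': 1}}]})
#   -> {'or': [{'not': {'=': {'state': 'ok'}}}, {'not': {'>': {'volume': 1}}}]}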
- """ - def _apply_de_morgan(tree, negated_subtree, negated_op): - if negated_op == "and": - new_op = "or" - else: - new_op = "and" - - tree[new_op] = [{"not": child} - for child in negated_subtree[negated_op]] - del tree["not"] - - def transform(subtree): - op = list(subtree.keys())[0] - if op in ["and", "or"]: - [transform(child) for child in subtree[op]] - elif op == "not": - negated_tree = subtree[op] - negated_op = list(negated_tree.keys())[0] - if negated_op == "and": - _apply_de_morgan(subtree, negated_tree, negated_op) - transform(subtree) - elif negated_op == "or": - _apply_de_morgan(subtree, negated_tree, negated_op) - transform(subtree) - elif negated_op == "not": - # two consecutive not annihilates themselves - value = list(negated_tree.values())[0] - new_op = list(value.keys())[0] - subtree[new_op] = negated_tree[negated_op][new_op] - del subtree["not"] - transform(subtree) - - transform(condition) - - def transform_filter(self, condition): - # in Mongo not operator can only be applied to - # simple expressions so we have to move every - # not operator to the leafs of the expression tree - self._move_negation_to_leaf(condition) - return self._process_json_tree(condition) - - def _handle_complex_op(self, complex_op, nodes): - element_list = [] - for node in nodes: - element = self._process_json_tree(node) - element_list.append(element) - complex_operator = self.complex_operators[complex_op] - op = {complex_operator: element_list} - return op - - def _handle_not_op(self, negated_tree): - # assumes that not is moved to the leaf already - # so we are next to a leaf - negated_op = list(negated_tree.keys())[0] - negated_field = list(negated_tree[negated_op].keys())[0] - value = negated_tree[negated_op][negated_field] - if negated_op == "=": - return {negated_field: {"$ne": value}} - elif negated_op == "!=": - return {negated_field: value} - else: - return {negated_field: {"$not": - {self.operators[negated_op]: value}}} - - def _handle_simple_op(self, simple_op, nodes): - field_name = list(nodes.keys())[0] - field_value = list(nodes.values())[0] - - # no operator for equal in Mongo - if simple_op == "=": - op = {field_name: field_value} - return op - - operator = self.operators[simple_op] - op = {field_name: {operator: field_value}} - return op - - def _process_json_tree(self, condition_tree): - operator_node = list(condition_tree.keys())[0] - nodes = list(condition_tree.values())[0] - - if operator_node in self.complex_operators: - return self._handle_complex_op(operator_node, nodes) - - if operator_node == "not": - negated_tree = condition_tree[operator_node] - return self._handle_not_op(negated_tree) - - return self._handle_simple_op(operator_node, nodes) - - def safe_mongo_call(call): def closure(*args, **kwargs): # NOTE(idegtiarov) options max_retries and retry_interval have been @@ -507,135 +262,3 @@ class CursorProxy(pymongo.cursor.Cursor): def __getattr__(self, item): return getattr(self.cursor, item) - - -class AggregationFields(object): - def __init__(self, version, - group, - project, - finalize=None, - parametrized=False, - validate=None): - self._finalize = finalize or FINALIZE_FLOAT_LAMBDA - self.group = lambda *args: group(*args) if parametrized else group - self.project = (lambda *args: project(*args) - if parametrized else project) - self.version = version - self.validate = validate or (lambda name, param: True) - - def finalize(self, name, data, param=None): - field = ("%s" % name) + ("/%s" % param if param else "") - return {field: (self._finalize(data.get(field)) 
- if self._finalize else data.get(field))} - - -class Aggregation(object): - def __init__(self, name, aggregation_fields): - self.name = name - aggregation_fields = (aggregation_fields - if isinstance(aggregation_fields, list) - else [aggregation_fields]) - self.aggregation_fields = sorted(aggregation_fields, - key=lambda af: getattr(af, "version"), - reverse=True) - - def _get_compatible_aggregation_field(self, version_array): - if version_array: - version_array = version_array[0:2] - else: - version_array = MINIMUM_COMPATIBLE_MONGODB_VERSION - for aggregation_field in self.aggregation_fields: - if version_array >= aggregation_field.version: - return aggregation_field - - def group(self, param=None, version_array=None): - af = self._get_compatible_aggregation_field(version_array) - return af.group(param) - - def project(self, param=None, version_array=None): - af = self._get_compatible_aggregation_field(version_array) - return af.project(param) - - def finalize(self, data, param=None, version_array=None): - af = self._get_compatible_aggregation_field(version_array) - return af.finalize(self.name, data, param) - - def validate(self, param=None, version_array=None): - af = self._get_compatible_aggregation_field(version_array) - return af.validate(self.name, param) - -SUM_AGGREGATION = Aggregation( - "sum", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"sum": {"$sum": "$counter_volume"}}, - {"sum": "$sum"}, - )) -AVG_AGGREGATION = Aggregation( - "avg", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"avg": {"$avg": "$counter_volume"}}, - {"avg": "$avg"}, - )) -MIN_AGGREGATION = Aggregation( - "min", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"min": {"$min": "$counter_volume"}}, - {"min": "$min"}, - )) -MAX_AGGREGATION = Aggregation( - "max", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"max": {"$max": "$counter_volume"}}, - {"max": "$max"}, - )) -COUNT_AGGREGATION = Aggregation( - "count", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"count": {"$sum": 1}}, - {"count": "$count"}, - FINALIZE_INT_LAMBDA)) -STDDEV_AGGREGATION = Aggregation( - "stddev", - AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"std_square": { - "$sum": { - "$multiply": ["$counter_volume", - "$counter_volume"] - }}, - "std_count": {"$sum": 1}, - "std_sum": {"$sum": "$counter_volume"}}, - {"stddev": { - "count": "$std_count", - "sum": "$std_sum", - "square_sum": "$std_square"}}, - lambda stddev: ((stddev['square_sum'] - * stddev['count'] - - stddev["sum"] ** 2) ** 0.5 - / stddev['count']))) - -CARDINALITY_AGGREGATION = Aggregation( - "cardinality", - # $cond operator available only in MongoDB 2.6+ - [AggregationFields(COMPLETE_AGGREGATE_COMPATIBLE_VERSION, - lambda field: ({"cardinality/%s" % field: - {"$addToSet": "$%s" % field}}), - lambda field: { - "cardinality/%s" % field: { - "$cond": [ - {"$eq": ["$cardinality/%s" % field, None]}, - 0, - {"$size": "$cardinality/%s" % field}] - }}, - validate=CARDINALITY_VALIDATION, - parametrized=True), - AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - lambda field: ({"cardinality/%s" % field: - {"$addToSet": "$%s" % field}}), - lambda field: ({"cardinality/%s" % field: - "$cardinality/%s" % field}), - finalize=len, - validate=CARDINALITY_VALIDATION, - parametrized=True)] -) - - -def from_unix_timestamp(timestamp): - if (isinstance(timestamp, six.integer_types) or - isinstance(timestamp, float)): - return datetime.datetime.fromtimestamp(timestamp) - return timestamp diff --git 
a/ceilometer/storage/pymongo_base.py b/ceilometer/storage/pymongo_base.py deleted file mode 100644 index bbb49ac5..00000000 --- a/ceilometer/storage/pymongo_base.py +++ /dev/null @@ -1,178 +0,0 @@ -# -# Copyright Ericsson AB 2013. All rights reserved -# -# Authors: Ildiko Vancsa -# Balazs Gibizer -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Common functions for MongoDB backend.""" -import pymongo - -from ceilometer.storage import base -from ceilometer.storage import models -from ceilometer.storage.mongo import utils as pymongo_utils -from ceilometer import utils - - -COMMON_AVAILABLE_CAPABILITIES = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, -} - - -AVAILABLE_STORAGE_CAPABILITIES = { - 'storage': {'production_ready': True}, -} - - -class Connection(base.Connection): - """Base Connection class for MongoDB driver.""" - CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, - COMMON_AVAILABLE_CAPABILITIES) - - STORAGE_CAPABILITIES = utils.update_nested( - base.Connection.STORAGE_CAPABILITIES, - AVAILABLE_STORAGE_CAPABILITIES, - ) - - def get_meters(self, user=None, project=None, resource=None, source=None, - metaquery=None, limit=None, unique=False): - """Return an iterable of models.Meter instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param resource: Optional resource filter. - :param source: Optional source filter. - :param metaquery: Optional dict with metadata to match on. - :param limit: Maximum number of results to return. - :param unique: If set to true, return only unique meter information. - """ - if limit == 0: - return - - metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} - - q = {} - if user == 'None': - q['user_id'] = None - elif user is not None: - q['user_id'] = user - if project == 'None': - q['project_id'] = None - elif project is not None: - q['project_id'] = project - if resource == 'None': - q['_id'] = None - elif resource is not None: - q['_id'] = resource - if source is not None: - q['source'] = source - q.update(metaquery) - - count = 0 - if unique: - meter_names = set() - - for r in self.db.resource.find(q): - for r_meter in r['meter']: - if unique: - if r_meter['counter_name'] in meter_names: - continue - else: - meter_names.add(r_meter['counter_name']) - - if limit and count >= limit: - return - else: - count += 1 - - if unique: - yield models.Meter( - name=r_meter['counter_name'], - type=r_meter['counter_type'], - # Return empty string if 'counter_unit' is not valid - # for backward compatibility. - unit=r_meter.get('counter_unit', ''), - resource_id=None, - project_id=None, - source=None, - user_id=None) - else: - yield models.Meter( - name=r_meter['counter_name'], - type=r_meter['counter_type'], - # Return empty string if 'counter_unit' is not valid - # for backward compatibility. 
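# Illustrative sketch of the backward-compatibility clean-up that
# _retrieve_samples() (further down in this hunk) applied to each raw
# document from the 'meter' collection before building models.Sample;
# the dict layout is assumed from the code below.
def normalize_sample_doc(doc):
    doc = dict(doc)
    doc.pop('_id', None)                                  # drop the ObjectId
    doc['counter_unit'] = doc.get('counter_unit', '')     # old samples lack a unit
    doc['counter_volume'] = float(doc['counter_volume'])  # MongoDB 3.+ compatibility
    doc['recorded_at'] = doc.get('recorded_at')           # tolerate older datapoints
    return doc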
- unit=r_meter.get('counter_unit', ''), - resource_id=r['_id'], - project_id=r['project_id'], - source=r['source'], - user_id=r['user_id']) - - def get_samples(self, sample_filter, limit=None): - """Return an iterable of model.Sample instances. - - :param sample_filter: Filter. - :param limit: Maximum number of results to return. - """ - if limit == 0: - return [] - q = pymongo_utils.make_query_from_filter(sample_filter, - require_meter=False) - - return self._retrieve_samples(q, - [("timestamp", pymongo.DESCENDING)], - limit) - - def query_samples(self, filter_expr=None, orderby=None, limit=None): - if limit == 0: - return [] - query_filter = {} - orderby_filter = [("timestamp", pymongo.DESCENDING)] - transformer = pymongo_utils.QueryTransformer() - if orderby is not None: - orderby_filter = transformer.transform_orderby(orderby) - if filter_expr is not None: - query_filter = transformer.transform_filter(filter_expr) - - return self._retrieve_samples(query_filter, orderby_filter, limit) - - def _retrieve_samples(self, query, orderby, limit): - if limit is not None: - samples = self.db.meter.find(query, - limit=limit, - sort=orderby) - else: - samples = self.db.meter.find(query, - sort=orderby) - - for s in samples: - # Remove the ObjectId generated by the database when - # the sample was inserted. It is an implementation - # detail that should not leak outside of the driver. - del s['_id'] - # Backward compatibility for samples without units - s['counter_unit'] = s.get('counter_unit', '') - # Compatibility with MongoDB 3.+ - s['counter_volume'] = float(s.get('counter_volume')) - # Tolerate absence of recorded_at in older datapoints - s['recorded_at'] = s.get('recorded_at') - # Check samples for metadata and "unquote" key if initially it - # was started with '$'. - if s.get('resource_metadata'): - s['resource_metadata'] = pymongo_utils.unquote_keys( - s.get('resource_metadata')) - yield models.Sample(**s) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/README b/ceilometer/storage/sqlalchemy/migrate_repo/README deleted file mode 100644 index 42bddd18..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/README +++ /dev/null @@ -1,4 +0,0 @@ -sqlalchemy-migrate is DEPRECATED. - -All new migrations should be written using alembic. -Please see ceilometer/storage/sqlalchemy/alembic/README diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/__init__.py b/ceilometer/storage/sqlalchemy/migrate_repo/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/manage.py b/ceilometer/storage/sqlalchemy/migrate_repo/manage.py deleted file mode 100644 index 39fa3892..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/manage.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env python -from migrate.versioning.shell import main - -if __name__ == '__main__': - main(debug='False') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg b/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg deleted file mode 100644 index cd16764f..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg +++ /dev/null @@ -1,25 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=ceilometer - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. 
-# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] - -# When creating new change scripts, Migrate will stamp the new script with -# a version number. By default this is latest_version + 1. You can set this -# to 'true' to tell Migrate to use the UTC timestamp instead. -use_timestamp_numbering=False diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py deleted file mode 100644 index 1032cb40..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py +++ /dev/null @@ -1,95 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Column -from sqlalchemy import DateTime -from sqlalchemy import Index -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy import UniqueConstraint - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - - meter = Table( - 'meter', meta, - Column('id', Integer, primary_key=True, index=True), - Column('counter_name', String(255)), - Column('user_id', String(255), index=True), - Column('project_id', String(255), index=True), - Column('resource_id', String(255)), - Column('resource_metadata', String(5000)), - Column('counter_type', String(255)), - Column('counter_volume', Integer), - Column('counter_duration', Integer), - Column('timestamp', DateTime(timezone=False), index=True), - Column('message_signature', String(1000)), - Column('message_id', String(1000)), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - resource = Table( - 'resource', meta, - Column('id', String(255), primary_key=True, index=True), - Column('resource_metadata', String(5000)), - Column('project_id', String(255), index=True), - Column('received_timestamp', DateTime(timezone=False)), - Column('timestamp', DateTime(timezone=False), index=True), - Column('user_id', String(255), index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - user = Table( - 'user', meta, - Column('id', String(255), primary_key=True, index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - project = Table( - 'project', meta, - Column('id', String(255), primary_key=True, index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - sourceassoc = Table( - 'sourceassoc', meta, - Column('source_id', String(255), index=True), - Column('user_id', String(255)), - Column('project_id', String(255)), - Column('resource_id', String(255)), - Column('meter_id', Integer), - Index('idx_su', 'source_id', 'user_id'), - Index('idx_sp', 'source_id', 'project_id'), - Index('idx_sr', 'source_id', 'resource_id'), - Index('idx_sm', 'source_id', 'meter_id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - source = Table( - 'source', meta, - Column('id', String(255), primary_key=True, index=True), - UniqueConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - tables = [meter, project, resource, user, source, sourceassoc] - for i in sorted(tables, key=lambda table: table.fullname): - i.create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py deleted file mode 100644 index 667654ef..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Column -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - duration = Column('counter_duration', Integer) - meter.drop_column(duration) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py deleted file mode 100644 index fecd65c5..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2012 Canonical. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def upgrade(migrate_engine): - - if migrate_engine.name == "mysql": - tables = ['meter', 'user', 'resource', 'project', 'source', - 'sourceassoc'] - migrate_engine.execute("SET foreign_key_checks = 0") - - for table in tables: - migrate_engine.execute( - "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table) - migrate_engine.execute("SET foreign_key_checks = 1") - migrate_engine.execute( - "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % - migrate_engine.url.database) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py deleted file mode 100644 index ac4b1cb6..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - unit = Column('counter_unit', String(255)) - meter.create_column(unit) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py deleted file mode 100644 index d85c7d73..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from sqlalchemy import MetaData, Table, Column, DateTime - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('resource', meta, autoload=True) - timestamp = Column('timestamp', DateTime) - resource.drop_column(timestamp) - received_timestamp = Column('received_timestamp', DateTime) - resource.drop_column(received_timestamp) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py deleted file mode 100644 index 36a44846..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2013 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Float -from sqlalchemy import MetaData -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - meter.c.counter_volume.alter(type=Float(53)) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py deleted file mode 100644 index 55f7f820..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright 2013 eNovance -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import MetaData, Table, Column, Text -from sqlalchemy import Boolean, Integer, String, DateTime, Float - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alarm = Table( - 'alarm', meta, - Column('id', String(255), primary_key=True, index=True), - Column('enabled', Boolean), - Column('name', Text()), - Column('description', Text()), - Column('timestamp', DateTime(timezone=False)), - Column('counter_name', String(255), index=True), - Column('user_id', String(255), index=True), - Column('project_id', String(255), index=True), - Column('comparison_operator', String(2)), - Column('threshold', Float), - Column('statistic', String(255)), - Column('evaluation_periods', Integer), - Column('period', Integer), - Column('state', String(255)), - Column('state_timestamp', DateTime(timezone=False)), - Column('ok_actions', Text()), - Column('alarm_actions', Text()), - Column('insufficient_data_actions', Text()), - Column('matching_metadata', Text()), - mysql_engine='InnoDB', - mysql_charset='utf8') - alarm.create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py deleted file mode 100644 index 68119f4a..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Column -from sqlalchemy import Float -from sqlalchemy import ForeignKey -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - - unique_name = Table( - 'unique_name', meta, - Column('id', Integer, primary_key=True), - Column('key', String(32), index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - unique_name.create() - - event = Table( - 'event', meta, - Column('id', Integer, primary_key=True), - Column('generated', Float(asdecimal=True), index=True), - Column('unique_name_id', Integer, ForeignKey('unique_name.id')), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - event.create() - - trait = Table( - 'trait', meta, - Column('id', Integer, primary_key=True), - Column('name_id', Integer, ForeignKey('unique_name.id')), - Column('t_type', Integer, index=True), - Column('t_string', String(32), nullable=True, default=None, - index=True), - Column('t_float', Float, nullable=True, default=None, index=True), - Column('t_int', Integer, nullable=True, default=None, index=True), - Column('t_datetime', Float(asdecimal=True), nullable=True, - default=None, index=True), - Column('event_id', Integer, ForeignKey('event.id')), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - trait.create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py deleted file mode 100644 index b02f781a..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import Table -from sqlalchemy import VARCHAR - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - name = Table('unique_name', meta, autoload=True) - name.c.key.alter(type=VARCHAR(length=255)) - trait = Table('trait', meta, autoload=True) - trait.c.t_string.alter(type=VARCHAR(length=255)) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py deleted file mode 100644 index 1ca58c6f..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py +++ /dev/null @@ -1,23 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy as sa - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - meter = sa.Table('meter', meta, autoload=True) - index = sa.Index('idx_meter_rid_cname', meter.c.resource_id, - meter.c.counter_name) - index.create(bind=migrate_engine) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py deleted file mode 100644 index f5f2728a..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py +++ /dev/null @@ -1,37 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Index, MetaData, Table - - -INDEXES = { - # `table_name`: ((`index_name`, `column`),) - "user": (('ix_user_id', 'id'),), - "source": (('ix_source_id', 'id'),), - "project": (('ix_project_id', 'id'),), - "meter": (('ix_meter_id', 'id'),), - "alarm": (('ix_alarm_id', 'id'),), - "resource": (('ix_resource_id', 'id'),) -} - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - load_tables = dict((table_name, Table(table_name, meta, autoload=True)) - for table_name in INDEXES.keys()) - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for index_name, column in indexes: - index = Index(index_name, table.c[column]) - index.drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py deleted file mode 100644 index fa77c311..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py +++ /dev/null @@ -1,58 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from migrate import ForeignKeyConstraint -from sqlalchemy import MetaData, Table -from sqlalchemy.sql.expression import select - -TABLES = ['resource', 'sourceassoc', 'user', - 'project', 'meter', 'source', 'alarm'] - -INDEXES = { - "resource": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "sourceassoc": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id'), - ('resource_id', 'resource', 'id'), - ('meter_id', 'meter', 'id'), - ('source_id', 'source', 'id')), - "alarm": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "meter": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id'), - ('resource_id', 'resource', 'id'),) -} - - -def upgrade(migrate_engine): - if migrate_engine.name == 'sqlite': - return - meta = MetaData(bind=migrate_engine) - load_tables = dict((table_name, Table(table_name, meta, autoload=True)) - for table_name in TABLES) - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - subq = select([getattr(ref_table.c, ref_column_name)]) - sql_del = table.delete().where( - ~ getattr(table.c, column).in_(subq)) - migrate_engine.execute(sql_del) - - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', table_name, column)) - fkey = ForeignKeyConstraint(**params) - fkey.create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py deleted file mode 100644 index c35ba173..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py +++ /dev/null @@ -1,23 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - alarm = Table('alarm', meta, autoload=True) - alarm.c.counter_name.alter(name='meter_name') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py deleted file mode 100644 index f3c0c09f..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate.changeset.constraint import UniqueConstraint -import sqlalchemy - - -def upgrade(migrate_engine): - meta = sqlalchemy.MetaData(bind=migrate_engine) - - event = sqlalchemy.Table('event', meta, autoload=True) - message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50)) - event.create_column(message_id) - - cons = UniqueConstraint('message_id', table=event) - cons.create() - - index = sqlalchemy.Index('idx_event_message_id', event.c.message_id) - index.create(bind=migrate_engine) - - # Populate the new column ... - trait = sqlalchemy.Table('trait', meta, autoload=True) - unique_name = sqlalchemy.Table('unique_name', meta, autoload=True) - join = trait.join(unique_name, unique_name.c.id == trait.c.name_id) - traits = sqlalchemy.select([trait.c.event_id, trait.c.t_string], - whereclause=(unique_name.c.key == 'message_id'), - from_obj=join) - - for event_id, value in traits.execute(): - (event.update().where(event.c.id == event_id).values(message_id=value). - execute()) - - # Leave the Trait, makes the rollback easier and won't really hurt anyone. diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py deleted file mode 100644 index 9a9f07ad..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from migrate import ForeignKeyConstraint -from sqlalchemy import MetaData, Table, Column, Index -from sqlalchemy import String, DateTime - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - project = Table('project', meta, autoload=True) - user = Table('user', meta, autoload=True) - - alarm_history = Table( - 'alarm_history', meta, - Column('event_id', String(255), primary_key=True, index=True), - Column('alarm_id', String(255)), - Column('on_behalf_of', String(255)), - Column('project_id', String(255)), - Column('user_id', String(255)), - Column('type', String(20)), - Column('detail', String(255)), - Column('timestamp', DateTime(timezone=False)), - mysql_engine='InnoDB', - mysql_charset='utf8') - - alarm_history.create() - - if migrate_engine.name in ['mysql', 'postgresql']: - indices = [Index('ix_alarm_history_alarm_id', - alarm_history.c.alarm_id), - Index('ix_alarm_history_on_behalf_of', - alarm_history.c.on_behalf_of), - Index('ix_alarm_history_project_id', - alarm_history.c.project_id), - Index('ix_alarm_history_on_user_id', - alarm_history.c.user_id)] - - for index in indices: - index.create(migrate_engine) - - fkeys = [ForeignKeyConstraint(columns=[alarm_history.c.on_behalf_of], - refcolumns=[project.c.id]), - ForeignKeyConstraint(columns=[alarm_history.c.project_id], - refcolumns=[project.c.id]), - ForeignKeyConstraint(columns=[alarm_history.c.user_id], - refcolumns=[user.c.id])] - for fkey in fkeys: - fkey.create(engine=migrate_engine) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py deleted file mode 100644 index f82ab5ec..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json - -from sqlalchemy import MetaData, Table, Column, Index -from sqlalchemy import String, Text - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - table = Table('alarm', meta, autoload=True) - - type = Column('type', String(50), default='threshold') - type.create(table, populate_default=True) - - rule = Column('rule', Text()) - rule.create(table) - - for row in table.select().execute().fetchall(): - query = [] - if row.matching_metadata is not None: - matching_metadata = json.loads(row.matching_metadata) - for key in matching_metadata: - query.append({'field': key, - 'op': 'eq', - 'value': matching_metadata[key]}) - rule = { - 'meter_name': row.meter_name, - 'comparison_operator': row.comparison_operator, - 'threshold': row.threshold, - 'statistic': row.statistic, - 'evaluation_periods': row.evaluation_periods, - 'period': row.period, - 'query': query - } - table.update().where(table.c.id == row.id).values(rule=rule).execute() - - index = Index('ix_alarm_counter_name', table.c.meter_name) - index.drop(bind=migrate_engine) - table.c.meter_name.drop() - table.c.comparison_operator.drop() - table.c.threshold.drop() - table.c.statistic.drop() - table.c.evaluation_periods.drop() - table.c.period.drop() - table.c.matching_metadata.drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py deleted file mode 100644 index f5e58d94..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py +++ /dev/null @@ -1,54 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa - -from ceilometer.storage.sqlalchemy import migration -from ceilometer.storage.sqlalchemy import models - -_col = 'timestamp' - - -def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): - temp_col_n = 'convert_data_type_temp_col' - # Override column we're going to convert with from_t, since the type we're - # replacing could be custom and we need to tell SQLALchemy how to perform - # CRUD operations with it. - table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), - extend_existing=True) - sa.Column(temp_col_n, to_t).create(table) - - key_attr = getattr(table.c, pk_attr) - orig_col = getattr(table.c, col) - new_col = getattr(table.c, temp_col_n) - - query = sa.select([key_attr, orig_col]) - for key, value in migration.paged(query): - (table.update().where(key_attr == key).values({temp_col_n: value}). 
- execute()) - - orig_col.drop() - new_col.alter(name=col) - if index: - sa.Index('ix_%s_%s' % (table.name, col), new_col).create() - - -def upgrade(migrate_engine): - if migrate_engine.name == 'mysql': - meta = sa.MetaData(bind=migrate_engine) - meter = sa.Table('meter', meta, autoload=True) - _convert_data_type(meter, _col, sa.DateTime(), - models.PreciseTimestamp(), - pk_attr='id', index=True) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py deleted file mode 100644 index 76c1fa2a..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py +++ /dev/null @@ -1,26 +0,0 @@ - -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import Table -from sqlalchemy import Text - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('resource', meta, autoload=True) - resource.c.resource_metadata.alter(type=Text) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py deleted file mode 100644 index 539d02fa..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py +++ /dev/null @@ -1,26 +0,0 @@ - -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import Table -from sqlalchemy import Text - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alm_hist = Table('alarm_history', meta, autoload=True) - alm_hist.c.detail.alter(type=Text) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py deleted file mode 100644 index 0748dcff..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py +++ /dev/null @@ -1,68 +0,0 @@ -# -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import json - -import six -from sqlalchemy import Boolean -from sqlalchemy import Column -from sqlalchemy import Float -from sqlalchemy import ForeignKey -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy.sql import select -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy import Text - -from ceilometer import utils - -tables = [('metadata_text', Text, True), - ('metadata_bool', Boolean, False), - ('metadata_int', Integer, False), - ('metadata_float', Float, False)] - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - meta_tables = {} - for t_name, t_type, t_nullable in tables: - meta_tables[t_name] = Table( - t_name, meta, - Column('id', Integer, ForeignKey('meter.id'), primary_key=True), - Column('meta_key', String(255), index=True, primary_key=True), - Column('value', t_type, nullable=t_nullable), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - meta_tables[t_name].create() - - for row in select([meter]).execute(): - if row['resource_metadata']: - meter_id = row['id'] - rmeta = json.loads(row['resource_metadata']) - for key, v in utils.dict_to_keyval(rmeta): - ins = None - if isinstance(v, six.string_types) or v is None: - ins = meta_tables['metadata_text'].insert() - elif isinstance(v, bool): - ins = meta_tables['metadata_bool'].insert() - elif isinstance(v, six.integer_types): - ins = meta_tables['metadata_int'].insert() - elif isinstance(v, float): - ins = meta_tables['metadata_float'].insert() - if ins is not None: - ins.values(id=meter_id, meta_key=key, value=v).execute() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py deleted file mode 100644 index 056f3f5c..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py +++ /dev/null @@ -1,77 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from migrate import ForeignKeyConstraint -from sqlalchemy import Column -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import select -from sqlalchemy import String -from sqlalchemy import Table - -from ceilometer.storage.sqlalchemy import migration - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - event_type = Table( - 'event_type', meta, - Column('id', Integer, primary_key=True), - Column('desc', String(255), unique=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - event_type.create() - event = Table('event', meta, autoload=True) - unique_name = Table('unique_name', meta, autoload=True) - - # Event type is a specialization of Unique name, so - # we insert into the event_type table all the distinct - # unique names from the event.unique_name field along - # with the key from the unique_name table, and - # then rename the event.unique_name field to event.event_type - conn = migrate_engine.connect() - sql = ("INSERT INTO event_type " - "SELECT unique_name.id, unique_name.key FROM event " - "INNER JOIN unique_name " - "ON event.unique_name_id = unique_name.id " - "GROUP BY unique_name.id") - conn.execute(sql) - conn.close() - # Now we need to drop the foreign key constraint, rename - # the event.unique_name column, and re-add a new foreign - # key constraint - params = {'columns': [event.c.unique_name_id], - 'refcolumns': [unique_name.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = "event_ibfk_1" - fkey = ForeignKeyConstraint(**params) - fkey.drop() - - Column('event_type_id', Integer).create(event) - - # Move data from unique_name_id column into event_type_id column - # and delete the entry from the unique_name table - query = select([event.c.id, event.c.unique_name_id]) - for key, value in migration.paged(query): - (event.update().where(event.c.id == key). 
- values({"event_type_id": value}).execute()) - unique_name.delete().where(unique_name.c.id == key).execute() - - params = {'columns': [event.c.event_type_id], - 'refcolumns': [event_type.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', 'event_type', 'id')) - fkey = ForeignKeyConstraint(**params) - fkey.create() - - event.c.unique_name_id.drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql b/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql deleted file mode 100644 index 19030113..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql +++ /dev/null @@ -1,29 +0,0 @@ -CREATE TABLE event_type ( - id INTEGER PRIMARY KEY ASC, - desc STRING NOT NULL -); - -INSERT INTO event_type -SELECT un.id, un.key -FROM unique_name un -JOIN event e ON un.id = e.unique_name_id -GROUP BY un.id; - -ALTER TABLE event RENAME TO event_orig; - -CREATE TABLE event ( - id INTEGER PRIMARY KEY ASC, - generated FLOAT NOT NULL, - message_id VARCHAR(50) UNIQUE, - event_type_id INTEGER NOT NULL, - FOREIGN KEY (event_type_id) REFERENCES event_type (id) -); - -INSERT INTO event -SELECT id, generated, message_id, unique_name_id -FROM event_orig; - -DROP TABLE event_orig; - -DELETE FROM unique_name -WHERE id IN (SELECT id FROM event_type); diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py deleted file mode 100644 index ebbb6e0c..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py +++ /dev/null @@ -1,26 +0,0 @@ - -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import BigInteger -from sqlalchemy import MetaData -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('metadata_int', meta, autoload=True) - resource.c.value.alter(type=BigInteger) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py deleted file mode 100644 index 23c864bc..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py +++ /dev/null @@ -1,86 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from migrate import ForeignKeyConstraint -from sqlalchemy import Column -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import select -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy import UniqueConstraint - -from ceilometer.storage.sqlalchemy import migration - - -def upgrade(migrate_engine): - meta = MetaData(migrate_engine) - trait_type = Table( - 'trait_type', meta, - Column('id', Integer, primary_key=True), - Column('desc', String(255)), - Column('data_type', Integer), - UniqueConstraint('desc', 'data_type', name="tt_unique"), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - trait = Table('trait', meta, autoload=True) - unique_name = Table('unique_name', meta, autoload=True) - trait_type.create(migrate_engine) - # Trait type extracts data from Trait and Unique name. - # We take all trait names from Unique Name, and data types - # from Trait. We then remove dtype and name from trait, and - # remove the name field. - - conn = migrate_engine.connect() - sql = ("INSERT INTO trait_type " - "SELECT unique_name.id, unique_name.key, trait.t_type FROM trait " - "INNER JOIN unique_name " - "ON trait.name_id = unique_name.id " - "GROUP BY unique_name.id, unique_name.key, trait.t_type") - conn.execute(sql) - conn.close() - - # Now we need to drop the foreign key constraint, rename - # the trait.name column, and re-add a new foreign - # key constraint - params = {'columns': [trait.c.name_id], - 'refcolumns': [unique_name.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = "trait_ibfk_1" # foreign key to the unique name table - fkey = ForeignKeyConstraint(**params) - fkey.drop() - - Column('trait_type_id', Integer).create(trait) - - # Move data from name_id column into trait_type_id column - query = select([trait.c.id, trait.c.name_id]) - for key, value in migration.paged(query): - (trait.update().where(trait.c.id == key). - values({"trait_type_id": value}).execute()) - - trait.c.name_id.drop() - - params = {'columns': [trait.c.trait_type_id], - 'refcolumns': [trait_type.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', 'trait_type', 'id')) - - fkey = ForeignKeyConstraint(**params) - fkey.create() - - # Drop the t_type column to data_type. - trait.c.t_type.drop() - - # Finally, drop the unique_name table - we don't need it - # anymore. 
- unique_name.drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql b/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql deleted file mode 100644 index ac4dfc7f..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql +++ /dev/null @@ -1,34 +0,0 @@ -ALTER TABLE trait RENAME TO trait_orig; - -CREATE TABLE trait_type ( - id INTEGER PRIMARY KEY ASC, - 'desc' STRING NOT NULL, - data_type INTEGER NOT NULL, - UNIQUE ('desc', data_type) -); - -INSERT INTO trait_type -SELECT un.id, un.key, t.t_type -FROM unique_name un -JOIN trait_orig t ON un.id = t.name_id -GROUP BY un.id; - -CREATE TABLE trait ( - id INTEGER PRIMARY KEY ASC, - t_string VARCHAR(255), - t_int INTEGER, - t_float FLOAT, - t_datetime FLOAT, - trait_type_id INTEGER NOT NULL, - event_id INTEGER NOT NULL, - FOREIGN KEY (trait_type_id) REFERENCES trait_type (id) - FOREIGN KEY (event_id) REFERENCES event (id) -); - -INSERT INTO trait -SELECT t.id, t.t_string, t.t_int, t.t_float, t.t_datetime, t.name_id, - t.event_id -FROM trait_orig t; - -DROP TABLE trait_orig; -DROP TABLE unique_name; \ No newline at end of file diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py deleted file mode 100644 index e97f24bb..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py +++ /dev/null @@ -1,56 +0,0 @@ -# -# Copyright 2013 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa - -from ceilometer.storage.sqlalchemy import migration -from ceilometer.storage.sqlalchemy import models - - -def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): - temp_col_n = 'convert_data_type_temp_col' - # Override column we're going to convert with from_t, since the type we're - # replacing could be custom and we need to tell SQLALchemy how to perform - # CRUD operations with it. - table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), - extend_existing=True) - sa.Column(temp_col_n, to_t).create(table) - - key_attr = getattr(table.c, pk_attr) - orig_col = getattr(table.c, col) - new_col = getattr(table.c, temp_col_n) - - query = sa.select([key_attr, orig_col]) - for key, value in migration.paged(query): - (table.update().where(key_attr == key).values({temp_col_n: value}). 
- execute()) - - orig_col.drop() - new_col.alter(name=col) - if index: - sa.Index('ix_%s_%s' % (table.name, col), new_col).create() - - -def upgrade(migrate_engine): - if migrate_engine.name == 'mysql': - meta = sa.MetaData(bind=migrate_engine) - event = sa.Table('event', meta, autoload=True) - _convert_data_type(event, 'generated', sa.Float(), - models.PreciseTimestamp(), - pk_attr='id', index=True) - trait = sa.Table('trait', meta, autoload=True) - _convert_data_type(trait, 't_datetime', sa.Float(), - models.PreciseTimestamp(), - pk_attr='id', index=True) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py deleted file mode 100644 index 457a9fd5..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py +++ /dev/null @@ -1,58 +0,0 @@ -# -# Copyright 2013 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa - -from ceilometer.storage.sqlalchemy import migration -from ceilometer.storage.sqlalchemy import models - - -def _convert_data_type(table, col, from_t, to_t, pk_attr='id'): - temp_col_n = 'convert_data_type_temp_col' - # Override column we're going to convert with from_t, since the type we're - # replacing could be custom and we need to tell SQLALchemy how to perform - # CRUD operations with it. - table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), - extend_existing=True) - sa.Column(temp_col_n, to_t).create(table) - - key_attr = getattr(table.c, pk_attr) - orig_col = getattr(table.c, col) - new_col = getattr(table.c, temp_col_n) - - query = sa.select([key_attr, orig_col]) - for key, value in migration.paged(query): - (table.update().where(key_attr == key).values({temp_col_n: value}). - execute()) - - orig_col.drop() - new_col.alter(name=col) - - -to_convert = [ - ('alarm', 'timestamp', 'id'), - ('alarm', 'state_timestamp', 'id'), - ('alarm_history', 'timestamp', 'alarm_id'), -] - - -def upgrade(migrate_engine): - if migrate_engine.name == 'mysql': - meta = sa.MetaData(bind=migrate_engine) - for table_name, col_name, pk_attr in to_convert: - table = sa.Table(table_name, meta, autoload=True) - _convert_data_type(table, col_name, sa.DateTime(), - models.PreciseTimestamp(), - pk_attr=pk_attr) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py deleted file mode 100644 index 959c1fb6..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Float -from sqlalchemy import MetaData -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - metadata_float = Table('metadata_float', meta, autoload=True) - metadata_float.c.value.alter(type=Float(53)) - trait = Table('trait', meta, autoload=True) - trait.c.t_float.alter(type=Float(53)) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py deleted file mode 100644 index 98377628..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright 2014 Intel Crop. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate import ForeignKeyConstraint -from sqlalchemy import MetaData, Table - -TABLES = ['user', 'project', 'alarm'] - -INDEXES = { - "alarm": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), -} - - -def upgrade(migrate_engine): - if migrate_engine.name == 'sqlite': - return - meta = MetaData(bind=migrate_engine) - load_tables = dict((table_name, Table(table_name, meta, autoload=True)) - for table_name in TABLES) - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', table_name, column)) - fkey = ForeignKeyConstraint(**params) - fkey.drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py deleted file mode 100644 index 1778a0b2..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py +++ /dev/null @@ -1,138 +0,0 @@ -# -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import migrate -import sqlalchemy as sa - - -def get_alembic_version(meta): - """Return Alembic version or None if no Alembic table exists.""" - try: - a_ver = sa.Table( - 'alembic_version', - meta, - autoload=True) - return sa.select([a_ver.c.version_num]).scalar() - except sa.exc.NoSuchTableError: - return None - - -def delete_alembic(meta): - try: - sa.Table( - 'alembic_version', - meta, - autoload=True).drop(checkfirst=True) - except sa.exc.NoSuchTableError: - pass - - -INDEXES = ( - # ([dialects], table_name, index_name, create/delete, uniq/not_uniq) - (['mysql', 'sqlite', 'postgresql'], - 'resource', - 'resource_user_id_project_id_key', - ('user_id', 'project_id'), True, False, True), - (['mysql'], 'source', 'id', ('id',), False, True, False)) - - -def index_cleanup(meta, table_name, uniq_name, columns, - create, unique, limited): - table = sa.Table(table_name, meta, autoload=True) - if create: - if limited and meta.bind.engine.name == 'mysql': - # For some versions of mysql we can get an error - # "Specified key was too long; max key length is 1000 bytes". - # We should create an index by hand in this case with limited - # length of columns. - columns_mysql = ",".join((c + "(100)" for c in columns)) - sql = ("create index %s ON %s (%s)" % (uniq_name, table, - columns_mysql)) - meta.bind.engine.execute(sql) - else: - cols = [table.c[col] for col in columns] - sa.Index(uniq_name, *cols, unique=unique).create() - else: - if unique: - migrate.UniqueConstraint(*columns, table=table, - name=uniq_name).drop() - else: - cols = [table.c[col] for col in columns] - sa.Index(uniq_name, *cols).drop() - - -def change_uniq(meta): - uniq_name = 'uniq_sourceassoc0meter_id0user_id' - columns = ('meter_id', 'user_id') - - if meta.bind.engine.name == 'sqlite': - return - - sourceassoc = sa.Table('sourceassoc', meta, autoload=True) - meter = sa.Table('meter', meta, autoload=True) - user = sa.Table('user', meta, autoload=True) - if meta.bind.engine.name == 'mysql': - # For mysql dialect all dependent FK should be removed - # before renaming of constraint. 
- params = {'columns': [sourceassoc.c.meter_id], - 'refcolumns': [meter.c.id], - 'name': 'fk_sourceassoc_meter_id'} - migrate.ForeignKeyConstraint(**params).drop() - params = {'columns': [sourceassoc.c.user_id], - 'refcolumns': [user.c.id], - 'name': 'fk_sourceassoc_user_id'} - migrate.ForeignKeyConstraint(**params).drop() - - migrate.UniqueConstraint(*columns, table=sourceassoc, - name=uniq_name).create() - if meta.bind.engine.name == 'mysql': - params = {'columns': [sourceassoc.c.meter_id], - 'refcolumns': [meter.c.id], - 'name': 'fk_sourceassoc_meter_id'} - migrate.ForeignKeyConstraint(**params).create() - params = {'columns': [sourceassoc.c.user_id], - 'refcolumns': [user.c.id], - 'name': 'fk_sourceassoc_user_id'} - migrate.ForeignKeyConstraint(**params).create() - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - a_ver = get_alembic_version(meta) - - if not a_ver: - alarm = sa.Table('alarm', meta, autoload=True) - repeat_act = sa.Column('repeat_actions', sa.Boolean, - server_default=sa.sql.expression.false()) - alarm.create_column(repeat_act) - a_ver = '43b1a023dfaa' - - if a_ver == '43b1a023dfaa': - meter = sa.Table('meter', meta, autoload=True) - meter.c.resource_metadata.alter(type=sa.Text) - a_ver = '17738166b91' - - if a_ver == '17738166b91': - for (engine_names, table_name, uniq_name, - columns, create, uniq, limited) in INDEXES: - if migrate_engine.name in engine_names: - index_cleanup(meta, table_name, uniq_name, - columns, create, uniq, limited) - a_ver = 'b6ae66d05e3' - - if a_ver == 'b6ae66d05e3': - change_uniq(meta) - - delete_alembic(meta) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py deleted file mode 100644 index 0c692bfa..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_utils import timeutils -import sqlalchemy - -from ceilometer.storage.sqlalchemy import models - - -def upgrade(migrate_engine): - meta = sqlalchemy.MetaData(bind=migrate_engine) - meter = sqlalchemy.Table('meter', meta, autoload=True) - c = sqlalchemy.Column('recorded_at', models.PreciseTimestamp(), - default=timeutils.utcnow) - meter.create_column(c) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py deleted file mode 100644 index 39ecf057..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py +++ /dev/null @@ -1,110 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import migrate -import sqlalchemy as sa - - -def _handle_meter_indices(meta): - if meta.bind.engine.name == 'sqlite': - return - - resource = sa.Table('resource', meta, autoload=True) - project = sa.Table('project', meta, autoload=True) - user = sa.Table('user', meta, autoload=True) - meter = sa.Table('meter', meta, autoload=True) - - indices = [(sa.Index('ix_meter_timestamp', meter.c.timestamp), - sa.Index('ix_sample_timestamp', meter.c.timestamp)), - (sa.Index('ix_meter_user_id', meter.c.user_id), - sa.Index('ix_sample_user_id', meter.c.user_id)), - (sa.Index('ix_meter_project_id', meter.c.project_id), - sa.Index('ix_sample_project_id', meter.c.project_id)), - (sa.Index('idx_meter_rid_cname', meter.c.resource_id, - meter.c.counter_name), - sa.Index('idx_sample_rid_cname', meter.c.resource_id, - meter.c.counter_name))] - - fk_params = [({'columns': [meter.c.resource_id], - 'refcolumns': [resource.c.id]}, - 'fk_meter_resource_id', - 'fk_sample_resource_id'), - ({'columns': [meter.c.project_id], - 'refcolumns': [project.c.id]}, - 'fk_meter_project_id', - 'fk_sample_project_id'), - ({'columns': [meter.c.user_id], - 'refcolumns': [user.c.id]}, - 'fk_meter_user_id', - 'fk_sample_user_id')] - - for fk in fk_params: - params = fk[0] - if meta.bind.engine.name == 'mysql': - params['name'] = fk[1] - migrate.ForeignKeyConstraint(**params).drop() - - for meter_ix, sample_ix in indices: - meter_ix.drop() - sample_ix.create() - - for fk in fk_params: - params = fk[0] - if meta.bind.engine.name == 'mysql': - params['name'] = fk[2] - migrate.ForeignKeyConstraint(**params).create() - - -def _alter_sourceassoc(meta, t_name, ix_name, post_action=False): - if meta.bind.engine.name == 'sqlite': - return - - sourceassoc = sa.Table('sourceassoc', meta, autoload=True) - table = sa.Table(t_name, meta, autoload=True) - user = sa.Table('user', meta, autoload=True) - - c_name = '%s_id' % t_name - col = getattr(sourceassoc.c, c_name) - uniq_name = 'uniq_sourceassoc0%s0user_id' % c_name - - uniq_cols = (c_name, 'user_id') - param = {'columns': [col], - 'refcolumns': [table.c.id]} - user_param = {'columns': [sourceassoc.c.user_id], - 'refcolumns': [user.c.id]} - if meta.bind.engine.name == 'mysql': - param['name'] = 'fk_sourceassoc_%s' % c_name - user_param['name'] = 'fk_sourceassoc_user_id' - - actions = [migrate.ForeignKeyConstraint(**user_param), - migrate.ForeignKeyConstraint(**param), - sa.Index(ix_name, sourceassoc.c.source_id, col), - migrate.UniqueConstraint(*uniq_cols, table=sourceassoc, - name=uniq_name)] - for action in actions: - action.create() if post_action else action.drop() - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - - _handle_meter_indices(meta) - meter = sa.Table('meter', meta, autoload=True) - meter.rename('sample') - - _alter_sourceassoc(meta, 'meter', 'idx_sm') - sourceassoc = sa.Table('sourceassoc', meta, autoload=True) - sourceassoc.c.meter_id.alter(name='sample_id') - # re-bind metadata to pick up alter name change - meta = sa.MetaData(bind=migrate_engine) - _alter_sourceassoc(meta, 'sample', 'idx_ss', True) diff --git 
a/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py deleted file mode 100644 index 3dd8e469..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py +++ /dev/null @@ -1,87 +0,0 @@ -# -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import migrate -import sqlalchemy as sa - - -def handle_rid_index(meta): - if meta.bind.engine.name == 'sqlite': - return - - resource = sa.Table('resource', meta, autoload=True) - sample = sa.Table('sample', meta, autoload=True) - params = {'columns': [sample.c.resource_id], - 'refcolumns': [resource.c.id], - 'name': 'fk_sample_resource_id'} - if meta.bind.engine.name == 'mysql': - # For mysql dialect all dependent FK should be removed - # before index create/delete - migrate.ForeignKeyConstraint(**params).drop() - - index = sa.Index('idx_sample_rid_cname', sample.c.resource_id, - sample.c.counter_name) - index.drop() - - if meta.bind.engine.name == 'mysql': - migrate.ForeignKeyConstraint(**params).create() - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - meter = sa.Table( - 'meter', meta, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('name', sa.String(255), nullable=False), - sa.Column('type', sa.String(255)), - sa.Column('unit', sa.String(255)), - sa.UniqueConstraint('name', 'type', 'unit', name='def_unique'), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - meter.create() - sample = sa.Table('sample', meta, autoload=True) - query = sa.select([sample.c.counter_name, sample.c.counter_type, - sample.c.counter_unit]).distinct() - for row in query.execute(): - meter.insert().values(name=row['counter_name'], - type=row['counter_type'], - unit=row['counter_unit']).execute() - - meter_id = sa.Column('meter_id', sa.Integer) - meter_id.create(sample) - params = {'columns': [sample.c.meter_id], - 'refcolumns': [meter.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = 'fk_sample_meter_id' - if migrate_engine.name != 'sqlite': - migrate.ForeignKeyConstraint(**params).create() - - index = sa.Index('ix_meter_name', meter.c.name) - index.create(bind=migrate_engine) - - for row in sa.select([meter]).execute(): - (sample.update(). - where(sa.and_(sample.c.counter_name == row['name'], - sample.c.counter_type == row['type'], - sample.c.counter_unit == row['unit'])). 
- values({sample.c.meter_id: row['id']}).execute()) - - handle_rid_index(meta) - - sample.c.counter_name.drop() - sample.c.counter_type.drop() - sample.c.counter_unit.drop() - sample.c.counter_volume.alter(name='volume') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py deleted file mode 100644 index ec0b537c..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column -from sqlalchemy import MetaData -from sqlalchemy import Table -from sqlalchemy import Text - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alarm = Table('alarm', meta, autoload=True) - time_constraints = Column('time_constraints', Text()) - alarm.create_column(time_constraints) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py deleted file mode 100644 index bb0264eb..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py +++ /dev/null @@ -1,21 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - users = Table('alarm', meta, autoload=True) - users.c.id.alter(name='alarm_id') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py deleted file mode 100644 index ba4e3160..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import sqlalchemy as sa - -TABLES_012 = ['resource', 'sourceassoc', 'user', - 'project', 'meter', 'source', 'alarm'] -TABLES_027 = ['user', 'project', 'alarm'] - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - for table_name in TABLES_027: - try: - (sa.Table('dump027_' + table_name, meta, autoload=True). - drop(checkfirst=True)) - except sa.exc.NoSuchTableError: - pass - for table_name in TABLES_012: - try: - (sa.Table('dump_' + table_name, meta, autoload=True). - drop(checkfirst=True)) - except sa.exc.NoSuchTableError: - pass diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py deleted file mode 100644 index e58915af..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py +++ /dev/null @@ -1,84 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate import ForeignKeyConstraint, UniqueConstraint -import sqlalchemy as sa - -TABLES_DROP = ['user', 'project'] -TABLES = ['user', 'project', 'sourceassoc', 'sample', - 'resource', 'alarm_history'] - -INDEXES = { - "sample": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "sourceassoc": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "resource": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "alarm_history": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id'), - ('on_behalf_of', 'project', 'id')), -} - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - load_tables = dict((table_name, sa.Table(table_name, meta, - autoload=True)) - for table_name in TABLES) - - if migrate_engine.name != 'sqlite': - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - - if (migrate_engine.name == "mysql" and - table_name != 'alarm_history'): - params['name'] = "_".join(('fk', table_name, column)) - elif (migrate_engine.name == "postgresql" and - table_name == "sample"): - # The fk contains the old table name - params['name'] = "_".join(('meter', column, 'fkey')) - - fkey = ForeignKeyConstraint(**params) - fkey.drop() - - sourceassoc = load_tables['sourceassoc'] - if migrate_engine.name != 'sqlite': - idx = sa.Index('idx_su', sourceassoc.c.source_id, - sourceassoc.c.user_id) - idx.drop(bind=migrate_engine) - idx = sa.Index('idx_sp', sourceassoc.c.source_id, - sourceassoc.c.project_id) - idx.drop(bind=migrate_engine) - - params = {} - if migrate_engine.name == "mysql": - params = {'name': 'uniq_sourceassoc0sample_id'} - uc = UniqueConstraint('sample_id', table=sourceassoc, **params) - uc.create() - - params = {} - if migrate_engine.name == "mysql": - params = {'name': 'uniq_sourceassoc0sample_id0user_id'} - uc = UniqueConstraint('sample_id', 'user_id', - 
table=sourceassoc, **params) - uc.drop() - sourceassoc.c.user_id.drop() - sourceassoc.c.project_id.drop() - - for table_name in TABLES_DROP: - sa.Table(table_name, meta, autoload=True).drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py deleted file mode 100644 index b8a1a3db..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py +++ /dev/null @@ -1,68 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from migrate import ForeignKeyConstraint -import sqlalchemy as sa - -from ceilometer.storage.sqlalchemy import migration - - -TABLES = ['sample', 'resource', 'source', 'sourceassoc'] -DROP_TABLES = ['resource', 'source', 'sourceassoc'] - -INDEXES = { - "sample": (('resource_id', 'resource', 'id'),), - "sourceassoc": (('sample_id', 'sample', 'id'), - ('resource_id', 'resource', 'id'), - ('source_id', 'source', 'id')) -} - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - load_tables = dict((table_name, sa.Table(table_name, meta, - autoload=True)) - for table_name in TABLES) - - # drop foreign keys - if migrate_engine.name != 'sqlite': - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - fk_table_name = table_name - if migrate_engine.name == "mysql": - params['name'] = "_".join(('fk', fk_table_name, column)) - elif (migrate_engine.name == "postgresql" and - table_name == 'sample'): - # fk was not renamed in script 030 - params['name'] = "_".join(('meter', column, 'fkey')) - fkey = ForeignKeyConstraint(**params) - fkey.drop() - - # create source field in sample - sample = load_tables['sample'] - sample.create_column(sa.Column('source_id', sa.String(255))) - - # move source values to samples - sourceassoc = load_tables['sourceassoc'] - query = (sa.select([sourceassoc.c.sample_id, sourceassoc.c.source_id]). - where(sourceassoc.c.sample_id.isnot(None))) - for sample_id, source_id in migration.paged(query): - (sample.update().where(sample_id == sample.c.id). - values({'source_id': source_id}).execute()) - - # drop tables - for table_name in DROP_TABLES: - sa.Table(table_name, meta, autoload=True).drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py deleted file mode 100644 index 18ee7a67..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate import ForeignKeyConstraint -import sqlalchemy as sa - - -class ForeignKeyHandle(object): - def __init__(self, meta): - sample = sa.Table('sample', meta, autoload=True) - meter = sa.Table('meter', meta, autoload=True) - self.sample_params = {'columns': [sample.c.meter_id], - 'refcolumns': [meter.c.id]} - if meta.bind.engine.name == 'mysql': - self.sample_params['name'] = "fk_sample_meter_id" - - def __enter__(self): - ForeignKeyConstraint(**self.sample_params).drop() - - def __exit__(self, type, value, traceback): - ForeignKeyConstraint(**self.sample_params).create() - - -def upgrade(migrate_engine): - if migrate_engine.name == 'sqlite': - return - meta = sa.MetaData(bind=migrate_engine) - sample = sa.Table('sample', meta, autoload=True) - - with ForeignKeyHandle(meta): - # remove stray indexes implicitly created by InnoDB - for index in sample.indexes: - if index.name in ['fk_sample_meter_id', 'fk_sample_resource_id']: - index.drop() - sa.Index('ix_sample_meter_id', sample.c.meter_id).create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py deleted file mode 100644 index 2fb7b47b..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py +++ /dev/null @@ -1,131 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import hashlib - -import migrate -from oslo_serialization import jsonutils -import sqlalchemy as sa - - -m_tables = [('metadata_text', sa.Text, True), - ('metadata_bool', sa.Boolean, False), - ('metadata_int', sa.BigInteger, False), - ('metadata_float', sa.Float(53), False)] - - -def _migrate_meta_tables(meta, col, new_col, new_fk): - for t_name, t_type, t_nullable in m_tables: - m_table = sa.Table(t_name, meta, autoload=True) - m_table_new = sa.Table( - '%s_new' % t_name, meta, - sa.Column('id', sa.Integer, sa.ForeignKey(new_fk), - primary_key=True), - sa.Column('meta_key', sa.String(255), - primary_key=True), - sa.Column('value', t_type, nullable=t_nullable), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - m_table_new.create() - - if m_table.select().scalar() is not None: - m_table_new.insert().from_select( - ['id', 'meta_key', 'value'], - sa.select([new_col, m_table.c.meta_key, - m_table.c.value]).where( - col == m_table.c.id).group_by( - new_col, m_table.c.meta_key, m_table.c.value)).execute() - - m_table.drop() - if meta.bind.engine.name != 'sqlite': - sa.Index('ix_%s_meta_key' % t_name, - m_table_new.c.meta_key).create() - m_table_new.rename(t_name) - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - resource = sa.Table( - 'resource', meta, - sa.Column('internal_id', sa.Integer, primary_key=True), - sa.Column('resource_id', sa.String(255)), - sa.Column('user_id', sa.String(255)), - sa.Column('project_id', sa.String(255)), - sa.Column('source_id', sa.String(255)), - sa.Column('resource_metadata', sa.Text), - sa.Column('metadata_hash', sa.String(32)), - mysql_engine='InnoDB', - mysql_charset='utf8') - resource.create() - - # copy resource data in to resource table - sample = sa.Table('sample', meta, autoload=True) - sa.Column('metadata_hash', sa.String(32)).create(sample) - for row in sa.select([sample.c.id, sample.c.resource_metadata]).execute(): - sample.update().where(sample.c.id == row['id']).values( - {sample.c.metadata_hash: - hashlib.md5(jsonutils.dumps( - row['resource_metadata'], - sort_keys=True)).hexdigest()}).execute() - query = sa.select([sample.c.resource_id, sample.c.user_id, - sample.c.project_id, sample.c.source_id, - sample.c.resource_metadata, - sample.c.metadata_hash]).distinct() - for row in query.execute(): - resource.insert().values( - resource_id=row['resource_id'], - user_id=row['user_id'], - project_id=row['project_id'], - source_id=row['source_id'], - resource_metadata=row['resource_metadata'], - metadata_hash=row['metadata_hash']).execute() - # link sample records to new resource records - sa.Column('resource_id_new', sa.Integer).create(sample) - for row in sa.select([resource]).execute(): - (sample.update(). - where(sa.and_( - sample.c.resource_id == row['resource_id'], - sample.c.user_id == row['user_id'], - sample.c.project_id == row['project_id'], - sample.c.source_id == row['source_id'], - sample.c.metadata_hash == row['metadata_hash'])). 
- values({sample.c.resource_id_new: row['internal_id']}).execute()) - - sample.c.resource_id.drop() - sample.c.metadata_hash.drop() - sample.c.resource_id_new.alter(name='resource_id') - # re-bind metadata to pick up alter name change - meta = sa.MetaData(bind=migrate_engine) - sample = sa.Table('sample', meta, autoload=True) - resource = sa.Table('resource', meta, autoload=True) - if migrate_engine.name != 'sqlite': - sa.Index('ix_resource_resource_id', resource.c.resource_id).create() - sa.Index('ix_sample_user_id', sample.c.user_id).drop() - sa.Index('ix_sample_project_id', sample.c.project_id).drop() - sa.Index('ix_sample_resource_id', sample.c.resource_id).create() - sa.Index('ix_sample_meter_id_resource_id', - sample.c.meter_id, sample.c.resource_id).create() - - params = {'columns': [sample.c.resource_id], - 'refcolumns': [resource.c.internal_id]} - if migrate_engine.name == 'mysql': - params['name'] = 'fk_sample_resource_internal_id' - migrate.ForeignKeyConstraint(**params).create() - - sample.c.user_id.drop() - sample.c.project_id.drop() - sample.c.source_id.drop() - sample.c.resource_metadata.drop() - - _migrate_meta_tables(meta, sample.c.id, sample.c.resource_id, - 'resource.internal_id') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py deleted file mode 100644 index 055f2ee6..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py +++ /dev/null @@ -1,56 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE (gordc): this is a copy of 024 migration script which missed pgsql - -import sqlalchemy as sa - -from ceilometer.storage.sqlalchemy import migration -from ceilometer.storage.sqlalchemy import models - - -def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): - temp_col_n = 'convert_data_type_temp_col' - # Override column we're going to convert with from_t, since the type we're - # replacing could be custom and we need to tell SQLALchemy how to perform - # CRUD operations with it. - table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), - extend_existing=True) - sa.Column(temp_col_n, to_t).create(table) - - key_attr = getattr(table.c, pk_attr) - orig_col = getattr(table.c, col) - new_col = getattr(table.c, temp_col_n) - - query = sa.select([key_attr, orig_col]) - for key, value in migration.paged(query): - (table.update().where(key_attr == key).values({temp_col_n: value}). 
- execute()) - - orig_col.drop() - new_col.alter(name=col) - if index: - sa.Index('ix_%s_%s' % (table.name, col), new_col).create() - - -def upgrade(migrate_engine): - if migrate_engine.name == 'postgresql': - meta = sa.MetaData(bind=migrate_engine) - event = sa.Table('event', meta, autoload=True) - _convert_data_type(event, 'generated', sa.Float(), - models.PreciseTimestamp(), - pk_attr='id', index=True) - trait = sa.Table('trait', meta, autoload=True) - _convert_data_type(trait, 't_datetime', sa.Float(), - models.PreciseTimestamp(), - pk_attr='id', index=True) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py deleted file mode 100644 index 07a94deb..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alarm = Table('alarm', meta, autoload=True) - severity = Column('severity', String(50)) - alarm.create_column(severity) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py deleted file mode 100644 index a9492381..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py +++ /dev/null @@ -1,54 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy as sa - -from ceilometer.storage.sqlalchemy import models - -tables = [('trait_text', sa.String(255), True, 't_string', 1), - ('trait_int', sa.Integer, False, 't_int', 2), - ('trait_float', sa.Float(53), False, 't_float', 3), - ('trait_datetime', models.PreciseTimestamp(), - False, 't_datetime', 4)] - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - trait = sa.Table('trait', meta, autoload=True) - event = sa.Table('event', meta, autoload=True) - trait_type = sa.Table('trait_type', meta, autoload=True) - for t_name, t_type, t_nullable, col_name, __ in tables: - t_table = sa.Table( - t_name, meta, - sa.Column('event_id', sa.Integer, - sa.ForeignKey(event.c.id), primary_key=True), - sa.Column('key', sa.String(255), primary_key=True), - sa.Column('value', t_type, nullable=t_nullable), - sa.Index('ix_%s_event_id_key' % t_name, - 'event_id', 'key'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - t_table.create() - query = sa.select( - [trait.c.event_id, - trait_type.c.desc, - trait.c[col_name]]).select_from( - trait.join(trait_type, - trait.c.trait_type_id == trait_type.c.id)).where( - trait.c[col_name] != sa.null()) - if query.alias().select().scalar() is not None: - t_table.insert().from_select( - ['event_id', 'key', 'value'], query).execute() - trait.drop() - trait_type.drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py deleted file mode 100644 index 1e8b4614..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py +++ /dev/null @@ -1,21 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - event = sa.Table('event', meta, autoload=True) - raw = sa.Column('raw', sa.Text) - event.create_column(raw) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py deleted file mode 100644 index 03a5525b..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def upgrade(migrate_engine): - # NOTE(gordc): this is a noop script to handle bug1468916 - # previous lowering of id length will fail if db contains data longer. - # this skips migration for those failing. 
the next script will resize - # if this original migration passed. - pass diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py deleted file mode 100644 index a7db70cb..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('resource', meta, autoload=True) - resource.c.user_id.alter(type=String(255)) - resource.c.project_id.alter(type=String(255)) - resource.c.resource_id.alter(type=String(255)) - resource.c.source_id.alter(type=String(255)) - sample = Table('sample', meta, autoload=True) - sample.c.message_signature.alter(type=String(64)) - sample.c.message_id.alter(type=String(128)) - alarm = Table('alarm', meta, autoload=True) - alarm.c.alarm_id.alter(type=String(128)) - alarm.c.user_id.alter(type=String(255)) - alarm.c.project_id.alter(type=String(255)) - alarm_history = Table('alarm_history', meta, autoload=True) - alarm_history.c.alarm_id.alter(type=String(128)) - alarm_history.c.user_id.alter(type=String(255)) - alarm_history.c.project_id.alter(type=String(255)) - alarm_history.c.event_id.alter(type=String(128)) - alarm_history.c.on_behalf_of.alter(type=String(255)) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py deleted file mode 100644 index ac59595d..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy as sa - - -# Add index on metadata_hash column of resource -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - resource = sa.Table('resource', meta, autoload=True) - index = sa.Index('ix_resource_metadata_hash', resource.c.metadata_hash) - index.create(bind=migrate_engine) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/storage/sqlalchemy/migration.py b/ceilometer/storage/sqlalchemy/migration.py deleted file mode 100644 index 160e68e5..00000000 --- a/ceilometer/storage/sqlalchemy/migration.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def paged(query, size=1000): - """Page query results - - :param query: the SQLAlchemy query to execute - :param size: the max page size - return: generator with query data - """ - offset = 0 - while True: - page = query.offset(offset).limit(size).execute() - if page.rowcount <= 0: - # There are no more rows - break - for row in page: - yield row - offset += size diff --git a/ceilometer/storage/sqlalchemy/models.py b/ceilometer/storage/sqlalchemy/models.py index 223ef0f5..c726e7bf 100644 --- a/ceilometer/storage/sqlalchemy/models.py +++ b/ceilometer/storage/sqlalchemy/models.py @@ -13,15 +13,11 @@ """ SQLAlchemy models for Ceilometer data. 
""" -import hashlib import json -from oslo_utils import timeutils import six -from sqlalchemy import (Column, Integer, String, ForeignKey, Index, - UniqueConstraint, BigInteger) -from sqlalchemy import event -from sqlalchemy import Float, Boolean, Text, DateTime +from sqlalchemy import Column, Integer, String, ForeignKey, Index +from sqlalchemy import Float, DateTime from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import deferred @@ -99,148 +95,6 @@ class CeilometerBase(object): Base = declarative_base(cls=CeilometerBase) -class MetaText(Base): - """Metering text metadata.""" - - __tablename__ = 'metadata_text' - __table_args__ = ( - Index('ix_meta_text_key', 'meta_key'), - ) - id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) - meta_key = Column(String(255), primary_key=True) - value = Column(Text) - - -class MetaBool(Base): - """Metering boolean metadata.""" - - __tablename__ = 'metadata_bool' - __table_args__ = ( - Index('ix_meta_bool_key', 'meta_key'), - ) - id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) - meta_key = Column(String(255), primary_key=True) - value = Column(Boolean) - - -class MetaBigInt(Base): - """Metering integer metadata.""" - - __tablename__ = 'metadata_int' - __table_args__ = ( - Index('ix_meta_int_key', 'meta_key'), - ) - id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) - meta_key = Column(String(255), primary_key=True) - value = Column(BigInteger, default=False) - - -class MetaFloat(Base): - """Metering float metadata.""" - - __tablename__ = 'metadata_float' - __table_args__ = ( - Index('ix_meta_float_key', 'meta_key'), - ) - id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) - meta_key = Column(String(255), primary_key=True) - value = Column(Float(53), default=False) - - -class Meter(Base): - """Meter definition data.""" - - __tablename__ = 'meter' - __table_args__ = ( - UniqueConstraint('name', 'type', 'unit', name='def_unique'), - Index('ix_meter_name', 'name'), - ) - id = Column(Integer, primary_key=True) - name = Column(String(255), nullable=False) - type = Column(String(255)) - unit = Column(String(255)) - samples = relationship("Sample", backref="meter") - - -class Resource(Base): - """Resource data.""" - - __tablename__ = 'resource' - __table_args__ = ( - # TODO(gordc): this should exist but the attribute values we set - # for user/project/source/resource id's are too large - # for an uuid. 
- # UniqueConstraint('resource_id', 'user_id', 'project_id', - # 'source_id', 'metadata_hash', - # name='res_def_unique'), - Index('ix_resource_resource_id', 'resource_id'), - Index('ix_resource_metadata_hash', 'metadata_hash'), - ) - - internal_id = Column(Integer, primary_key=True) - user_id = Column(String(255)) - project_id = Column(String(255)) - source_id = Column(String(255)) - resource_id = Column(String(255), nullable=False) - resource_metadata = deferred(Column(JSONEncodedDict())) - metadata_hash = deferred(Column(String(32))) - samples = relationship("Sample", backref="resource") - meta_text = relationship("MetaText", backref="resource", - cascade="all, delete-orphan") - meta_float = relationship("MetaFloat", backref="resource", - cascade="all, delete-orphan") - meta_int = relationship("MetaBigInt", backref="resource", - cascade="all, delete-orphan") - meta_bool = relationship("MetaBool", backref="resource", - cascade="all, delete-orphan") - - -@event.listens_for(Resource, "before_insert") -def before_insert(mapper, connection, target): - metadata = json.dumps(target.resource_metadata, sort_keys=True) - target.metadata_hash = hashlib.md5(metadata).hexdigest() - - -class Sample(Base): - """Metering data.""" - - __tablename__ = 'sample' - __table_args__ = ( - Index('ix_sample_timestamp', 'timestamp'), - Index('ix_sample_resource_id', 'resource_id'), - Index('ix_sample_meter_id', 'meter_id'), - Index('ix_sample_meter_id_resource_id', 'meter_id', 'resource_id') - ) - id = Column(Integer, primary_key=True) - meter_id = Column(Integer, ForeignKey('meter.id')) - resource_id = Column(Integer, ForeignKey('resource.internal_id')) - volume = Column(Float(53)) - timestamp = Column(PreciseTimestamp(), default=lambda: timeutils.utcnow()) - recorded_at = Column(PreciseTimestamp(), - default=lambda: timeutils.utcnow()) - message_signature = Column(String(64)) - message_id = Column(String(128)) - - -class FullSample(object): - """A fake model for query samples.""" - id = Sample.id - timestamp = Sample.timestamp - message_id = Sample.message_id - message_signature = Sample.message_signature - recorded_at = Sample.recorded_at - counter_name = Meter.name - counter_type = Meter.type - counter_unit = Meter.unit - counter_volume = Sample.volume - resource_id = Resource.resource_id - source_id = Resource.source_id - user_id = Resource.user_id - project_id = Resource.project_id - resource_metadata = Resource.resource_metadata - internal_id = Resource.internal_id - - class EventType(Base): """Types of event records.""" __tablename__ = 'event_type' diff --git a/ceilometer/storage/sqlalchemy/utils.py b/ceilometer/storage/sqlalchemy/utils.py deleted file mode 100644 index 2003c24c..00000000 --- a/ceilometer/storage/sqlalchemy/utils.py +++ /dev/null @@ -1,131 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import operator - -import six -from sqlalchemy import and_ -from sqlalchemy import asc -from sqlalchemy import desc -from sqlalchemy import not_ -from sqlalchemy import or_ -from sqlalchemy.orm import aliased - -import ceilometer -from ceilometer.storage.sqlalchemy import models - - -META_TYPE_MAP = {bool: models.MetaBool, - str: models.MetaText, - six.text_type: models.MetaText, - type(None): models.MetaText, - int: models.MetaBigInt, - float: models.MetaFloat} -if six.PY2: - META_TYPE_MAP[long] = models.MetaBigInt - - -class QueryTransformer(object): - operators = {"=": operator.eq, - "<": operator.lt, - ">": operator.gt, - "<=": operator.le, - "=<": operator.le, - ">=": operator.ge, - "=>": operator.ge, - "!=": operator.ne, - "in": lambda field_name, values: field_name.in_(values), - "=~": lambda field, value: field.op("regexp")(value)} - - # operators which are different for different dialects - dialect_operators = {'postgresql': {'=~': (lambda field, value: - field.op("~")(value))}} - - complex_operators = {"or": or_, - "and": and_, - "not": not_} - - ordering_functions = {"asc": asc, - "desc": desc} - - def __init__(self, table, query, dialect='mysql'): - self.table = table - self.query = query - self.dialect_name = dialect - - def _get_operator(self, op): - return (self.dialect_operators.get(self.dialect_name, {}).get(op) - or self.operators[op]) - - def _handle_complex_op(self, complex_op, nodes): - op = self.complex_operators[complex_op] - if op == not_: - nodes = [nodes] - element_list = [] - for node in nodes: - element = self._transform(node) - element_list.append(element) - return op(*element_list) - - def _handle_simple_op(self, simple_op, nodes): - op = self._get_operator(simple_op) - field_name, value = list(nodes.items())[0] - if field_name.startswith('resource_metadata.'): - return self._handle_metadata(op, field_name, value) - else: - return op(getattr(self.table, field_name), value) - - def _handle_metadata(self, op, field_name, value): - if op == self.operators["in"]: - raise ceilometer.NotImplementedError('Metadata query with in ' - 'operator is not implemented') - field_name = field_name[len('resource_metadata.'):] - meta_table = META_TYPE_MAP[type(value)] - meta_alias = aliased(meta_table) - on_clause = and_(self.table.internal_id == meta_alias.id, - meta_alias.meta_key == field_name) - # outer join is needed to support metaquery - # with or operator on non existent metadata field - # see: test_query_non_existing_metadata_with_result - # test case. 
- self.query = self.query.outerjoin(meta_alias, on_clause) - return op(meta_alias.value, value) - - def _transform(self, sub_tree): - operator, nodes = list(sub_tree.items())[0] - if operator in self.complex_operators: - return self._handle_complex_op(operator, nodes) - else: - return self._handle_simple_op(operator, nodes) - - def apply_filter(self, expression_tree): - condition = self._transform(expression_tree) - self.query = self.query.filter(condition) - - def apply_options(self, orderby, limit): - self._apply_order_by(orderby) - if limit is not None: - self.query = self.query.limit(limit) - - def _apply_order_by(self, orderby): - if orderby is not None: - for field in orderby: - attr, order = list(field.items())[0] - ordering_function = self.ordering_functions[order] - self.query = self.query.order_by(ordering_function( - getattr(self.table, attr))) - else: - self.query = self.query.order_by(desc(self.table.timestamp)) - - def get_query(self): - return self.query diff --git a/ceilometer/telemetry/__init__.py b/ceilometer/telemetry/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/telemetry/notifications.py b/ceilometer/telemetry/notifications.py deleted file mode 100644 index db825ac0..00000000 --- a/ceilometer/telemetry/notifications.py +++ /dev/null @@ -1,66 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('ceilometer_control_exchange', - default='ceilometer', - help="Exchange name for ceilometer notifications."), -] - - -cfg.CONF.register_opts(OPTS) - - -class TelemetryBase(plugin_base.NotificationBase): - """Convert telemetry notification into Samples.""" - - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - Sequence defining the exchange and topics to be connected for this - plugin. - """ - return [oslo_messaging.Target( - topic=topic, exchange=conf.ceilometer_control_exchange) - for topic in self.get_notification_topics(conf)] - - -class TelemetryIpc(TelemetryBase): - """Handle sample from notification bus - - Telemetry samples can be posted via API or polled by Polling agent. 
- """ - - event_types = ['telemetry.api', 'telemetry.polling'] - - def process_notification(self, message): - samples = message['payload']['samples'] - for sample_dict in samples: - yield sample.Sample( - name=sample_dict['counter_name'], - type=sample_dict['counter_type'], - unit=sample_dict['counter_unit'], - volume=sample_dict['counter_volume'], - user_id=sample_dict['user_id'], - project_id=sample_dict['project_id'], - resource_id=sample_dict['resource_id'], - timestamp=sample_dict['timestamp'], - resource_metadata=sample_dict['resource_metadata'], - source=sample_dict['source'], - id=sample_dict['message_id']) diff --git a/ceilometer/tests/base.py b/ceilometer/tests/base.py index ca6b0071..2156e003 100644 --- a/ceilometer/tests/base.py +++ b/ceilometer/tests/base.py @@ -16,33 +16,16 @@ import functools import os.path -import oslo_messaging.conffixture from oslo_utils import timeutils from oslotest import base -from oslotest import mockpatch import six from testtools import testcase import webtest import ceilometer -from ceilometer import messaging class BaseTestCase(base.BaseTestCase): - def setup_messaging(self, conf, exchange=None): - self.useFixture(oslo_messaging.conffixture.ConfFixture(conf)) - conf.set_override("notification_driver", "messaging") - if not exchange: - exchange = 'ceilometer' - conf.set_override("control_exchange", exchange) - - # NOTE(sileht): Ensure a new oslo.messaging driver is loaded - # between each tests - self.transport = messaging.get_transport("fake://", cache=False) - self.useFixture(mockpatch.Patch( - 'ceilometer.messaging.get_transport', - return_value=self.transport)) - def assertTimestampEqual(self, first, second, msg=None): """Checks that two timestamps are equals. diff --git a/ceilometer/tests/db.py b/ceilometer/tests/db.py index d8c74f78..38542489 100644 --- a/ceilometer/tests/db.py +++ b/ceilometer/tests/db.py @@ -48,10 +48,8 @@ class MongoDbManager(fixtures.Fixture): action='ignore', message='.*you must provide a username and password.*') try: - self.connection = storage.get_connection( - self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.url) except storage.StorageBadVersion as e: raise testcase.TestSkipped(six.text_type(e)) @@ -77,10 +75,7 @@ class SQLManager(fixtures.Fixture): def setUp(self): super(SQLManager, self).setUp() - self.connection = storage.get_connection( - self.url, 'ceilometer.metering.storage') - self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.event_connection = storage.get_connection(self.url) class PgSQLManager(SQLManager): @@ -103,10 +98,8 @@ class ElasticSearchManager(fixtures.Fixture): def setUp(self): super(ElasticSearchManager, self).setUp() - self.connection = storage.get_connection( - 'sqlite://', 'ceilometer.metering.storage') self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.url) # prefix each test with unique index name self.event_connection.index_name = 'events_%s' % uuid.uuid4().hex # force index on write so data is queryable right away @@ -119,10 +112,8 @@ class HBaseManager(fixtures.Fixture): def setUp(self): super(HBaseManager, self).setUp() - self.connection = storage.get_connection( - self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.url) # Unique prefix for each test to keep data is distinguished because # all test data is stored in one table 
data_prefix = str(uuid.uuid4().hex) @@ -159,10 +150,8 @@ class SQLiteManager(fixtures.Fixture): def setUp(self): super(SQLiteManager, self).setUp() - self.connection = storage.get_connection( - self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.url) @six.add_metaclass(test_base.SkipNotImplementedMeta) @@ -206,35 +195,19 @@ class TestBase(test_base.BaseTestCase): self.useFixture(self.db_manager) - self.conn = self.db_manager.connection - self.conn.upgrade() - self.event_conn = self.db_manager.event_connection self.event_conn.upgrade() self.useFixture(mockpatch.Patch('ceilometer.storage.get_connection', side_effect=self._get_connection)) - # Set a default location for the pipeline config file so the - # tests work even if ceilometer is not installed globally on - # the system. - self.CONF.import_opt('pipeline_cfg_file', 'ceilometer.pipeline') - self.CONF.set_override( - 'pipeline_cfg_file', - self.path_get('etc/ceilometer/pipeline.yaml') - ) - def tearDown(self): self.event_conn.clear() self.event_conn = None - self.conn.clear() - self.conn = None super(TestBase, self).tearDown() - def _get_connection(self, url, namespace): - if namespace == "ceilometer.event.storage": - return self.event_conn - return self.conn + def _get_connection(self, url): + return self.event_conn def run_with(*drivers): diff --git a/ceilometer/tests/functional/api/__init__.py b/ceilometer/tests/functional/api/__init__.py index 6dde5fae..aa111e54 100644 --- a/ceilometer/tests/functional/api/__init__.py +++ b/ceilometer/tests/functional/api/__init__.py @@ -41,7 +41,6 @@ class FunctionalTest(db_test_base.TestBase): def setUp(self): super(FunctionalTest, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) opts.set_defaults(self.CONF) self.CONF.set_override("auth_version", "v2.0", @@ -50,9 +49,6 @@ class FunctionalTest(db_test_base.TestBase): self.path_get('etc/ceilometer/policy.json'), group='oslo_policy') - self.CONF.set_override('gnocchi_is_enabled', False, group='api') - self.CONF.set_override('aodh_is_enabled', False, group='api') - self.app = self._make_app() def _make_app(self, enable_acl=False): diff --git a/ceilometer/tests/functional/api/v2/test_acl_scenarios.py b/ceilometer/tests/functional/api/v2/test_acl_scenarios.py index f30e090d..4c76c73f 100644 --- a/ceilometer/tests/functional/api/v2/test_acl_scenarios.py +++ b/ceilometer/tests/functional/api/v2/test_acl_scenarios.py @@ -25,8 +25,6 @@ import webtest from ceilometer.api import app from ceilometer.event.storage import models as ev_model -from ceilometer.publisher import utils -from ceilometer import sample from ceilometer.tests.functional.api import v2 VALID_TOKEN = uuid.uuid4().hex @@ -57,35 +55,6 @@ class TestAPIACL(v2.FunctionalTest): user_id='user_id1', is_v2=True) - for cnt in [ - sample.Sample( - 'meter.test', - 'cumulative', - '', - 1, - 'user-good', - 'project-good', - 'resource-good', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample'}, - source='test_source'), - sample.Sample( - 'meter.mine', - 'gauge', - '', - 1, - 'user-fred', - 'project-good', - 'resource-56', - timestamp=datetime.datetime(2012, 7, 2, 10, 43), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample4'}, - source='test_source')]: - msg = utils.meter_message_from_counter( - cnt, self.CONF.publisher.telemetry_secret) - self.conn.record_metering_data(msg) 
- def get_json(self, path, expect_errors=False, headers=None, q=None, **params): return super(TestAPIACL, self).get_json(path, @@ -99,90 +68,6 @@ class TestAPIACL(v2.FunctionalTest): self.CONF.set_override("api_paste_config", file_name) return webtest.TestApp(app.load_app()) - def test_non_authenticated(self): - response = self.get_json('/meters', expect_errors=True) - self.assertEqual(401, response.status_int) - - def test_authenticated_wrong_role(self): - response = self.get_json('/meters', - expect_errors=True, - headers={ - "X-Roles": "Member", - "X-Tenant-Name": "admin", - "X-Project-Id": - "bc23a9d531064583ace8f67dad60f6bb", - }) - self.assertEqual(401, response.status_int) - - # FIXME(dhellmann): This test is not properly looking at the tenant - # info. We do not correctly detect the improper tenant. That's - # really something the keystone middleware would have to do using - # the incoming token, which we aren't providing. - # - # def test_authenticated_wrong_tenant(self): - # response = self.get_json('/meters', - # expect_errors=True, - # headers={ - # "X-Roles": "admin", - # "X-Tenant-Name": "achoo", - # "X-Project-Id": "bc23a9d531064583ace8f67dad60f6bb", - # }) - # self.assertEqual(401, response.status_int) - - def test_authenticated(self): - data = self.get_json('/meters', - headers={"X-Auth-Token": VALID_TOKEN, - "X-Roles": "admin", - "X-Project-Id": - "bc23a9d531064583ace8f67dad60f6bb", - }) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-good', 'resource-56']), ids) - - def test_with_non_admin_missing_project_query(self): - data = self.get_json('/meters', - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-good', 'resource-56']), ids) - - def test_with_non_admin(self): - data = self.get_json('/meters', - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}, - q=[{'field': 'project_id', - 'value': 'project-good', - }]) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-good', 'resource-56']), ids) - - def test_non_admin_wrong_project(self): - data = self.get_json('/meters', - expect_errors=True, - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}, - q=[{'field': 'project_id', - 'value': 'project-wrong', - }]) - self.assertEqual(401, data.status_int) - - def test_non_admin_two_projects(self): - data = self.get_json('/meters', - expect_errors=True, - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}, - q=[{'field': 'project_id', - 'value': 'project-good', - }, - {'field': 'project_id', - 'value': 'project-naughty', - }]) - self.assertEqual(401, data.status_int) - class TestAPIEventACL(TestAPIACL): diff --git a/ceilometer/tests/functional/api/v2/test_api_upgrade.py b/ceilometer/tests/functional/api/v2/test_api_upgrade.py deleted file mode 100644 index 7e4b427f..00000000 --- a/ceilometer/tests/functional/api/v2/test_api_upgrade.py +++ /dev/null @@ -1,148 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_utils import fileutils -from oslotest import mockpatch -import six - -from ceilometer.tests.functional.api import v2 - - -class TestAPIUpgradePath(v2.FunctionalTest): - def _make_app(self): - content = ('{"default": ""}') - if six.PY3: - content = content.encode('utf-8') - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='policy', - suffix='.json') - self.CONF.set_override("policy_file", self.tempfile, - group='oslo_policy') - return super(TestAPIUpgradePath, self)._make_app() - - def _setup_osloconfig_options(self): - self.CONF.set_override('gnocchi_is_enabled', True, group='api') - self.CONF.set_override('aodh_is_enabled', True, group='api') - self.CONF.set_override('aodh_url', 'http://alarm-endpoint:8008/', - group='api') - - def _setup_keystone_mock(self): - self.CONF.set_override('gnocchi_is_enabled', None, group='api') - self.CONF.set_override('aodh_is_enabled', None, group='api') - self.CONF.set_override('aodh_url', None, group='api') - self.CONF.set_override('meter_dispatchers', ['database']) - self.ks = mock.Mock() - self.catalog = (self.ks.session.auth.get_access. - return_value.service_catalog) - self.catalog.url_for.side_effect = self._url_for - self.useFixture(mockpatch.Patch( - 'ceilometer.keystone_client.get_client', return_value=self.ks)) - - @staticmethod - def _url_for(service_type=None): - if service_type == 'metric': - return 'http://gnocchi/' - elif service_type == 'alarming': - return 'http://alarm-endpoint:8008/' - - def _do_test_gnocchi_enabled_without_database_backend(self): - self.CONF.set_override('meter_dispatchers', 'gnocchi') - for endpoint in ['meters', 'samples', 'resources']: - response = self.app.get(self.PATH_PREFIX + '/' + endpoint, - status=410) - self.assertIn(b'Gnocchi API', response.body) - - headers_events = {"X-Roles": "admin", - "X-User-Id": "user1", - "X-Project-Id": "project1"} - for endpoint in ['events', 'event_types']: - self.app.get(self.PATH_PREFIX + '/' + endpoint, - headers=headers_events, - status=200) - - response = self.post_json('/query/samples', - params={ - "filter": '{"=": {"type": "creation"}}', - "orderby": '[{"timestamp": "DESC"}]', - "limit": 3 - }, status=410) - self.assertIn(b'Gnocchi API', response.body) - sample_params = { - "counter_type": "gauge", - "counter_name": "fake_counter", - "resource_id": "fake_resource_id", - "counter_unit": "fake_unit", - "counter_volume": "1" - } - self.post_json('/meters/fake_counter', - params=[sample_params], - status=201) - response = self.post_json('/meters/fake_counter?direct=1', - params=[sample_params], - status=400) - self.assertIn(b'direct option cannot be true when Gnocchi is enabled', - response.body) - - def _do_test_alarm_redirect(self): - response = self.app.get(self.PATH_PREFIX + '/alarms', - expect_errors=True) - - self.assertEqual(307, response.status_code) - self.assertEqual("http://alarm-endpoint:8008/v2/alarms", - response.headers['Location']) - - response = self.app.get(self.PATH_PREFIX + '/alarms/uuid', - expect_errors=True) - - self.assertEqual(307, response.status_code) - 
self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", - response.headers['Location']) - - response = self.app.delete(self.PATH_PREFIX + '/alarms/uuid', - expect_errors=True) - - self.assertEqual(307, response.status_code) - self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", - response.headers['Location']) - - response = self.post_json('/query/alarms', - params={ - "filter": '{"=": {"type": "creation"}}', - "orderby": '[{"timestamp": "DESC"}]', - "limit": 3 - }, status=307) - self.assertEqual("http://alarm-endpoint:8008/v2/query/alarms", - response.headers['Location']) - - def test_gnocchi_enabled_without_database_backend_keystone(self): - self._setup_keystone_mock() - self._do_test_gnocchi_enabled_without_database_backend() - self.catalog.url_for.assert_has_calls([ - mock.call(service_type="alarming"), - mock.call(service_type="metric")], - any_order=True) - - def test_gnocchi_enabled_without_database_backend_configoptions(self): - self._setup_osloconfig_options() - self._do_test_gnocchi_enabled_without_database_backend() - - def test_alarm_redirect_keystone(self): - self._setup_keystone_mock() - self._do_test_alarm_redirect() - self.assertEqual([mock.call(service_type="alarming")], - self.catalog.url_for.mock_calls) - - def test_alarm_redirect_configoptions(self): - self._setup_osloconfig_options() - self._do_test_alarm_redirect() diff --git a/ceilometer/tests/functional/api/v2/test_app.py b/ceilometer/tests/functional/api/v2/test_app.py index 9aef1612..55943005 100644 --- a/ceilometer/tests/functional/api/v2/test_app.py +++ b/ceilometer/tests/functional/api/v2/test_app.py @@ -18,14 +18,6 @@ from ceilometer.tests.functional.api import v2 -class TestPecanApp(v2.FunctionalTest): - - def test_pecan_extension_guessing_unset(self): - # check Pecan does not assume .jpg is an extension - response = self.app.get(self.PATH_PREFIX + '/meters/meter.jpg') - self.assertEqual('application/json', response.content_type) - - class TestApiMiddleware(v2.FunctionalTest): no_lang_translated_error = 'No lang translated error' diff --git a/ceilometer/tests/functional/api/v2/test_capabilities.py b/ceilometer/tests/functional/api/v2/test_capabilities.py index f3c880ef..774eda52 100644 --- a/ceilometer/tests/functional/api/v2/test_capabilities.py +++ b/ceilometer/tests/functional/api/v2/test_capabilities.py @@ -29,4 +29,4 @@ class TestCapabilitiesController(tests_api.FunctionalTest): self.assertIsNotNone(data) self.assertNotEqual({}, data) self.assertIn('api', data) - self.assertIn('storage', data) + self.assertIn('event_storage', data) diff --git a/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py b/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py deleted file mode 100644 index b9646ab5..00000000 --- a/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py +++ /dev/null @@ -1,314 +0,0 @@ -# -# Copyright Ericsson AB 2013. All rights reserved -# -# Authors: Ildiko Vancsa -# Balazs Gibizer -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests complex queries for samples -""" - -import datetime - -from oslo_utils import timeutils - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.functional.api import v2 as tests_api - - -admin_header = {"X-Roles": "admin", - "X-Project-Id": - "project-id1"} -non_admin_header = {"X-Roles": "Member", - "X-Project-Id": - "project-id1"} - - -class TestQueryMetersController(tests_api.FunctionalTest): - def setUp(self): - super(TestQueryMetersController, self).setUp() - self.url = '/query/samples' - - for cnt in [ - sample.Sample('meter.test', - 'cumulative', - '', - 1, - 'user-id1', - 'project-id1', - 'resource-id1', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server1', - 'tag': 'self.sample', - 'size': 456, - 'util': 0.25, - 'is_public': True}, - source='test_source'), - sample.Sample('meter.test', - 'cumulative', - '', - 2, - 'user-id2', - 'project-id2', - 'resource-id2', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server2', - 'tag': 'self.sample', - 'size': 123, - 'util': 0.75, - 'is_public': True}, - source='test_source'), - sample.Sample('meter.test', - 'cumulative', - '', - 3, - 'user-id3', - 'project-id3', - 'resource-id3', - timestamp=datetime.datetime(2012, 7, 2, 10, 42), - resource_metadata={'display_name': 'test-server3', - 'tag': 'self.sample', - 'size': 789, - 'util': 0.95, - 'is_public': True}, - source='test_source')]: - - msg = utils.meter_message_from_counter( - cnt, self.CONF.publisher.telemetry_secret) - self.conn.record_metering_data(msg) - - def test_query_fields_are_optional(self): - data = self.post_json(self.url, params={}) - self.assertEqual(3, len(data.json)) - - def test_query_with_isotime(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - - data = self.post_json(self.url, - params={"filter": - '{">=": {"timestamp": "' - + isotime + '"}}'}) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - result_time = timeutils.parse_isotime(sample_item['timestamp']) - result_time = result_time.replace(tzinfo=None) - self.assertTrue(result_time >= date_time) - - def test_non_admin_tenant_sees_only_its_own_project(self): - data = self.post_json(self.url, - params={}, - headers=non_admin_header) - for sample_item in data.json: - self.assertEqual("project-id1", sample_item['project_id']) - - def test_non_admin_tenant_cannot_query_others_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - expect_errors=True, - headers=non_admin_header) - - self.assertEqual(401, data.status_int) - self.assertIn(b"Not Authorized to access project project-id2", - data.body) - - def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project_id": "project-id1"}}'}, - headers=non_admin_header) - - for sample_item in data.json: - self.assertEqual("project-id1", sample_item['project_id']) - - def test_admin_tenant_sees_every_project(self): - data = self.post_json(self.url, - params={}, - headers=admin_header) - - self.assertEqual(3, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], - (["project-id1", "project-id2", "project-id3"])) - - def test_admin_tenant_sees_every_project_with_complex_filter(self): - filter = ('{"OR": ' + - '[{"=": {"project_id": "project-id1"}}, ' + - '{"=": {"project_id": "project-id2"}}]}') - 
data = self.post_json(self.url, - params={"filter": filter}, - headers=admin_header) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], - (["project-id1", "project-id2"])) - - def test_admin_tenant_sees_every_project_with_in_filter(self): - filter = ('{"In": ' + - '{"project_id": ["project-id1", "project-id2"]}}') - data = self.post_json(self.url, - params={"filter": filter}, - headers=admin_header) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], - (["project-id1", "project-id2"])) - - def test_admin_tenant_can_query_any_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - headers=admin_header) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], set(["project-id2"])) - - def test_query_with_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"project_id": "DESC"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["project-id3", "project-id2", "project-id1"], - [s["project_id"] for s in data.json]) - - def test_query_with_field_name_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project": "project-id2"}}'}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], set(["project-id2"])) - - def test_query_with_field_name_resource(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"resource": "resource-id2"}}'}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['resource_id'], set(["resource-id2"])) - - def test_query_with_wrong_field_name(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"unknown": "resource-id2"}}'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"is not valid under any of the given schemas", - data.body) - - def test_query_with_wrong_json(self): - data = self.post_json(self.url, - params={"filter": - '{"=": "resource": "resource-id2"}}'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Filter expression not valid", data.body) - - def test_query_with_field_name_user(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"user": "user-id2"}}'}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['user_id'], set(["user-id2"])) - - def test_query_with_field_name_meter(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"meter": "meter.test"}}'}) - - self.assertEqual(3, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['meter'], set(["meter.test"])) - - def test_query_with_lower_and_upper_case_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"project_id": "DeSc"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["project-id3", "project-id2", "project-id1"], - [s["project_id"] for s in data.json]) - - def test_query_with_user_field_name_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"user": "aSc"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["user-id1", "user-id2", "user-id3"], - [s["user_id"] for s in data.json]) - - def test_query_with_volume_field_name_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"volume": "deSc"}]'}) - - 
self.assertEqual(3, len(data.json)) - self.assertEqual([3, 2, 1], - [s["volume"] for s in data.json]) - - def test_query_with_missing_order_in_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"project_id": ""}]'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"does not match '(?i)^asc$|^desc$'", data.body) - - def test_query_with_wrong_json_in_orderby(self): - data = self.post_json(self.url, - params={"orderby": '{"project_id": "desc"}]'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Order-by expression not valid: Extra data", data.body) - - def test_filter_with_metadata(self): - data = self.post_json(self.url, - params={"filter": - '{">=": {"metadata.util": 0.5}}'}) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - self.assertTrue(float(sample_item["metadata"]["util"]) >= 0.5) - - def test_filter_with_negation(self): - filter_expr = '{"not": {">=": {"metadata.util": 0.5}}}' - data = self.post_json(self.url, - params={"filter": filter_expr}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertTrue(float(sample_item["metadata"]["util"]) < 0.5) - - def test_limit_must_be_positive(self): - data = self.post_json(self.url, - params={"limit": 0}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Limit must be positive", data.body) - - def test_default_limit(self): - self.CONF.set_override('default_api_return_limit', 1, group='api') - data = self.post_json(self.url, params={}) - self.assertEqual(1, len(data.json)) diff --git a/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py b/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py deleted file mode 100644 index fb633035..00000000 --- a/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py +++ /dev/null @@ -1,193 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test listing raw events. -""" - -import datetime - -import mock -from oslo_utils import timeutils - -from ceilometer.storage import models -from ceilometer.tests.functional.api import v2 - - -class TestComputeDurationByResource(v2.FunctionalTest): - - def setUp(self): - super(TestComputeDurationByResource, self).setUp() - # Create events relative to the range and pretend - # that the intervening events exist. 
- - self.early1 = datetime.datetime(2012, 8, 27, 7, 0) - self.early2 = datetime.datetime(2012, 8, 27, 17, 0) - - self.start = datetime.datetime(2012, 8, 28, 0, 0) - - self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) - self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) - - self.end = datetime.datetime(2012, 8, 28, 23, 59) - - self.late1 = datetime.datetime(2012, 8, 29, 9, 0) - self.late2 = datetime.datetime(2012, 8, 29, 19, 0) - - def _patch_get_interval(self, start, end): - def get_interval(sample_filter, period, groupby, aggregate): - self.assertIsNotNone(sample_filter.start_timestamp) - self.assertIsNotNone(sample_filter.end_timestamp) - if (sample_filter.start_timestamp > end or - sample_filter.end_timestamp < start): - return [] - duration_start = max(sample_filter.start_timestamp, start) - duration_end = min(sample_filter.end_timestamp, end) - duration = timeutils.delta_seconds(duration_start, duration_end) - return [ - models.Statistics( - unit='', - min=0, - max=0, - avg=0, - sum=0, - count=0, - period=None, - period_start=None, - period_end=None, - duration=duration, - duration_start=duration_start, - duration_end=duration_end, - groupby=None, - ) - ] - return mock.patch.object(type(self.conn), 'get_meter_statistics', - side_effect=get_interval) - - def _invoke_api(self): - return self.get_json('/meters/instance/statistics', - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': self.start.isoformat()}, - {'field': 'timestamp', - 'op': 'le', - 'value': self.end.isoformat()}, - {'field': 'search_offset', - 'value': 10}]) - - def test_before_range(self): - with self._patch_get_interval(self.early1, self.early2): - data = self._invoke_api() - self.assertEqual([], data) - - def _assert_times_match(self, actual, expected): - if actual: - actual = timeutils.parse_isotime(actual) - actual = actual.replace(tzinfo=None) - self.assertEqual(expected, actual) - - def test_overlap_range_start(self): - with self._patch_get_interval(self.early1, self.middle1): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.start) - self._assert_times_match(data[0]['duration_end'], self.middle1) - self.assertEqual(8 * 60 * 60, data[0]['duration']) - - def test_within_range(self): - with self._patch_get_interval(self.middle1, self.middle2): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.middle1) - self._assert_times_match(data[0]['duration_end'], self.middle2) - self.assertEqual(10 * 60 * 60, data[0]['duration']) - - def test_within_range_zero_duration(self): - with self._patch_get_interval(self.middle1, self.middle1): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.middle1) - self._assert_times_match(data[0]['duration_end'], self.middle1) - self.assertEqual(0, data[0]['duration']) - - def test_overlap_range_end(self): - with self._patch_get_interval(self.middle2, self.late1): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.middle2) - self._assert_times_match(data[0]['duration_end'], self.end) - self.assertEqual(((6 * 60) - 1) * 60, data[0]['duration']) - - def test_after_range(self): - with self._patch_get_interval(self.late1, self.late2): - data = self._invoke_api() - self.assertEqual([], data) - - def test_without_end_timestamp(self): - statistics = [ - models.Statistics( - unit=None, - count=0, - min=None, - max=None, - avg=None, - duration=None, - duration_start=self.late1, - duration_end=self.late2, - sum=0, - period=None, - period_start=None, - 
period_end=None, - groupby=None, - ) - ] - with mock.patch.object(type(self.conn), 'get_meter_statistics', - return_value=statistics): - data = self.get_json('/meters/instance/statistics', - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': self.late1.isoformat()}, - {'field': 'resource_id', - 'value': 'resource-id'}, - {'field': 'search_offset', - 'value': 10}]) - self._assert_times_match(data[0]['duration_start'], self.late1) - self._assert_times_match(data[0]['duration_end'], self.late2) - - def test_without_start_timestamp(self): - statistics = [ - models.Statistics( - unit=None, - count=0, - min=None, - max=None, - avg=None, - duration=None, - duration_start=self.early1, - duration_end=self.early2, - sum=0, - period=None, - period_start=None, - period_end=None, - groupby=None, - ) - ] - - with mock.patch.object(type(self.conn), 'get_meter_statistics', - return_value=statistics): - data = self.get_json('/meters/instance/statistics', - q=[{'field': 'timestamp', - 'op': 'le', - 'value': self.early2.isoformat()}, - {'field': 'resource_id', - 'value': 'resource-id'}, - {'field': 'search_offset', - 'value': 10}]) - self._assert_times_match(data[0]['duration_start'], self.early1) - self._assert_times_match(data[0]['duration_end'], self.early2) diff --git a/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py b/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py deleted file mode 100644 index 6ed3bdd9..00000000 --- a/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py +++ /dev/null @@ -1,797 +0,0 @@ -# -# Copyright 2012 Red Hat, Inc. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test listing meters. 
-""" - -import base64 -import datetime - -from oslo_serialization import jsonutils -import six -import webtest.app - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.functional.api import v2 - - -class TestListEmptyMeters(v2.FunctionalTest): - - def test_empty(self): - data = self.get_json('/meters') - self.assertEqual([], data) - - -class TestValidateUserInput(v2.FunctionalTest): - - def test_list_meters_query_float_metadata(self): - self.assertRaises(webtest.app.AppError, self.get_json, - '/meters/meter.test', - q=[{'field': 'metadata.util', - 'op': 'eq', - 'value': '0.7.5', - 'type': 'float'}]) - self.assertRaises(webtest.app.AppError, self.get_json, - '/meters/meter.test', - q=[{'field': 'metadata.util', - 'op': 'eq', - 'value': 'abacaba', - 'type': 'boolean'}]) - self.assertRaises(webtest.app.AppError, self.get_json, - '/meters/meter.test', - q=[{'field': 'metadata.util', - 'op': 'eq', - 'value': '45.765', - 'type': 'integer'}]) - - -class TestListMetersRestriction(v2.FunctionalTest): - - def setUp(self): - super(TestListMetersRestriction, self).setUp() - self.CONF.set_override('default_api_return_limit', 3, group='api') - for x in range(5): - for i in range(5): - s = sample.Sample( - 'volume.size%s' % x, - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + - datetime.timedelta(seconds=i)), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_meter_limit(self): - data = self.get_json('/meters?limit=1') - self.assertEqual(1, len(data)) - - def test_meter_limit_negative(self): - self.assertRaises(webtest.app.AppError, - self.get_json, - '/meters?limit=-2') - - def test_meter_limit_bigger(self): - data = self.get_json('/meters?limit=42') - self.assertEqual(5, len(data)) - - def test_meter_default_limit(self): - data = self.get_json('/meters') - self.assertEqual(3, len(data)) - - def test_old_sample_limit(self): - data = self.get_json('/meters/volume.size0?limit=1') - self.assertEqual(1, len(data)) - - def test_old_sample_limit_negative(self): - self.assertRaises(webtest.app.AppError, - self.get_json, - '/meters/volume.size0?limit=-2') - - def test_old_sample_limit_bigger(self): - data = self.get_json('/meters/volume.size0?limit=42') - self.assertEqual(5, len(data)) - - def test_old_sample_default_limit(self): - data = self.get_json('/meters/volume.size0') - self.assertEqual(3, len(data)) - - def test_sample_limit(self): - data = self.get_json('/samples?limit=1') - self.assertEqual(1, len(data)) - - def test_sample_limit_negative(self): - self.assertRaises(webtest.app.AppError, - self.get_json, - '/samples?limit=-2') - - def test_sample_limit_bigger(self): - data = self.get_json('/samples?limit=42') - self.assertEqual(25, len(data)) - - def test_sample_default_limit(self): - data = self.get_json('/samples') - self.assertEqual(3, len(data)) - - -class TestListMeters(v2.FunctionalTest): - - def setUp(self): - super(TestListMeters, self).setUp() - self.messages = [] - for cnt in [ - sample.Sample( - 'meter.test', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - 'size': 123, - 'util': 0.75, - 'is_public': True}, - source='test_source'), 
- sample.Sample( - 'meter.test', - 'cumulative', - '', - 3, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 11, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample1', - 'size': 0, - 'util': 0.47, - 'is_public': False}, - source='test_source'), - sample.Sample( - 'meter.mine', - 'gauge', - '', - 1, - 'user-id', - 'project-id', - 'resource-id2', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - 'size': 456, - 'util': 0.64, - 'is_public': False}, - source='test_source'), - sample.Sample( - 'meter.test', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id3', - timestamp=datetime.datetime(2012, 7, 2, 10, 42), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample3', - 'size': 0, - 'util': 0.75, - 'is_public': False}, - source='test_source'), - sample.Sample( - 'meter.test.new', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample3', - 'size': 0, - 'util': 0.75, - 'is_public': False}, - source='test_source'), - - sample.Sample( - 'meter.mine', - 'gauge', - '', - 1, - 'user-id4', - 'project-id2', - 'resource-id4', - timestamp=datetime.datetime(2012, 7, 2, 10, 43), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample4', - 'properties': { - 'prop_1': 'prop_value', - 'prop_2': {'sub_prop_1': - 'sub_prop_value'}, - 'prop.3': {'$sub_prop.2': - 'sub_prop_value2'} - }, - 'size': 0, - 'util': 0.58, - 'is_public': True}, - source='test_source1'), - sample.Sample( - u'meter.accent\xe9\u0437', - 'gauge', - '', - 1, - 'user-id4', - 'project-id2', - 'resource-id4', - timestamp=datetime.datetime(2014, 7, 2, 10, 43), - resource_metadata={}, - source='test_source1')]: - msg = utils.meter_message_from_counter( - cnt, self.CONF.publisher.telemetry_secret) - self.messages.append(msg) - self.conn.record_metering_data(msg) - - def test_list_meters(self): - data = self.get_json('/meters') - self.assertEqual(6, len(data)) - self.assertEqual(set(['resource-id', - 'resource-id2', - 'resource-id3', - 'resource-id4']), - set(r['resource_id'] for r in data)) - self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', - u'meter.accent\xe9\u0437']), - set(r['name'] for r in data)) - self.assertEqual(set(['test_source', 'test_source1']), - set(r['source'] for r in data)) - - def test_list_unique_meters(self): - data = self.get_json('/meters?unique=True') - self.assertEqual(4, len(data)) - self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', - u'meter.accent\xe9\u0437']), - set(r['name'] for r in data)) - - def test_meters_query_with_timestamp(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - resp = self.get_json('/meters', - q=[{'field': 'timestamp', - 'op': 'gt', - 'value': isotime}], - expect_errors=True) - self.assertEqual(400, resp.status_code) - self.assertEqual('Unknown argument: "timestamp": ' - 'not valid for this resource', - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_list_samples(self): - data = self.get_json('/samples') - self.assertEqual(7, len(data)) - - def test_query_samples_with_invalid_field_name_and_non_eq_operator(self): - resp = self.get_json('/samples', - q=[{'field': 'non_valid_field_name', - 'op': 'gt', - 'value': 3}], - expect_errors=True) - resp_string = 
jsonutils.loads(resp.body) - fault_string = resp_string['error_message']['faultstring'] - msg = ('Unknown argument: "non_valid_field_name"' - ': unrecognized field in query: ' - '[= res['first_sample_timestamp']) - self.assertIn('last_sample_timestamp', res) - self.assertTrue(last.isoformat() <= res['last_sample_timestamp']) - - def test_instance_no_metadata(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 40) - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=timestamp, - resource_metadata=None, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - self.assertEqual(1, len(data)) - self._verify_resource_timestamps(data[0], timestamp, timestamp) - - def test_instances(self): - timestamps = { - 'resource-id': datetime.datetime(2012, 7, 2, 10, 40), - 'resource-id-alternate': datetime.datetime(2012, 7, 2, 10, 41), - } - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=timestamps['resource-id'], - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id-alternate', - timestamp=timestamps['resource-id-alternate'], - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources') - self.assertEqual(2, len(data)) - for res in data: - timestamp = timestamps.get(res['resource_id']) - self._verify_resource_timestamps(res, timestamp, timestamp) - - def test_instance_multiple_samples(self): - timestamps = [ - datetime.datetime(2012, 7, 2, 10, 41), - datetime.datetime(2012, 7, 2, 10, 42), - datetime.datetime(2012, 7, 2, 10, 40), - ] - for timestamp in timestamps: - datapoint = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=timestamp, - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample-%s' % timestamp, - }, - source='test', - ) - msg = utils.meter_message_from_counter( - datapoint, - self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - self.assertEqual(1, len(data)) - self._verify_resource_timestamps(data[0], - timestamps[-1], timestamps[1]) - - def test_instances_one(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='test', - ) - msg2 = 
utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources/resource-id') - self.assertEqual('resource-id', data['resource_id']) - - def test_with_source(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', q=[{'field': 'source', - 'value': 'test_list_resources', - }]) - ids = [r['resource_id'] for r in data] - self.assertEqual(['resource-id'], ids) - sources = [r['source'] for r in data] - self.assertEqual(['test_list_resources'], sources) - - def test_resource_id_with_slash(self): - s = sample.Sample( - 'storage.containers.objects', - 'gauge', - '', - 1, - '19fbed01c21f4912901057021b9e7111', - '45acc90399134206b3b41f3d3a0a06d6', - '29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance', - timestamp=datetime.datetime(2012, 7, 2, 10, 40).isoformat(), - resource_metadata={}, - source='test_show_special_resource', - ) - - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - msg['timestamp'] = datetime.datetime(2012, 7, 2, 10, 40) - self.conn.record_metering_data(msg) - - rid_encoded = '29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb%252Fglance' - resp = self.get_json('/resources/%s' % rid_encoded) - self.assertEqual("19fbed01c21f4912901057021b9e7111", resp["user_id"]) - self.assertEqual('29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance', - resp["resource_id"]) - - def test_with_invalid_resource_id(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id-1', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id', - 'resource-id-2', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='test_list_resources', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - resp1 = self.get_json('/resources/resource-id-1') - self.assertEqual("resource-id-1", resp1["resource_id"]) - - resp2 = self.get_json('/resources/resource-id-2') - self.assertEqual("resource-id-2", resp2["resource_id"]) - - resp3 = self.get_json('/resources/resource-id-3', expect_errors=True) - self.assertEqual(404, resp3.status_code) - json_data = resp3.body - if six.PY3: - json_data = json_data.decode('utf-8') - self.assertEqual("Resource 
resource-id-3 Not Found", - json.loads(json_data)['error_message'] - ['faultstring']) - - def test_with_user(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', q=[{'field': 'user_id', - 'value': 'user-id', - }]) - ids = [r['resource_id'] for r in data] - self.assertEqual(['resource-id'], ids) - - def test_with_project(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', q=[{'field': 'project_id', - 'value': 'project-id', - }]) - ids = [r['resource_id'] for r in data] - self.assertEqual(['resource-id'], ids) - - def test_with_user_non_admin(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample1', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', - headers={"X-Roles": "Member", - "X-Project-Id": "project-id2"}) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-id-alternate']), ids) - - def test_with_user_wrong_tenant(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample1', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', - headers={"X-Roles": "Member", - "X-Project-Id": "project-wrong"}) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(), ids) - - def test_metadata(self): - sample1 = sample.Sample( - 'instance', - 
'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - 'dict_properties': {'key.$1': {'$key': 'val'}}, - 'not_ignored_list': ['returned'], - }, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - metadata = data[0]['metadata'] - self.assertEqual([(u'dict_properties.key:$1:$key', u'val'), - (u'display_name', u'test-server'), - (u'not_ignored_list', u"['returned']"), - (u'tag', u'self.sample')], - list(sorted(six.iteritems(metadata)))) - - def test_resource_meter_links(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - links = data[0]['links'] - self.assertEqual(2, len(links)) - self.assertEqual('self', links[0]['rel']) - self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') - in links[0]['href']) - self.assertEqual('instance', links[1]['rel']) - self.assertTrue((self.PATH_PREFIX + '/meters/instance?' - 'q.field=resource_id&q.value=resource-id') - in links[1]['href']) - - def test_resource_skip_meter_links(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources?meter_links=0') - links = data[0]['links'] - self.assertEqual(len(links), 1) - self.assertEqual(links[0]['rel'], 'self') - self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') - in links[0]['href']) - - -class TestListResourcesRestriction(v2.FunctionalTest): - def setUp(self): - super(TestListResourcesRestriction, self).setUp() - self.CONF.set_override('default_api_return_limit', 10, group='api') - for i in range(20): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id%s' % i, - timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + - datetime.timedelta(seconds=i)), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_resource_limit(self): - data = self.get_json('/resources?limit=1') - self.assertEqual(1, len(data)) - - def test_resource_limit_negative(self): - self.assertRaises(webtest.app.AppError, self.get_json, - '/resources?limit=-2') - - def test_resource_limit_bigger(self): - data = self.get_json('/resources?limit=42') - self.assertEqual(20, len(data)) - - def test_resource_default_limit(self): - data = self.get_json('/resources') - self.assertEqual(10, len(data)) diff --git a/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py 
b/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py deleted file mode 100644 index 7134a8ca..00000000 --- a/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py +++ /dev/null @@ -1,156 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test listing raw samples. -""" - -import datetime - -import mock -from oslo_utils import timeutils -import six - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.functional.api import v2 - - -class TestListSamples(v2.FunctionalTest): - - def setUp(self): - super(TestListSamples, self).setUp() - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42) - self.sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - 'dict_properties': {'key': 'value'}, - 'not_ignored_list': ['returned'], - }, - source='test_source', - ) - msg = utils.meter_message_from_counter( - self.sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - self.sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='source2', - ) - msg2 = utils.meter_message_from_counter( - self.sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - def test_all(self): - data = self.get_json('/meters/instance') - self.assertEqual(2, len(data)) - for s in data: - self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at']) - - def test_all_trailing_slash(self): - data = self.get_json('/meters/instance/') - self.assertEqual(2, len(data)) - - def test_empty_project(self): - data = self.get_json('/meters/instance', - q=[{'field': 'project_id', - 'value': 'no-such-project', - }]) - self.assertEqual([], data) - - def test_by_project(self): - data = self.get_json('/meters/instance', - q=[{'field': 'project_id', - 'value': 'project1', - }]) - self.assertEqual(1, len(data)) - - def test_empty_resource(self): - data = self.get_json('/meters/instance', - q=[{'field': 'resource_id', - 'value': 'no-such-resource', - }]) - self.assertEqual([], data) - - def test_by_resource(self): - data = self.get_json('/meters/instance', - q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - self.assertEqual(1, len(data)) - - def test_empty_source(self): - data = self.get_json('/meters/instance', - q=[{'field': 'source', - 'value': 'no-such-source', - }]) - self.assertEqual(0, len(data)) - - def test_by_source(self): - data = self.get_json('/meters/instance', - q=[{'field': 'source', - 'value': 
'test_source', - }]) - self.assertEqual(1, len(data)) - - def test_empty_user(self): - data = self.get_json('/meters/instance', - q=[{'field': 'user_id', - 'value': 'no-such-user', - }]) - self.assertEqual([], data) - - def test_by_user(self): - data = self.get_json('/meters/instance', - q=[{'field': 'user_id', - 'value': 'user-id', - }]) - self.assertEqual(1, len(data)) - - def test_metadata(self): - data = self.get_json('/meters/instance', - q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - sample = data[0] - self.assertIn('resource_metadata', sample) - self.assertEqual( - [('dict_properties.key', 'value'), - ('display_name', 'test-server'), - ('not_ignored_list', "['returned']"), - ('tag', 'self.sample'), - ], - list(sorted(six.iteritems(sample['resource_metadata'])))) diff --git a/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py b/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py deleted file mode 100644 index 033f2925..00000000 --- a/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py +++ /dev/null @@ -1,367 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test listing raw events. -""" - -import copy -import datetime -import os - -import mock -from oslo_utils import fileutils -from oslo_utils import timeutils -from oslotest import mockpatch -import six - -from ceilometer.tests.functional.api import v2 - - -class TestPostSamples(v2.FunctionalTest): - def fake_notifier_sample(self, ctxt, event_type, payload): - samples = payload['samples'] - for m in samples: - del m['message_signature'] - self.published.append(samples) - - def _make_app(self, enable_acl=False): - content = ('{"context_is_project": "project_id:%(project_id)s",' - '"default" : "!",' - '"telemetry:create_samples": ""}') - if six.PY3: - content = content.encode('utf-8') - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='policy', - suffix='.json') - self.CONF.set_override("policy_file", self.tempfile, - group='oslo_policy') - return super(TestPostSamples, self)._make_app() - - def tearDown(self): - os.remove(self.tempfile) - super(TestPostSamples, self).tearDown() - - def setUp(self): - self.published = [] - notifier = mock.Mock() - notifier.sample.side_effect = self.fake_notifier_sample - self.useFixture(mockpatch.Patch('oslo_messaging.Notifier', - return_value=notifier)) - super(TestPostSamples, self).setUp() - - def test_one(self): - s1 = [{'counter_name': 'apples', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - data = self.post_json('/meters/apples/', s1) - - # timestamp not given so it is generated. 
- s1[0]['timestamp'] = data.json[0]['timestamp'] - # Ignore message id that is randomly generated - s1[0]['message_id'] = data.json[0]['message_id'] - # source is generated if not provided. - s1[0]['source'] = '%s:openstack' % s1[0]['project_id'] - - self.assertEqual(s1, data.json) - self.assertEqual(s1[0], self.published[0][0]) - - def test_nested_metadata(self): - s1 = [{'counter_name': 'apples', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_metadata': {'nest.name1': 'value1', - 'name2': 'value2', - 'nest.name2': 'value3'}}] - - data = self.post_json('/meters/apples/', s1) - - # timestamp not given so it is generated. - s1[0]['timestamp'] = data.json[0]['timestamp'] - # Ignore message id that is randomly generated - s1[0]['message_id'] = data.json[0]['message_id'] - # source is generated if not provided. - s1[0]['source'] = '%s:openstack' % s1[0]['project_id'] - - unwound = copy.copy(s1[0]) - unwound['resource_metadata'] = {'nest': {'name1': 'value1', - 'name2': 'value3'}, - 'name2': 'value2'} - # only the published sample should be unwound, not the representation - # in the API response - self.assertEqual(s1[0], data.json[0]) - self.assertEqual(unwound, self.published[0][0]) - - def test_invalid_counter_type(self): - s1 = [{'counter_name': 'my_counter_name', - 'counter_type': 'INVALID_TYPE', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'source': 'closedstack', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - - data = self.post_json('/meters/my_counter_name/', s1, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertEqual(0, len(self.published)) - - def test_messsage_id_provided(self): - """Do not accept sample with message_id.""" - s1 = [{'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'message_id': 'evil', - 'source': 'closedstack', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - - data = self.post_json('/meters/my_counter_name/', s1, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertEqual(0, len(self.published)) - - def test_wrong_project_id(self): - """Do not accept cross posting samples to different projects.""" - s1 = [{'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'source': 'closedstack', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - - data = self.post_json('/meters/my_counter_name/', s1, - expect_errors=True, - headers={ - "X-Roles": "Member", - "X-Tenant-Name": "lu-tenant", - "X-Project-Id": - "bc23a9d531064583ace8f67dad60f6bb", - }) - - self.assertEqual(400, data.status_int) - self.assertEqual(0, len(self.published)) - - def test_multiple_samples(self): - """Send multiple samples. 
- - The usecase here is to reduce the chatter and send the counters - at a slower cadence. - """ - samples = [] - for x in range(6): - dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None) - s = {'counter_name': 'apples', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': float(x * 3), - 'source': 'evil', - 'timestamp': dt.isoformat(), - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_metadata': {'name1': str(x), - 'name2': str(x + 4)}} - samples.append(s) - - data = self.post_json('/meters/apples/', samples) - - for x, s in enumerate(samples): - # source is modified to include the project_id. - s['source'] = '%s:%s' % (s['project_id'], - s['source']) - # Ignore message id that is randomly generated - s['message_id'] = data.json[x]['message_id'] - - # remove tzinfo to compare generated timestamp - # with the provided one - c = data.json[x] - timestamp = timeutils.parse_isotime(c['timestamp']) - c['timestamp'] = timestamp.replace(tzinfo=None).isoformat() - - # do the same on the pipeline - msg = self.published[0][x] - timestamp = timeutils.parse_isotime(msg['timestamp']) - msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat() - - self.assertEqual(s, c) - self.assertEqual(s, self.published[0][x]) - - def test_missing_mandatory_fields(self): - """Do not accept posting samples with missing mandatory fields.""" - s1 = [{'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'source': 'closedstack', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - - # one by one try posting without a mandatory field. - for m in ['counter_volume', 'counter_unit', 'counter_type', - 'resource_id', 'counter_name']: - s_broke = copy.copy(s1) - del s_broke[0][m] - print('posting without %s' % m) - data = self.post_json('/meters/my_counter_name', s_broke, - expect_errors=True) - self.assertEqual(400, data.status_int) - - def test_multiple_project_id_and_admin(self): - """Allow admin is allowed to set multiple project_id.""" - s1 = [{'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'source': 'closedstack', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - }, - {'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 2, - 'source': 'closedstack', - 'project_id': '4af38dca-f6fc-11e2-94f5-14dae9283f29', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - data = self.post_json('/meters/my_counter_name/', s1, - headers={"X-Roles": "admin"}) - - self.assertEqual(201, data.status_int) - for x, s in enumerate(s1): - # source is modified to include the project_id. - s['source'] = '%s:%s' % (s['project_id'], - 'closedstack') - # Ignore message id that is randomly generated - s['message_id'] = data.json[x]['message_id'] - # timestamp not given so it is generated. 
- s['timestamp'] = data.json[x]['timestamp'] - s.setdefault('resource_metadata', dict()) - self.assertEqual(s, data.json[x]) - self.assertEqual(s, self.published[0][x]) - - def test_multiple_samples_multiple_sources(self): - """Test posting with special conditions. - - Do accept a single post with some multiples sources with some of them - null. - """ - s1 = [{'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'source': 'paperstack', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - }, - {'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 5, - 'source': 'waterstack', - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - }, - {'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 2, - 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68', - 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - data = self.post_json('/meters/my_counter_name/', s1, - expect_errors=True) - self.assertEqual(201, data.status_int) - for x, s in enumerate(s1): - # source is modified to include the project_id. - s['source'] = '%s:%s' % ( - s['project_id'], - s.get('source', self.CONF.sample_source) - ) - # Ignore message id that is randomly generated - s['message_id'] = data.json[x]['message_id'] - # timestamp not given so it is generated. - s['timestamp'] = data.json[x]['timestamp'] - s.setdefault('resource_metadata', dict()) - self.assertEqual(s, data.json[x]) - self.assertEqual(s, self.published[0][x]) - - def test_missing_project_user_id(self): - """Ensure missing project & user IDs are defaulted appropriately.""" - s1 = [{'counter_name': 'my_counter_name', - 'counter_type': 'gauge', - 'counter_unit': 'instance', - 'counter_volume': 1, - 'source': 'closedstack', - 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - 'resource_metadata': {'name1': 'value1', - 'name2': 'value2'}}] - - project_id = 'bc23a9d531064583ace8f67dad60f6bb' - user_id = 'fd87807-12d2-4b38-9c70-5f5c2ac427ff' - data = self.post_json('/meters/my_counter_name/', s1, - expect_errors=True, - headers={ - 'X-Roles': 'chief-bottle-washer', - 'X-Project-Id': project_id, - 'X-User-Id': user_id, - }) - - self.assertEqual(201, data.status_int) - for x, s in enumerate(s1): - # source is modified to include the project_id. - s['source'] = '%s:%s' % (project_id, - s['source']) - # Ignore message id that is randomly generated - s['message_id'] = data.json[x]['message_id'] - # timestamp not given so it is generated. 
- s['timestamp'] = data.json[x]['timestamp'] - s['user_id'] = user_id - s['project_id'] = project_id - - self.assertEqual(s, data.json[x]) - self.assertEqual(s, self.published[0][x]) diff --git a/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py b/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py deleted file mode 100644 index cafa1c80..00000000 --- a/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py +++ /dev/null @@ -1,1693 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test events statistics retrieval.""" - -import datetime - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests import db as tests_db -from ceilometer.tests.functional.api import v2 - - -class TestMaxProjectVolume(v2.FunctionalTest): - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestMaxProjectVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id-%s' % i, - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(3, data[0]['count']) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }, - ]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(5, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }, - ]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:32:00', - }, - ]) - self.assertEqual(6, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - -class 
TestMaxResourceVolume(v2.FunctionalTest): - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestMaxResourceVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(3, data[0]['count']) - - def test_no_time_bounds_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'resource_id', - 'value': 'resource-id'}], - period=3600) - self.assertEqual(3, len(data)) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T12:32:00', - u'2012-09-25T11:31:00']), - set(x['duration_start'] for x in data)) - self.assertEqual(3600, data[0]['period']) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T11:30:00', - u'2012-09-25T12:30:00']), - set(x['period_start'] for x in data)) - - def test_period_with_negative_value(self): - resp = self.get_json(self.PATH, expect_errors=True, - q=[{'field': 'resource_id', - 'value': 'resource-id'}], - period=-1) - self.assertEqual(400, resp.status_code) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase') - def test_period_with_large_value(self): - resp = self.get_json(self.PATH, expect_errors=True, - q=[{'field': 'user_id', - 'value': 'user-id'}], - period=10000000000000) - self.assertEqual(400, resp.status_code) - self.assertIn(b"Invalid period", resp.body) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }, - ]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(5, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }, - ]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:32:00', - }, - ]) - self.assertEqual(6, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - -class TestSumProjectVolume(v2.FunctionalTest): - - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestSumProjectVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 
'resource-id-%s' % i, - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }]) - expected = 5 + 6 + 7 - self.assertEqual(expected, data[0]['sum']) - self.assertEqual(3, data[0]['count']) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - ]) - expected = 6 + 7 - self.assertEqual(expected, data[0]['sum']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }, - ]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(5, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }, - ]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:32:00', - }, - ]) - self.assertEqual(6, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - -class TestSumResourceVolume(v2.FunctionalTest): - - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestSumResourceVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - self.assertEqual(5 + 6 + 7, data[0]['sum']) - self.assertEqual(3, data[0]['count']) - - def test_no_time_bounds_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'resource_id', - 'value': 'resource-id'}], - period=1800) - self.assertEqual(3, len(data)) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T12:32:00', - u'2012-09-25T11:31:00']), - set(x['duration_start'] for x in data)) - self.assertEqual(1800, data[0]['period']) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T11:30:00', - u'2012-09-25T12:30:00']), - set(x['period_start'] for x in data)) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }]) - self.assertEqual(6 + 7, 
data[0]['sum']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'resource_id', - 'value': 'resource-id'}, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T10:15:00'}], - period=7200) - self.assertEqual(2, len(data)) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T12:32:00']), - set(x['duration_start'] for x in data)) - self.assertEqual(7200, data[0]['period']) - self.assertEqual(set([u'2012-09-25T10:15:00', - u'2012-09-25T12:15:00']), - set(x['period_start'] for x in data)) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }]) - self.assertEqual(5, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'lt', - 'value': '2012-09-25T11:32:00', - }]) - self.assertEqual(6, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - -class TestGroupByInstance(v2.FunctionalTest): - - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestGroupByInstance, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-2', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-3', 'project': 'project-1', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-3'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_CUMULATIVE, - unit='s', - volume=test_sample['volume'], - user_id=test_sample['user'], - 
project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_group_by_user(self): - data = self.get_json(self.PATH, groupby=['user_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['user_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'user_id': 'user-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-2'}: - self.assertEqual(4, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(8, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_resource(self): - data = self.get_json(self.PATH, groupby=['resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'resource_id': 'resource-1'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-2'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_project(self): - data = self.get_json(self.PATH, groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(5, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(10, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - 
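
The group-by and timestamp-bound tests in this file all go through the functional-test helper get_json(); the same statistics are reachable over plain HTTP. Below is a minimal sketch, assuming a v2 API listening on localhost:8777 and the third-party requests library (both assumptions, not part of this change); the q.field/q.op/q.value and groupby parameter names are the ones exercised by the gabbi tests further down in this patch.

    import requests

    BASE = 'http://localhost:8777/v2'   # hypothetical endpoint

    # One filter triple, as in the gabbi meters tests below:
    # statistics for project1 only.
    params = {'q.field': 'project_id', 'q.op': 'eq', 'q.value': 'project1'}
    stats = requests.get(BASE + '/meters/volume.size/statistics',
                         params=params).json()
    for s in stats:
        print(s['count'], s['min'], s['max'], s['sum'], s['avg'])

    # groupby returns one statistics document per distinct value,
    # mirroring the per-project assertions in this class.
    grouped = requests.get(BASE + '/meters/instance/statistics',
                           params={'groupby': 'project_id'}).json()
    for s in grouped:
        print(s['groupby'], s['count'], s['avg'])
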
self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(3, r['avg']) - - def test_group_by_unknown_field(self): - response = self.get_json(self.PATH, - expect_errors=True, - groupby=['wtf']) - self.assertEqual(400, response.status_code) - - def test_group_by_multiple_regular(self): - data = self.get_json(self.PATH, groupby=['user_id', 'resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', - 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'user_id': 'user-1', - 'resource_id': 'resource-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-2', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-2', - 'resource_id': 'resource-2'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-3', - 'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - else: - self.assertNotEqual(grp, {'user_id': 'user-1', - 'resource_id': 'resource-2'}) - self.assertNotEqual(grp, {'user_id': 'user-1', - 'resource_id': 'resource-3'}) - self.assertNotEqual(grp, {'user_id': 'user-2', - 'resource_id': 'resource-3'}) - self.assertNotEqual(grp, {'user_id': 'user-3', - 'resource_id': 'resource-1'}) - self.assertNotEqual(grp, {'user_id': 'user-3', - 'resource_id': 'resource-2'}) - - def test_group_by_with_query_filter(self): - data = self.get_json(self.PATH, - q=[{'field': 'project_id', - 'op': 'eq', - 'value': 'project-1'}], - groupby=['resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'resource_id': 'resource-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - elif grp == {'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def 
test_group_by_with_query_filter_multiple(self): - data = self.get_json(self.PATH, - q=[{'field': 'user_id', - 'op': 'eq', - 'value': 'user-2'}, - {'field': 'source', - 'op': 'eq', - 'value': 'source-1'}], - groupby=['project_id', 'resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2', - 'resource-1', 'resource-2']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-1', - 'resource_id': 'resource-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - elif grp == {'project_id': 'project-2', - 'resource_id': 'resource-2'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - else: - self.assertNotEqual(grp, {'project_id': 'project-2', - 'resource_id': 'resource-1'}) - - def test_group_by_with_period(self): - data = self.get_json(self.PATH, - groupby=['project_id'], - period=7200) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T10:11:00', - u'2013-08-01T14:11:00', - u'2013-08-01T16:11:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T10:11:00'): - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(4260, r['duration']) - self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) - self.assertEqual(u'2013-08-01T11:22:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:11:00'): - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(4260, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T14:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, 
r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T16:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T16:11:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T10:11:00']) - - def test_group_by_with_query_filter_and_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'source', - 'op': 'eq', - 'value': 'source-1'}], - groupby=['project_id'], - period=7200) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T10:11:00', - u'2013-08-01T14:11:00', - u'2013-08-01T16:11:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T10:11:00'): - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - self.assertEqual(1740, r['duration']) - self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) - self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T16:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) - self.assertEqual(7200, r['period']) - 
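
The period assertions above follow a simple rule: buckets are period seconds wide, anchored at the query's start timestamp when one is given and otherwise at the earliest matching sample; period_end is period_start plus the period, while duration_start and duration_end are the first and last samples actually seen inside the bucket. A small sketch of that arithmetic for the project-1 samples used here (a reconstruction of the expected values, not the storage drivers' code):

    from datetime import datetime, timedelta

    period = timedelta(seconds=7200)
    anchor = datetime(2013, 8, 1, 10, 11)          # earliest matching sample
    samples = [datetime(2013, 8, 1, 10, 11),
               datetime(2013, 8, 1, 10, 40),
               datetime(2013, 8, 1, 11, 22)]

    # All three samples fall into the first 2-hour bucket.
    bucket = max((ts - anchor) // period for ts in samples)    # 0
    period_start = anchor + bucket * period                    # 10:11:00
    period_end = period_start + period                         # 12:11:00
    duration = (max(samples) - min(samples)).total_seconds()   # 4260.0
    print(period_start, period_end, duration)
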
self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T16:11:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T10:11:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T14:11:00']) - - def test_group_by_start_timestamp_after(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T17:28:01'}], - groupby=['project_id']) - self.assertEqual([], data) - - def test_group_by_end_timestamp_before(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T10:10:59'}], - groupby=['project_id']) - self.assertEqual([], data) - - def test_group_by_start_timestamp(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T14:58:00'}], - groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(3, r['avg']) - - def test_group_by_end_timestamp(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T11:45:00'}], - groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - - def test_group_by_start_end_timestamp(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T08:17:03'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T23:59:59'}], - groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(5, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(10, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - 
self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(3, r['avg']) - - def test_group_by_start_end_timestamp_with_query_filter(self): - data = self.get_json(self.PATH, - q=[{'field': 'project_id', - 'op': 'eq', - 'value': 'project-1'}, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T11:01:00'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T20:00:00'}], - groupby=['resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'resource_id': 'resource-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_start_end_timestamp_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T14:00:00'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T17:00:00'}], - groupby=['project_id'], - period=3600) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T14:00:00', - u'2013-08-01T15:00:00', - u'2013-08-01T16:00:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) - self.assertEqual(3600, r['period']) - self.assertEqual(u'2013-08-01T15:00:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T16:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T16:10:00', r['duration_start']) - self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) - self.assertEqual(3600, r['period']) - self.assertEqual(u'2013-08-01T17:00:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T15:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - 
self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) - self.assertEqual(3600, r['period']) - self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T15:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T14:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T16:00:00']) - - def test_group_by_start_end_timestamp_with_query_filter_and_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'source', - 'op': 'eq', - 'value': 'source-1'}, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T10:00:00'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T18:00:00'}], - groupby=['project_id'], - period=7200) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T10:00:00', - u'2013-08-01T14:00:00', - u'2013-08-01T16:00:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T10:00:00'): - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - self.assertEqual(1740, r['duration']) - self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) - self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T12:00:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T16:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T18:00:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T16:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T10:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T14:00:00']) - - -@tests_db.run_with('mongodb', 'hbase') 
-class TestGroupBySource(v2.FunctionalTest): - - # FIXME(terriyu): We have to put test_group_by_source in its own class - # because SQLAlchemy currently doesn't support group by source statistics. - # When group by source is supported in SQLAlchemy, this test should be - # moved to TestGroupByInstance with all the other group by statistics - # tests. - - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestGroupBySource, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-2', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-3', 'project': 'project-1', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-3'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_CUMULATIVE, - unit='s', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def tearDown(self): - self.conn.clear() - super(TestGroupBySource, self).tearDown() - - def test_group_by_source(self): - data = self.get_json(self.PATH, groupby=['source']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['source']), groupby_keys_set) - self.assertEqual(set(['source-1', 'source-2', 'source-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'source': 'source-1'}: - self.assertEqual(4, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(8, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'source': 'source-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'source': 
'source-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - -class TestSelectableAggregates(v2.FunctionalTest): - - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestSelectableAggregates, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 5, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 9, 'user': 'user-3', 'project': 'project-3', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', - 'source': 'source'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_GAUGE, - unit='instance', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def _do_test_per_tenant_selectable_standard_aggregate(self, - aggregate, - expected_values): - agg_args = {'aggregate.func': aggregate} - data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - projects = ['project-1', 'project-2', 'project-3'] - self.assertEqual(set(projects), groupby_vals_set) - - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - for r in data: - grp = r['groupby'] - for project in projects: - if grp == {'project_id': project}: - expected = expected_values[projects.index(project)] - self.assertEqual('instance', r['unit']) - self.assertAlmostEqual(r[aggregate], expected) - self.assertIn('aggregate', r) - 
self.assertIn(aggregate, r['aggregate']) - self.assertAlmostEqual(r['aggregate'][aggregate], expected) - for a in standard_aggregates - set([aggregate]): - self.assertNotIn(a, r) - - def test_per_tenant_selectable_max(self): - self._do_test_per_tenant_selectable_standard_aggregate('max', - [5, 4, 9]) - - def test_per_tenant_selectable_min(self): - self._do_test_per_tenant_selectable_standard_aggregate('min', - [2, 1, 9]) - - def test_per_tenant_selectable_sum(self): - self._do_test_per_tenant_selectable_standard_aggregate('sum', - [9, 9, 9]) - - def test_per_tenant_selectable_avg(self): - self._do_test_per_tenant_selectable_standard_aggregate('avg', - [3, 2.25, 9]) - - def test_per_tenant_selectable_count(self): - self._do_test_per_tenant_selectable_standard_aggregate('count', - [3, 4, 1]) - - def test_per_tenant_selectable_parameterized_aggregate(self): - agg_args = {'aggregate.func': 'cardinality', - 'aggregate.param': 'resource_id'} - data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - projects = ['project-1', 'project-2', 'project-3'] - self.assertEqual(set(projects), groupby_vals_set) - - aggregate = 'cardinality/resource_id' - expected_values = [2.0, 3.0, 1.0] - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - for r in data: - grp = r['groupby'] - for project in projects: - if grp == {'project_id': project}: - expected = expected_values[projects.index(project)] - self.assertEqual('instance', r['unit']) - self.assertNotIn(aggregate, r) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_large_quantum_selectable_parameterized_aggregate(self): - # add a large number of datapoints that won't impact on cardinality - # if the computation logic is tolerant of different DB behavior on - # larger numbers of samples per-period - for i in range(200): - s = sample.Sample( - 'instance', - sample.TYPE_GAUGE, - unit='instance', - volume=i * 1.0, - user_id='user-1', - project_id='project-1', - resource_id='resource-1', - timestamp=datetime.datetime(2013, 8, 1, 11, i % 60), - resource_metadata={'flavor': 'm1.tiny', - 'event': 'event-1', }, - source='source', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - agg_args = {'aggregate.func': 'cardinality', - 'aggregate.param': 'resource_id'} - data = self.get_json(self.PATH, **agg_args) - - aggregate = 'cardinality/resource_id' - expected_value = 5.0 - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - r = data[0] - self.assertNotIn(aggregate, r) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_value, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_repeated_unparameterized_aggregate(self): - agg_params = 'aggregate.func=count&aggregate.func=count' - data = self.get_json(self.PATH, override_params=agg_params) - - aggregate = 'count' - expected_value = 8.0 - standard_aggregates = set(['min', 'max', 'sum', 'avg']) - r = data[0] - self.assertIn(aggregate, r) - self.assertEqual(expected_value, r[aggregate]) - self.assertIn('aggregate', r) - 
self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_value, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_fully_repeated_parameterized_aggregate(self): - agg_params = ('aggregate.func=cardinality&' - 'aggregate.param=resource_id&' - 'aggregate.func=cardinality&' - 'aggregate.param=resource_id&') - data = self.get_json(self.PATH, override_params=agg_params) - - aggregate = 'cardinality/resource_id' - expected_value = 5.0 - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - r = data[0] - self.assertIn('aggregate', r) - self.assertNotIn(aggregate, r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_value, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_partially_repeated_parameterized_aggregate(self): - agg_params = ('aggregate.func=cardinality&' - 'aggregate.param=resource_id&' - 'aggregate.func=cardinality&' - 'aggregate.param=project_id&') - data = self.get_json(self.PATH, override_params=agg_params) - - expected_values = {'cardinality/resource_id': 5.0, - 'cardinality/project_id': 3.0} - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - r = data[0] - self.assertIn('aggregate', r) - for aggregate in expected_values.keys(): - self.assertNotIn(aggregate, r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_values[aggregate], - r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_bad_selectable_parameterized_aggregate(self): - agg_args = {'aggregate.func': 'cardinality', - 'aggregate.param': 'injection_attack'} - resp = self.get_json(self.PATH, status=[400], - groupby=['project_id'], **agg_args) - self.assertIn('error_message', resp) - self.assertEqual(resp['error_message'].get('faultcode'), - 'Client') - self.assertEqual(resp['error_message'].get('faultstring'), - 'Bad aggregate: cardinality.injection_attack') - - -@tests_db.run_with('mongodb', 'hbase') -class TestUnparameterizedAggregates(v2.FunctionalTest): - - # We put the stddev test case in a separate class so that we - # can easily exclude the sqlalchemy scenario, as sqlite doesn't - # support the stddev_pop function and fails ungracefully with - # OperationalError when it is used. However we still want to - # test the corresponding functionality in the mongo driver. - # For hbase, the skip on NotImplementedError logic works - # in the usual way. 
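
The selectable-aggregate tests above and the stddev test in this class use the same interface: aggregate.func picks the function, aggregate.param (when present) names its argument, and results come back under the 'aggregate' key, with parameterized functions keyed as 'func/param'. A minimal sketch, again assuming a reachable v2 endpoint on localhost:8777 and the requests library:

    import requests

    BASE = 'http://localhost:8777/v2'   # hypothetical endpoint

    # Distinct resources per project: parameterized cardinality aggregate.
    params = {'aggregate.func': 'cardinality',
              'aggregate.param': 'resource_id',
              'groupby': 'project_id'}
    for r in requests.get(BASE + '/meters/instance/statistics',
                          params=params).json():
        print(r['groupby'], r['aggregate']['cardinality/resource_id'])

    # Unparameterized aggregates such as stddev need only aggregate.func.
    # As the comment above notes, only some drivers (e.g. MongoDB) support it.
    for r in requests.get(BASE + '/meters/instance/statistics',
                          params={'aggregate.func': 'stddev',
                                  'groupby': 'project_id'}).json():
        print(r['groupby'], r['aggregate']['stddev'])
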
- - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestUnparameterizedAggregates, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 5, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 9, 'user': 'user-3', 'project': 'project-3', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', - 'source': 'source'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_GAUGE, - unit='instance', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_per_tenant_selectable_unparameterized_aggregate(self): - agg_args = {'aggregate.func': 'stddev'} - data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - projects = ['project-1', 'project-2', 'project-3'] - self.assertEqual(set(projects), groupby_vals_set) - - aggregate = 'stddev' - expected_values = [1.4142, 1.0897, 0.0] - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - for r in data: - grp = r['groupby'] - for project in projects: - if grp == {'project_id': project}: - expected = expected_values[projects.index(project)] - self.assertEqual('instance', r['unit']) - self.assertNotIn(aggregate, r) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertAlmostEqual(r['aggregate'][aggregate], - expected, - places=4) - for a in standard_aggregates: - self.assertNotIn(a, r) - - -@tests_db.run_with('mongodb') -class TestBigValueStatistics(v2.FunctionalTest): - 
- PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestBigValueStatistics, self).setUp() - for i in range(0, 3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - (i + 1) * (10 ** 12), - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_big_value_statistics(self): - data = self.get_json(self.PATH) - - expected_values = {'count': 3, - 'min': 10 ** 12, - 'max': 3 * 10 ** 12, - 'sum': 6 * 10 ** 12, - 'avg': 2 * 10 ** 12} - self.assertEqual(1, len(data)) - for d in data: - for name, expected_value in expected_values.items(): - self.assertIn(name, d) - self.assertEqual(expected_value, d[name]) diff --git a/ceilometer/tests/functional/gabbi/fixtures.py b/ceilometer/tests/functional/gabbi/fixtures.py index a8e81059..44af6adc 100644 --- a/ceilometer/tests/functional/gabbi/fixtures.py +++ b/ceilometer/tests/functional/gabbi/fixtures.py @@ -17,7 +17,6 @@ import datetime import os -import random from unittest import case import uuid @@ -30,8 +29,6 @@ import six from six.moves.urllib import parse as urlparse from ceilometer.event.storage import models -from ceilometer.publisher import utils -from ceilometer import sample from ceilometer import storage # TODO(chdent): For now only MongoDB is supported, because of easy @@ -63,8 +60,6 @@ class ConfigFixture(fixture.GabbiFixture): self.conf([], project='ceilometer', validate_default_values=True) opts.set_defaults(self.conf) conf.import_group('api', 'ceilometer.api.controllers.v2.root') - conf.import_opt('store_events', 'ceilometer.notification', - group='notification') content = ('{"default": ""}') if six.PY3: @@ -81,20 +76,11 @@ class ConfigFixture(fixture.GabbiFixture): 'ceilometer/tests/functional/gabbi/gabbi_paste.ini') ) - # A special pipeline is required to use the direct publisher. 
- conf.set_override('pipeline_cfg_file', - 'ceilometer/tests/functional/gabbi_pipeline.yaml') - database_name = '%s-%s' % (db_url, str(uuid.uuid4())) conf.set_override('connection', database_name, group='database') - conf.set_override('metering_connection', '', group='database') conf.set_override('event_connection', '', group='database') conf.set_override('pecan_debug', True, group='api') - conf.set_override('gnocchi_is_enabled', False, group='api') - conf.set_override('aodh_is_enabled', False, group='api') - - conf.set_override('store_events', True, group='notification') def stop_fixture(self): """Reset the config and remove data.""" @@ -103,50 +89,13 @@ class ConfigFixture(fixture.GabbiFixture): self.conf.reset() -class SampleDataFixture(fixture.GabbiFixture): - """Instantiate some sample data for use in testing.""" - - def start_fixture(self): - """Create some samples.""" - conf = fixture_config.Config().conf - self.conn = storage.get_connection_from_config(conf) - timestamp = datetime.datetime.utcnow() - project_id = str(uuid.uuid4()) - self.source = str(uuid.uuid4()) - resource_metadata = {'farmed_by': 'nancy'} - - for name in ['cow', 'pig', 'sheep']: - resource_metadata.update({'breed': name}), - c = sample.Sample(name='livestock', - type='gauge', - unit='head', - volume=int(10 * random.random()), - user_id='farmerjon', - project_id=project_id, - resource_id=project_id, - timestamp=timestamp, - resource_metadata=resource_metadata, - source=self.source) - data = utils.meter_message_from_counter( - c, conf.publisher.telemetry_secret) - self.conn.record_metering_data(data) - - def stop_fixture(self): - """Destroy the samples.""" - # NOTE(chdent): print here for sake of info during testing. - # This will go away eventually. - print('resource', - self.conn.db.resource.remove({'source': self.source})) - print('meter', self.conn.db.meter.remove({'source': self.source})) - - class EventDataFixture(fixture.GabbiFixture): """Instantiate some sample event data for use in testing.""" def start_fixture(self): """Create some events.""" conf = fixture_config.Config().conf - self.conn = storage.get_connection_from_config(conf, 'event') + self.conn = storage.get_connection_from_config(conf) events = [] name_list = ['chocolate.chip', 'peanut.butter', 'sugar'] for ix, name in enumerate(name_list): diff --git a/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml b/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml deleted file mode 100644 index 35250176..00000000 --- a/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# A limited pipeline for use with the Gabbi spike. -# direct writes to the metering database without using an -# intermediary dispatcher. -# -# This is one of several things that will need some extensive -# tidying to be more right. 
---- -sources: - - name: meter_source - interval: 1 - meters: - - "*" - sinks: - - meter_sink -sinks: - - name: meter_sink - transformers: - publishers: - - direct:// diff --git a/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml b/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml index 5b9c9164..1a44c8d5 100644 --- a/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml +++ b/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml @@ -11,4 +11,3 @@ tests: url: /v2/capabilities response_json_paths: $.event_storage.['storage:production_ready']: true - $.storage.['storage:production_ready']: true diff --git a/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml b/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml deleted file mode 100644 index 0d5927a8..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# Post a simple sample, sir, and the retrieve it in various ways. -fixtures: - - ConfigFixture - -tests: - -# POST one sample and verify its existence. - - - name: post sample for meter - desc: post a single sample - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: | - [ - { - "counter_name": "apples", - "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68", - "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff", - "counter_unit": "instance", - "counter_volume": 1, - "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36", - "resource_metadata": { - "name2": "value2", - "name1": "value1" - }, - "counter_type": "gauge" - } - ] - - response_json_paths: - $.[0].counter_name: apples - status: 201 - response_headers: - content-type: application/json; charset=UTF-8 - -# When POSTing a sample perhaps we should get back a location header -# with the URI of the posted sample - - - name: post a sample expect location - desc: https://bugs.launchpad.net/ceilometer/+bug/1426426 - xfail: true - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - project_id: 35b17138-b364-4e6a-a131-8f3099c5be68 - user_id: efd87807-12d2-4b38-9c70-5f5c2ac427ff - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - resource_metadata: - name2: value2 - name1: value1 - counter_type: gauge - response_headers: - location: /$SCHEME://$NETLOC/ - -# GET all the samples created for the apples meter - - - name: get samples for meter - desc: get all the samples at that meter - url: /v2/meters/apples - response_json_paths: - $.[0].counter_name: apples - $.[0].counter_volume: 1 - $.[0].resource_metadata.name2: value2 - -# POSTing a sample to a meter will implicitly create a resource - - - name: get resources - desc: get the resources that exist because of the sample - url: /v2/resources - response_json_paths: - $.[0].metadata.name2: value2 - -# NOTE(chdent): We assume that the first item in links is self. -# Need to determine how to express the more correct JSONPath here -# (if possible). 
- - - name: get resource - desc: get just one of those resources via self - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.metadata.name2: value2 - -# GET the created samples - - - name: get samples - desc: get all the created samples - url: /v2/samples - response_json_paths: - $.[0].metadata.name2: value2 - $.[0].meter: apples - - - name: get one sample - desc: get the one sample that exists - url: /v2/samples/$RESPONSE['$[0].id'] - response_json_paths: - $.metadata.name2: value2 - $.meter: apples diff --git a/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml b/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml deleted file mode 100644 index 94369703..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# -# Demonstrate a simple sample fixture. -# -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: -- name: get fixture samples - desc: get all the samples at livestock - url: /v2/meters/livestock - response_json_paths: - $.[0].counter_name: livestock - $.[1].counter_name: livestock - $.[2].counter_name: livestock - $.[2].user_id: farmerjon - $.[0].resource_metadata.breed: cow - $.[1].resource_metadata.farmed_by: nancy diff --git a/ceilometer/tests/functional/gabbi/gabbits/meters.yaml b/ceilometer/tests/functional/gabbi/gabbits/meters.yaml deleted file mode 100644 index 65bb45a5..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits/meters.yaml +++ /dev/null @@ -1,401 +0,0 @@ -# -# Tests to explore and cover the /v2/meters section of the -# Ceilometer API. -# - -fixtures: - - ConfigFixture - -tests: - -# Generic HTTP health explorations of all meters. - - - name: empty meters list - url: /v2/meters - response_headers: - content-type: /application/json/ - response_strings: - - "[]" - - - name: meters list bad accept - url: /v2/meters - request_headers: - accept: text/plain - status: 406 - - - name: meters list bad method - url: /v2/meters - method: POST - status: 405 - response_headers: - allow: GET - - - name: try to delete meters - url: /v2/meters - method: DELETE - status: 405 - response_headers: - allow: GET - -# Generic HTTP health explorations of single meter. 
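
The clean-samples and meters gabbi files in this patch drive the sample-posting path over HTTP; outside the gabbi harness the same request looks roughly like the sketch below, assuming a v2 endpoint on localhost:8777 and the requests library (payload fields are copied from the tests; direct=True writes straight to storage instead of the notification pipeline).

    import requests

    BASE = 'http://localhost:8777/v2'   # hypothetical endpoint

    # Same payload shape as the gabbi "post sample for meter" test.
    payload = [{
        'counter_name': 'apples',
        'counter_type': 'gauge',          # gauge, delta or cumulative
        'counter_unit': 'instance',
        'counter_volume': 1,
        'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
        'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
        'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
        'resource_metadata': {'name1': 'value1', 'name2': 'value2'},
    }]
    resp = requests.post(BASE + '/meters/apples', params={'direct': 'True'},
                         json=payload)
    assert resp.status_code == 201
    print(resp.json()[0]['counter_name'])   # -> apples
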
- - - name: get non exist meter - url: /v2/meters/noexist - response_strings: - - "[]" - - - name: meter bad accept - url: /v2/meters/noexist?direct=True - request_headers: - accept: text/plain - status: 406 - - - name: meter delete noexist - url: /v2/meters/noexist - method: DELETE - status: "404 || 405" - - - name: post meter no data - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: "" - status: 400 - - - name: post meter error is JSON - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: "" - status: 400 - response_headers: - content-type: /application/json/ - response_json_paths: - $.error_message.faultstring: "Samples should be included in request body" - - - name: post meter bad content-type - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: text/plain - data: hello - status: 415 - - - name: post bad samples to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - samples: - - red - - blue - - yellow - status: 400 - -# POST variations on a malformed sample - - - name: post limited counter to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 400 - response_strings: - - "Invalid input for field/attribute counter_name" - - - name: post mismatched counter name to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: cars - counter_type: gauge - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 400 - response_strings: - - "Invalid input for field/attribute counter_name" - - "should be apples" - - - name: post counter no resource to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 1 - status: 400 - response_strings: - - "Invalid input for field/attribute resource_id" - - "Mandatory field missing." - - - name: post counter bad type to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: elevation - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 400 - response_strings: - - "Invalid input for field/attribute counter_type." 
- - "The counter type must be: gauge, delta, cumulative" - -# Manipulate samples - - - name: post counter to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 201 - - - name: list apple samples - url: /v2/meters/apples - response_json_paths: - $[0].counter_volume: 1.0 - $[0].counter_name: apples - $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - - - name: list meters - url: /v2/meters - response_json_paths: - $[0].name: apples - $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - $[0].type: gauge - $[-1].name: apples - - - name: negative limit on meter list - url: /v2/meters/apples?limit=-5 - status: 400 - response_strings: - - Limit must be positive - - - name: nan limit on meter list - url: /v2/meters/apples?limit=NaN - status: 400 - response_strings: - - unable to convert to int - - - name: post counter to meter different resource - url: /v2/meters/apples?direct=True - method: POST - status: 201 - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 2 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - - - name: query for resource - url: /v2/meters/apples?q.field=resource_id&q.value=aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa&q.op=eq - response_json_paths: - $[0].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - $[-1].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - -# Explore posting samples with less than perfect data. - - - name: post counter with bad timestamp - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-bad 23:23:20" - status: 400 - response_strings: - - 'Invalid input for field/attribute samples' - - - name: post counter with good timestamp - url: /v2/meters/apples?direct=True - method: POST - status: 201 - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-01 23:23:20" - - - name: post counter with wrong metadata - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-01 23:23:20" - resource_metadata: "a string" - status: 400 - response_strings: - - "Invalid input for field/attribute samples" - - - name: post counter with empty metadata - url: /v2/meters/apples?direct=True - method: POST - status: 201 - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-01 23:23:20" - resource_metadata: {} - -# Statistics - - - name: get sample statistics - url: /v2/meters/apples/statistics - response_json_paths: - $[0].groupby: null - $[0].unit: instance - $[0].sum: 9.0 - $[0].min: 1.0 - $[0].max: 3.0 - $[0].count: 4 - - - name: get incorrectly grouped sample statistics - url: 
/v2/meters/apples/statistics?groupby=house_id - status: 400 - response_strings: - - Invalid groupby fields - - - name: get grouped sample statistics - url: /v2/meters/apples/statistics?groupby=resource_id - response_json_paths: - $[1].max: 3.0 - $[0].max: 1.0 - - - name: get sample statistics bad period - url: /v2/meters/apples/statistics?period=seven - status: 400 - response_strings: - - unable to convert to int - - - name: get sample statistics negative period - url: /v2/meters/apples/statistics?period=-7 - status: 400 - response_strings: - - Period must be positive. - - - name: get sample statistics 600 period - url: /v2/meters/apples/statistics?period=600 - response_json_paths: - $[0].period: 600 - - - name: get sample statistics time limit not time - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=Remember%20Remember - status: 400 - response_strings: - - invalid timestamp format - - - name: get sample statistics time limit gt - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2014-01-01 - response_json_paths: - $[0].count: 2 - - - name: get sample statistics time limit lt - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=lt&q.value=2014-01-01 - response_json_paths: - $[0].count: 2 - - - name: get sample statistics time limit bounded - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2013-06-01&q.field=timestamp&q.op=lt&q.value=2014-01-01 - response_strings: - - "[]" - - - name: get sample statistics select aggregate bad format - url: /v2/meters/apples/statistics?aggregate=max - status: 400 - - - name: get sample statistics select aggregate - url: /v2/meters/apples/statistics?aggregate.func=max - response_json_paths: - $[0].aggregate.max: 3.0 - - - name: get sample statistics select aggregate multiple - url: /v2/meters/apples/statistics?aggregate.func=max&aggregate.func=count - response_json_paths: - $[0].aggregate.max: 3.0 - $[0].aggregate.count: 4 - - - name: get sample statistics select aggregate bad function - url: /v2/meters/apples/statistics?aggregate.func=mmm - status: 400 - response_strings: - - 'Invalid aggregation function: mmm' - - - name: get sample statistics select aggregate good function and bad function - url: /v2/meters/apples/statistics?aggregate.func=max&aggregate.func=mmm - status: 400 - response_strings: - - 'Invalid aggregation function: mmm' - -# limit meters results - - - name: get meters ulimited - url: /v2/meters - response_json_paths: - $.`len`: 2 - - - name: get meters limited - url: /v2/meters?limit=1 - response_json_paths: - $.`len`: 1 - - - name: get meters double limit - url: /v2/meters?limit=1&limit=1 - status: 400 - - - name: get meters filter limit - desc: expressing limit this way is now disallowed - url: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1 - status: 400 - response_strings: - - 'Unknown argument: \"limit\": unrecognized field in query' - - - name: get meters filter limit and limit - url: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1 - status: 400 - response_strings: - - 'Unknown argument: \"limit\": unrecognized field in query' diff --git a/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml b/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml deleted file mode 100644 index 44d407ce..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# -# Explore and cover resources API with gabbi tests when there are no -# resources. 
-# - -fixtures: - - ConfigFixture - -tests: - -# Check for a list of resources, modifying the request in various -# ways. - - - name: list resources no extra - desc: Provide no additional header guidelines - url: /v2/resources - response_headers: - content-type: /application/json/ - response_strings: - - "[]" - - - name: list resources but get url wrong - url: /v2/resrces - status: 404 - - - name: list resources explicit accept - url: /v2/resources - request_headers: - accept: application/json - response_strings: - - "[]" - - - name: list resources bad accept - url: /v2/resources - request_headers: - accept: text/plain - status: 406 - - - name: list resources with bad query field - url: /v2/resources?q.field=id&q.value=cars - status: 400 - response_strings: - - unrecognized field in query - - - name: list resources with query - url: /v2/resources?q.field=resource&q.value=cars - response_strings: - - "[]" - - - name: list resource bad type meter links - url: /v2/resources?meter_links=yes%20please - status: 400 - response_strings: - - unable to convert to int - - - name: list resource meter links int - url: /v2/resources?meter_links=0 - response_strings: - - "[]" diff --git a/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml b/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml deleted file mode 100644 index cf138c27..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# -# Explore and cover resources API with gabbi tests when there are a -# small number of pre-existing resources -# - -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: - - - name: list all resources - url: /v2/resources - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - - - name: get one resource - desc: get a resource via the links in the first resource listed above - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.resource_id: $RESPONSE['$[0].resource_id'] - - - name: list resources limit user_id - url: /v2/resources?q.field=user_id&q.value=farmerjon - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - - - name: list resources limit metadata - url: /v2/resources?q.field=metadata.breed&q.value=sheep - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - - - name: list resources limit metadata no match - url: /v2/resources?q.field=metadata.breed&q.value=llamma - response_strings: - - "[]" - - - name: fail to get one resource - url: /v2/resources/nosirnothere - status: 404 - - - name: list resource meter links present - url: /v2/resources?meter_links=1 - response_json_paths: - $[0].links[0].rel: self - $[0].links[1].rel: livestock - $[0].links[-1].rel: livestock - - - name: list resource meter links not present - url: /v2/resources?meter_links=0 - desc: there is only one links entry when meter_links is 0 - response_json_paths: - $[0].links[0].rel: self - $[0].links[-1].rel: self - -# limit resource results - - - name: get resources ulimited - url: /v2/resources - response_json_paths: - $.`len`: 1 - - - name: get resources limited - url: /v2/resources?limit=1 - response_json_paths: - $.`len`: 1 - - - name: get resources double limit - url: /v2/resources?limit=1&limit=1 - status: 400 - - - name: get resources filter limit - desc: expressing limit this way is now disallowed - url: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1 - status: 400 - response_strings: - - 'Unknown argument: \"limit\": unrecognized field in query' 
- - - name: get resources filter limit and limit - url: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1 - status: 400 - response_strings: - - 'Unknown argument: \"limit\": unrecognized field in query' diff --git a/ceilometer/tests/functional/gabbi/gabbits/samples.yaml b/ceilometer/tests/functional/gabbi/gabbits/samples.yaml deleted file mode 100644 index be568c32..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits/samples.yaml +++ /dev/null @@ -1,155 +0,0 @@ -# -# Explore and test the samples controller, using samples supplied by -# the SampleDataFixture. -# - -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: - -# Confirm all the samples are there and expected requests behave. -# TODO(chdent): There's a danger here that the ordering of multiple -# samples will not be consistent. - - - name: lists samples - url: /v2/samples - response_headers: - content-type: /application/json/ - response_json_paths: - $[0].meter: livestock - $[0].metadata.breed: cow - $[1].metadata.breed: pig - $[2].metadata.breed: sheep - - - name: get just one - url: /v2/samples/$RESPONSE['$[0].id'] - response_json_paths: - $.meter: livestock - $.metadata.breed: cow - - - name: list samples with limit - url: /v2/samples?limit=1 - response_json_paths: - $[0].meter: livestock - $[0].metadata.breed: cow - $[-1].metadata.breed: cow - - - name: list zero samples with zero limit - url: /v2/samples?limit=0 - status: 400 - - - name: list samples with query - url: /v2/samples?q.field=resource_metadata.breed&q.value=cow&q.op=eq - response_json_paths: - $[0].meter: livestock - $[0].metadata.breed: cow - $[-1].metadata.breed: cow - - - name: query by user - url: /v2/samples?q.field=user&q.value=$RESPONSE['$[0].user_id']&q.op=eq - response_json_paths: - $[0].user_id: $RESPONSE['$[0].user_id'] - - - name: query by user_id - url: /v2/samples?q.field=user_id&q.value=$RESPONSE['$[0].user_id']&q.op=eq - response_json_paths: - $[0].user_id: $RESPONSE['$[0].user_id'] - - - name: query by project - url: /v2/samples?q.field=project&q.value=$RESPONSE['$[0].project_id']&q.op=eq - response_json_paths: - $[0].project_id: $RESPONSE['$[0].project_id'] - - - name: query by project_id - url: /v2/samples?q.field=project_id&q.value=$RESPONSE['$[0].project_id']&q.op=eq - response_json_paths: - $[0].project_id: $RESPONSE['$[0].project_id'] - -# Explore failure modes for listing samples - - - name: list samples with bad field - url: /v2/samples?q.field=harpoon&q.value=cow&q.op=eq - status: 400 - response_strings: - - timestamp - - project - - unrecognized field in query - - - name: list samples with bad metaquery field - url: /v2/samples?q.field=metaquery&q.value=cow&q.op=eq - status: 400 - response_strings: - - unrecognized field in query - - - name: bad limit value - url: /v2/samples?limit=happiness - status: 400 - response_strings: - - Invalid input for field/attribute limit - - - name: negative limit value 400 - url: /v2/samples?limit=-99 - status: 400 - - - name: negative limit value error message - url: /v2/samples?limit=-99 - status: 400 - response_headers: - content-type: /application/json/ - response_json_paths: - $.error_message.faultstring: Limit must be positive - - - name: bad accept - desc: try an unexpected content type - url: /v2/samples - request_headers: - accept: text/plain - status: 406 - - - name: complex good accept - desc: client sends complex accept do we adapt - url: /v2/samples - request_headers: - accept: text/plain, application/json; q=0.8 - - - name: complex bad accept - desc: client sends 
complex accept do we adapt - url: /v2/samples - request_headers: - accept: text/plain, application/binary; q=0.8 - status: 406 - - - name: bad method - url: /v2/samples - method: POST - status: 405 - response_headers: - allow: GET - -# Work with just one sample. - - - name: list one of the samples - url: /v2/samples?limit=1 - - - name: retrieve one sample - url: /v2/samples/$RESPONSE['$[0].id'] - response_headers: - content-type: /application/json/ - response_json_paths: - $.meter: livestock - - - name: retrieve sample with useless query - url: /v2/samples/$RESPONSE['$.id']?limit=5 - status: 400 - response_strings: - - "Unknown argument:" - - - name: attempt missing sample - url: /v2/samples/davesnothere - status: 404 - response_headers: - content-type: /application/json/ - response_json_paths: - $.error_message.faultstring: Sample davesnothere Not Found diff --git a/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml b/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml deleted file mode 100644 index 61f7c816..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# Post a simple sample and confirm the created resource has -# reasonable URLs -fixtures: - - ConfigFixture - -tests: - -# POST one sample and verify its existence. - - - name: post sample for meter - desc: post a single sample - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: | - [ - { - "counter_name": "apples", - "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68", - "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff", - "counter_unit": "instance", - "counter_volume": 1, - "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36", - "resource_metadata": { - "name2": "value2", - "name1": "value1" - }, - "counter_type": "gauge" - } - ] - - response_json_paths: - $.[0].counter_name: apples - status: 201 - response_headers: - content-type: application/json; charset=UTF-8 - - - name: get resources - desc: get the resources that exist because of the sample - url: /v2/resources - response_json_paths: - $.[0].metadata.name2: value2 - - - name: get resource - desc: get just one of those resources via self - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.metadata.name2: value2 - response_strings: - - /telemetry/ diff --git a/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml b/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml deleted file mode 100644 index ce4811de..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# -# Explore and cover resources API with gabbi tests when there are a -# small number of pre-existing resources -# - -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: - - - name: list all resources - url: /v2/resources - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - response_strings: - - /telemetry/ - - - name: get one resource - desc: get a resource via the links in the first resource listed above - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.resource_id: $RESPONSE['$[0].resource_id'] diff --git a/ceilometer/tests/functional/publisher/test_direct.py b/ceilometer/tests/functional/publisher/test_direct.py deleted file mode 100644 index bc800e8e..00000000 --- a/ceilometer/tests/functional/publisher/test_direct.py +++ /dev/null @@ -1,99 +0,0 @@ -# -# Copyright 2015 Red Hat -# -# Licensed 
under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/direct.py -""" - -import datetime -import uuid - -from oslo_utils import netutils - -from ceilometer.event.storage import models as event -from ceilometer.publisher import direct -from ceilometer import sample -from ceilometer.tests import db as tests_db - - -class TestDirectPublisher(tests_db.TestBase): - - resource_id = str(uuid.uuid4()) - - test_data = [ - sample.Sample( - name='alpha', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='beta', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='gamma', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.now().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def test_direct_publisher(self): - """Test samples are saved.""" - self.CONF.set_override('connection', self.db_manager.url, - group='database') - parsed_url = netutils.urlsplit('direct://') - publisher = direct.DirectPublisher(parsed_url) - publisher.publish_samples(self.test_data) - - meters = list(self.conn.get_meters(resource=self.resource_id)) - names = sorted([meter.name for meter in meters]) - - self.assertEqual(3, len(meters), 'There should be 3 samples') - self.assertEqual(['alpha', 'beta', 'gamma'], names) - - -class TestEventDirectPublisher(tests_db.TestBase): - test_data = [event.Event(message_id=str(uuid.uuid4()), - event_type='event_%d' % i, - generated=datetime.datetime.utcnow(), - traits=[], raw={}) - for i in range(0, 5)] - - def test_direct_publisher(self): - parsed_url = netutils.urlsplit('direct://') - publisher = direct.DirectPublisher(parsed_url) - publisher.publish_events(self.test_data) - - e_types = list(self.event_conn.get_event_types()) - self.assertEqual(5, len(e_types)) - self.assertEqual(['event_%d' % i for i in range(0, 5)], - sorted(e_types)) diff --git a/ceilometer/tests/functional/storage/test_impl_hbase.py b/ceilometer/tests/functional/storage/test_impl_hbase.py deleted file mode 100644 index 16ec441a..00000000 --- a/ceilometer/tests/functional/storage/test_impl_hbase.py +++ /dev/null @@ -1,103 +0,0 @@ -# -# Copyright 2012, 2013 Dell Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/storage/impl_hbase.py - -.. note:: - In order to run the tests against real HBase server set the environment - variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before - running the tests. Make sure the Thrift server is running on that server. - -""" -import mock - - -try: - import happybase # noqa -except ImportError: - import testtools.testcase - raise testtools.testcase.TestSkipped("happybase is needed") - -from ceilometer.event.storage import impl_hbase as hbase_event -from ceilometer.storage import impl_hbase as hbase -from ceilometer.tests import base as test_base -from ceilometer.tests import db as tests_db - - -class ConnectionTest(tests_db.TestBase): - - @tests_db.run_with('hbase') - def test_hbase_connection(self): - - class TestConn(object): - def __init__(self, host, port): - self.netloc = '%s:%s' % (host, port) - - def open(self): - pass - - def get_connection_pool(conf): - return TestConn(conf['host'], conf['port']) - - with mock.patch.object(hbase.Connection, '_get_connection_pool', - side_effect=get_connection_pool): - conn = hbase.Connection('hbase://test_hbase:9090') - self.assertIsInstance(conn.conn_pool, TestConn) - - -class CapabilitiesTest(test_base.BaseTestCase): - # Check the returned capabilities list, which is specific to each DB - # driver - - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'statistics': {'groupby': False, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': False, - 'min': False, - 'sum': False, - 'avg': False, - 'count': False, - 'stddev': False, - 'cardinality': False}} - }, - } - - actual_capabilities = hbase.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_event_capabilities(self): - expected_capabilities = { - 'events': {'query': {'simple': True}}, - } - - actual_capabilities = hbase_event.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = hbase.Connection.get_storage_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) diff --git a/ceilometer/tests/functional/storage/test_impl_log.py b/ceilometer/tests/functional/storage/test_impl_log.py deleted file mode 100644 index 2637e034..00000000 --- a/ceilometer/tests/functional/storage/test_impl_log.py +++ /dev/null @@ -1,29 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for ceilometer/storage/impl_log.py -""" -from oslotest import base - -from ceilometer.storage import impl_log - - -class ConnectionTest(base.BaseTestCase): - @staticmethod - def test_get_connection(): - conn = impl_log.Connection(None) - conn.record_metering_data({'counter_name': 'test', - 'resource_id': __name__, - 'counter_volume': 1, - }) diff --git a/ceilometer/tests/functional/storage/test_impl_mongodb.py b/ceilometer/tests/functional/storage/test_impl_mongodb.py index 87076809..3dd3ce74 100644 --- a/ceilometer/tests/functional/storage/test_impl_mongodb.py +++ b/ceilometer/tests/functional/storage/test_impl_mongodb.py @@ -21,24 +21,11 @@ """ -from ceilometer.event.storage import impl_mongodb as impl_mongodb_event -from ceilometer.storage import impl_mongodb +from ceilometer.event.storage import impl_mongodb from ceilometer.tests import base as test_base from ceilometer.tests import db as tests_db -@tests_db.run_with('mongodb') -class MongoDBConnection(tests_db.TestBase): - def test_connection_pooling(self): - test_conn = impl_mongodb.Connection(self.db_manager.url) - self.assertEqual(self.conn.conn, test_conn.conn) - - def test_replica_set(self): - url = self.db_manager._url + '?replicaSet=foobar' - conn = impl_mongodb.Connection(url) - self.assertTrue(conn.conn) - - @tests_db.run_with('mongodb') class IndexTest(tests_db.TestBase): @@ -56,10 +43,6 @@ class IndexTest(tests_db.TestBase): coll.index_information() [index_name]['expireAfterSeconds']) - def test_meter_ttl_index_absent(self): - self._test_ttl_index_absent(self.conn, 'meter', - 'metering_time_to_live') - def test_event_ttl_index_absent(self): self._test_ttl_index_absent(self.event_conn, 'event', 'event_time_to_live') @@ -77,10 +60,6 @@ class IndexTest(tests_db.TestBase): conn.upgrade() self.assertNotIn(index_name, coll.index_information()) - def test_meter_ttl_index_present(self): - self._test_ttl_index_present(self.conn, 'meter', - 'metering_time_to_live') - def test_event_ttl_index_present(self): self._test_ttl_index_present(self.event_conn, 'event', 'event_time_to_live') @@ -90,44 +69,9 @@ class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}} - }, - } - - actual_capabilities = impl_mongodb.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } - actual_capabilities = impl_mongodb_event.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = (impl_mongodb.Connection. 
- get_storage_capabilities()) + actual_capabilities = impl_mongodb.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) diff --git a/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py b/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py index 5854ba12..877b08b2 100644 --- a/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py +++ b/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py @@ -19,23 +19,14 @@ """ import datetime -import warnings -import mock -from oslo_db import exception -from oslo_utils import timeutils from six.moves import reprlib from ceilometer.event.storage import impl_sqlalchemy as impl_sqla_event from ceilometer.event.storage import models -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.storage import impl_sqlalchemy from ceilometer.storage.sqlalchemy import models as sql_models from ceilometer.tests import base as test_base from ceilometer.tests import db as tests_db -from ceilometer.tests.functional.storage \ - import test_storage_scenarios as scenarios @tests_db.run_with('sqlite', 'mysql', 'pgsql') @@ -47,17 +38,6 @@ class CeilometerBaseTest(tests_db.TestBase): self.assertEqual('value', base['key']) -@tests_db.run_with('sqlite') -class EngineFacadeTest(tests_db.TestBase): - - @mock.patch.object(warnings, 'warn') - def test_no_not_supported_warning(self, mocked): - impl_sqlalchemy.Connection('sqlite://') - impl_sqla_event.Connection('sqlite://') - self.assertNotIn(mock.call(mock.ANY, exception.NotSupportedWarning), - mocked.call_args_list) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') class EventTypeTest(tests_db.TestBase): # EventType is a construct specific to sqlalchemy @@ -118,108 +98,13 @@ class EventTest(tests_db.TestBase): self.assertTrue(reprlib.repr(ev)) -@tests_db.run_with('sqlite', 'mysql', 'pgsql') -class RelationshipTest(scenarios.DBTestBase): - # Note: Do not derive from SQLAlchemyEngineTestBase, since we - # don't want to automatically inherit all the Meter setup. 
- - @mock.patch.object(timeutils, 'utcnow') - def test_clear_metering_data_meta_tables(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(3 * 60) - - session = self.conn._engine_facade.get_session() - self.assertEqual(5, session.query(sql_models.Sample).count()) - - resource_ids = (session.query(sql_models.Resource.internal_id) - .group_by(sql_models.Resource.internal_id)) - meta_tables = [sql_models.MetaText, sql_models.MetaFloat, - sql_models.MetaBigInt, sql_models.MetaBool] - s = set() - for table in meta_tables: - self.assertEqual(0, (session.query(table) - .filter(~table.id.in_(resource_ids)).count() - )) - s.update(session.query(table.id).all()) - self.assertEqual(set(resource_ids.all()), s) - - class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}} - }, - } - - actual_capabilities = impl_sqlalchemy.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = impl_sqla_event.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = (impl_sqlalchemy. - Connection.get_storage_capabilities()) - self.assertEqual(expected_capabilities, actual_capabilities) - - -@tests_db.run_with('sqlite', 'mysql', 'pgsql') -class FilterQueryTestForMeters(scenarios.DBTestBase): - def prepare_data(self): - self.counters = [] - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5, - user_id=None, - project_id=None, - resource_id='fake_id', - timestamp=datetime.datetime(2012, 9, 25, 10, 30), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - - self.counters.append(c) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret') - self.conn.record_metering_data(msg) - - def test_get_meters_by_user(self): - meters = list(self.conn.get_meters(user='None')) - self.assertEqual(1, len(meters)) - - def test_get_meters_by_project(self): - meters = list(self.conn.get_meters(project='None')) - self.assertEqual(1, len(meters)) diff --git a/ceilometer/tests/functional/storage/test_pymongo_base.py b/ceilometer/tests/functional/storage/test_pymongo_base.py deleted file mode 100644 index 6dadffad..00000000 --- a/ceilometer/tests/functional/storage/test_pymongo_base.py +++ /dev/null @@ -1,145 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests the mongodb functionality -""" - -import copy -import datetime - -import mock - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests import db as tests_db -from ceilometer.tests.functional.storage import test_storage_scenarios - - -@tests_db.run_with('mongodb') -class CompatibilityTest(test_storage_scenarios.DBTestBase): - - def prepare_data(self): - def old_record_metering_data(self, data): - received_timestamp = datetime.datetime.utcnow() - self.db.resource.update( - {'_id': data['resource_id']}, - {'$set': {'project_id': data['project_id'], - 'user_id': data['user_id'], - # Current metadata being used and when it was - # last updated. - 'timestamp': data['timestamp'], - 'received_timestamp': received_timestamp, - 'metadata': data['resource_metadata'], - 'source': data['source'], - }, - '$addToSet': {'meter': {'counter_name': data['counter_name'], - 'counter_type': data['counter_type'], - }, - }, - }, - upsert=True, - ) - - record = copy.copy(data) - self.db.meter.insert(record) - - # Stubout with the old version DB schema, the one w/o 'counter_unit' - with mock.patch.object(self.conn, 'record_metering_data', - side_effect=old_record_metering_data): - self.counters = [] - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10, 30), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - self.counters.append(c) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret') - self.conn.record_metering_data(self.conn, msg) - - def test_counter_unit(self): - meters = list(self.conn.get_meters()) - self.assertEqual(1, len(meters)) - - -@tests_db.run_with('mongodb') -class FilterQueryTestForMeters(test_storage_scenarios.DBTestBase): - def prepare_data(self): - def old_record_metering_data(self, data): - received_timestamp = datetime.datetime.utcnow() - self.db.resource.update( - {'_id': data['resource_id']}, - {'$set': {'project_id': data['project_id'], - 'user_id': data['user_id'], - # Current metadata being used and when it was - # last updated. 
- 'timestamp': data['timestamp'], - 'received_timestamp': received_timestamp, - 'metadata': data['resource_metadata'], - 'source': data['source'], - }, - '$addToSet': {'meter': {'counter_name': data['counter_name'], - 'counter_type': data['counter_type'], - }, - }, - }, - upsert=True, - ) - - record = copy.copy(data) - self.db.meter.insert(record) - - # Stubout with the old version DB schema, the one w/o 'counter_unit' - with mock.patch.object(self.conn, 'record_metering_data', - side_effect=old_record_metering_data): - self.counters = [] - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5, - None, - None, - None, - timestamp=datetime.datetime(2012, 9, 25, 10, 30), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - - self.counters.append(c) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret') - self.conn.record_metering_data(self.conn, msg) - - def test_get_meters_by_user(self): - meters = list(self.conn.get_meters(user='None')) - self.assertEqual(1, len(meters)) - - def test_get_meters_by_resource(self): - meters = list(self.conn.get_meters(resource='None')) - self.assertEqual(1, len(meters)) - - def test_get_meters_by_project(self): - meters = list(self.conn.get_meters(project='None')) - self.assertEqual(1, len(meters)) diff --git a/ceilometer/tests/functional/storage/test_storage_scenarios.py b/ceilometer/tests/functional/storage/test_storage_scenarios.py index 2f78b100..bedf4837 100644 --- a/ceilometer/tests/functional/storage/test_storage_scenarios.py +++ b/ceilometer/tests/functional/storage/test_storage_scenarios.py @@ -19,2622 +19,13 @@ import datetime import operator import mock -from oslo_config import cfg -from oslo_db import api -from oslo_db import exception as dbexc from oslo_utils import timeutils -import pymongo -import ceilometer from ceilometer.event import storage as event_storage from ceilometer.event.storage import models as event_models -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer import storage from ceilometer.tests import db as tests_db -class DBTestBase(tests_db.TestBase): - @staticmethod - def create_side_effect(method, exception_type, test_exception): - def side_effect(*args, **kwargs): - if test_exception.pop(): - raise exception_type - else: - return method(*args, **kwargs) - return side_effect - - def create_and_store_sample(self, timestamp=datetime.datetime.utcnow(), - metadata=None, - name='instance', - sample_type=sample.TYPE_CUMULATIVE, unit='', - volume=1, user_id='user-id', - project_id='project-id', - resource_id='resource-id', source=None): - metadata = metadata or {'display_name': 'test-server', - 'tag': 'self.counter'} - s = sample.Sample( - name, sample_type, unit=unit, volume=volume, user_id=user_id, - project_id=project_id, resource_id=resource_id, - timestamp=timestamp, - resource_metadata=metadata, source=source - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret - ) - self.conn.record_metering_data(msg) - return msg - - def setUp(self): - super(DBTestBase, self).setUp() - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39) - self.prepare_data() - - def prepare_data(self): - original_timestamps = [(2012, 7, 2, 10, 40), (2012, 7, 2, 10, 41), - (2012, 7, 2, 10, 41), (2012, 7, 2, 10, 42), - (2012, 7, 2, 10, 43)] - - 
timestamps_for_test_samples_default_order = [(2012, 7, 2, 10, 44), - (2011, 5, 30, 18, 3), - (2012, 12, 1, 1, 25), - (2012, 2, 29, 6, 59), - (2013, 5, 31, 23, 7)] - timestamp_list = (original_timestamps + - timestamps_for_test_samples_default_order) - - self.msgs = [] - - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(2012, 7, 2, 10, 39), - source='test-1') - ) - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp_list[0]), - source='test-1') - ) - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp_list[1]), - resource_id='resource-id-alternate', - metadata={'display_name': 'test-server', 'tag': 'self.counter2'}, - source='test-2') - ) - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp_list[2]), - resource_id='resource-id-alternate', - user_id='user-id-alternate', - metadata={'display_name': 'test-server', 'tag': 'self.counter3'}, - source='test-3') - ) - - start_idx = 3 - end_idx = len(timestamp_list) - - for i, ts in zip(range(start_idx - 1, end_idx - 1), - timestamp_list[start_idx:end_idx]): - self.msgs.append( - self.create_and_store_sample( - timestamp=datetime.datetime(*ts), - user_id='user-id-%s' % i, - project_id='project-id-%s' % i, - resource_id='resource-id-%s' % i, - metadata={ - 'display_name': 'test-server', - 'tag': 'counter-%s' % i - }, - source='test') - ) - - -class ResourceTest(DBTestBase): - def prepare_data(self): - super(ResourceTest, self).prepare_data() - - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(2012, 7, 2, 10, 39), - user_id='mongodb_test', - resource_id='resource-id-mongo_bad_key', - project_id='project-id-test', - metadata={'display.name': {'name.$1': 'test-server1', - '$name_2': 'test-server2'}, - 'tag': 'self.counter'}, - source='test-4' - )) - - def test_get_resources(self): - expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) - expected_last_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 40) - msgs_sources = [msg['source'] for msg in self.msgs] - resources = list(self.conn.get_resources()) - self.assertEqual(10, len(resources)) - for resource in resources: - if resource.resource_id != 'resource-id': - continue - self.assertEqual(expected_first_sample_timestamp, - resource.first_sample_timestamp) - self.assertEqual(expected_last_sample_timestamp, - resource.last_sample_timestamp) - self.assertEqual('resource-id', resource.resource_id) - self.assertEqual('project-id', resource.project_id) - self.assertIn(resource.source, msgs_sources) - self.assertEqual('user-id', resource.user_id) - self.assertEqual('test-server', resource.metadata['display_name']) - break - else: - self.fail('Never found resource-id') - - def test_get_resources_start_timestamp(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 42) - expected = set(['resource-id-2', 'resource-id-3', 'resource-id-4', - 'resource-id-6', 'resource-id-8']) - - resources = list(self.conn.get_resources(start_timestamp=timestamp)) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=timestamp, - start_timestamp_op='ge')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=timestamp, - start_timestamp_op='gt')) - resource_ids = [r.resource_id for r in resources] - expected.remove('resource-id-2') - 
self.assertEqual(expected, set(resource_ids)) - - def test_get_resources_end_timestamp(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 42) - expected = set(['resource-id', 'resource-id-alternate', - 'resource-id-5', 'resource-id-7', - 'resource-id-mongo_bad_key']) - - resources = list(self.conn.get_resources(end_timestamp=timestamp)) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(end_timestamp=timestamp, - end_timestamp_op='lt')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(end_timestamp=timestamp, - end_timestamp_op='le')) - resource_ids = [r.resource_id for r in resources] - expected.add('resource-id-2') - self.assertEqual(expected, set(resource_ids)) - - def test_get_resources_both_timestamps(self): - start_ts = datetime.datetime(2012, 7, 2, 10, 42) - end_ts = datetime.datetime(2012, 7, 2, 10, 43) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts)) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-2']), set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='ge', - end_timestamp_op='lt')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-2']), set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='gt', - end_timestamp_op='lt')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(0, len(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='gt', - end_timestamp_op='le')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-3']), set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='ge', - end_timestamp_op='le')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-2', 'resource-id-3']), - set(resource_ids)) - - def test_get_resources_by_source(self): - resources = list(self.conn.get_resources(source='test-1')) - self.assertEqual(1, len(resources)) - ids = set(r.resource_id for r in resources) - self.assertEqual(set(['resource-id']), ids) - - def test_get_resources_by_user(self): - resources = list(self.conn.get_resources(user='user-id')) - self.assertTrue(len(resources) == 2 or len(resources) == 1) - ids = set(r.resource_id for r in resources) - # tolerate storage driver only reporting latest owner of resource - resources_ever_owned_by = set(['resource-id', - 'resource-id-alternate']) - resources_now_owned_by = set(['resource-id']) - self.assertTrue(ids == resources_ever_owned_by or - ids == resources_now_owned_by, - 'unexpected resources: %s' % ids) - - def test_get_resources_by_alternate_user(self): - resources = list(self.conn.get_resources(user='user-id-alternate')) - self.assertEqual(1, len(resources)) - # only a single resource owned by this user ever - self.assertEqual('resource-id-alternate', resources[0].resource_id) - - def test_get_resources_by_project(self): - resources = list(self.conn.get_resources(project='project-id')) - self.assertEqual(2, len(resources)) - ids = set(r.resource_id for r in resources) - self.assertEqual(set(['resource-id', 
'resource-id-alternate']), ids) - - def test_get_resources_by_metaquery(self): - q = {'metadata.display_name': 'test-server'} - resources = list(self.conn.get_resources(metaquery=q)) - self.assertEqual(9, len(resources)) - - def test_get_resources_by_metaquery_key_with_dot_in_metadata(self): - q = {'metadata.display.name.$name_2': 'test-server2', - 'metadata.display.name.name.$1': 'test-server1'} - resources = list(self.conn.get_resources(metaquery=q)) - self.assertEqual(1, len(resources)) - - def test_get_resources_by_empty_metaquery(self): - resources = list(self.conn.get_resources(metaquery={})) - self.assertEqual(10, len(resources)) - - def test_get_resources_most_recent_metadata_all(self): - resources = self.conn.get_resources() - expected_tags = ['self.counter', 'self.counter3', 'counter-2', - 'counter-3', 'counter-4', 'counter-5', 'counter-6', - 'counter-7', 'counter-8'] - - for resource in resources: - self.assertIn(resource.metadata['tag'], expected_tags) - - def test_get_resources_most_recent_metadata_single(self): - resource = list( - self.conn.get_resources(resource='resource-id-alternate') - )[0] - expected_tag = 'self.counter3' - self.assertEqual(expected_tag, resource.metadata['tag']) - - -class ResourceTestOrdering(DBTestBase): - def prepare_data(self): - sample_timings = [('resource-id-1', [(2013, 8, 10, 10, 43), - (2013, 8, 10, 10, 44), - (2013, 8, 10, 10, 42), - (2013, 8, 10, 10, 49), - (2013, 8, 10, 10, 47)]), - ('resource-id-2', [(2013, 8, 10, 10, 43), - (2013, 8, 10, 10, 48), - (2013, 8, 10, 10, 42), - (2013, 8, 10, 10, 48), - (2013, 8, 10, 10, 47)]), - ('resource-id-3', [(2013, 8, 10, 10, 43), - (2013, 8, 10, 10, 44), - (2013, 8, 10, 10, 50), - (2013, 8, 10, 10, 49), - (2013, 8, 10, 10, 47)])] - - counter = 0 - for resource, timestamps in sample_timings: - for timestamp in timestamps: - self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp), - resource_id=resource, - user_id=str(counter % 2), - project_id=str(counter % 3), - metadata={ - 'display_name': 'test-server', - 'tag': 'sample-%s' % counter - }, - source='test' - ) - counter += 1 - - def test_get_resources_ordering_all(self): - resources = list(self.conn.get_resources()) - expected = set([ - ('resource-id-1', 'sample-3'), - ('resource-id-2', 'sample-8'), - ('resource-id-3', 'sample-12') - ]) - received = set([(r.resource_id, r.metadata['tag']) for r in resources]) - self.assertEqual(expected, received) - - def test_get_resources_ordering_single(self): - resource = list(self.conn.get_resources(resource='resource-id-2'))[0] - self.assertEqual('resource-id-2', resource.resource_id) - self.assertEqual('sample-8', resource.metadata['tag']) - - -class MeterTest(DBTestBase): - def test_get_meters(self): - msgs_sources = [msg['source'] for msg in self.msgs] - results = list(self.conn.get_meters()) - self.assertEqual(9, len(results)) - for meter in results: - self.assertIn(meter.source, msgs_sources) - - def test_get_meters_by_user(self): - results = list(self.conn.get_meters(user='user-id')) - self.assertEqual(1, len(results)) - - def test_get_meters_by_project(self): - results = list(self.conn.get_meters(project='project-id')) - self.assertEqual(2, len(results)) - - def test_get_meters_by_metaquery(self): - q = {'metadata.display_name': 'test-server'} - results = list(self.conn.get_meters(metaquery=q)) - self.assertIsNotEmpty(results) - self.assertEqual(9, len(results)) - - def test_get_meters_by_empty_metaquery(self): - results = list(self.conn.get_meters(metaquery={})) - self.assertEqual(9, 
len(results)) - - -class RawSampleTest(DBTestBase): - - def prepare_data(self): - super(RawSampleTest, self).prepare_data() - - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(2012, 7, 2, 10, 39), - user_id='mongodb_test', - resource_id='resource-id-mongo_bad_key', - project_id='project-id-test', - metadata={'display.name': {'name.$1': 'test-server1', - '$name_2': 'test-server2'}, - 'tag': 'self.counter'}, - source='test-4' - )) - - def test_get_sample_counter_volume(self): - # NOTE(idegtiarov) Because wsme expected a float type of data this test - # checks type of counter_volume received from database. - f = storage.SampleFilter() - result = next(self.conn.get_samples(f, limit=1)) - self.assertIsInstance(result.counter_volume, float) - - def test_get_samples_limit_zero(self): - f = storage.SampleFilter() - results = list(self.conn.get_samples(f, limit=0)) - self.assertEqual(0, len(results)) - - def test_get_samples_limit(self): - f = storage.SampleFilter() - results = list(self.conn.get_samples(f, limit=3)) - self.assertEqual(3, len(results)) - for result in results: - self.assertTimestampEqual(timeutils.utcnow(), result.recorded_at) - - def test_get_samples_in_default_order(self): - f = storage.SampleFilter() - prev_timestamp = None - for sample_item in self.conn.get_samples(f): - if prev_timestamp is not None: - self.assertTrue(prev_timestamp >= sample_item.timestamp) - prev_timestamp = sample_item.timestamp - - def test_get_samples_by_user(self): - f = storage.SampleFilter(user='user-id') - results = list(self.conn.get_samples(f)) - self.assertEqual(3, len(results)) - for meter in results: - d = meter.as_dict() - self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertIn(d, self.msgs[:3]) - - def test_get_samples_by_user_limit(self): - f = storage.SampleFilter(user='user-id') - results = list(self.conn.get_samples(f, limit=1)) - self.assertEqual(1, len(results)) - - def test_get_samples_by_user_limit_bigger(self): - f = storage.SampleFilter(user='user-id') - results = list(self.conn.get_samples(f, limit=42)) - self.assertEqual(3, len(results)) - - def test_get_samples_by_project(self): - f = storage.SampleFilter(project='project-id') - results = list(self.conn.get_samples(f)) - self.assertIsNotNone(results) - for meter in results: - d = meter.as_dict() - self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertIn(d, self.msgs[:4]) - - def test_get_samples_by_resource(self): - f = storage.SampleFilter(user='user-id', resource='resource-id') - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - d = results[1].as_dict() - self.assertEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertEqual(self.msgs[0], d) - - def test_get_samples_by_metaquery(self): - q = {'metadata.display_name': 'test-server'} - f = storage.SampleFilter(metaquery=q) - results = list(self.conn.get_samples(f)) - self.assertIsNotNone(results) - for meter in results: - d = meter.as_dict() - self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertIn(d, self.msgs) - - def test_get_samples_by_metaquery_key_with_dot_in_metadata(self): - q = {'metadata.display.name.name.$1': 'test-server1', - 'metadata.display.name.$name_2': 'test-server2'} - f = storage.SampleFilter(metaquery=q) - results = list(self.conn.get_samples(f)) - self.assertIsNotNone(results) - self.assertEqual(1, len(results)) - - def 
test_get_samples_by_start_time(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 41) - f = storage.SampleFilter( - user='user-id', - start_timestamp=timestamp, - ) - - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(timestamp, results[0].timestamp) - - f.start_timestamp_op = 'ge' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(timestamp, results[0].timestamp) - - f.start_timestamp_op = 'gt' - results = list(self.conn.get_samples(f)) - self.assertEqual(0, len(results)) - - def test_get_samples_by_end_time(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 40) - f = storage.SampleFilter( - user='user-id', - end_timestamp=timestamp, - ) - - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - - f.end_timestamp_op = 'lt' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - - f.end_timestamp_op = 'le' - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - self.assertEqual(datetime.datetime(2012, 7, 2, 10, 39), - results[1].timestamp) - - def test_get_samples_by_both_times(self): - start_ts = datetime.datetime(2012, 7, 2, 10, 42) - end_ts = datetime.datetime(2012, 7, 2, 10, 43) - f = storage.SampleFilter( - start_timestamp=start_ts, - end_timestamp=end_ts, - ) - - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(start_ts, results[0].timestamp) - - f.start_timestamp_op = 'gt' - f.end_timestamp_op = 'lt' - results = list(self.conn.get_samples(f)) - self.assertEqual(0, len(results)) - - f.start_timestamp_op = 'ge' - f.end_timestamp_op = 'lt' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(start_ts, results[0].timestamp) - - f.start_timestamp_op = 'gt' - f.end_timestamp_op = 'le' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(end_ts, results[0].timestamp) - - f.start_timestamp_op = 'ge' - f.end_timestamp_op = 'le' - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - self.assertEqual(end_ts, results[0].timestamp) - self.assertEqual(start_ts, results[1].timestamp) - - def test_get_samples_by_name(self): - f = storage.SampleFilter(user='user-id', meter='no-such-meter') - results = list(self.conn.get_samples(f)) - self.assertIsEmpty(results) - - def test_get_samples_by_name2(self): - f = storage.SampleFilter(user='user-id', meter='instance') - results = list(self.conn.get_samples(f)) - self.assertIsNotEmpty(results) - - def test_get_samples_by_source(self): - f = storage.SampleFilter(source='test-1') - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase') - def test_clear_metering_data(self): - # NOTE(jd) Override this test in MongoDB because our code doesn't clear - # the collections, this is handled by MongoDB TTL feature. 
- - self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(3 * 60) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(5, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(5, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase') - def test_clear_metering_data_no_data_to_remove(self): - # NOTE(jd) Override this test in MongoDB because our code doesn't clear - # the collections, this is handled by MongoDB TTL feature. - - self.mock_utcnow.return_value = datetime.datetime(2010, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(3 * 60) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(12, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(10, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') - def test_clear_metering_data_expire_samples_only(self): - - cfg.CONF.set_override('sql_expire_samples_only', True) - self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(4 * 60) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(7, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(6, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') - def test_record_metering_data_retry_success_on_deadlock(self): - raise_deadlock = [False, True] - self.CONF.set_override('max_retries', 2, group='database') - - s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', - volume=1, user_id='user_id', - project_id='project_id', - resource_id='resource_id', - timestamp=datetime.datetime.utcnow(), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.counter'}, - source=None) - - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret - ) - - mock_resource_create = mock.patch.object(self.conn, "_create_resource") - - mock_resource_create.side_effect = self.create_side_effect( - self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) - with mock.patch.object(api.time, 'sleep') as retry_sleep: - self.conn.record_metering_data(msg) - self.assertEqual(1, retry_sleep.call_count) - - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(13, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') - def test_record_metering_data_retry_failure_on_deadlock(self): - raise_deadlock = [True, True, True] - self.CONF.set_override('max_retries', 3, group='database') - - s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', - volume=1, user_id='user_id', - project_id='project_id', - resource_id='resource_id', - timestamp=datetime.datetime.utcnow(), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.counter'}, - source=None) - - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret - ) - - mock_resource_create = mock.patch.object(self.conn, "_create_resource") - - mock_resource_create.side_effect = self.create_side_effect( - self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) - with mock.patch.object(api.time, 'sleep') as retry_sleep: - try: - self.conn.record_metering_data(msg) - except dbexc.DBError as err: - self.assertIn('DBDeadlock', str(type(err))) - self.assertEqual(3, retry_sleep.call_count) - - -class ComplexSampleQueryTest(DBTestBase): - def setUp(self): - 
super(ComplexSampleQueryTest, self).setUp() - self.complex_filter = { - "and": - [{"or": - [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-44"}}]}, - {"and": - [{"=": {"counter_name": "cpu_util"}}, - {"and": - [{">": {"counter_volume": 0.4}}, - {"not": {">": {"counter_volume": 0.8}}}]}]}]} - or_expression = [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-43"}}, - {"=": {"resource_id": "resource-id-44"}}] - and_expression = [{">": {"counter_volume": 0.4}}, - {"not": {">": {"counter_volume": 0.8}}}] - self.complex_filter_list = {"and": - [{"or": or_expression}, - {"and": - [{"=": {"counter_name": "cpu_util"}}, - {"and": and_expression}]}]} - in_expression = {"in": {"resource_id": ["resource-id-42", - "resource-id-43", - "resource-id-44"]}} - self.complex_filter_in = {"and": - [in_expression, - {"and": - [{"=": {"counter_name": "cpu_util"}}, - {"and": and_expression}]}]} - - def _create_samples(self): - for resource in range(42, 45): - for volume in [0.79, 0.41, 0.4, 0.8, 0.39, 0.81]: - metadata = {'a_string_key': "meta-value" + str(volume), - 'a_float_key': volume, - 'an_int_key': resource, - 'a_bool_key': (resource == 43)} - - self.create_and_store_sample(resource_id="resource-id-%s" - % resource, - metadata=metadata, - name="cpu_util", - volume=volume) - - def test_no_filter(self): - results = list(self.conn.query_samples()) - self.assertEqual(len(self.msgs), len(results)) - for sample_item in results: - d = sample_item.as_dict() - del d['recorded_at'] - self.assertIn(d, self.msgs) - - def test_query_complex_filter_with_regexp(self): - self._create_samples() - complex_regex_filter = {"and": [ - {"=~": {"resource_id": "resource-id.*"}}, - {"=": {"counter_volume": 0.4}}]} - results = list( - self.conn.query_samples(filter_expr=complex_regex_filter)) - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", - "resource-id-43", - "resource-id-44"])) - - def test_query_complex_filter_with_regexp_metadata(self): - self._create_samples() - complex_regex_filter = {"and": [ - {"=~": {"resource_metadata.a_string_key": "meta-value.*"}}, - {"=": {"counter_volume": 0.4}}]} - results = list( - self.conn.query_samples(filter_expr=complex_regex_filter)) - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertEqual("meta-value0.4", - sample_item.resource_metadata['a_string_key']) - - def test_no_filter_with_zero_limit(self): - limit = 0 - results = list(self.conn.query_samples(limit=limit)) - self.assertEqual(limit, len(results)) - - def test_no_filter_with_limit(self): - limit = 3 - results = list(self.conn.query_samples(limit=limit)) - self.assertEqual(limit, len(results)) - - def test_query_simple_filter(self): - simple_filter = {"=": {"resource_id": "resource-id-8"}} - results = list(self.conn.query_samples(filter_expr=simple_filter)) - self.assertEqual(1, len(results)) - for sample_item in results: - self.assertEqual("resource-id-8", sample_item.resource_id) - - def test_query_simple_filter_with_not_equal_relation(self): - simple_filter = {"!=": {"resource_id": "resource-id-8"}} - results = list(self.conn.query_samples(filter_expr=simple_filter)) - self.assertEqual(len(self.msgs) - 1, len(results)) - for sample_item in results: - self.assertNotEqual("resource-id-8", sample_item.resource_id) - - def test_query_complex_filter(self): - self._create_samples() - results = list(self.conn.query_samples(filter_expr=( - self.complex_filter))) - 
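# [Editor's sketch of how the filter expressions built in setUp above are put
# together; illustrative only, assuming a storage connection named 'conn' in
# place of the self.conn used by these tests.]  A filter expression is a nested
# dict: leaf operators ("=", "!=", "<", "<=", ">", "=~", "in") map a field name
# to a value, and "and" / "or" / "not" combine sub-expressions; metadata fields
# are addressed as "resource_metadata.<key>".
filter_expr = {
    "and": [
        {"or": [{"=": {"resource_id": "resource-id-42"}},
                {"=": {"resource_id": "resource-id-44"}}]},
        {"and": [{"=": {"counter_name": "cpu_util"}},
                 {"and": [{">": {"counter_volume": 0.4}},
                          {"not": {">": {"counter_volume": 0.8}}}]}]},
    ],
}
# orderby entries map a field to "asc" or "desc"; limit caps the result count.
samples = list(conn.query_samples(filter_expr=filter_expr,
                                  orderby=[{"counter_volume": "asc"}],
                                  limit=10))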
self.assertEqual(6, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", "resource-id-44"])) - self.assertEqual("cpu_util", sample_item.counter_name) - self.assertTrue(sample_item.counter_volume > 0.4) - self.assertTrue(sample_item.counter_volume <= 0.8) - - def test_query_complex_filter_with_limit(self): - self._create_samples() - limit = 3 - results = list(self.conn.query_samples(filter_expr=self.complex_filter, - limit=limit)) - self.assertEqual(limit, len(results)) - - def test_query_complex_filter_with_simple_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] - orderby = [{"counter_volume": "asc"}] - results = list(self.conn.query_samples(filter_expr=self.complex_filter, - orderby=orderby)) - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - - def test_query_complex_filter_with_complex_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] - expected_resource_id_order = ["resource-id-44", "resource-id-42", - "resource-id-44", "resource-id-42", - "resource-id-44", "resource-id-42"] - - orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] - - results = list(self.conn.query_samples(filter_expr=self.complex_filter, - orderby=orderby)) - - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - self.assertEqual(expected_resource_id_order, - [s.resource_id for s in results]) - - def test_query_complex_filter_with_list(self): - self._create_samples() - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list)) - self.assertEqual(9, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", - "resource-id-43", - "resource-id-44"])) - self.assertEqual("cpu_util", sample_item.counter_name) - self.assertTrue(sample_item.counter_volume > 0.4) - self.assertTrue(sample_item.counter_volume <= 0.8) - - def test_query_complex_filter_with_list_with_limit(self): - self._create_samples() - limit = 3 - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list, - limit=limit)) - self.assertEqual(limit, len(results)) - - def test_query_complex_filter_with_list_with_simple_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, - 0.79, 0.8, 0.8, 0.8] - orderby = [{"counter_volume": "asc"}] - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list, - orderby=orderby)) - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - - def test_query_complex_filterwith_list_with_complex_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, - 0.79, 0.8, 0.8, 0.8] - expected_resource_id_order = ["resource-id-44", "resource-id-43", - "resource-id-42", "resource-id-44", - "resource-id-43", "resource-id-42", - "resource-id-44", "resource-id-43", - "resource-id-42"] - - orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] - - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list, - orderby=orderby)) - - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - self.assertEqual(expected_resource_id_order, - [s.resource_id for s in results]) - - def test_query_complex_filter_with_wrong_order_in_orderby(self): - self._create_samples() - - orderby = [{"counter_volume": "not valid order"}, - {"resource_id": "desc"}] - - query = lambda: 
list(self.conn.query_samples(filter_expr=( - self.complex_filter), - orderby=orderby)) - self.assertRaises(KeyError, query) - - def test_query_complex_filter_with_in(self): - self._create_samples() - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_in)) - self.assertEqual(9, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", - "resource-id-43", - "resource-id-44"])) - self.assertEqual("cpu_util", sample_item.counter_name) - self.assertTrue(sample_item.counter_volume > 0.4) - self.assertTrue(sample_item.counter_volume <= 0.8) - - def test_query_simple_metadata_filter(self): - self._create_samples() - - filter_expr = {"=": {"resource_metadata.a_bool_key": True}} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(6, len(results)) - for sample_item in results: - self.assertTrue(sample_item.resource_metadata["a_bool_key"]) - - def test_query_simple_metadata_with_in_op(self): - self._create_samples() - - filter_expr = {"in": {"resource_metadata.an_int_key": [42, 43]}} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(12, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_metadata["an_int_key"], - [42, 43]) - - def test_query_complex_metadata_filter(self): - self._create_samples() - subfilter = {"or": [{"=": {"resource_metadata.a_string_key": - "meta-value0.81"}}, - {"<=": {"resource_metadata.a_float_key": 0.41}}]} - filter_expr = {"and": [{">": {"resource_metadata.an_int_key": 42}}, - subfilter]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(8, len(results)) - for sample_item in results: - self.assertTrue((sample_item.resource_metadata["a_string_key"] == - "meta-value0.81" or - sample_item.resource_metadata["a_float_key"] <= - 0.41)) - self.assertTrue(sample_item.resource_metadata["an_int_key"] > 42) - - def test_query_mixed_data_and_metadata_filter(self): - self._create_samples() - subfilter = {"or": [{"=": {"resource_metadata.a_string_key": - "meta-value0.81"}}, - {"<=": {"resource_metadata.a_float_key": 0.41}}]} - - filter_expr = {"and": [{"=": {"resource_id": "resource-id-42"}}, - subfilter]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(4, len(results)) - for sample_item in results: - self.assertTrue((sample_item.resource_metadata["a_string_key"] == - "meta-value0.81" or - sample_item.resource_metadata["a_float_key"] <= - 0.41)) - self.assertEqual("resource-id-42", sample_item.resource_id) - - def test_query_non_existing_metadata_with_result(self): - self._create_samples() - - filter_expr = { - "or": [{"=": {"resource_metadata.a_string_key": - "meta-value0.81"}}, - {"<=": {"resource_metadata.key_not_exists": 0.41}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertEqual("meta-value0.81", - sample_item.resource_metadata["a_string_key"]) - - def test_query_non_existing_metadata_without_result(self): - self._create_samples() - - filter_expr = { - "or": [{"=": {"resource_metadata.key_not_exists": - "meta-value0.81"}}, - {"<=": {"resource_metadata.key_not_exists": 0.41}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - self.assertEqual(0, len(results)) - - def test_query_negated_metadata(self): - self._create_samples() - - filter_expr = { - "and": [{"=": {"resource_id": "resource-id-42"}}, - 
{"not": {"or": [{">": {"resource_metadata.an_int_key": - 43}}, - {"<=": {"resource_metadata.a_float_key": - 0.41}}]}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertEqual("resource-id-42", sample_item.resource_id) - self.assertTrue(sample_item.resource_metadata["an_int_key"] <= 43) - self.assertTrue(sample_item.resource_metadata["a_float_key"] > - 0.41) - - def test_query_negated_complex_expression(self): - self._create_samples() - filter_expr = { - "and": - [{"=": {"counter_name": "cpu_util"}}, - {"not": - {"or": - [{"or": - [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-44"}}]}, - {"and": - [{">": {"counter_volume": 0.4}}, - {"<": {"counter_volume": 0.8}}]}]}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(4, len(results)) - for sample_item in results: - self.assertEqual("resource-id-43", sample_item.resource_id) - self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) - self.assertEqual("cpu_util", sample_item.counter_name) - - def test_query_with_double_negation(self): - self._create_samples() - filter_expr = { - "and": - [{"=": {"counter_name": "cpu_util"}}, - {"not": - {"or": - [{"or": - [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-44"}}]}, - {"and": [{"not": {"<=": {"counter_volume": 0.4}}}, - {"<": {"counter_volume": 0.8}}]}]}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(4, len(results)) - for sample_item in results: - self.assertEqual("resource-id-43", sample_item.resource_id) - self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) - self.assertEqual("cpu_util", sample_item.counter_name) - - def test_query_negate_not_equal(self): - self._create_samples() - filter_expr = {"not": {"!=": {"resource_id": "resource-id-43"}}} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(6, len(results)) - for sample_item in results: - self.assertEqual("resource-id-43", sample_item.resource_id) - - def test_query_negated_in_op(self): - self._create_samples() - filter_expr = { - "and": [{"not": {"in": {"counter_volume": [0.39, 0.4, 0.79]}}}, - {"=": {"resource_id": "resource-id-42"}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertIn(sample_item.counter_volume, - [0.41, 0.8, 0.81]) - - -class StatisticsTest(DBTestBase): - def prepare_data(self): - for i in range(3): - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret', - ) - self.conn.record_metering_data(msg) - for i in range(3): - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 8 + i, - 'user-5', - 'project2', - 'resource-6', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret', - ) - self.conn.record_metering_data(msg) - for i in range(3): - c = sample.Sample( - 'memory', - 'gauge', - 'MB', - 8 + i, - 'user-5', - 'project2', - 'resource-6', - 
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={}, - source='test', - ) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret', - ) - self.conn.record_metering_data(msg) - - def test_by_meter(self): - f = storage.SampleFilter( - meter='memory' - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - - datetime.datetime(2012, 9, 25, 10, 30)).seconds, - results.duration) - self.assertEqual(3, results.count) - self.assertEqual('MB', results.unit) - self.assertEqual(8, results.min) - self.assertEqual(10, results.max) - self.assertEqual(27, results.sum) - self.assertEqual(9, results.avg) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - results.period_start) - self.assertEqual(datetime.datetime(2012, 9, 25, 12, 32), - results.period_end) - - def test_by_user(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - - datetime.datetime(2012, 9, 25, 10, 30)).seconds, - results.duration) - self.assertEqual(3, results.count) - self.assertEqual('GiB', results.unit) - self.assertEqual(8, results.min) - self.assertEqual(10, results.max) - self.assertEqual(27, results.sum) - self.assertEqual(9, results.avg) - - def test_no_period_in_query(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual(0, results.period) - - def test_period_is_int(self): - f = storage.SampleFilter( - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertIs(int, type(results.period)) - self.assertEqual(6, results.count) - - def test_by_user_period(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - start_timestamp='2012-09-25T10:28:00', - ) - results = list(self.conn.get_meter_statistics(f, period=7200)) - self.assertEqual(2, len(results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), - datetime.datetime(2012, 9, 25, 12, 28)]), - set(r.period_start for r in results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), - datetime.datetime(2012, 9, 25, 14, 28)]), - set(r.period_end for r in results)) - r = results[0] - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), - r.period_start) - self.assertEqual(2, r.count) - self.assertEqual('GiB', r.unit) - self.assertEqual(8.5, r.avg) - self.assertEqual(8, r.min) - self.assertEqual(9, r.max) - self.assertEqual(17, r.sum) - self.assertEqual(7200, r.period) - self.assertIsInstance(r.period, int) - expected_end = r.period_start + datetime.timedelta(seconds=7200) - self.assertEqual(expected_end, r.period_end) - self.assertEqual(3660, r.duration) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - r.duration_start) - self.assertEqual(datetime.datetime(2012, 9, 25, 11, 31), - r.duration_end) - - def test_by_user_period_with_timezone(self): - dates = [ - '2012-09-25T00:28:00-10:00', - '2012-09-25T01:28:00-09:00', - '2012-09-25T02:28:00-08:00', - '2012-09-25T03:28:00-07:00', - '2012-09-25T04:28:00-06:00', - '2012-09-25T05:28:00-05:00', - '2012-09-25T06:28:00-04:00', - '2012-09-25T07:28:00-03:00', - '2012-09-25T08:28:00-02:00', - '2012-09-25T09:28:00-01:00', - '2012-09-25T10:28:00Z', - '2012-09-25T11:28:00+01:00', - '2012-09-25T12:28:00+02:00', - '2012-09-25T13:28:00+03:00', - '2012-09-25T14:28:00+04:00', - '2012-09-25T15:28:00+05:00', - 
'2012-09-25T16:28:00+06:00', - '2012-09-25T17:28:00+07:00', - '2012-09-25T18:28:00+08:00', - '2012-09-25T19:28:00+09:00', - '2012-09-25T20:28:00+10:00', - '2012-09-25T21:28:00+11:00', - '2012-09-25T22:28:00+12:00', - ] - for date in dates: - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - start_timestamp=date - ) - results = list(self.conn.get_meter_statistics(f, period=7200)) - self.assertEqual(2, len(results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), - datetime.datetime(2012, 9, 25, 12, 28)]), - set(r.period_start for r in results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), - datetime.datetime(2012, 9, 25, 14, 28)]), - set(r.period_end for r in results)) - - def test_by_user_period_start_end(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - start_timestamp='2012-09-25T10:28:00', - end_timestamp='2012-09-25T11:28:00', - ) - results = list(self.conn.get_meter_statistics(f, period=1800)) - self.assertEqual(1, len(results)) - r = results[0] - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), - r.period_start) - self.assertEqual(1, r.count) - self.assertEqual('GiB', r.unit) - self.assertEqual(8, r.avg) - self.assertEqual(8, r.min) - self.assertEqual(8, r.max) - self.assertEqual(8, r.sum) - self.assertEqual(1800, r.period) - self.assertEqual(r.period_start + datetime.timedelta(seconds=1800), - r.period_end) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - r.duration_start) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - r.duration_end) - - def test_by_project(self): - f = storage.SampleFilter( - meter='volume.size', - resource='resource-id', - start_timestamp='2012-09-25T11:30:00', - end_timestamp='2012-09-25T11:32:00', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual(0, results.duration) - self.assertEqual(1, results.count) - self.assertEqual('GiB', results.unit) - self.assertEqual(6, results.min) - self.assertEqual(6, results.max) - self.assertEqual(6, results.sum) - self.assertEqual(6, results.avg) - - def test_one_resource(self): - f = storage.SampleFilter( - user='user-id', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - - datetime.datetime(2012, 9, 25, 10, 30)).seconds, - results.duration) - self.assertEqual(3, results.count) - self.assertEqual('GiB', results.unit) - self.assertEqual(5, results.min) - self.assertEqual(7, results.max) - self.assertEqual(18, results.sum) - self.assertEqual(6, results.avg) - - def test_with_no_sample(self): - f = storage.SampleFilter( - user='user-not-exists', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f, period=1800)) - self.assertEqual([], results) - - -class StatisticsGroupByTest(DBTestBase): - def prepare_data(self): - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source-2', 'metadata_instance_type': '84'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-2', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source-2', 'metadata_instance_type': '83'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), - 
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '82'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '82'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '84'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '82'}, - {'volume': 4, 'user': 'user-3', 'project': 'project-1', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-3', 'metadata_instance_type': '83'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_CUMULATIVE, - unit='s', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], - 'instance_type': - test_sample['metadata_instance_type']}, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_group_by_user(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, groupby=['user_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['user_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) - - for r in results: - if r.groupby == {'user_id': 'user-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2'}: - self.assertEqual(4, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(8, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_resource(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['resource_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - for r in results: - if r.groupby == 
{'resource_id': 'resource-1'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-2'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_project(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(2, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(5, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(10, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - - def test_group_by_source(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, groupby=['source'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['source']), groupby_keys_set) - self.assertEqual(set(['source-1', 'source-2', 'source-3']), - groupby_vals_set) - - for r in results: - if r.groupby == {'source': 'source-1'}: - self.assertEqual(4, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(8, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'source': 'source-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'source': 'source-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_unknown_field(self): - f = storage.SampleFilter( - meter='instance', - ) - # NOTE(terriyu): The MongoDB get_meter_statistics() returns a list - # whereas the SQLAlchemy get_meter_statistics() returns a generator. - # You have to apply list() to the SQLAlchemy generator to get it to - # throw an error. The MongoDB get_meter_statistics() will throw an - # error before list() is called. By using lambda, we can cover both - # MongoDB and SQLAlchemy in a single test. 
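# [Editor's sketch: the lambda pattern from the NOTE above in generic form;
# the names here are invented for the example.]  A generator raises only once it
# is iterated, so wrapping the list() call in a callable lets assertRaises()
# catch the error for both eager (list-returning) and lazy (generator-returning)
# implementations.
import unittest


def lazy_stats():
    # Calling this function only creates the generator; the error surfaces when
    # iteration starts (e.g. inside list()).
    raise ValueError("unknown groupby field")
    yield  # unreachable, but makes this a generator function


class LambdaAssertRaisesExample(unittest.TestCase):
    def test_error_raised_on_iteration(self):
        self.assertRaises(ValueError, lambda: list(lazy_stats()))


if __name__ == "__main__":
    unittest.main()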
- self.assertRaises( - ceilometer.NotImplementedError, - lambda: list(self.conn.get_meter_statistics(f, groupby=['wtf'])) - ) - - def test_group_by_metadata(self): - # This test checks grouping by a single metadata field - # (now only resource_metadata.instance_type is available). - f = storage.SampleFilter( - meter='instance', - ) - results = list( - self.conn.get_meter_statistics( - f, groupby=['resource_metadata.instance_type'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), groupby_vals_set) - - for r in results: - if r.groupby == {'resource_metadata.instance_type': '82'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '83'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '84'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - - def test_group_by_multiple_regular(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['user_id', - 'resource_id'])) - self.assertEqual(4, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', - 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in results: - if r.groupby == {'user_id': 'user-1', 'resource_id': 'resource-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_id': 'resource-2'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-3', - 'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - else: - self.assertNotEqual({'user_id': 'user-1', - 'resource_id': 'resource-2'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-1', - 'resource_id': 'resource-3'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-2', - 'resource_id': 'resource-3'}, - r.groupby) - self.assertNotEqual({'user_id': 
'user-3', - 'resource_id': 'resource-1'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_id': 'resource-2'}, - r.groupby, ) - - def test_group_by_multiple_metadata(self): - # TODO(terriyu): test_group_by_multiple_metadata needs to be - # implemented. - # This test should check grouping by multiple metadata fields. - pass - - def test_group_by_multiple_regular_metadata(self): - # This test checks grouping by a combination of regular and - # metadata fields. - f = storage.SampleFilter( - meter='instance', - ) - results = list( - self.conn.get_meter_statistics( - f, groupby=['user_id', 'resource_metadata.instance_type'])) - self.assertEqual(5, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['user_id', 'resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3', '82', - '83', '84']), - groupby_vals_set) - - for r in results: - if r.groupby == {'user_id': 'user-1', - 'resource_metadata.instance_type': '83'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-1', - 'resource_metadata.instance_type': '84'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_metadata.instance_type': '82'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_metadata.instance_type': '84'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-3', - 'resource_metadata.instance_type': '83'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - else: - self.assertNotEqual({'user_id': 'user-1', - 'resource_metadata.instance_type': '82'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-2', - 'resource_metadata.instance_type': '83'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_metadata.instance_type': '82'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_metadata.instance_type': '84'}, - r.groupby) - - def test_group_by_with_query_filter(self): - f = storage.SampleFilter( - meter='instance', - project='project-1', - ) - results = list(self.conn.get_meter_statistics( - f, - groupby=['resource_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in results: - if r.groupby == {'resource_id': 'resource-1'}: - self.assertEqual(2, r.count) - 
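# [Editor's sketch, illustrative only: the call shape these group-by assertions
# exercise, with 'conn' standing in for the self.conn connection used by the
# tests.]  Each Statistics object carries a 'groupby' dict naming its bucket
# plus the aggregates checked in these loops (count, unit, min, max, sum, avg);
# when period=N is given, it also exposes period_start/period_end and
# duration_start/duration_end, as asserted in the period tests further below.
f = storage.SampleFilter(meter='instance', project='project-1')
for stat in conn.get_meter_statistics(f, period=7200, groupby=['resource_id']):
    print(stat.groupby, stat.count, stat.min, stat.max, stat.sum, stat.avg,
          stat.period_start, stat.period_end)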
self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - elif r.groupby == {'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_metadata_with_query_filter(self): - # This test checks grouping by a metadata field in combination - # with a query filter. - f = storage.SampleFilter( - meter='instance', - project='project-1', - ) - results = list(self.conn.get_meter_statistics( - f, - groupby=['resource_metadata.instance_type'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), - groupby_vals_set) - - for r in results: - if r.groupby == {'resource_metadata.instance_type': '82'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '83'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '84'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - - def test_group_by_with_query_filter_multiple(self): - f = storage.SampleFilter( - meter='instance', - user='user-2', - source='source-1', - ) - results = list(self.conn.get_meter_statistics( - f, - groupby=['project_id', 'resource_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2', - 'resource-1', 'resource-2']), - groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-1', - 'resource_id': 'resource-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - elif r.groupby == {'project_id': 'project-2', - 'resource_id': 'resource-2'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - else: - 
self.assertNotEqual({'project_id': 'project-2', - 'resource_id': 'resource-1'}, - r.groupby) - - def test_group_by_metadata_with_query_filter_multiple(self): - # TODO(terriyu): test_group_by_metadata_with_query_filter_multiple - # needs to be implemented. - # This test should check grouping by multiple metadata fields in - # combination with a query filter. - pass - - def test_group_by_with_period(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - period=7200, - groupby=['project_id'])) - self.assertEqual(4, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11), - datetime.datetime(2013, 8, 1, 16, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 
1, 18, 11), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - - def test_group_by_metadata_with_period(self): - # This test checks grouping by metadata fields in combination - # with period grouping. - f = storage.SampleFilter( - meter='instance') - - results = list(self.conn.get_meter_statistics(f, period=7200, - groupby=['resource_metadata.instance_type'])) - self.assertEqual(5, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11), - datetime.datetime(2013, 8, 1, 16, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'resource_metadata.instance_type': '82'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '82'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '83'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '83'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 
37), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '84'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - else: - self.assertNotEqual([{'resource_metadata.instance_type': '82'}, - datetime.datetime(2013, 8, 1, 14, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '83'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '84'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '84'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - - def test_group_by_with_query_filter_and_period(self): - f = storage.SampleFilter( - meter='instance', - source='source-1', - ) - results = list(self.conn.get_meter_statistics(f, - period=7200, - groupby=['project_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11), - datetime.datetime(2013, 8, 1, 16, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - 
self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - - def test_group_by_metadata_with_query_filter_and_period(self): - # This test checks grouping with metadata fields in combination - # with a query filter and period grouping. - f = storage.SampleFilter( - meter='instance', - project='project-1', - ) - results = list( - self.conn.get_meter_statistics( - f, period=7200, groupby=['resource_metadata.instance_type'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'resource_metadata.instance_type': '82'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '83'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '84'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - else: - 
self.assertNotEqual([{'resource_metadata.instance_type': '82'}, - datetime.datetime(2013, 8, 1, 14, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '83'}, - datetime.datetime(2013, 8, 1, 14, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '84'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - - def test_group_by_start_timestamp_after(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 17, 28, 1), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - - self.assertEqual([], results) - - def test_group_by_end_timestamp_before(self): - f = storage.SampleFilter( - meter='instance', - end_timestamp=datetime.datetime(2013, 8, 1, 10, 10, 59), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - - self.assertEqual([], results) - - def test_group_by_start_timestamp(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 14, 58), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(2, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - - def test_group_by_end_timestamp(self): - f = storage.SampleFilter( - meter='instance', - end_timestamp=datetime.datetime(2013, 8, 1, 11, 45), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(1, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - - def test_group_by_start_end_timestamp(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 8, 17, 3), - end_timestamp=datetime.datetime(2013, 8, 1, 23, 59, 59), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(2, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), 
groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(5, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(10, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - - def test_group_by_start_end_timestamp_with_query_filter(self): - f = storage.SampleFilter( - meter='instance', - project='project-1', - start_timestamp=datetime.datetime(2013, 8, 1, 11, 1), - end_timestamp=datetime.datetime(2013, 8, 1, 20, 0), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['resource_id'])) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) - - for r in results: - if r.groupby == {'resource_id': 'resource-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_start_end_timestamp_with_period(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 14, 0), - end_timestamp=datetime.datetime(2013, 8, 1, 17, 0), - ) - results = list(self.conn.get_meter_statistics(f, - period=3600, - groupby=['project_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 14, 0), - datetime.datetime(2013, 8, 1, 15, 0), - datetime.datetime(2013, 8, 1, 16, 0)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_end) - self.assertEqual(3600, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 0), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - 
self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(3600, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 0), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 15, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_end) - self.assertEqual(3600, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 15, 0)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 14, 0)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 16, 0)], - [r.groupby, r.period_start]) - - def test_group_by_start_end_timestamp_with_query_filter_and_period(self): - f = storage.SampleFilter( - meter='instance', - source='source-1', - start_timestamp=datetime.datetime(2013, 8, 1, 10, 0), - end_timestamp=datetime.datetime(2013, 8, 1, 18, 0), - ) - results = list(self.conn.get_meter_statistics(f, - period=7200, - groupby=['project_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 0), - datetime.datetime(2013, 8, 1, 14, 0), - datetime.datetime(2013, 8, 1, 16, 0)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 0)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 0), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): - 
self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 18, 0), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 16, 0)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 10, 0)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 14, 0)], - [r.groupby, r.period_start]) - - -class CounterDataTypeTest(DBTestBase): - def prepare_data(self): - c = sample.Sample( - 'dummyBigCounter', - sample.TYPE_CUMULATIVE, - unit='', - volume=337203685477580, - user_id='user-id', - project_id='project-id', - resource_id='resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={}, - source='test-1', - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - - self.conn.record_metering_data(msg) - - c = sample.Sample( - 'dummySmallCounter', - sample.TYPE_CUMULATIVE, - unit='', - volume=-337203685477580, - user_id='user-id', - project_id='project-id', - resource_id='resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={}, - source='test-1', - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - c = sample.Sample( - 'floatCounter', - sample.TYPE_CUMULATIVE, - unit='', - volume=1938495037.53697, - user_id='user-id', - project_id='project-id', - resource_id='resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={}, - source='test-1', - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_storage_can_handle_large_values(self): - f = storage.SampleFilter( - meter='dummyBigCounter', - ) - results = list(self.conn.get_samples(f)) - self.assertEqual(337203685477580, results[0].counter_volume) - f = storage.SampleFilter( - meter='dummySmallCounter', - ) - results = list(self.conn.get_samples(f)) - observed_num = int(results[0].counter_volume) - self.assertEqual(-337203685477580, observed_num) - - def test_storage_can_handle_float_values(self): - f = storage.SampleFilter( - meter='floatCounter', - ) - results = list(self.conn.get_samples(f)) - self.assertEqual(1938495037.53697, results[0].counter_volume) - - class EventTestBase(tests_db.TestBase): """Separate test base class. 
@@ -3067,134 +458,3 @@ class GetEventTest(EventTestBase): (event_models.Trait.FLOAT_TYPE, 0.0)] for trait in events[0].traits: options.remove((trait.dtype, trait.value)) - - -class BigIntegerTest(tests_db.TestBase): - def test_metadata_bigint(self): - metadata = {'bigint': 99999999999999} - s = sample.Sample(name='name', - type=sample.TYPE_GAUGE, - unit='B', - volume=1, - user_id='user-id', - project_id='project-id', - resource_id='resource-id', - timestamp=datetime.datetime.utcnow(), - resource_metadata=metadata) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret) - self.conn.record_metering_data(msg) - - -@tests_db.run_with('mongodb') -class MongoAutoReconnectTest(DBTestBase): - def setUp(self): - super(MongoAutoReconnectTest, self).setUp() - self.CONF.set_override('retry_interval', 0, group='database') - - def test_mongo_client(self): - self.assertIsInstance(self.conn.conn.conn, - pymongo.MongoClient) - - def test_mongo_cursor_next(self): - expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) - raise_exc = [False, True] - method = self.conn.db.resource.find().cursor.next - with mock.patch('pymongo.cursor.Cursor.next', - mock.Mock()) as mock_next: - mock_next.side_effect = self.create_side_effect( - method, pymongo.errors.AutoReconnect, raise_exc) - resource = self.conn.db.resource.find().next() - self.assertEqual(expected_first_sample_timestamp, - resource['first_sample_timestamp']) - - def test_mongo_insert(self): - raise_exc = [False, True] - method = self.conn.db.meter.insert - - with mock.patch('pymongo.collection.Collection.insert', - mock.Mock(return_value=method)) as mock_insert: - mock_insert.side_effect = self.create_side_effect( - method, pymongo.errors.AutoReconnect, raise_exc) - mock_insert.__name__ = 'insert' - self.create_and_store_sample( - timestamp=datetime.datetime(2014, 10, 15, 14, 39), - source='test-proxy') - meters = list(self.conn.db.meter.find()) - self.assertEqual(12, len(meters)) - - def test_mongo_find_and_modify(self): - raise_exc = [False, True] - method = self.conn.db.resource.find_and_modify - - with mock.patch('pymongo.collection.Collection.find_and_modify', - mock.Mock()) as mock_fam: - mock_fam.side_effect = self.create_side_effect( - method, pymongo.errors.AutoReconnect, raise_exc) - mock_fam.__name__ = 'find_and_modify' - self.create_and_store_sample( - timestamp=datetime.datetime(2014, 10, 15, 14, 39), - source='test-proxy') - data = self.conn.db.resource.find( - {'last_sample_timestamp': - datetime.datetime(2014, 10, 15, 14, 39)})[0]['source'] - self.assertEqual('test-proxy', data) - - def test_mongo_update(self): - raise_exc = [False, True] - method = self.conn.db.resource.update - - with mock.patch('pymongo.collection.Collection.update', - mock.Mock()) as mock_update: - mock_update.side_effect = self.create_side_effect( - method, pymongo.errors.AutoReconnect, raise_exc) - mock_update.__name__ = 'update' - self.create_and_store_sample( - timestamp=datetime.datetime(2014, 10, 15, 17, 39), - source='test-proxy-update') - data = self.conn.db.resource.find( - {'last_sample_timestamp': - datetime.datetime(2014, 10, 15, 17, 39)})[0]['source'] - self.assertEqual('test-proxy-update', data) - - -@tests_db.run_with('mongodb') -class MongoTimeToLiveTest(DBTestBase): - - def test_ensure_index(self): - cfg.CONF.set_override('metering_time_to_live', 5, group='database') - self.conn.upgrade() - self.assertEqual(5, self.conn.db.resource.index_information() - ['resource_ttl']['expireAfterSeconds']) - 
self.assertEqual(5, self.conn.db.meter.index_information() - ['meter_ttl']['expireAfterSeconds']) - - def test_modification_of_index(self): - cfg.CONF.set_override('metering_time_to_live', 5, group='database') - self.conn.upgrade() - cfg.CONF.set_override('metering_time_to_live', 15, group='database') - self.conn.upgrade() - self.assertEqual(15, self.conn.db.resource.index_information() - ['resource_ttl']['expireAfterSeconds']) - self.assertEqual(15, self.conn.db.meter.index_information() - ['meter_ttl']['expireAfterSeconds']) - - -class TestRecordUnicodeSamples(DBTestBase): - def prepare_data(self): - self.msgs = [] - self.msgs.append(self.create_and_store_sample( - name=u'meter.accent\xe9\u0437', - metadata={u"metadata_key\xe9\u0437": "test", - u"metadata_key": u"test\xe9\u0437"}, - )) - - def test_unicode_sample(self): - f = storage.SampleFilter() - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - expected = self.msgs[0] - actual = results[0].as_dict() - self.assertEqual(expected['counter_name'], actual['counter_name']) - self.assertEqual(expected['resource_metadata'], - actual['resource_metadata']) diff --git a/ceilometer/tests/functional/test_bin.py b/ceilometer/tests/functional/test_bin.py index 3339922b..c51c50df 100644 --- a/ceilometer/tests/functional/test_bin.py +++ b/ceilometer/tests/functional/test_bin.py @@ -50,8 +50,6 @@ class BinTestCase(base.BaseTestCase): stderr=subprocess.PIPE) __, err = subp.communicate() self.assertEqual(0, subp.poll()) - self.assertIn(b"Nothing to clean, database metering " - b"time to live is disabled", err) self.assertIn(b"Nothing to clean, database event " b"time to live is disabled", err) @@ -78,88 +76,4 @@ class BinTestCase(base.BaseTestCase): self.assertIn(msg, err) def test_run_expirer_ttl_enabled(self): - self._test_run_expirer_ttl_enabled('metering_time_to_live', - 'metering') - self._test_run_expirer_ttl_enabled('time_to_live', 'metering') self._test_run_expirer_ttl_enabled('event_time_to_live', 'event') - - -class BinSendSampleTestCase(base.BaseTestCase): - def setUp(self): - super(BinSendSampleTestCase, self).setUp() - pipeline_cfg_file = self.path_get('etc/ceilometer/pipeline.yaml') - content = ("[DEFAULT]\n" - "rpc_backend=fake\n" - "pipeline_cfg_file={0}\n".format(pipeline_cfg_file)) - if six.PY3: - content = content.encode('utf-8') - - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='ceilometer', - suffix='.conf') - - def tearDown(self): - super(BinSendSampleTestCase, self).tearDown() - os.remove(self.tempfile) - - def test_send_counter_run(self): - subp = subprocess.Popen(['ceilometer-send-sample', - "--config-file=%s" % self.tempfile, - "--sample-resource=someuuid", - "--sample-name=mycounter"]) - self.assertEqual(0, subp.wait()) - - -class BinCeilometerPollingServiceTestCase(base.BaseTestCase): - def setUp(self): - super(BinCeilometerPollingServiceTestCase, self).setUp() - self.tempfile = None - self.subp = None - - def tearDown(self): - if self.subp: - try: - self.subp.kill() - except OSError: - pass - os.remove(self.tempfile) - super(BinCeilometerPollingServiceTestCase, self).tearDown() - - def test_starting_with_duplication_namespaces(self): - content = ("[DEFAULT]\n" - "rpc_backend=fake\n" - "[database]\n" - "connection=log://localhost\n") - if six.PY3: - content = content.encode('utf-8') - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='ceilometer', - suffix='.conf') - self.subp = subprocess.Popen(['ceilometer-polling', - "--config-file=%s" % 
self.tempfile, - "--polling-namespaces", - "compute", - "compute"], - stderr=subprocess.PIPE) - out = self.subp.stderr.read(1024) - self.assertIn(b'Duplicated values: [\'compute\', \'compute\'] ' - b'found in CLI options, auto de-duplicated', out) - - def test_polling_namespaces_invalid_value_in_config(self): - content = ("[DEFAULT]\n" - "rpc_backend=fake\n" - "polling_namespaces = ['central']\n" - "[database]\n" - "connection=log://localhost\n") - if six.PY3: - content = content.encode('utf-8') - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='ceilometer', - suffix='.conf') - self.subp = subprocess.Popen( - ["ceilometer-polling", "--config-file=%s" % self.tempfile], - stderr=subprocess.PIPE) - __, err = self.subp.communicate() - expected = ("Exception: Valid values are ['compute', 'central', " - "'ipmi'], but found [\"['central']\"]") - self.assertIn(expected, err) diff --git a/ceilometer/tests/functional/test_collector.py b/ceilometer/tests/functional/test_collector.py deleted file mode 100644 index 5c09188b..00000000 --- a/ceilometer/tests/functional/test_collector.py +++ /dev/null @@ -1,248 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import socket - -import mock -import msgpack -from oslo_config import fixture as fixture_config -import oslo_messaging -from oslo_utils import timeutils -from oslotest import mockpatch -from stevedore import extension - -from ceilometer import collector -from ceilometer import dispatcher -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests import base as tests_base - - -class FakeException(Exception): - pass - - -class FakeConnection(object): - def create_worker(self, topic, proxy, pool_name): - pass - - -class TestCollector(tests_base.BaseTestCase): - def setUp(self): - super(TestCollector, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.import_opt("connection", "oslo_db.options", group="database") - self.CONF.set_override("connection", "log://", group='database') - self.CONF.set_override('telemetry_secret', 'not-so-secret', - group='publisher') - self._setup_messaging() - - self.counter = sample.Sample( - name='foobar', - type='bad', - unit='F', - volume=1, - user_id='jd', - project_id='ceilometer', - resource_id='cat', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={}, - ).as_dict() - - self.utf8_msg = utils.meter_message_from_counter( - sample.Sample( - name=u'test', - type=sample.TYPE_CUMULATIVE, - unit=u'', - volume=1, - user_id=u'test', - project_id=u'test', - resource_id=u'test_run_tasks', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={u'name': [([u'TestPublish'])]}, - source=u'testsource', - ), - 'not-so-secret') - - self.srv = collector.CollectorService() - - def _setup_messaging(self, enabled=True): - if enabled: - self.setup_messaging(self.CONF) - else: - self.useFixture(mockpatch.Patch( - 'ceilometer.messaging.get_transport', - 
return_value=None)) - - def _setup_fake_dispatcher(self): - plugin = mock.MagicMock() - fake_dispatcher = extension.ExtensionManager.make_test_instance([ - extension.Extension('test', None, None, plugin,), - ], propagate_map_exceptions=True) - self.useFixture(mockpatch.Patch( - 'ceilometer.dispatcher.load_dispatcher_manager', - return_value=(fake_dispatcher, fake_dispatcher))) - return plugin - - def _make_fake_socket(self, sample): - def recvfrom(size): - # Make the loop stop - self.srv.udp_run = False - return msgpack.dumps(sample), ('127.0.0.1', 12345) - - sock = mock.Mock() - sock.recvfrom = recvfrom - return sock - - def _verify_udp_socket(self, udp_socket): - conf = self.CONF.collector - udp_socket.setsockopt.assert_called_once_with(socket.SOL_SOCKET, - socket.SO_REUSEADDR, 1) - udp_socket.bind.assert_called_once_with((conf.udp_address, - conf.udp_port)) - - def test_udp_receive_base(self): - self._setup_messaging(False) - mock_dispatcher = self._setup_fake_dispatcher() - self.counter['source'] = 'mysource' - self.counter['counter_name'] = self.counter['name'] - self.counter['counter_volume'] = self.counter['volume'] - self.counter['counter_type'] = self.counter['type'] - self.counter['counter_unit'] = self.counter['unit'] - - udp_socket = self._make_fake_socket(self.counter) - - with mock.patch('socket.socket') as mock_socket: - mock_socket.return_value = udp_socket - self.srv.start() - self.addCleanup(self.srv.stop) - self.srv.udp_thread.join(5) - self.assertFalse(self.srv.udp_thread.is_alive()) - mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_DGRAM) - - self._verify_udp_socket(udp_socket) - mock_record = mock_dispatcher.verify_and_record_metering_data - mock_record.assert_called_once_with(self.counter) - - def test_udp_socket_ipv6(self): - self._setup_messaging(False) - self.CONF.set_override('udp_address', '::1', group='collector') - self._setup_fake_dispatcher() - sock = self._make_fake_socket('data') - - with mock.patch.object(socket, 'socket') as mock_socket: - mock_socket.return_value = sock - self.srv.start() - self.addCleanup(self.srv.stop) - self.srv.udp_thread.join(5) - self.assertFalse(self.srv.udp_thread.is_alive()) - mock_socket.assert_called_with(socket.AF_INET6, socket.SOCK_DGRAM) - - def test_udp_receive_storage_error(self): - self._setup_messaging(False) - mock_dispatcher = self._setup_fake_dispatcher() - mock_record = mock_dispatcher.verify_and_record_metering_data - mock_record.side_effect = self._raise_error - - self.counter['source'] = 'mysource' - self.counter['counter_name'] = self.counter['name'] - self.counter['counter_volume'] = self.counter['volume'] - self.counter['counter_type'] = self.counter['type'] - self.counter['counter_unit'] = self.counter['unit'] - - udp_socket = self._make_fake_socket(self.counter) - with mock.patch('socket.socket', return_value=udp_socket): - self.srv.start() - self.addCleanup(self.srv.stop) - self.srv.udp_thread.join(5) - self.assertFalse(self.srv.udp_thread.is_alive()) - - self._verify_udp_socket(udp_socket) - - mock_record.assert_called_once_with(self.counter) - - @staticmethod - def _raise_error(*args, **kwargs): - raise Exception - - def test_udp_receive_bad_decoding(self): - self._setup_messaging(False) - self._setup_fake_dispatcher() - udp_socket = self._make_fake_socket(self.counter) - with mock.patch('socket.socket', return_value=udp_socket): - with mock.patch('msgpack.loads', self._raise_error): - self.srv.start() - self.addCleanup(self.srv.stop) - self.srv.udp_thread.join(5) - 
self.assertFalse(self.srv.udp_thread.is_alive()) - - self._verify_udp_socket(udp_socket) - - @mock.patch.object(collector.CollectorService, 'start_udp') - def test_only_udp(self, udp_start): - """Check that only UDP is started if messaging transport is unset.""" - self._setup_messaging(False) - self._setup_fake_dispatcher() - udp_socket = self._make_fake_socket(self.counter) - real_start = oslo_messaging.MessageHandlingServer.start - with mock.patch.object(oslo_messaging.MessageHandlingServer, - 'start', side_effect=real_start) as rpc_start: - with mock.patch('socket.socket', return_value=udp_socket): - self.srv.start() - self.addCleanup(self.srv.stop) - self.srv.udp_thread.join(5) - self.assertFalse(self.srv.udp_thread.is_alive()) - self.assertEqual(0, rpc_start.call_count) - self.assertEqual(1, udp_start.call_count) - - def test_udp_receive_valid_encoding(self): - self._setup_messaging(False) - mock_dispatcher = self._setup_fake_dispatcher() - self.data_sent = [] - with mock.patch('socket.socket', - return_value=self._make_fake_socket(self.utf8_msg)): - self.srv.start() - self.addCleanup(self.srv.stop) - self.srv.udp_thread.join(5) - self.assertFalse(self.srv.udp_thread.is_alive()) - self.assertTrue(utils.verify_signature( - mock_dispatcher.method_calls[0][1][0], - "not-so-secret")) - - def _test_collector_requeue(self, listener, batch_listener=False): - - mock_dispatcher = self._setup_fake_dispatcher() - self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager() - mock_record = mock_dispatcher.verify_and_record_metering_data - mock_record.side_effect = Exception('boom') - mock_dispatcher.verify_and_record_events.side_effect = Exception( - 'boom') - - self.srv.start() - self.addCleanup(self.srv.stop) - endp = getattr(self.srv, listener).dispatcher.endpoints[0] - ret = endp.sample([{'ctxt': {}, 'publisher_id': 'pub_id', - 'event_type': 'event', 'payload': {}, - 'metadata': {}}]) - self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, - ret) - - @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) - def test_collector_sample_requeue(self): - self._test_collector_requeue('sample_listener') - - @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) - def test_collector_event_requeue(self): - self.CONF.set_override('store_events', True, group='notification') - self._test_collector_requeue('event_listener') diff --git a/ceilometer/tests/functional/test_notification.py b/ceilometer/tests/functional/test_notification.py deleted file mode 100644 index 7e4ee6a4..00000000 --- a/ceilometer/tests/functional/test_notification.py +++ /dev/null @@ -1,614 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for Ceilometer notify daemon.""" - -import shutil - -import mock -from oslo_config import fixture as fixture_config -import oslo_messaging -import oslo_service.service -from oslo_utils import fileutils -from oslo_utils import timeutils -import six -from stevedore import extension -import yaml - -from ceilometer.compute.notifications import instance -from ceilometer import messaging -from ceilometer import notification -from ceilometer.publisher import test as test_publisher -from ceilometer.tests import base as tests_base - -TEST_NOTICE_CTXT = { - u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'is_admin': True, - u'project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'quota_class': None, - u'read_deleted': u'no', - u'remote_address': u'10.0.2.15', - u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'roles': [u'admin'], - u'timestamp': u'2012-05-08T20:23:41.425105', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', -} - -TEST_NOTICE_METADATA = { - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -TEST_NOTICE_PAYLOAD = { - u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'fixed_ips': [{u'address': u'10.0.0.2', - u'floating_ips': [], - u'meta': {}, - u'type': u'fixed', - u'version': 4}], - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47.985999', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', -} - - -class TestNotification(tests_base.BaseTestCase): - - def setUp(self): - super(TestNotification, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override("connection", "log://", group='database') - self.CONF.set_override("backend_url", None, group="coordination") - self.CONF.set_override("store_events", False, group="notification") - self.CONF.set_override("disable_non_metric_meters", False, - group="notification") - self.setup_messaging(self.CONF) - self.srv = notification.NotificationService() - - def fake_get_notifications_manager(self, pm): - self.plugin = instance.Instance(pm) - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension('test', - None, - None, - self.plugin) - ] - ) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint') - def _do_process_notification_manager_start(self, - fake_event_endpoint_class): - with mock.patch.object(self.srv, - '_get_notifications_manager') as get_nm: - get_nm.side_effect = self.fake_get_notifications_manager - self.srv.start() - self.addCleanup(self.srv.stop) - self.fake_event_endpoint = fake_event_endpoint_class.return_value - - def test_start_multiple_listeners(self): - urls = ["fake://vhost1", "fake://vhost2"] - self.CONF.set_override("messaging_urls", urls, group="notification") 
- self._do_process_notification_manager_start() - self.assertEqual(2, len(self.srv.listeners)) - - def test_process_notification(self): - self._do_process_notification_manager_start() - self.srv.pipeline_manager.pipelines[0] = mock.MagicMock() - - self.plugin.info([{'ctxt': TEST_NOTICE_CTXT, - 'publisher_id': 'compute.vagrant-precise', - 'event_type': 'compute.instance.create.end', - 'payload': TEST_NOTICE_PAYLOAD, - 'metadata': TEST_NOTICE_METADATA}]) - - self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) - self.assertTrue(self.srv.pipeline_manager.publisher.called) - - def test_process_notification_no_events(self): - self._do_process_notification_manager_start() - self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) - self.assertNotEqual(self.fake_event_endpoint, - self.srv.listeners[0].dispatcher.endpoints[0]) - - @mock.patch('ceilometer.pipeline.setup_event_pipeline', mock.MagicMock()) - def test_process_notification_with_events(self): - self.CONF.set_override("store_events", True, group="notification") - self._do_process_notification_manager_start() - self.assertEqual(2, len(self.srv.listeners[0].dispatcher.endpoints)) - self.assertEqual(self.fake_event_endpoint, - self.srv.listeners[0].dispatcher.endpoints[0]) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - @mock.patch('oslo_messaging.get_batch_notification_listener') - def test_unique_consumers(self, mock_listener): - - def fake_get_notifications_manager_dup_targets(pm): - plugin = instance.Instance(pm) - return extension.ExtensionManager.make_test_instance( - [extension.Extension('test', None, None, plugin), - extension.Extension('test', None, None, plugin)]) - - with mock.patch.object(self.srv, - '_get_notifications_manager') as get_nm: - get_nm.side_effect = fake_get_notifications_manager_dup_targets - self.srv.start() - self.addCleanup(self.srv.stop) - self.assertEqual(1, len(mock_listener.call_args_list)) - args, kwargs = mock_listener.call_args - self.assertEqual(1, len(args[1])) - self.assertIsInstance(args[1][0], oslo_messaging.Target) - - -class BaseRealNotification(tests_base.BaseTestCase): - def setup_pipeline(self, counter_names): - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 5, - 'meters': counter_names, - 'sinks': ['test_sink'] - }], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ['test://'] - }] - }) - if six.PY3: - pipeline = pipeline.encode('utf-8') - - pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, - prefix="pipeline", - suffix="yaml") - return pipeline_cfg_file - - def setup_event_pipeline(self, event_names): - ev_pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_event', - 'events': event_names, - 'sinks': ['test_sink'] - }], - 'sinks': [{ - 'name': 'test_sink', - 'publishers': ['test://'] - }] - }) - if six.PY3: - ev_pipeline = ev_pipeline.encode('utf-8') - - ev_pipeline_cfg_file = fileutils.write_to_tempfile( - content=ev_pipeline, prefix="event_pipeline", suffix="yaml") - return ev_pipeline_cfg_file - - def setUp(self): - super(BaseRealNotification, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - # Dummy config file to avoid looking for system config - self.CONF([], project='ceilometer', validate_default_values=True) - self.setup_messaging(self.CONF, 'nova') - - pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - - self.expected_samples = 2 - - 
self.CONF.set_override("backend_url", None, group="coordination") - self.CONF.set_override("store_events", True, group="notification") - self.CONF.set_override("disable_non_metric_meters", False, - group="notification") - - ev_pipeline_cfg_file = self.setup_event_pipeline( - ['compute.instance.*']) - self.expected_events = 1 - - self.CONF.set_override("event_pipeline_cfg_file", - ev_pipeline_cfg_file) - self.CONF.set_override( - "definitions_cfg_file", - self.path_get('etc/ceilometer/event_definitions.yaml'), - group='event') - self.publisher = test_publisher.TestPublisher("") - - def _check_notification_service(self): - self.srv.start() - self.addCleanup(self.srv.stop) - - notifier = messaging.get_notifier(self.transport, - "compute.vagrant-precise") - notifier.info({}, 'compute.instance.create.end', - TEST_NOTICE_PAYLOAD) - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: - if (len(self.publisher.samples) >= self.expected_samples and - len(self.publisher.events) >= self.expected_events): - break - - resources = list(set(s.resource_id for s in self.publisher.samples)) - self.assertEqual(self.expected_samples, len(self.publisher.samples)) - self.assertEqual(self.expected_events, len(self.publisher.events)) - self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources) - - -class TestRealNotificationReloadablePipeline(BaseRealNotification): - - def setUp(self): - super(TestRealNotificationReloadablePipeline, self).setUp() - self.CONF.set_override('refresh_pipeline_cfg', True) - self.CONF.set_override('refresh_event_pipeline_cfg', True) - self.CONF.set_override('pipeline_polling_interval', 1) - self.srv = notification.NotificationService() - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_pipeline_poller(self, fake_publisher_cls): - fake_publisher_cls.return_value = self.publisher - self.srv.tg = mock.MagicMock() - self.srv.start() - self.addCleanup(self.srv.stop) - - pipeline_poller_call = mock.call(1, self.srv.refresh_pipeline) - self.assertIn(pipeline_poller_call, - self.srv.tg.add_timer.call_args_list) - - def test_notification_reloaded_pipeline(self): - pipeline_cfg_file = self.setup_pipeline(['instance']) - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - - self.srv.start() - self.addCleanup(self.srv.stop) - - pipeline = self.srv.pipe_manager - - # Modify the collection targets - updated_pipeline_cfg_file = self.setup_pipeline(['vcpus', - 'disk.root.size']) - # Move/rename the updated pipeline file to the original pipeline - # file path as recorded in oslo config - shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) - self.srv.refresh_pipeline() - - self.assertNotEqual(pipeline, self.srv.pipe_manager) - - def test_notification_reloaded_event_pipeline(self): - ev_pipeline_cfg_file = self.setup_event_pipeline( - ['compute.instance.create.start']) - self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file) - - self.CONF.set_override("store_events", True, group="notification") - - self.srv.start() - self.addCleanup(self.srv.stop) - - pipeline = self.srv.event_pipe_manager - - # Modify the collection targets - updated_ev_pipeline_cfg_file = self.setup_event_pipeline( - ['compute.instance.*']) - - # Move/rename the updated pipeline file to the original pipeline - # file path as recorded in oslo config - shutil.move(updated_ev_pipeline_cfg_file, ev_pipeline_cfg_file) - self.srv.refresh_pipeline() - - self.assertNotEqual(pipeline, self.srv.pipe_manager) - - -class 
TestRealNotification(BaseRealNotification): - - def setUp(self): - super(TestRealNotification, self).setUp() - self.srv = notification.NotificationService() - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_service(self, fake_publisher_cls): - fake_publisher_cls.return_value = self.publisher - self._check_notification_service() - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_service_error_topic(self, fake_publisher_cls): - fake_publisher_cls.return_value = self.publisher - self.srv.start() - self.addCleanup(self.srv.stop) - notifier = messaging.get_notifier(self.transport, - 'compute.vagrant-precise') - notifier.error({}, 'compute.instance.error', - TEST_NOTICE_PAYLOAD) - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: - if len(self.publisher.events) >= self.expected_events: - break - self.assertEqual(self.expected_events, len(self.publisher.events)) - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_disable_non_metrics(self, fake_publisher_cls): - self.CONF.set_override("disable_non_metric_meters", True, - group="notification") - # instance is a not a metric. we should only get back memory - self.expected_samples = 1 - fake_publisher_cls.return_value = self.publisher - self._check_notification_service() - self.assertEqual('memory', self.publisher.samples[0].name) - - @mock.patch.object(oslo_service.service.Service, 'stop') - def test_notification_service_start_abnormal(self, mocked): - try: - self.srv.stop() - except Exception: - pass - self.assertEqual(1, mocked.call_count) - - -class TestRealNotificationHA(BaseRealNotification): - - def setUp(self): - super(TestRealNotificationHA, self).setUp() - self.CONF.set_override('workload_partitioning', True, - group='notification') - self.srv = notification.NotificationService() - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_service(self, fake_publisher_cls): - fake_publisher_cls.return_value = self.publisher - self._check_notification_service() - - @mock.patch('oslo_messaging.get_batch_notification_listener') - def test_reset_listener_on_refresh(self, mock_listener): - mock_listener.side_effect = [ - mock.MagicMock(), # main listener - mock.MagicMock(), # pipeline listener - mock.MagicMock(), # refresh pipeline listener - ] - - self.srv.start() - self.addCleanup(self.srv.stop) - - def _check_listener_targets(): - args, kwargs = mock_listener.call_args - self.assertEqual(20, len(args[1])) - self.assertIsInstance(args[1][0], oslo_messaging.Target) - - _check_listener_targets() - - listener = self.srv.pipeline_listener - self.srv._configure_pipeline_listener() - self.assertIsNot(listener, self.srv.pipeline_listener) - - _check_listener_targets() - - @mock.patch('oslo_messaging.get_batch_notification_listener') - def test_retain_common_targets_on_refresh(self, mock_listener): - with mock.patch('ceilometer.coordination.PartitionCoordinator' - '.extract_my_subset', return_value=[1, 2]): - self.srv.start() - self.addCleanup(self.srv.stop) - listened_before = [target.topic for target in - mock_listener.call_args[0][1]] - self.assertEqual(4, len(listened_before)) - with mock.patch('ceilometer.coordination.PartitionCoordinator' - '.extract_my_subset', return_value=[1, 3]): - self.srv._refresh_agent(None) - listened_after = [target.topic for target in - mock_listener.call_args[0][1]] - self.assertEqual(4, len(listened_after)) - common = set(listened_before) & 
set(listened_after) - for topic in common: - self.assertTrue(topic.endswith('1')) - - @mock.patch('oslo_messaging.get_batch_notification_listener') - def test_notify_to_relevant_endpoint(self, mock_listener): - self.srv.start() - self.addCleanup(self.srv.stop) - - targets = mock_listener.call_args[0][1] - self.assertIsNotEmpty(targets) - - endpoints = {} - for endpoint in mock_listener.call_args[0][2]: - self.assertEqual(1, len(endpoint.publish_context.pipelines)) - pipe = list(endpoint.publish_context.pipelines)[0] - endpoints[pipe.name] = endpoint - - notifiers = [] - notifiers.extend(self.srv.pipe_manager.transporters[0][2]) - notifiers.extend(self.srv.event_pipe_manager.transporters[0][2]) - for notifier in notifiers: - filter_rule = endpoints[notifier.publisher_id].filter_rule - self.assertEqual(True, filter_rule.match(None, - notifier.publisher_id, - None, None, None)) - - @mock.patch('oslo_messaging.Notifier.sample') - def test_broadcast_to_relevant_pipes_only(self, mock_notifier): - self.srv.start() - self.addCleanup(self.srv.stop) - for endpoint in self.srv.listeners[0].dispatcher.endpoints: - if (hasattr(endpoint, 'filter_rule') and - not endpoint.filter_rule.match(None, None, 'nonmatching.end', - None, None)): - continue - endpoint.info([{ - 'ctxt': TEST_NOTICE_CTXT, - 'publisher_id': 'compute.vagrant-precise', - 'event_type': 'nonmatching.end', - 'payload': TEST_NOTICE_PAYLOAD, - 'metadata': TEST_NOTICE_METADATA}]) - self.assertFalse(mock_notifier.called) - for endpoint in self.srv.listeners[0].dispatcher.endpoints: - if (hasattr(endpoint, 'filter_rule') and - not endpoint.filter_rule.match(None, None, - 'compute.instance.create.end', - None, None)): - continue - endpoint.info([{ - 'ctxt': TEST_NOTICE_CTXT, - 'publisher_id': 'compute.vagrant-precise', - 'event_type': 'compute.instance.create.end', - 'payload': TEST_NOTICE_PAYLOAD, - 'metadata': TEST_NOTICE_METADATA}]) - - self.assertTrue(mock_notifier.called) - self.assertEqual(3, mock_notifier.call_count) - self.assertEqual('pipeline.event', - mock_notifier.call_args_list[0][1]['event_type']) - self.assertEqual('ceilometer.pipeline', - mock_notifier.call_args_list[1][1]['event_type']) - self.assertEqual('ceilometer.pipeline', - mock_notifier.call_args_list[2][1]['event_type']) - - -class TestRealNotificationMultipleAgents(tests_base.BaseTestCase): - def setup_pipeline(self, transformers): - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 5, - 'meters': ['instance', 'memory'], - 'sinks': ['test_sink'] - }], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': transformers, - 'publishers': ['test://'] - }] - }) - if six.PY3: - pipeline = pipeline.encode('utf-8') - - pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, - prefix="pipeline", - suffix="yaml") - return pipeline_cfg_file - - def setUp(self): - super(TestRealNotificationMultipleAgents, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF([], project='ceilometer', validate_default_values=True) - self.setup_messaging(self.CONF, 'nova') - - pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - self.CONF.set_override("backend_url", None, group="coordination") - self.CONF.set_override("store_events", False, group="notification") - self.CONF.set_override("disable_non_metric_meters", False, - group="notification") - self.CONF.set_override('workload_partitioning', True, - group='notification') - 
self.CONF.set_override('pipeline_processing_queues', 2, - group='notification') - self.publisher = test_publisher.TestPublisher("") - self.publisher2 = test_publisher.TestPublisher("") - - def _check_notifications(self, fake_publisher_cls): - fake_publisher_cls.side_effect = [self.publisher, self.publisher2] - - self.srv = notification.NotificationService() - self.srv2 = notification.NotificationService() - with mock.patch('ceilometer.coordination.PartitionCoordinator' - '._get_members', return_value=['harry', 'lloyd']): - with mock.patch('uuid.uuid4', return_value='harry'): - self.srv.start() - self.addCleanup(self.srv.stop) - with mock.patch('uuid.uuid4', return_value='lloyd'): - self.srv2.start() - self.addCleanup(self.srv2.stop) - - notifier = messaging.get_notifier(self.transport, - "compute.vagrant-precise") - payload1 = TEST_NOTICE_PAYLOAD.copy() - payload1['instance_id'] = '0' - notifier.info({}, 'compute.instance.create.end', payload1) - payload2 = TEST_NOTICE_PAYLOAD.copy() - payload2['instance_id'] = '1' - notifier.info({}, 'compute.instance.create.end', payload2) - self.expected_samples = 4 - start = timeutils.utcnow() - with mock.patch('six.moves.builtins.hash', lambda x: int(x)): - while timeutils.delta_seconds(start, timeutils.utcnow()) < 60: - if (len(self.publisher.samples + self.publisher2.samples) >= - self.expected_samples): - break - - self.assertEqual(2, len(self.publisher.samples)) - self.assertEqual(2, len(self.publisher2.samples)) - self.assertEqual(1, len(set( - s.resource_id for s in self.publisher.samples))) - self.assertEqual(1, len(set( - s.resource_id for s in self.publisher2.samples))) - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_multiple_agents_no_transform(self, fake_publisher_cls): - pipeline_cfg_file = self.setup_pipeline([]) - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - self._check_notifications(fake_publisher_cls) - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_multiple_agents_transform(self, fake_publisher_cls): - pipeline_cfg_file = self.setup_pipeline( - [{ - 'name': 'unit_conversion', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_mins', - 'unit': 'min', - 'scale': 'volume'}, - } - }]) - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - self._check_notifications(fake_publisher_cls) - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_multiple_agents_multiple_transform(self, fake_publisher_cls): - pipeline_cfg_file = self.setup_pipeline( - [{ - 'name': 'unit_conversion', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_mins', - 'unit': 'min', - 'scale': 'volume'}, - } - }, { - 'name': 'unit_conversion', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_mins', - 'unit': 'min', - 'scale': 'volume'}, - } - }]) - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - self._check_notifications(fake_publisher_cls) diff --git a/ceilometer/tests/integration/__init__.py b/ceilometer/tests/integration/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/integration/gabbi/__init__.py b/ceilometer/tests/integration/gabbi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml b/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml deleted file mode 100644 index 437d35dd..00000000 --- a/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml +++ /dev/null @@ -1,175 +0,0 @@ 
-defaults: - request_headers: - x-auth-token: $ENVIRON['ADMIN_TOKEN'] - -tests: - - name: list alarms none - desc: Lists alarms, none yet exist - url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms - method: GET - response_strings: - - "[]" - - - name: list servers none - desc: List servers, none yet exists - url: $ENVIRON['NOVA_SERVICE_URL']/servers - method: GET - response_strings: - - "[]" - - - name: create stack - desc: Create an autoscaling stack - url: $ENVIRON['HEAT_SERVICE_URL']/stacks - method: POST - request_headers: - content-type: application/json - data: <@create_stack.json - status: 201 - - - name: waiting for stack creation - desc: Wait for the second event on the stack resource, it can be a success or failure - url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test - redirects: true - method: GET - status: 200 - poll: - count: 300 - delay: 1 - response_json_paths: - $.events[1].resource_name: integration_test - - - name: control stack status - desc: Checks the stack have been created successfully - url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test - redirects: true - method: GET - status: 200 - poll: - count: 5 - delay: 1 - response_json_paths: - $.stack.stack_status: "CREATE_COMPLETE" - - - name: list servers - desc: Wait the autoscaling stack grow to two servers - url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail - method: GET - poll: - count: 600 - delay: 1 - response_json_paths: - $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id'] - $.servers[1].metadata.'metering.server_group': $RESPONSE['$.stack.id'] - $.servers[0].status: ACTIVE - $.servers[1].status: ACTIVE - $.servers.`len`: 2 - - - name: check gnocchi resources - desc: Check the gnocchi resources for this two servers exists - url: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/instance - method: GET - poll: - count: 30 - delay: 1 - response_strings: - - '"id": "$RESPONSE["$.servers[0].id"]"' - - '"id": "$RESPONSE["$.servers[1].id"]"' - - - name: check alarm - desc: Check the aodh alarm and its state - url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms - method: GET - poll: - count: 30 - delay: 1 - response_strings: - - "integration_test-cpu_alarm_high-" - response_json_paths: - $[0].state: alarm - - - name: get stack location for update - desc: Get the stack location - url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test - method: GET - status: 302 - - - name: update stack - desc: Update an autoscaling stack - url: $LOCATION - method: PUT - request_headers: - content-type: application/json - data: <@update_stack.json - status: 202 - - - name: waiting for stack update - desc: Wait for the third event on the stack resource, it can be a success or failure - url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test - redirects: true - method: GET - status: 200 - poll: - count: 300 - delay: 1 - response_json_paths: - $.events[3].resource_name: integration_test - - - name: control stack status - desc: Checks the stack have been created successfully - url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test - redirects: true - method: GET - status: 200 - poll: - count: 5 - delay: 1 - response_json_paths: - $.stack.stack_status: "UPDATE_COMPLETE" - - - name: list servers - desc: Wait the autoscaling stack shrink to one server - url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail - method: GET - poll: - count: 600 - delay: 1 - response_json_paths: - $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id'] - 
$.servers[0].status: ACTIVE - $.servers.`len`: 1 - - - name: get stack location - desc: Get the stack location - url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test - method: GET - status: 302 - - - name: delete stack - desc: Delete the stack - url: $LOCATION - method: DELETE - status: 204 - - - name: get deleted stack - desc: Check the stack have been deleted - url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test - redirects: true - method: GET - poll: - count: 240 - delay: 1 - status: 404 - - - name: list alarms deleted - desc: List alarms, no more exist - url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms - method: GET - response_strings: - - "[]" - - - name: list servers deleted - desc: List servers, no more exists - url: $ENVIRON['NOVA_SERVICE_URL']/servers - method: GET - response_strings: - - "[]" diff --git a/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json b/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json deleted file mode 100644 index 7b3d3b4d..00000000 --- a/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "stack_name": "integration_test", - "template": { - "heat_template_version": "2013-05-23", - "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh", - "resources": { - "asg": { - "type": "OS::Heat::AutoScalingGroup", - "properties": { - "min_size": 1, - "max_size": 2, - "resource": { - "type": "OS::Nova::Server", - "properties": { - "networks": [{ "network": "private" }], - "flavor": "m1.tiny", - "image": "$ENVIRON['GLANCE_IMAGE_NAME']", - "metadata": { - "metering.server_group": { "get_param": "OS::stack_id" } - }, - "user_data_format": "RAW", - "user_data": {"Fn::Join": ["", [ - "#!/bin/sh\n", - "echo 'Loading CPU'\n", - "set -v\n", - "cat /dev/urandom > /dev/null\n" - ]]} - } - } - } - }, - "web_server_scaleup_policy": { - "type": "OS::Heat::ScalingPolicy", - "properties": { - "adjustment_type": "change_in_capacity", - "auto_scaling_group_id": { "get_resource": "asg" }, - "cooldown": 2, - "scaling_adjustment": 1 - } - }, - "cpu_alarm_high": { - "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm", - "properties": { - "description": "Scale-up if the mean CPU > 10% on 1 minute", - "metric": "cpu_util", - "aggregation_method": "mean", - "granularity": 60, - "evaluation_periods": 1, - "threshold": 10, - "comparison_operator": "gt", - "alarm_actions": [ - { "get_attr": [ "web_server_scaleup_policy", "alarm_url" ] } - ], - "resource_type": "instance", - "query": { - "str_replace": { - "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}", - "params": { - "stack_id": { "get_param": "OS::stack_id" } - } - } - } - } - } - } - } -} diff --git a/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json b/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json deleted file mode 100644 index 8897d399..00000000 --- a/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "template": { - "heat_template_version": "2013-05-23", - "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh", - "resources": { - "asg": { - "type": "OS::Heat::AutoScalingGroup", - "properties": { - "min_size": 1, - "max_size": 2, - "resource": { - "type": "OS::Nova::Server", - "properties": { - "networks": [{ "network": "private" }], - "flavor": "m1.tiny", - "image": "$ENVIRON['GLANCE_IMAGE_NAME']", - "metadata": { - "metering.server_group": { "get_param": 
"OS::stack_id" } - }, - "user_data_format": "RAW", - "user_data": {"Fn::Join": ["", [ - "#!/bin/sh\n", - "echo 'Loading CPU'\n", - "set -v\n", - "cat /dev/urandom > /dev/null\n" - ]]} - } - } - } - }, - "web_server_scaledown_policy": { - "type": "OS::Heat::ScalingPolicy", - "properties": { - "adjustment_type": "change_in_capacity", - "auto_scaling_group_id": { "get_resource": "asg" }, - "cooldown": 2, - "scaling_adjustment": -1 - } - }, - "cpu_alarm_high": { - "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm", - "properties": { - "description": "Scale-down if the mean CPU > 10% on 1 minute", - "metric": "cpu_util", - "aggregation_method": "mean", - "granularity": 60, - "evaluation_periods": 1, - "threshold": 10, - "comparison_operator": "gt", - "alarm_actions": [ - { "get_attr": [ "web_server_scaledown_policy", "alarm_url" ] } - ], - "resource_type": "instance", - "query": { - "str_replace": { - "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}", - "params": { - "stack_id": { "get_param": "OS::stack_id" } - } - } - } - } - } - } - } -} diff --git a/ceilometer/tests/integration/gabbi/test_gabbi_live.py b/ceilometer/tests/integration/gabbi/test_gabbi_live.py deleted file mode 100644 index b347b556..00000000 --- a/ceilometer/tests/integration/gabbi/test_gabbi_live.py +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A test module to exercise the Gnocchi API with gabbi.""" - -import os - -from gabbi import driver - - -TESTS_DIR = 'gabbits-live' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - NEEDED_ENV = ["AODH_SERVICE_URL", "GNOCCHI_SERVICE_URL", - "HEAT_SERVICE_URL", "NOVA_SERVICE_URL", - "GLANCE_IMAGE_NAME", "ADMIN_TOKEN"] - - for env_variable in NEEDED_ENV: - if not os.getenv(env_variable): - if os.getenv("GABBI_LIVE_FAIL_IF_NO_TEST"): - raise RuntimeError('%s is not set' % env_variable) - else: - return - - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, host="localhost", port=8041) diff --git a/ceilometer/tests/integration/hooks/post_test_hook.sh b/ceilometer/tests/integration/hooks/post_test_hook.sh deleted file mode 100755 index fbc69a7e..00000000 --- a/ceilometer/tests/integration/hooks/post_test_hook.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash -xe - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# This script is executed inside post_test_hook function in devstack gate. - -function generate_testr_results { - if [ -f .testrepository/0 ]; then - sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit - sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit - sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html - sudo gzip -9 $BASE/logs/testrepository.subunit - sudo gzip -9 $BASE/logs/testr_results.html - sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz - sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz - fi -} - -# If we're running in the gate find our keystone endpoint to give to -# gabbi tests and do a chown. Otherwise the existing environment -# should provide URL and TOKEN. -if [ -d $BASE/new/devstack ]; then - export CEILOMETER_DIR="$BASE/new/ceilometer" - STACK_USER=stack - sudo chown -R $STACK_USER:stack $CEILOMETER_DIR - source $BASE/new/devstack/openrc admin admin - # Go to the ceilometer dir - cd $CEILOMETER_DIR -fi - -openstack catalog list -export AODH_SERVICE_URL=$(openstack catalog show alarming -c endpoints -f value | awk '/public/{print $2}') -export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}') -export HEAT_SERVICE_URL=$(openstack catalog show orchestration -c endpoints -f value | awk '/public/{print $2}') -export NOVA_SERVICE_URL=$(openstack catalog show compute -c endpoints -f value | awk '/public/{print $2}') -export GLANCE_IMAGE_NAME=$(openstack image list | awk '/ cirros.*uec /{print $4}') -export ADMIN_TOKEN=$(openstack token issue -c id -f value) - -# Run tests -echo "Running telemetry integration test suite" -set +e - -sudo -E -H -u ${STACK_USER:-${USER}} tox -eintegration -EXIT_CODE=$? - -echo "* Message queue status:" -sudo rabbitmqctl list_queues | grep -e \\.sample -e \\.info - -if [ $EXIT_CODE -ne 0 ] ; then - set +x - echo "* Heat stack:" - openstack stack show integration_test - echo "* Alarm list:" - ceilometer alarm-list - echo "* Nova instance list:" - openstack server list - - echo "* Gnocchi instance list:" - gnocchi resource list -t instance - for instance_id in $(openstack server list -f value -c ID); do - echo "* Nova instance detail:" - openstack server show $instance_id - echo "* Gnocchi instance detail:" - gnocchi resource show -t instance $instance_id - echo "* Gnocchi measures for instance ${instance_id}:" - gnocchi measures show -r $instance_id cpu_util - done - - gnocchi status - - # Be sure to source Gnocchi settings before - source $BASE/new/gnocchi/devstack/settings - echo "* Unprocessed measures:" - sudo find $GNOCCHI_DATA_DIR/measure - - set -x -fi - -set -e - -# Collect and parse result -if [ -n "$CEILOMETER_DIR" ]; then - generate_testr_results -fi -exit $EXIT_CODE diff --git a/ceilometer/tests/pipeline_base.py b/ceilometer/tests/pipeline_base.py deleted file mode 100644 index 6731cb7b..00000000 --- a/ceilometer/tests/pipeline_base.py +++ /dev/null @@ -1,2157 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2013 Intel Corp. -# -# Authors: Yunhong Jiang -# Julien Danjou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import copy -import datetime -import traceback - -import mock -from oslo_utils import timeutils -from oslotest import base -from oslotest import mockpatch -import six -from stevedore import extension - -from ceilometer import pipeline -from ceilometer import publisher -from ceilometer.publisher import test as test_publisher -from ceilometer import sample -from ceilometer import transformer -from ceilometer.transformer import accumulator -from ceilometer.transformer import arithmetic -from ceilometer.transformer import conversions - - -@six.add_metaclass(abc.ABCMeta) -class BasePipelineTestCase(base.BaseTestCase): - @staticmethod - def fake_tem_init(): - """Fake a transformerManager for pipeline. - - The faked entry point setting is below: - update: TransformerClass - except: TransformerClassException - drop: TransformerClassDrop - """ - pass - - def fake_tem_get_ext(self, name): - class_name_ext = { - 'update': self.TransformerClass, - 'except': self.TransformerClassException, - 'drop': self.TransformerClassDrop, - 'cache': accumulator.TransformerAccumulator, - 'aggregator': conversions.AggregatorTransformer, - 'unit_conversion': conversions.ScalingTransformer, - 'rate_of_change': conversions.RateOfChangeTransformer, - 'arithmetic': arithmetic.ArithmeticTransformer, - 'delta': conversions.DeltaTransformer, - } - - if name in class_name_ext: - return extension.Extension(name, None, - class_name_ext[name], - None, - ) - - raise KeyError(name) - - def get_publisher(self, url, namespace=''): - fake_drivers = {'test://': test_publisher.TestPublisher, - 'new://': test_publisher.TestPublisher, - 'except://': self.PublisherClassException} - return fake_drivers[url](url) - - class PublisherClassException(publisher.PublisherBase): - def publish_samples(self, samples): - raise Exception() - - def publish_events(self, events): - raise Exception() - - class TransformerClass(transformer.TransformerBase): - samples = [] - grouping_keys = ['counter_name'] - - def __init__(self, append_name='_update'): - self.__class__.samples = [] - self.append_name = append_name - - @staticmethod - def flush(): - return [] - - def handle_sample(self, counter): - self.__class__.samples.append(counter) - newname = getattr(counter, 'name') + self.append_name - return sample.Sample( - name=newname, - type=counter.type, - volume=counter.volume, - unit=counter.unit, - user_id=counter.user_id, - project_id=counter.project_id, - resource_id=counter.resource_id, - timestamp=counter.timestamp, - resource_metadata=counter.resource_metadata, - ) - - class TransformerClassDrop(transformer.TransformerBase): - samples = [] - grouping_keys = ['resource_id'] - - def __init__(self): - self.__class__.samples = [] - - def handle_sample(self, counter): - self.__class__.samples.append(counter) - - class TransformerClassException(object): - grouping_keys = ['resource_id'] - - @staticmethod - def handle_sample(counter): - raise Exception() - - def setUp(self): - super(BasePipelineTestCase, self).setUp() - - self.test_counter = sample.Sample( - name='a', - type=sample.TYPE_GAUGE, - volume=1, - unit='B', - 
user_id="test_user", - project_id="test_proj", - resource_id="test_resource", - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={} - ) - - self.useFixture(mockpatch.PatchObject( - publisher, 'get_publisher', side_effect=self.get_publisher)) - - self.transformer_manager = mock.MagicMock() - self.transformer_manager.__getitem__.side_effect = \ - self.fake_tem_get_ext - - self._setup_pipeline_cfg() - - self._reraise_exception = True - self.useFixture(mockpatch.Patch( - 'ceilometer.pipeline.LOG.exception', - side_effect=self._handle_reraise_exception)) - - def _handle_reraise_exception(self, msg): - if self._reraise_exception: - raise Exception(traceback.format_exc()) - - @abc.abstractmethod - def _setup_pipeline_cfg(self): - """Setup the appropriate form of pipeline config.""" - - @abc.abstractmethod - def _augment_pipeline_cfg(self): - """Augment the pipeline config with an additional element.""" - - @abc.abstractmethod - def _break_pipeline_cfg(self): - """Break the pipeline config with a malformed element.""" - - @abc.abstractmethod - def _dup_pipeline_name_cfg(self): - """Break the pipeline config with duplicate pipeline name.""" - - @abc.abstractmethod - def _set_pipeline_cfg(self, field, value): - """Set a field to a value in the pipeline config.""" - - @abc.abstractmethod - def _extend_pipeline_cfg(self, field, value): - """Extend an existing field in the pipeline config with a value.""" - - @abc.abstractmethod - def _unset_pipeline_cfg(self, field): - """Clear an existing field in the pipeline config.""" - - def _exception_create_pipelinemanager(self): - self.assertRaises(pipeline.PipelineException, - pipeline.PipelineManager, - self.pipeline_cfg, - self.transformer_manager) - - def test_no_counters(self): - self._unset_pipeline_cfg('counters') - self._exception_create_pipelinemanager() - - def test_no_transformers(self): - self._unset_pipeline_cfg('transformers') - pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) - - def test_no_name(self): - self._unset_pipeline_cfg('name') - self._exception_create_pipelinemanager() - - def test_no_interval(self): - self._unset_pipeline_cfg('interval') - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - pipe = pipeline_manager.pipelines[0] - self.assertEqual(600, pipe.get_interval()) - - def test_no_publishers(self): - self._unset_pipeline_cfg('publishers') - self._exception_create_pipelinemanager() - - def test_invalid_resources(self): - invalid_resource = {'invalid': 1} - self._set_pipeline_cfg('resources', invalid_resource) - self._exception_create_pipelinemanager() - - def test_check_counters_include_exclude_same(self): - counter_cfg = ['a', '!a'] - self._set_pipeline_cfg('counters', counter_cfg) - self._exception_create_pipelinemanager() - - def test_check_counters_include_exclude(self): - counter_cfg = ['a', '!b'] - self._set_pipeline_cfg('counters', counter_cfg) - self._exception_create_pipelinemanager() - - def test_check_counters_wildcard_included(self): - counter_cfg = ['a', '*'] - self._set_pipeline_cfg('counters', counter_cfg) - self._exception_create_pipelinemanager() - - def test_check_publishers_invalid_publisher(self): - publisher_cfg = ['test_invalid'] - self._set_pipeline_cfg('publishers', publisher_cfg) - - def test_invalid_string_interval(self): - self._set_pipeline_cfg('interval', 'string') - self._exception_create_pipelinemanager() - - def test_check_transformer_invalid_transformer(self): - transformer_cfg = [ - {'name': "test_invalid", - 
'parameters': {}} - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._exception_create_pipelinemanager() - - def test_get_interval(self): - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - pipe = pipeline_manager.pipelines[0] - self.assertEqual(5, pipe.get_interval()) - - def test_publisher_transformer_invoked(self): - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], "name")) - - def test_multiple_included_counters(self): - counter_cfg = ['a', 'b'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(2, len(publisher.samples)) - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual('b_update', getattr(publisher.samples[1], "name")) - - @mock.patch('ceilometer.pipeline.LOG') - def test_none_volume_counter(self, LOG): - self._set_pipeline_cfg('counters', ['empty_volume']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - publisher = pipeline_manager.pipelines[0].publishers[0] - - test_s = sample.Sample( - name='empty_volume', - type=self.test_counter.type, - volume=None, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([test_s]) - - LOG.warning.assert_called_once_with( - 'metering data %(counter_name)s for %(resource_id)s ' - '@ %(timestamp)s has no volume (volume: %(counter_volume)s), the ' - 'sample will be dropped' - % {'counter_name': test_s.name, - 'resource_id': test_s.resource_id, - 'timestamp': test_s.timestamp, - 'counter_volume': test_s.volume}) - - self.assertEqual(0, len(publisher.samples)) - - @mock.patch('ceilometer.pipeline.LOG') - def test_fake_volume_counter(self, LOG): - self._set_pipeline_cfg('counters', ['fake_volume']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - publisher = pipeline_manager.pipelines[0].publishers[0] - - test_s = sample.Sample( - name='fake_volume', - type=self.test_counter.type, - volume='fake_value', - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - 
resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([test_s]) - - LOG.warning.assert_called_once_with( - 'metering data %(counter_name)s for %(resource_id)s ' - '@ %(timestamp)s has volume which is not a number ' - '(volume: %(counter_volume)s), the sample will be dropped' - % {'counter_name': test_s.name, - 'resource_id': test_s.resource_id, - 'timestamp': test_s.timestamp, - 'counter_volume': test_s.volume}) - - self.assertEqual(0, len(publisher.samples)) - - def test_counter_dont_match(self): - counter_cfg = ['nomatch'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - self.assertEqual(0, publisher.calls) - - def test_wildcard_counter(self): - counter_cfg = ['*'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - - def test_wildcard_excluded_counters(self): - counter_cfg = ['*', '!a'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertFalse(pipeline_manager.pipelines[0].support_meter('a')) - - def test_wildcard_excluded_counters_not_excluded(self): - counter_cfg = ['*', '!b'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - - def test_all_excluded_counters_not_excluded(self): - counter_cfg = ['!b', '!c'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], "name")) - - def test_all_excluded_counters_is_excluded(self): - counter_cfg = ['!a', '!c'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertFalse(pipeline_manager.pipelines[0].support_meter('a')) - self.assertTrue(pipeline_manager.pipelines[0].support_meter('b')) - self.assertFalse(pipeline_manager.pipelines[0].support_meter('c')) - - def test_wildcard_and_excluded_wildcard_counters(self): - counter_cfg = ['*', '!disk.*'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = 
pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertFalse(pipeline_manager.pipelines[0]. - support_meter('disk.read.bytes')) - self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu')) - - def test_included_counter_and_wildcard_counters(self): - counter_cfg = ['cpu', 'disk.*'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertTrue(pipeline_manager.pipelines[0]. - support_meter('disk.read.bytes')) - self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu')) - self.assertFalse(pipeline_manager.pipelines[0]. - support_meter('instance')) - - def test_excluded_counter_and_excluded_wildcard_counters(self): - counter_cfg = ['!cpu', '!disk.*'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertFalse(pipeline_manager.pipelines[0]. - support_meter('disk.read.bytes')) - self.assertFalse(pipeline_manager.pipelines[0].support_meter('cpu')) - self.assertTrue(pipeline_manager.pipelines[0]. - support_meter('instance')) - - def test_multiple_pipeline(self): - self._augment_pipeline_cfg() - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, publisher.calls) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - new_publisher = pipeline_manager.pipelines[1].publishers[0] - self.assertEqual(1, len(new_publisher.samples)) - self.assertEqual(1, new_publisher.calls) - self.assertEqual('b_new', getattr(new_publisher.samples[0], "name")) - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], "name")) - self.assertEqual('b', - getattr(self.TransformerClass.samples[1], "name")) - - def test_multiple_pipeline_exception(self): - self._reraise_exception = False - self._break_pipeline_cfg() - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, publisher.calls) - self.assertEqual(1, len(publisher.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a', - 
getattr(self.TransformerClass.samples[0], "name")) - self.assertEqual('b', - getattr(self.TransformerClass.samples[1], "name")) - - def test_none_transformer_pipeline(self): - self._set_pipeline_cfg('transformers', None) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, publisher.calls) - self.assertEqual('a', getattr(publisher.samples[0], 'name')) - - def test_empty_transformer_pipeline(self): - self._set_pipeline_cfg('transformers', []) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, publisher.calls) - self.assertEqual('a', getattr(publisher.samples[0], 'name')) - - def test_multiple_transformer_same_class(self): - transformer_cfg = [ - { - 'name': 'update', - 'parameters': {} - }, - { - 'name': 'update', - 'parameters': {} - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, publisher.calls) - self.assertEqual(1, len(publisher.samples)) - self.assertEqual('a_update_update', - getattr(publisher.samples[0], 'name')) - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], 'name')) - self.assertEqual('a_update', - getattr(self.TransformerClass.samples[1], 'name')) - - def test_multiple_transformer_same_class_different_parameter(self): - transformer_cfg = [ - { - 'name': 'update', - 'parameters': - { - "append_name": "_update", - } - }, - { - 'name': 'update', - 'parameters': - { - "append_name": "_new", - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], 'name')) - self.assertEqual('a_update', - getattr(self.TransformerClass.samples[1], 'name')) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, - len(publisher.samples)) - self.assertEqual('a_update_new', - getattr(publisher.samples[0], 'name')) - - def test_multiple_transformer_drop_transformer(self): - transformer_cfg = [ - { - 'name': 'update', - 'parameters': - { - "append_name": "_update", - } - }, - { - 'name': 'drop', - 'parameters': {} - }, - { - 'name': 'update', - 'parameters': - { - "append_name": "_new", - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], 'name')) - self.assertEqual(1, - 
len(self.TransformerClassDrop.samples)) - self.assertEqual('a_update', - getattr(self.TransformerClassDrop.samples[0], 'name')) - - def test_multiple_publisher(self): - self._set_pipeline_cfg('publishers', ['test://', 'new://']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - new_publisher = pipeline_manager.pipelines[0].publishers[1] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(new_publisher.samples)) - self.assertEqual('a_update', - getattr(new_publisher.samples[0], 'name')) - self.assertEqual('a_update', - getattr(publisher.samples[0], 'name')) - - def test_multiple_publisher_isolation(self): - self._reraise_exception = False - self._set_pipeline_cfg('publishers', ['except://', 'new://']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - new_publisher = pipeline_manager.pipelines[0].publishers[1] - self.assertEqual(1, len(new_publisher.samples)) - self.assertEqual('a_update', - getattr(new_publisher.samples[0], 'name')) - - def test_multiple_counter_pipeline(self): - self._set_pipeline_cfg('counters', ['a', 'b']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter, - sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - )]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(2, len(publisher.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], 'name')) - self.assertEqual('b_update', getattr(publisher.samples[1], 'name')) - - def test_flush_pipeline_cache(self): - CACHE_SIZE = 10 - extra_transformer_cfg = [ - { - 'name': 'cache', - 'parameters': { - 'size': CACHE_SIZE, - } - }, - { - 'name': 'update', - 'parameters': - { - 'append_name': '_new' - } - }, - ] - self._extend_pipeline_cfg('transformers', extra_transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(self.test_counter) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - pipe.flush() - self.assertEqual(0, len(publisher.samples)) - pipe.publish_data(self.test_counter) - pipe.flush() - self.assertEqual(0, len(publisher.samples)) - for i in range(CACHE_SIZE - 2): - pipe.publish_data(self.test_counter) - pipe.flush() - self.assertEqual(CACHE_SIZE, len(publisher.samples)) - self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name')) - - def test_flush_pipeline_cache_multiple_counter(self): - CACHE_SIZE = 3 - extra_transformer_cfg = [ - { - 'name': 'cache', - 'parameters': { - 'size': CACHE_SIZE - } - }, - { - 'name': 'update', - 'parameters': - { - 'append_name': '_new' - } - }, - ] - self._extend_pipeline_cfg('transformers', extra_transformer_cfg) - self._set_pipeline_cfg('counters', ['a', 'b']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with 
pipeline_manager.publisher() as p: - p([self.test_counter, - sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - )]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(CACHE_SIZE, len(publisher.samples)) - self.assertEqual('a_update_new', - getattr(publisher.samples[0], 'name')) - self.assertEqual('b_update_new', - getattr(publisher.samples[1], 'name')) - - def test_flush_pipeline_cache_before_publisher(self): - extra_transformer_cfg = [{ - 'name': 'cache', - 'parameters': {} - }] - self._extend_pipeline_cfg('transformers', extra_transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - publisher = pipe.publishers[0] - pipe.publish_data(self.test_counter) - self.assertEqual(0, len(publisher.samples)) - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - self.assertEqual('a_update', - getattr(publisher.samples[0], 'name')) - - def test_global_unit_conversion(self): - scale = 'volume / ((10**6) * 60)' - transformer_cfg = [ - { - 'name': 'unit_conversion', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_mins', - 'unit': 'min', - 'scale': scale}, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - counters = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=1200000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={} - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - cpu_mins = publisher.samples[-1] - self.assertEqual('cpu_mins', getattr(cpu_mins, 'name')) - self.assertEqual('min', getattr(cpu_mins, 'unit')) - self.assertEqual(sample.TYPE_CUMULATIVE, getattr(cpu_mins, 'type')) - self.assertEqual(20, getattr(cpu_mins, 'volume')) - - def test_unit_identified_source_unit_conversion(self): - transformer_cfg = [ - { - 'name': 'unit_conversion', - 'parameters': { - 'source': {'unit': '°C'}, - 'target': {'unit': '°F', - 'scale': '(volume * 1.8) + 32'}, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['core_temperature', - 'ambient_temperature']) - counters = [ - sample.Sample( - name='core_temperature', - type=sample.TYPE_GAUGE, - volume=36.0, - unit='°C', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={} - ), - sample.Sample( - name='ambient_temperature', - type=sample.TYPE_GAUGE, - volume=88.8, - unit='°F', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={} - ), - ] - - pipeline_manager = 
pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(2, len(publisher.samples)) - core_temp = publisher.samples[0] - self.assertEqual('core_temperature', getattr(core_temp, 'name')) - self.assertEqual('°F', getattr(core_temp, 'unit')) - self.assertEqual(96.8, getattr(core_temp, 'volume')) - amb_temp = publisher.samples[1] - self.assertEqual('ambient_temperature', getattr(amb_temp, 'name')) - self.assertEqual('°F', getattr(amb_temp, 'unit')) - self.assertEqual(88.8, getattr(amb_temp, 'volume')) - self.assertEqual(96.8, getattr(core_temp, 'volume')) - - def _do_test_rate_of_change_conversion(self, prev, curr, type, expected, - offset=1, weight=None): - s = ("(resource_metadata.user_metadata.autoscaling_weight or 1.0)" - "* (resource_metadata.non.existent or 1.0)" - "* (100.0 / (10**9 * (resource_metadata.cpu_number or 1)))") - transformer_cfg = [ - { - 'name': 'rate_of_change', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_util', - 'unit': '%', - 'type': sample.TYPE_GAUGE, - 'scale': s}, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - now = timeutils.utcnow() - later = now + datetime.timedelta(minutes=offset) - um = {'autoscaling_weight': weight} if weight else {} - counters = [ - sample.Sample( - name='cpu', - type=type, - volume=prev, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=now.isoformat(), - resource_metadata={'cpu_number': 4, - 'user_metadata': um}, - ), - sample.Sample( - name='cpu', - type=type, - volume=prev, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource2', - timestamp=now.isoformat(), - resource_metadata={'cpu_number': 2, - 'user_metadata': um}, - ), - sample.Sample( - name='cpu', - type=type, - volume=curr, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=later.isoformat(), - resource_metadata={'cpu_number': 4, - 'user_metadata': um}, - ), - sample.Sample( - name='cpu', - type=type, - volume=curr, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource2', - timestamp=later.isoformat(), - resource_metadata={'cpu_number': 2, - 'user_metadata': um}, - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(2, len(publisher.samples)) - pipe.flush() - self.assertEqual(2, len(publisher.samples)) - cpu_util = publisher.samples[0] - self.assertEqual('cpu_util', getattr(cpu_util, 'name')) - self.assertEqual('test_resource', getattr(cpu_util, 'resource_id')) - self.assertEqual('%', getattr(cpu_util, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type')) - self.assertEqual(expected, getattr(cpu_util, 'volume')) - cpu_util = publisher.samples[1] - self.assertEqual('cpu_util', getattr(cpu_util, 'name')) - self.assertEqual('test_resource2', getattr(cpu_util, 'resource_id')) - self.assertEqual('%', getattr(cpu_util, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type')) - self.assertEqual(expected * 2, getattr(cpu_util, 'volume')) - - def test_rate_of_change_conversion(self): - 
self._do_test_rate_of_change_conversion(120000000000, - 180000000000, - sample.TYPE_CUMULATIVE, - 25.0) - - def test_rate_of_change_conversion_weight(self): - self._do_test_rate_of_change_conversion(120000000000, - 180000000000, - sample.TYPE_CUMULATIVE, - 27.5, - weight=1.1) - - def test_rate_of_change_conversion_negative_cumulative_delta(self): - self._do_test_rate_of_change_conversion(180000000000, - 120000000000, - sample.TYPE_CUMULATIVE, - 50.0) - - def test_rate_of_change_conversion_negative_gauge_delta(self): - self._do_test_rate_of_change_conversion(180000000000, - 120000000000, - sample.TYPE_GAUGE, - -25.0) - - def test_rate_of_change_conversion_zero_delay(self): - self._do_test_rate_of_change_conversion(120000000000, - 120000000000, - sample.TYPE_CUMULATIVE, - 0.0, - offset=0) - - def test_rate_of_change_no_predecessor(self): - s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" - transformer_cfg = [ - { - 'name': 'rate_of_change', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_util', - 'unit': '%', - 'type': sample.TYPE_GAUGE, - 'scale': s} - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - now = timeutils.utcnow() - counters = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=120000000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=now.isoformat(), - resource_metadata={'cpu_number': 4} - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - pipe.flush() - self.assertEqual(0, len(publisher.samples)) - - @mock.patch('ceilometer.transformer.conversions.LOG') - def test_rate_of_change_out_of_order(self, the_log): - s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" - transformer_cfg = [ - { - 'name': 'rate_of_change', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_util', - 'unit': '%', - 'type': sample.TYPE_GAUGE, - 'scale': s} - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - now = timeutils.utcnow() - earlier = now - datetime.timedelta(seconds=10) - later = now + datetime.timedelta(seconds=10) - - counters = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=125000000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=now.isoformat(), - resource_metadata={'cpu_number': 4} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=120000000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=earlier.isoformat(), - resource_metadata={'cpu_number': 4} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=130000000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=later.isoformat(), - resource_metadata={'cpu_number': 4} - ), - ] - - pipe.publish_data(counters) - publisher = pipe.publishers[0] - self.assertEqual(1, len(publisher.samples)) - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - - cpu_util_sample = publisher.samples[0] - 
self.assertEqual(12.5, cpu_util_sample.volume) - the_log.warning.assert_called_with( - 'dropping out of time order sample: %s', - (counters[1],) - ) - - def test_resources(self): - resources = ['test1://', 'test2://'] - self._set_pipeline_cfg('resources', resources) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertEqual(resources, - pipeline_manager.pipelines[0].resources) - - def test_no_resources(self): - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertEqual(0, len(pipeline_manager.pipelines[0].resources)) - - def _do_test_rate_of_change_mapping(self, pipe, meters, units): - now = timeutils.utcnow() - base = 1000 - offset = 7 - rate = 42 - later = now + datetime.timedelta(minutes=offset) - counters = [] - for v, ts in [(base, now.isoformat()), - (base + (offset * 60 * rate), later.isoformat())]: - for n, u, r in [(meters[0], units[0], 'resource1'), - (meters[1], units[1], 'resource2')]: - s = sample.Sample( - name=n, - type=sample.TYPE_CUMULATIVE, - volume=v, - unit=u, - user_id='test_user', - project_id='test_proj', - resource_id=r, - timestamp=ts, - resource_metadata={}, - ) - counters.append(s) - - pipe.publish_data(counters) - publisher = pipe.publishers[0] - self.assertEqual(2, len(publisher.samples)) - pipe.flush() - self.assertEqual(2, len(publisher.samples)) - bps = publisher.samples[0] - self.assertEqual('%s.rate' % meters[0], getattr(bps, 'name')) - self.assertEqual('resource1', getattr(bps, 'resource_id')) - self.assertEqual('%s/s' % units[0], getattr(bps, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(bps, 'type')) - self.assertEqual(rate, getattr(bps, 'volume')) - rps = publisher.samples[1] - self.assertEqual('%s.rate' % meters[1], getattr(rps, 'name')) - self.assertEqual('resource2', getattr(rps, 'resource_id')) - self.assertEqual('%s/s' % units[1], getattr(rps, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(rps, 'type')) - self.assertEqual(rate, getattr(rps, 'volume')) - - def test_rate_of_change_mapping(self): - map_from = {'name': 'disk\\.(read|write)\\.(bytes|requests)', - 'unit': '(B|request)'} - map_to = {'name': 'disk.\\1.\\2.rate', - 'unit': '\\1/s'} - transformer_cfg = [ - { - 'name': 'rate_of_change', - 'parameters': { - 'source': { - 'map_from': map_from - }, - 'target': { - 'map_to': map_to, - 'type': sample.TYPE_GAUGE - }, - }, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['disk.read.bytes', - 'disk.write.requests']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - meters = ('disk.read.bytes', 'disk.write.requests') - units = ('B', 'request') - self._do_test_rate_of_change_mapping(pipe, meters, units) - - def _do_test_aggregator(self, parameters, expected_length): - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': parameters, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes']) - counters = [ - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=26, - unit='B', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=16, - unit='B', - user_id='test_user', 
- project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=53, - unit='B', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=42, - unit='B', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=15, - unit='B', - user_id='test_user', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=2, - unit='B', - user_id='test_user_bis', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '3.0'} - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(expected_length, len(publisher.samples)) - return sorted(publisher.samples, key=lambda s: s.volume) - - def test_aggregator_meter_type(self): - volumes = [1.0, 2.0, 3.0] - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': len(volumes) * len(sample.TYPES)} - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', - ['testgauge', 'testcumulative', 'testdelta']) - counters = [] - for sample_type in sample.TYPES: - for volume in volumes: - counters.append(sample.Sample( - name='test' + sample_type, - type=sample_type, - volume=volume, - unit='B', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - )) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - actual = sorted(s.volume for s in publisher.samples) - self.assertEqual([2.0, 3.0, 6.0], actual) - - def test_aggregator_metadata(self): - for conf, expected_version in [('last', '2.0'), ('first', '1.0')]: - samples = self._do_test_aggregator({ - 'resource_metadata': conf, - 'target': {'name': 'aggregated-bytes'} - }, expected_length=4) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(2, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - s = samples[1] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(15, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj_bis', s.project_id) - self.assertEqual({'version': '2.0'}, - s.resource_metadata) - s = samples[2] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(42, s.volume) - self.assertEqual('test_user', s.user_id) - 
self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': expected_version}, - s.resource_metadata) - s = samples[3] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(95, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj_bis', s.project_id) - self.assertEqual({'version': expected_version}, - s.resource_metadata) - - def test_aggregator_user_last_and_metadata_last(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'last', - 'user_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=2) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(44, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - s = samples[1] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(110, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj_bis', s.project_id) - self.assertEqual({'version': '2.0'}, - s.resource_metadata) - - def test_aggregator_user_first_and_metadata_last(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'last', - 'user_id': 'first', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=2) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(44, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - s = samples[1] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(110, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj_bis', s.project_id) - self.assertEqual({'version': '2.0'}, - s.resource_metadata) - - def test_aggregator_all_first(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'first', - 'user_id': 'first', - 'project_id': 'first', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '1.0'}, - s.resource_metadata) - - def test_aggregator_all_last(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'last', - 'user_id': 'last', - 'project_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - - def test_aggregator_all_mixed(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'drop', - 'user_id': 'first', - 'project_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({}, s.resource_metadata) - - def test_aggregator_metadata_default(self): - samples = self._do_test_aggregator({ - 'user_id': 'last', - 'project_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - 
self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - - @mock.patch('ceilometer.transformer.conversions.LOG') - def test_aggregator_metadata_invalid(self, mylog): - samples = self._do_test_aggregator({ - 'resource_metadata': 'invalid', - 'user_id': 'last', - 'project_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertTrue(mylog.warning.called) - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - - def test_aggregator_sized_flush(self): - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': 2}, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes']) - counters = [ - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=26, - unit='B', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=16, - unit='B', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ) - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data([counters[0]]) - pipe.flush() - publisher = pipe.publishers[0] - self.assertEqual(0, len(publisher.samples)) - - pipe.publish_data([counters[1]]) - pipe.flush() - publisher = pipe.publishers[0] - self.assertEqual(2, len(publisher.samples)) - - def test_aggregator_timed_flush(self): - timeutils.set_time_override() - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': 900, 'retention_time': 60}, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes']) - counters = [ - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=26, - unit='B', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - - timeutils.advance_time_seconds(120) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - - def test_aggregator_without_authentication(self): - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': 2}, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['storage.objects.outgoing.bytes']) - counters = [ - sample.Sample( - name='storage.objects.outgoing.bytes', - type=sample.TYPE_DELTA, - volume=26, - unit='B', - user_id=None, - project_id=None, - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - 
resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='storage.objects.outgoing.bytes', - type=sample.TYPE_DELTA, - volume=16, - unit='B', - user_id=None, - project_id=None, - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ) - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data([counters[0]]) - pipe.flush() - publisher = pipe.publishers[0] - self.assertEqual(0, len(publisher.samples)) - - pipe.publish_data([counters[1]]) - pipe.flush() - publisher = pipe.publishers[0] - - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(42, getattr(publisher.samples[0], 'volume')) - self.assertEqual("test_resource", getattr(publisher.samples[0], - 'resource_id')) - - def test_aggregator_to_rate_of_change_transformer_two_resources(self): - resource_id = ['1ca738a1-c49c-4401-8346-5c60ebdb03f4', - '5dd418a6-c6a9-49c9-9cef-b357d72c71dd'] - - aggregator = conversions.AggregatorTransformer(size="2", - timestamp="last") - - rate_of_change_transformer = conversions.RateOfChangeTransformer() - - counter_time = timeutils.parse_isotime('2016-01-01T12:00:00+00:00') - - for offset in range(2): - counter = copy.copy(self.test_counter) - counter.timestamp = timeutils.isotime(counter_time) - counter.resource_id = resource_id[0] - counter.volume = offset - counter.type = sample.TYPE_CUMULATIVE - counter.unit = 'ns' - aggregator.handle_sample(counter) - - if offset == 1: - test_time = counter_time - - counter_time = counter_time + datetime.timedelta(0, 1) - - aggregated_counters = aggregator.flush() - self.assertEqual(len(aggregated_counters), 1) - self.assertEqual(aggregated_counters[0].timestamp, - timeutils.isotime(test_time)) - - rate_of_change_transformer.handle_sample(aggregated_counters[0]) - - for offset in range(2): - counter = copy.copy(self.test_counter) - counter.timestamp = timeutils.isotime(counter_time) - counter.resource_id = resource_id[offset] - counter.volume = 2 - counter.type = sample.TYPE_CUMULATIVE - counter.unit = 'ns' - aggregator.handle_sample(counter) - - if offset == 0: - test_time = counter_time - - counter_time = counter_time + datetime.timedelta(0, 1) - - aggregated_counters = aggregator.flush() - self.assertEqual(len(aggregated_counters), 2) - - for counter in aggregated_counters: - if counter.resource_id == resource_id[0]: - rateOfChange = rate_of_change_transformer.handle_sample( - counter) - self.assertEqual(counter.timestamp, - timeutils.isotime(test_time)) - - self.assertEqual(rateOfChange.volume, 1) - - def _do_test_arithmetic_expr_parse(self, expr, expected): - actual = arithmetic.ArithmeticTransformer.parse_expr(expr) - self.assertEqual(expected, actual) - - def test_arithmetic_expr_parse(self): - expr = '$(cpu) + $(cpu.util)' - expected = ('cpu.volume + _cpu_util_ESC.volume', - { - 'cpu': 'cpu', - 'cpu.util': '_cpu_util_ESC' - }) - self._do_test_arithmetic_expr_parse(expr, expected) - - def test_arithmetic_expr_parse_parameter(self): - expr = '$(cpu) + $(cpu.util).resource_metadata' - expected = ('cpu.volume + _cpu_util_ESC.resource_metadata', - { - 'cpu': 'cpu', - 'cpu.util': '_cpu_util_ESC' - }) - self._do_test_arithmetic_expr_parse(expr, expected) - - def test_arithmetic_expr_parse_reserved_keyword(self): - expr = '$(class) + $(cpu.util)' - expected = ('_class_ESC.volume + _cpu_util_ESC.volume', - { - 'class': '_class_ESC', - 'cpu.util': '_cpu_util_ESC' - }) - 
self._do_test_arithmetic_expr_parse(expr, expected) - - def test_arithmetic_expr_parse_already_escaped(self): - expr = '$(class) + $(_class_ESC)' - expected = ('_class_ESC.volume + __class_ESC_ESC.volume', - { - 'class': '_class_ESC', - '_class_ESC': '__class_ESC_ESC' - }) - self._do_test_arithmetic_expr_parse(expr, expected) - - def _do_test_arithmetic(self, expression, scenario, expected): - transformer_cfg = [ - { - 'name': 'arithmetic', - 'parameters': { - 'target': {'name': 'new_meter', - 'unit': '%', - 'type': sample.TYPE_GAUGE, - 'expr': expression}, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', - list(set(s['name'] for s in scenario))) - counters = [] - test_resources = ['test_resource1', 'test_resource2'] - for resource_id in test_resources: - for s in scenario: - counters.append(sample.Sample( - name=s['name'], - type=sample.TYPE_CUMULATIVE, - volume=s['volume'], - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id=resource_id, - timestamp=timeutils.utcnow().isoformat(), - resource_metadata=s.get('metadata') - )) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - for s in counters: - pipe.publish_data(s) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - expected_len = len(test_resources) * len(expected) - self.assertEqual(expected_len, len(publisher.samples)) - - # bucket samples by resource first - samples_by_resource = dict((r, []) for r in test_resources) - for s in publisher.samples: - samples_by_resource[s.resource_id].append(s) - - for resource_id in samples_by_resource: - self.assertEqual(len(expected), - len(samples_by_resource[resource_id])) - for i, s in enumerate(samples_by_resource[resource_id]): - self.assertEqual('new_meter', getattr(s, 'name')) - self.assertEqual(resource_id, getattr(s, 'resource_id')) - self.assertEqual('%', getattr(s, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(s, 'type')) - self.assertEqual(expected[i], getattr(s, 'volume')) - - def test_arithmetic_transformer(self): - expression = '100.0 * $(memory.usage) / $(memory)' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [50.0] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_expr_empty(self): - expression = '' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_expr_misconfigured(self): - expression = '512.0 * 3' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_nan(self): - expression = 'float(\'nan\') * $(memory.usage) / $(memory)' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_exception(self): - expression = '$(memory) / 0' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_multiple_samples(self): - expression = '100.0 * $(memory.usage) / $(memory)' - 
scenario = [ - dict(name='memory', volume=2048.0), - dict(name='memory.usage', volume=512.0), - dict(name='memory', volume=1024.0), - ] - expected = [25.0] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_missing(self): - expression = '100.0 * $(memory.usage) / $(memory)' - scenario = [dict(name='memory.usage', volume=512.0)] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_more_than_needed(self): - expression = '100.0 * $(memory.usage) / $(memory)' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - dict(name='cpu_util', volume=90.0), - ] - expected = [50.0] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_cache_cleared(self): - transformer_cfg = [ - { - 'name': 'arithmetic', - 'parameters': { - 'target': {'name': 'new_meter', - 'expr': '$(memory.usage) + 2'} - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['memory.usage']) - counter = sample.Sample( - name='memory.usage', - type=sample.TYPE_GAUGE, - volume=1024.0, - unit='MB', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata=None - ) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data([counter]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1026.0, publisher.samples[0].volume) - - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - - counter.volume = 2048.0 - pipe.publish_data([counter]) - pipe.flush() - self.assertEqual(2, len(publisher.samples)) - self.assertEqual(2050.0, publisher.samples[1].volume) - - def test_aggregator_timed_flush_no_matching_samples(self): - timeutils.set_time_override() - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': 900, 'retention_time': 60}, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['unrelated-sample']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - timeutils.advance_time_seconds(200) - pipe = pipeline_manager.pipelines[0] - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - - def _do_test_delta(self, data, expected, growth_only=False): - transformer_cfg = [ - { - 'name': 'delta', - 'parameters': { - 'target': {'name': 'new_meter'}, - 'growth_only': growth_only, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(data) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(expected, len(publisher.samples)) - return publisher.samples - - def test_delta_transformer(self): - samples = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=26, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - 
name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=16, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=53, - unit='ns', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - ] - deltas = self._do_test_delta(samples, 2) - self.assertEqual('new_meter', deltas[0].name) - self.assertEqual('delta', deltas[0].type) - self.assertEqual('ns', deltas[0].unit) - self.assertEqual({'version': '2.0'}, deltas[0].resource_metadata) - self.assertEqual(-10, deltas[0].volume) - self.assertEqual('new_meter', deltas[1].name) - self.assertEqual('delta', deltas[1].type) - self.assertEqual('ns', deltas[1].unit) - self.assertEqual({'version': '1.0'}, deltas[1].resource_metadata) - self.assertEqual(37, deltas[1].volume) - - def test_delta_transformer_out_of_order(self): - samples = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=26, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=16, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=((timeutils.utcnow() - datetime.timedelta(minutes=5)) - .isoformat()), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=53, - unit='ns', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - ] - deltas = self._do_test_delta(samples, 1) - self.assertEqual('new_meter', deltas[0].name) - self.assertEqual('delta', deltas[0].type) - self.assertEqual('ns', deltas[0].unit) - self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata) - self.assertEqual(27, deltas[0].volume) - - def test_delta_transformer_growth_only(self): - samples = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=26, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=16, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=53, - unit='ns', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - ] - deltas = self._do_test_delta(samples, 1, True) - self.assertEqual('new_meter', deltas[0].name) - self.assertEqual('delta', deltas[0].type) - self.assertEqual('ns', deltas[0].unit) - self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata) - self.assertEqual(37, deltas[0].volume) - - def test_unique_pipeline_names(self): - self._dup_pipeline_name_cfg() - self._exception_create_pipelinemanager() - - def test_get_pipeline_grouping_key(self): - transformer_cfg = [ - { - 'name': 'update', - 
'parameters': {} - }, - { - 'name': 'unit_conversion', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_mins', - 'unit': 'min', - 'scale': 'volume'}, - } - }, - { - 'name': 'update', - 'parameters': {} - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertEqual(set(['resource_id', 'counter_name']), - set(pipeline.get_pipeline_grouping_key( - pipeline_manager.pipelines[0]))) - - def test_get_pipeline_duplicate_grouping_key(self): - transformer_cfg = [ - { - 'name': 'update', - 'parameters': {} - }, - { - 'name': 'update', - 'parameters': {} - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertEqual(['counter_name'], - pipeline.get_pipeline_grouping_key( - pipeline_manager.pipelines[0])) diff --git a/ceilometer/tests/tempest/__init__.py b/ceilometer/tests/tempest/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/tempest/api/__init__.py b/ceilometer/tests/tempest/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/tempest/api/base.py b/ceilometer/tests/tempest/api/base.py deleted file mode 100644 index 81c53d64..00000000 --- a/ceilometer/tests/tempest/api/base.py +++ /dev/null @@ -1,162 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
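The removed tests just above pin down the delta transformer's contract: deltas are tracked per resource_id, a sample arriving with an older timestamp than the last one seen is ignored, and with growth_only negative deltas are dropped while the cached state still advances. A minimal standalone model of that numeric behaviour (the class name and shape are illustrative, not the removed ceilometer implementation; sample construction and publishing are left out):

    class DeltaSketch(object):
        """Per-resource delta tracking, as exercised by the tests above."""

        def __init__(self, growth_only=False):
            self.growth_only = growth_only
            self.cache = {}  # resource_id -> (timestamp, volume)

        def handle_sample(self, s):
            prev = self.cache.get(s.resource_id)
            if prev and s.timestamp <= prev[0]:
                return None  # out-of-order sample: no delta, state unchanged
            self.cache[s.resource_id] = (s.timestamp, s.volume)
            if prev is None:
                return None  # first sample only primes the cache
            delta = s.volume - prev[1]
            if self.growth_only and delta < 0:
                return None  # dropped, but the cache already moved forward
            return delta

This reproduces the expectations asserted above: volumes 26, 16, 53 yield -10 then 37; with growth_only only 37 survives; and the out-of-order case yields a single delta of 27. ISO-8601 timestamps from the same clock compare correctly as plain strings, which is all the sketch relies on.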
- -import time - -from oslo_utils import timeutils -from tempest.common import compute -from tempest.common.utils import data_utils -from tempest import config -from tempest import exceptions -from tempest.lib import exceptions as lib_exc -import tempest.test - -from ceilometer.tests.tempest.service import client - - -CONF = config.CONF - - -class ClientManager(client.Manager): - - load_clients = [ - 'servers_client', - 'compute_networks_client', - 'compute_floating_ips_client', - 'flavors_client', - 'image_client', - 'image_client_v2', - 'telemetry_client', - ] - - -class BaseTelemetryTest(tempest.test.BaseTestCase): - - """Base test case class for all Telemetry API tests.""" - - credentials = ['primary'] - client_manager = ClientManager - - @classmethod - def skip_checks(cls): - super(BaseTelemetryTest, cls).skip_checks() - if not CONF.service_available.ceilometer: - raise cls.skipException("Ceilometer support is required") - - @classmethod - def setup_credentials(cls): - cls.set_network_resources() - super(BaseTelemetryTest, cls).setup_credentials() - - @classmethod - def setup_clients(cls): - super(BaseTelemetryTest, cls).setup_clients() - cls.telemetry_client = cls.os_primary.telemetry_client - cls.servers_client = cls.os_primary.servers_client - cls.flavors_client = cls.os_primary.flavors_client - cls.image_client = cls.os_primary.image_client - cls.image_client_v2 = cls.os_primary.image_client_v2 - - @classmethod - def resource_setup(cls): - super(BaseTelemetryTest, cls).resource_setup() - cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size', - 'disk.ephemeral.size'] - - cls.glance_notifications = ['image.size'] - - cls.glance_v2_notifications = ['image.download', 'image.serve'] - - cls.server_ids = [] - cls.image_ids = [] - - @classmethod - def create_server(cls): - tenant_network = cls.get_tenant_network() - body, server = compute.create_test_server( - cls.os_primary, - tenant_network=tenant_network, - name=data_utils.rand_name('ceilometer-instance'), - wait_until='ACTIVE') - cls.server_ids.append(body['id']) - return body - - @classmethod - def create_image(cls, client, **kwargs): - body = client.create_image(name=data_utils.rand_name('image'), - container_format='bare', - disk_format='raw', - **kwargs) - # TODO(jswarren) Move ['image'] up to initial body value assignment - # once both v1 and v2 glance clients include the full response - # object. - if 'image' in body: - body = body['image'] - cls.image_ids.append(body['id']) - return body - - @staticmethod - def cleanup_resources(method, list_of_ids): - for resource_id in list_of_ids: - try: - method(resource_id) - except lib_exc.NotFound: - pass - - @classmethod - def resource_cleanup(cls): - cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids) - cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids) - super(BaseTelemetryTest, cls).resource_cleanup() - - def await_samples(self, metric, query): - """This method is to wait for sample to add it to database. 
- - There are long time delays when using Postgresql (or Mysql) - database as ceilometer backend - """ - timeout = CONF.compute.build_timeout - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout: - body = self.telemetry_client.list_samples(metric, query) - if body: - return body - time.sleep(CONF.compute.build_interval) - - raise exceptions.TimeoutException( - 'Sample for metric:%s with query:%s has not been added to the ' - 'database within %d seconds' % (metric, query, - CONF.compute.build_timeout)) - - -class BaseTelemetryAdminTest(BaseTelemetryTest): - """Base test case class for admin Telemetry API tests.""" - - credentials = ['primary', 'admin'] - - @classmethod - def setup_clients(cls): - super(BaseTelemetryAdminTest, cls).setup_clients() - cls.telemetry_admin_client = cls.os_adm.telemetry_client - - def await_events(self, query): - timeout = CONF.compute.build_timeout - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout: - body = self.telemetry_admin_client.list_events(query) - if body: - return body - time.sleep(CONF.compute.build_interval) - - raise exceptions.TimeoutException( - 'Event with query:%s has not been added to the ' - 'database within %d seconds' % (query, CONF.compute.build_timeout)) diff --git a/ceilometer/tests/tempest/api/test_telemetry_notification_api.py b/ceilometer/tests/tempest/api/test_telemetry_notification_api.py deleted file mode 100644 index d723b558..00000000 --- a/ceilometer/tests/tempest/api/test_telemetry_notification_api.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
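The await_samples/await_events helpers above both implement the same wait-until-populated loop: poll the API, return as soon as a non-empty body comes back, otherwise sleep for build_interval and give up with a TimeoutException once build_timeout has elapsed. A generic sketch of that pattern with plain stdlib calls (names are illustrative; the removed helpers used oslo's timeutils plus tempest's CONF and exception class):

    import time

    def wait_for(fetch, timeout, interval):
        """Call fetch() until it returns a truthy result or timeout expires."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            result = fetch()
            if result:
                return result
            time.sleep(interval)
        raise RuntimeError('no result within %d seconds' % timeout)

    # e.g. wait_for(lambda: client.list_samples(metric, query), 120, 1)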
- -# Change-Id: I14e16a1a7d9813b324ee40545c07f0e88fb637b7 - -import testtools - -from ceilometer.tests.tempest.api import base -from tempest import config -from tempest.lib import decorators -from tempest import test - - -CONF = config.CONF - - -class TelemetryNotificationAPITest(base.BaseTelemetryTest): - - @test.idempotent_id('d7f8c1c8-d470-4731-8604-315d3956caae') - @test.services('compute') - def test_check_nova_notification(self): - - body = self.create_server() - - query = ('resource', 'eq', body['id']) - - for metric in self.nova_notifications: - self.await_samples(metric, query) - - @test.attr(type="smoke") - @test.idempotent_id('04b10bfe-a5dc-47af-b22f-0460426bf499') - @test.services("image") - @testtools.skipIf(not CONF.image_feature_enabled.api_v1, - "Glance api v1 is disabled") - def test_check_glance_v1_notifications(self): - body = self.create_image(self.image_client, is_public=False) - self.image_client.update_image(body['id'], data='data') - - query = 'resource', 'eq', body['id'] - - self.image_client.delete_image(body['id']) - - for metric in self.glance_notifications: - self.await_samples(metric, query) - - @test.attr(type="smoke") - @test.idempotent_id('c240457d-d943-439b-8aea-85e26d64fe8f') - @test.services("image") - @testtools.skipIf(not CONF.image_feature_enabled.api_v2, - "Glance api v2 is disabled") - def test_check_glance_v2_notifications(self): - body = self.create_image(self.image_client_v2, visibility='private') - - self.image_client_v2.store_image_file(body['id'], "file") - self.image_client_v2.show_image_file(body['id']) - - query = 'resource', 'eq', body['id'] - - for metric in self.glance_v2_notifications: - self.await_samples(metric, query) - - -class TelemetryNotificationAdminAPITest(base.BaseTelemetryAdminTest): - - @test.idempotent_id('29604198-8b45-4fc0-8af8-1cae4f94ebea') - @test.services('compute') - @decorators.skip_because(bug='1480490') - def test_check_nova_notification_event_and_meter(self): - - body = self.create_server() - - if CONF.telemetry.event_enabled: - query = ('instance_id', 'eq', body['id']) - self.await_events(query) - - query = ('resource', 'eq', body['id']) - for metric in self.nova_notifications: - self.await_samples(metric, query) diff --git a/ceilometer/tests/tempest/config.py b/ceilometer/tests/tempest/config.py deleted file mode 100644 index dea33f6c..00000000 --- a/ceilometer/tests/tempest/config.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg - - -service_available_group = cfg.OptGroup(name="service_available", - title="Available OpenStack Services") - -ServiceAvailableGroup = [ - cfg.BoolOpt('ceilometer', - default=True, - help="Whether or not Ceilometer is expected to be available"), -] - -telemetry_group = cfg.OptGroup(name='telemetry', - title='Telemetry Service Options') - -TelemetryGroup = [ - cfg.StrOpt('catalog_type', - default='metering', - help="Catalog type of the Telemetry service."), - cfg.StrOpt('endpoint_type', - default='publicURL', - choices=['public', 'admin', 'internal', - 'publicURL', 'adminURL', 'internalURL'], - help="The endpoint type to use for the telemetry service."), - cfg.BoolOpt('event_enabled', - default=True, - help="Runs Ceilometer event-related tests"), -] diff --git a/ceilometer/tests/tempest/plugin.py b/ceilometer/tests/tempest/plugin.py deleted file mode 100644 index 077a3c20..00000000 --- a/ceilometer/tests/tempest/plugin.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright 2015 NEC Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from tempest import config -from tempest.test_discover import plugins - -import ceilometer -from ceilometer.tests.tempest import config as tempest_config - - -class CeilometerTempestPlugin(plugins.TempestPlugin): - - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(ceilometer.__file__)))[0] - test_dir = "ceilometer/tests/tempest" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - config.register_opt_group(conf, - tempest_config.service_available_group, - tempest_config.ServiceAvailableGroup) - config.register_opt_group(conf, - tempest_config.telemetry_group, - tempest_config.TelemetryGroup) - - def get_opt_lists(self): - return [(tempest_config.telemetry_group.name, - tempest_config.TelemetryGroup)] diff --git a/ceilometer/tests/tempest/scenario/__init__.py b/ceilometer/tests/tempest/scenario/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py b/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py deleted file mode 100644 index 0d99ee35..00000000 --- a/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2014 Red Hat -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
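The option groups defined in the removed config.py only take effect once plugin.py's register_opts hooks them into tempest's global CONF; register_opt_group is essentially a register_group followed by registering each option. A small sketch of the same wiring against a fresh oslo.config object, useful for seeing the defaults in isolation (it assumes the removed ceilometer.tests.tempest.config module were still importable):

    from oslo_config import cfg
    from ceilometer.tests.tempest import config as tempest_config

    conf = cfg.ConfigOpts()
    conf.register_group(tempest_config.telemetry_group)
    conf.register_opts(tempest_config.TelemetryGroup,
                       group=tempest_config.telemetry_group)
    conf(args=[])

    assert conf.telemetry.catalog_type == 'metering'
    assert conf.telemetry.event_enabled is True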
- -from oslo_log import log as logging -from tempest.common.utils import data_utils -from tempest import config -from tempest import test - -from ceilometer.tests.tempest.service import client - - -CONF = config.CONF - -LOG = logging.getLogger(__name__) - -# Loop for up to 120 seconds waiting on notifications -# NOTE(chdent): The choice of 120 seconds is fairly -# arbitrary: Long enough to give the notifications the -# chance to travel across a highly latent bus but not -# so long as to allow excessive latency to never be visible. -# TODO(chdent): Ideally this value would come from configuration. -NOTIFICATIONS_WAIT = 120 -NOTIFICATIONS_SLEEP = 1 - - -class ClientManager(client.Manager): - - load_clients = [ - 'telemetry_client', - 'container_client', - 'object_client', - ] - - -class TestObjectStorageTelemetry(test.BaseTestCase): - """Test that swift uses the ceilometer middleware. - - * create container. - * upload a file to the created container. - * retrieve the file from the created container. - * wait for notifications from ceilometer. - """ - - credentials = ['primary'] - client_manager = ClientManager - - @classmethod - def skip_checks(cls): - super(TestObjectStorageTelemetry, cls).skip_checks() - if not CONF.service_available.swift: - skip_msg = ("%s skipped as swift is not available" % - cls.__name__) - raise cls.skipException(skip_msg) - if not CONF.service_available.ceilometer: - skip_msg = ("%s skipped as ceilometer is not available" % - cls.__name__) - raise cls.skipException(skip_msg) - - @classmethod - def setup_credentials(cls): - cls.set_network_resources() - super(TestObjectStorageTelemetry, cls).setup_credentials() - - @classmethod - def setup_clients(cls): - super(TestObjectStorageTelemetry, cls).setup_clients() - cls.telemetry_client = cls.os_primary.telemetry_client - cls.container_client = cls.os_primary.container_client - cls.object_client = cls.os_primary.object_client - - def _confirm_notifications(self, container_name, obj_name): - # NOTE: Loop seeking for appropriate notifications about the containers - # and objects sent to swift. - - def _check_samples(): - # NOTE: Return True only if we have notifications about some - # containers and some objects and the notifications are about - # the expected containers and objects. - # Otherwise returning False will case _check_samples to be - # called again. - results = self.telemetry_client.list_samples( - 'storage.objects.incoming.bytes') - LOG.debug('got samples %s', results) - - # Extract container info from samples. - containers, objects = [], [] - for sample in results: - meta = sample['resource_metadata'] - if meta.get('container') and meta['container'] != 'None': - containers.append(meta['container']) - elif (meta.get('target.metadata:container') and - meta['target.metadata:container'] != 'None'): - containers.append(meta['target.metadata:container']) - - if meta.get('object') and meta['object'] != 'None': - objects.append(meta['object']) - elif (meta.get('target.metadata:object') and - meta['target.metadata:object'] != 'None'): - objects.append(meta['target.metadata:object']) - - return (container_name in containers and obj_name in objects) - - self.assertTrue(test.call_until_true(_check_samples, - NOTIFICATIONS_WAIT, - NOTIFICATIONS_SLEEP), - 'Correct notifications were not received after ' - '%s seconds.' 
% NOTIFICATIONS_WAIT) - - def create_container(self): - name = data_utils.rand_name('swift-scenario-container') - self.container_client.create_container(name) - # look for the container to assure it is created - self.container_client.list_container_contents(name) - LOG.debug('Container %s created' % (name)) - self.addCleanup(self.container_client.delete_container, - name) - return name - - def upload_object_to_container(self, container_name): - obj_name = data_utils.rand_name('swift-scenario-object') - obj_data = data_utils.arbitrary_string() - self.object_client.create_object(container_name, obj_name, obj_data) - self.addCleanup(self.object_client.delete_object, - container_name, - obj_name) - return obj_name - - @test.idempotent_id('6d6b88e5-3e38-41bc-b34a-79f713a6cb85') - @test.services('object_storage') - def test_swift_middleware_notifies(self): - container_name = self.create_container() - obj_name = self.upload_object_to_container(container_name) - self._confirm_notifications(container_name, obj_name) diff --git a/ceilometer/tests/tempest/service/__init__.py b/ceilometer/tests/tempest/service/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/tempest/service/client.py b/ceilometer/tests/tempest/service/client.py deleted file mode 100644 index 179f8a1a..00000000 --- a/ceilometer/tests/tempest/service/client.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
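The _confirm_notifications check above tolerates two metadata layouts (plain 'container'/'object' keys and the 'target.metadata:*' variants) and treats the literal string 'None' as absent; test.call_until_true then retries the whole check until the notifications show up or NOTIFICATIONS_WAIT elapses. The extraction step, pulled out into a compact hypothetical helper:

    def extract_resource_names(samples, key):
        """Collect container/object names from sample resource_metadata."""
        names = []
        for s in samples:
            meta = s['resource_metadata']
            for field in (key, 'target.metadata:%s' % key):
                value = meta.get(field)
                if value and value != 'None':
                    names.append(value)
                    break
        return names

    # container_name in extract_resource_names(results, 'container')
    # obj_name in extract_resource_names(results, 'object')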
- -from oslo_serialization import jsonutils as json -from six.moves.urllib import parse as urllib - -from tempest import config -from tempest.lib.common import rest_client -from tempest.lib.services.compute.flavors_client import FlavorsClient -from tempest.lib.services.compute.floating_ips_client import FloatingIPsClient -from tempest.lib.services.compute.networks_client import NetworksClient -from tempest.lib.services.compute.servers_client import ServersClient -from tempest import manager -from tempest.services.object_storage.container_client import ContainerClient -from tempest.services.object_storage.object_client import ObjectClient - -from ceilometer.tests.tempest.service.images.v1.images_client import \ - ImagesClient -from ceilometer.tests.tempest.service.images.v2.images_client import \ - ImagesClient as ImagesClientV2 - - -CONF = config.CONF - - -class TelemetryClient(rest_client.RestClient): - - version = '2' - uri_prefix = "v2" - - def deserialize(self, body): - return json.loads(body.replace("\n", "")) - - def serialize(self, body): - return json.dumps(body) - - def create_sample(self, meter_name, sample_list): - uri = "%s/meters/%s" % (self.uri_prefix, meter_name) - body = self.serialize(sample_list) - resp, body = self.post(uri, body) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBody(resp, body) - - def _helper_list(self, uri, query=None, period=None): - uri_dict = {} - if query: - uri_dict = {'q.field': query[0], - 'q.op': query[1], - 'q.value': query[2]} - if period: - uri_dict['period'] = period - if uri_dict: - uri += "?%s" % urllib.urlencode(uri_dict) - resp, body = self.get(uri) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBodyList(resp, body) - - def list_resources(self, query=None): - uri = '%s/resources' % self.uri_prefix - return self._helper_list(uri, query) - - def list_meters(self, query=None): - uri = '%s/meters' % self.uri_prefix - return self._helper_list(uri, query) - - def list_statistics(self, meter, period=None, query=None): - uri = "%s/meters/%s/statistics" % (self.uri_prefix, meter) - return self._helper_list(uri, query, period) - - def list_samples(self, meter_id, query=None): - uri = '%s/meters/%s' % (self.uri_prefix, meter_id) - return self._helper_list(uri, query) - - def list_events(self, query=None): - uri = '%s/events' % self.uri_prefix - return self._helper_list(uri, query) - - def show_resource(self, resource_id): - uri = '%s/resources/%s' % (self.uri_prefix, resource_id) - resp, body = self.get(uri) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBody(resp, body) - - -class Manager(manager.Manager): - - load_clients = [ - 'servers_client', - 'compute_networks_client', - 'compute_floating_ips_client', - 'flavors_client', - 'image_client', - 'image_client_v2', - 'telemetry_client', - 'container_client', - 'object_client', - ] - - default_params = { - 'disable_ssl_certificate_validation': - CONF.identity.disable_ssl_certificate_validation, - 'ca_certs': CONF.identity.ca_certificates_file, - 'trace_requests': CONF.debug.trace_requests - } - - compute_params = { - 'service': CONF.compute.catalog_type, - 'region': CONF.compute.region or CONF.identity.region, - 'endpoint_type': CONF.compute.endpoint_type, - 'build_interval': CONF.compute.build_interval, - 'build_timeout': CONF.compute.build_timeout, - } - compute_params.update(default_params) - - image_params = { - 'catalog_type': 
CONF.image.catalog_type, - 'region': CONF.image.region or CONF.identity.region, - 'endpoint_type': CONF.image.endpoint_type, - 'build_interval': CONF.image.build_interval, - 'build_timeout': CONF.image.build_timeout, - } - image_params.update(default_params) - - telemetry_params = { - 'service': CONF.telemetry.catalog_type, - 'region': CONF.identity.region, - 'endpoint_type': CONF.telemetry.endpoint_type, - } - telemetry_params.update(default_params) - - object_storage_params = { - 'service': CONF.object_storage.catalog_type, - 'region': CONF.object_storage.region or CONF.identity.region, - 'endpoint_type': CONF.object_storage.endpoint_type - } - object_storage_params.update(default_params) - - def __init__(self, credentials=None, service=None): - super(Manager, self).__init__(credentials) - for client in self.load_clients: - getattr(self, 'set_%s' % client)() - - def set_servers_client(self): - self.servers_client = ServersClient(self.auth_provider, - **self.compute_params) - - def set_compute_networks_client(self): - self.compute_networks_client = NetworksClient(self.auth_provider, - **self.compute_params) - - def set_compute_floating_ips_client(self): - self.compute_floating_ips_client = FloatingIPsClient( - self.auth_provider, - **self.compute_params) - - def set_flavors_client(self): - self.flavors_client = FlavorsClient(self.auth_provider, - **self.compute_params) - - def set_image_client(self): - self.image_client = ImagesClient(self.auth_provider, - **self.image_params) - - def set_image_client_v2(self): - self.image_client_v2 = ImagesClientV2(self.auth_provider, - **self.image_params) - - def set_telemetry_client(self): - self.telemetry_client = TelemetryClient(self.auth_provider, - **self.telemetry_params) - - def set_container_client(self): - self.container_client = ContainerClient(self.auth_provider, - **self.object_storage_params) - - def set_object_client(self): - self.object_client = ObjectClient(self.auth_provider, - **self.object_storage_params) diff --git a/ceilometer/tests/unit/agent/__init__.py b/ceilometer/tests/unit/agent/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/agent/agentbase.py b/ceilometer/tests/unit/agent/agentbase.py deleted file mode 100644 index 666cf6ab..00000000 --- a/ceilometer/tests/unit/agent/agentbase.py +++ /dev/null @@ -1,738 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 Intel corp. -# Copyright 2013 eNovance -# Copyright 2014 Red Hat, Inc -# -# Authors: Yunhong Jiang -# Julien Danjou -# Eoghan Glynn -# Nejc Saje -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
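The (field, op, value) query triples used throughout these tests are turned into Ceilometer v2 API query strings by _helper_list above: the triple becomes q.field/q.op/q.value parameters, an optional period is appended, and the result is urlencoded onto the URI. A standalone sketch of that encoding (the function name is illustrative, not part of the removed client):

    from six.moves.urllib import parse as urllib

    def build_list_uri(uri_prefix, meter, query=None, period=None):
        uri = '%s/meters/%s' % (uri_prefix, meter)
        params = {}
        if query:  # e.g. ('resource', 'eq', '<server id>')
            params = {'q.field': query[0],
                      'q.op': query[1],
                      'q.value': query[2]}
        if period:
            params['period'] = period
        if params:
            uri += '?%s' % urllib.urlencode(params)
        return uri

    # build_list_uri('v2', 'cpu_util', ('resource', 'eq', 'abc'))
    # -> 'v2/meters/cpu_util?q.field=resource&q.op=eq&q.value=abc'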
- -import abc -import copy -import datetime - -import mock -from oslo_config import fixture as fixture_config -from oslotest import mockpatch -import six -from stevedore import extension - -from ceilometer.agent import plugin_base -from ceilometer import pipeline -from ceilometer import publisher -from ceilometer.publisher import test as test_publisher -from ceilometer import sample -from ceilometer.tests import base -from ceilometer import utils - - -class TestSample(sample.Sample): - def __init__(self, name, type, unit, volume, user_id, project_id, - resource_id, timestamp=None, resource_metadata=None, - source=None): - super(TestSample, self).__init__(name, type, unit, volume, user_id, - project_id, resource_id, timestamp, - resource_metadata, source) - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -default_test_data = TestSample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'Pollster'}, -) - - -class TestPollster(plugin_base.PollsterBase): - test_data = default_test_data - discovery = None - - @property - def default_discovery(self): - return self.discovery - - def get_samples(self, manager, cache, resources): - resources = resources or [] - self.samples.append((manager, resources)) - self.resources.extend(resources) - c = copy.deepcopy(self.test_data) - c.resource_metadata['resources'] = resources - return [c] - - -class BatchTestPollster(TestPollster): - test_data = default_test_data - discovery = None - - @property - def default_discovery(self): - return self.discovery - - def get_samples(self, manager, cache, resources): - resources = resources or [] - self.samples.append((manager, resources)) - self.resources.extend(resources) - for resource in resources: - c = copy.deepcopy(self.test_data) - c.timestamp = datetime.datetime.utcnow().isoformat() - c.resource_id = resource - c.resource_metadata['resource'] = resource - yield c - - -class TestPollsterException(TestPollster): - def get_samples(self, manager, cache, resources): - resources = resources or [] - self.samples.append((manager, resources)) - self.resources.extend(resources) - raise Exception() - - -class TestDiscovery(plugin_base.DiscoveryBase): - def discover(self, manager, param=None): - self.params.append(param) - return self.resources - - -class TestDiscoveryException(plugin_base.DiscoveryBase): - def discover(self, manager, param=None): - self.params.append(param) - raise Exception() - - -@six.add_metaclass(abc.ABCMeta) -class BaseAgentManagerTestCase(base.BaseTestCase): - - class Pollster(TestPollster): - samples = [] - resources = [] - test_data = default_test_data - - class BatchPollster(BatchTestPollster): - samples = [] - resources = [] - test_data = default_test_data - - class PollsterAnother(TestPollster): - samples = [] - resources = [] - test_data = TestSample( - name='testanother', - type=default_test_data.type, - unit=default_test_data.unit, - volume=default_test_data.volume, - user_id=default_test_data.user_id, - project_id=default_test_data.project_id, - resource_id=default_test_data.resource_id, - timestamp=default_test_data.timestamp, - resource_metadata=default_test_data.resource_metadata) - - class PollsterException(TestPollsterException): - samples = [] - resources = [] - test_data = 
TestSample( - name='testexception', - type=default_test_data.type, - unit=default_test_data.unit, - volume=default_test_data.volume, - user_id=default_test_data.user_id, - project_id=default_test_data.project_id, - resource_id=default_test_data.resource_id, - timestamp=default_test_data.timestamp, - resource_metadata=default_test_data.resource_metadata) - - class PollsterExceptionAnother(TestPollsterException): - samples = [] - resources = [] - test_data = TestSample( - name='testexceptionanother', - type=default_test_data.type, - unit=default_test_data.unit, - volume=default_test_data.volume, - user_id=default_test_data.user_id, - project_id=default_test_data.project_id, - resource_id=default_test_data.resource_id, - timestamp=default_test_data.timestamp, - resource_metadata=default_test_data.resource_metadata) - - class Discovery(TestDiscovery): - params = [] - resources = [] - - class DiscoveryAnother(TestDiscovery): - params = [] - resources = [] - - @property - def group_id(self): - return 'another_group' - - class DiscoveryException(TestDiscoveryException): - params = [] - - def setup_polling(self): - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - - def create_extension_list(self): - return [extension.Extension('test', - None, - None, - self.Pollster(), ), - extension.Extension('testbatch', - None, - None, - self.BatchPollster(), ), - extension.Extension('testanother', - None, - None, - self.PollsterAnother(), ), - extension.Extension('testexception', - None, - None, - self.PollsterException(), ), - extension.Extension('testexceptionanother', - None, - None, - self.PollsterExceptionAnother(), )] - - def create_discovery_manager(self): - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension( - 'testdiscovery', - None, - None, - self.Discovery(), ), - extension.Extension( - 'testdiscoveryanother', - None, - None, - self.DiscoveryAnother(), ), - extension.Extension( - 'testdiscoveryexception', - None, - None, - self.DiscoveryException(), ), - ], - ) - - @abc.abstractmethod - def create_manager(self): - """Return subclass specific manager.""" - - @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) - def setUp(self): - super(BaseAgentManagerTestCase, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override( - 'pipeline_cfg_file', - self.path_get('etc/ceilometer/pipeline.yaml') - ) - self.CONF(args=[]) - self.mgr = self.create_manager() - self.mgr.extensions = self.create_extension_list() - self.mgr.partition_coordinator = mock.MagicMock() - fake_subset = lambda _, x: x - p_coord = self.mgr.partition_coordinator - p_coord.extract_my_subset.side_effect = fake_subset - self.mgr.tg = mock.MagicMock() - self.pipeline_cfg = { - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 60, - 'meters': ['test'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.setup_polling() - self.useFixture(mockpatch.PatchObject( - publisher, 'get_publisher', side_effect=self.get_publisher)) - - @staticmethod - def get_publisher(url, namespace=''): - fake_drivers = {'test://': test_publisher.TestPublisher, - 'new://': test_publisher.TestPublisher, - 'rpc://': test_publisher.TestPublisher} - return fake_drivers[url](url) - - def tearDown(self): - self.Pollster.samples = [] - self.Pollster.discovery = [] - self.PollsterAnother.samples = [] - self.PollsterAnother.discovery = 
[] - self.PollsterException.samples = [] - self.PollsterException.discovery = [] - self.PollsterExceptionAnother.samples = [] - self.PollsterExceptionAnother.discovery = [] - self.Pollster.resources = [] - self.PollsterAnother.resources = [] - self.PollsterException.resources = [] - self.PollsterExceptionAnother.resources = [] - self.Discovery.params = [] - self.DiscoveryAnother.params = [] - self.DiscoveryException.params = [] - self.Discovery.resources = [] - self.DiscoveryAnother.resources = [] - super(BaseAgentManagerTestCase, self).tearDown() - - @mock.patch('ceilometer.pipeline.setup_polling') - def test_start(self, setup_polling): - self.mgr.join_partitioning_groups = mock.MagicMock() - self.mgr.setup_polling_tasks = mock.MagicMock() - self.CONF.set_override('heartbeat', 1.0, group='coordination') - self.mgr.start() - setup_polling.assert_called_once_with() - self.mgr.partition_coordinator.start.assert_called_once_with() - self.mgr.join_partitioning_groups.assert_called_once_with() - self.mgr.setup_polling_tasks.assert_called_once_with() - timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) - self.assertEqual([timer_call], self.mgr.tg.add_timer.call_args_list) - self.mgr.stop() - self.mgr.partition_coordinator.stop.assert_called_once_with() - - @mock.patch('ceilometer.pipeline.setup_polling') - def test_start_with_pipeline_poller(self, setup_polling): - self.mgr.join_partitioning_groups = mock.MagicMock() - self.mgr.setup_polling_tasks = mock.MagicMock() - - self.CONF.set_override('heartbeat', 1.0, group='coordination') - self.CONF.set_override('refresh_pipeline_cfg', True) - self.CONF.set_override('pipeline_polling_interval', 5) - self.mgr.start() - self.addCleanup(self.mgr.stop) - setup_polling.assert_called_once_with() - self.mgr.partition_coordinator.start.assert_called_once_with() - self.mgr.join_partitioning_groups.assert_called_once_with() - self.mgr.setup_polling_tasks.assert_called_once_with() - timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) - pipeline_poller_call = mock.call(5, self.mgr.refresh_pipeline) - self.assertEqual([timer_call, pipeline_poller_call], - self.mgr.tg.add_timer.call_args_list) - - def test_join_partitioning_groups(self): - self.mgr.discovery_manager = self.create_discovery_manager() - self.mgr.join_partitioning_groups() - p_coord = self.mgr.partition_coordinator - static_group_ids = [utils.hash_of_set(p['resources']) - for p in self.pipeline_cfg['sources'] - if p['resources']] - expected = [mock.call(self.mgr.construct_group_id(g)) - for g in ['another_group', 'global'] + static_group_ids] - self.assertEqual(len(expected), len(p_coord.join_group.call_args_list)) - for c in expected: - self.assertIn(c, p_coord.join_group.call_args_list) - - def test_setup_polling_tasks(self): - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - per_task_resources = polling_tasks[60].resources - self.assertEqual(1, len(per_task_resources)) - self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), - set(per_task_resources['test_pipeline-test'].get({}))) - - def test_setup_polling_tasks_multiple_interval(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 10, - 'meters': ['test'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(2, len(polling_tasks)) - self.assertIn(60, 
polling_tasks.keys()) - self.assertIn(10, polling_tasks.keys()) - - def test_setup_polling_tasks_mismatch_counter(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 10, - 'meters': ['test_invalid'], - 'resources': ['invalid://'], - 'sinks': ['test_sink'] - }) - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - self.assertNotIn(10, polling_tasks.keys()) - - def test_setup_polling_task_same_interval(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 60, - 'meters': ['testanother'], - 'resources': ['testanother://'] if self.source_resources else [], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - pollsters = polling_tasks.get(60).pollster_matches - self.assertEqual(2, len(pollsters)) - per_task_resources = polling_tasks[60].resources - self.assertEqual(2, len(per_task_resources)) - key = 'test_pipeline-test' - self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), - set(per_task_resources[key].get({}))) - key = 'test_pipeline_1-testanother' - self.assertEqual(set(self.pipeline_cfg['sources'][1]['resources']), - set(per_task_resources[key].get({}))) - - def test_agent_manager_start(self): - mgr = self.create_manager() - mgr.extensions = self.mgr.extensions - mgr.create_polling_task = mock.MagicMock() - mgr.tg = mock.MagicMock() - mgr.start() - self.addCleanup(mgr.stop) - self.assertTrue(mgr.tg.add_timer.called) - - def test_manager_exception_persistency(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 60, - 'meters': ['testanother'], - 'sinks': ['test_sink'] - }) - self.setup_polling() - - def _verify_discovery_params(self, expected): - self.assertEqual(expected, self.Discovery.params) - self.assertEqual(expected, self.DiscoveryAnother.params) - self.assertEqual(expected, self.DiscoveryException.params) - - def _do_test_per_pollster_discovery(self, discovered_resources, - static_resources): - self.Pollster.discovery = 'testdiscovery' - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = discovered_resources - self.DiscoveryAnother.resources = [d[::-1] - for d in discovered_resources] - if static_resources: - # just so we can test that static + pre_pipeline amalgamated - # override per_pollster - self.pipeline_cfg['sources'][0]['discovery'] = [ - 'testdiscoveryanother', - 'testdiscoverynonexistent', - 'testdiscoveryexception'] - self.pipeline_cfg['sources'][0]['resources'] = static_resources - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - if static_resources: - self.assertEqual(set(static_resources + - self.DiscoveryAnother.resources), - set(self.Pollster.resources)) - else: - self.assertEqual(set(self.Discovery.resources), - set(self.Pollster.resources)) - - # Make sure no duplicated resource from discovery - for x in self.Pollster.resources: - self.assertEqual(1, self.Pollster.resources.count(x)) - - def test_per_pollster_discovery(self): - self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], - []) - - def test_per_pollster_discovery_overridden_by_per_pipeline_discovery(self): - # ensure static+per_source_discovery overrides per_pollster_discovery - self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], - ['static_1', 'static_2']) - - def 
test_per_pollster_discovery_duplicated(self): - self._do_test_per_pollster_discovery(['dup', 'discovered_1', 'dup'], - []) - - def test_per_pollster_discovery_overridden_by_duplicated_static(self): - self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], - ['static_1', 'dup', 'dup']) - - def test_per_pollster_discovery_caching(self): - # ensure single discovery associated with multiple pollsters - # only called once per polling cycle - discovered_resources = ['discovered_1', 'discovered_2'] - self.Pollster.discovery = 'testdiscovery' - self.PollsterAnother.discovery = 'testdiscovery' - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = discovered_resources - self.pipeline_cfg['sources'][0]['meters'].append('testanother') - self.pipeline_cfg['sources'][0]['resources'] = [] - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - self.assertEqual(1, len(self.Discovery.params)) - self.assertEqual(discovered_resources, self.Pollster.resources) - self.assertEqual(discovered_resources, self.PollsterAnother.resources) - - def _do_test_per_pipeline_discovery(self, - discovered_resources, - static_resources): - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = discovered_resources - self.DiscoveryAnother.resources = [d[::-1] - for d in discovered_resources] - self.pipeline_cfg['sources'][0]['discovery'] = [ - 'testdiscovery', 'testdiscoveryanother', - 'testdiscoverynonexistent', 'testdiscoveryexception'] - self.pipeline_cfg['sources'][0]['resources'] = static_resources - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - discovery = self.Discovery.resources + self.DiscoveryAnother.resources - # compare resource lists modulo ordering - self.assertEqual(set(static_resources + discovery), - set(self.Pollster.resources)) - - # Make sure no duplicated resource from discovery - for x in self.Pollster.resources: - self.assertEqual(1, self.Pollster.resources.count(x)) - - def test_per_pipeline_discovery_discovered_only(self): - self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], - []) - - def test_per_pipeline_discovery_static_only(self): - self._do_test_per_pipeline_discovery([], - ['static_1', 'static_2']) - - def test_per_pipeline_discovery_discovered_augmented_by_static(self): - self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], - ['static_1', 'static_2']) - - def test_per_pipeline_discovery_discovered_duplicated_static(self): - self._do_test_per_pipeline_discovery(['discovered_1', 'pud'], - ['dup', 'static_1', 'dup']) - - def test_multiple_pipelines_different_static_resources(self): - # assert that the individual lists of static and discovered resources - # for each pipeline with a common interval are passed to individual - # pollsters matching each pipeline - self.pipeline_cfg['sources'][0]['resources'] = ['test://'] - self.pipeline_cfg['sources'][0]['discovery'] = ['testdiscovery'] - self.pipeline_cfg['sources'].append({ - 'name': 'another_pipeline', - 'interval': 60, - 'meters': ['test'], - 'resources': ['another://'], - 'discovery': ['testdiscoveryanother'], - 'sinks': ['test_sink_new'] - }) - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = ['discovered_1', 'discovered_2'] - self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] - self.setup_polling() - polling_tasks = 
self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - self.mgr.interval_task(polling_tasks.get(60)) - self.assertEqual([None], self.Discovery.params) - self.assertEqual([None], self.DiscoveryAnother.params) - self.assertEqual(2, len(self.Pollster.samples)) - samples = self.Pollster.samples - test_resources = ['test://', 'discovered_1', 'discovered_2'] - another_resources = ['another://', 'discovered_3', 'discovered_4'] - if samples[0][1] == test_resources: - self.assertEqual(another_resources, samples[1][1]) - elif samples[0][1] == another_resources: - self.assertEqual(test_resources, samples[1][1]) - else: - self.fail('unexpected sample resources %s' % samples) - - def test_multiple_sources_different_discoverers(self): - self.Discovery.resources = ['discovered_1', 'discovered_2'] - self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] - sources = [{'name': 'test_source_1', - 'interval': 60, - 'meters': ['test'], - 'discovery': ['testdiscovery'], - 'sinks': ['test_sink_1']}, - {'name': 'test_source_2', - 'interval': 60, - 'meters': ['testanother'], - 'discovery': ['testdiscoveryanother'], - 'sinks': ['test_sink_2']}] - sinks = [{'name': 'test_sink_1', - 'transformers': [], - 'publishers': ['test://']}, - {'name': 'test_sink_2', - 'transformers': [], - 'publishers': ['test://']}] - self.pipeline_cfg = {'sources': sources, 'sinks': sinks} - self.mgr.discovery_manager = self.create_discovery_manager() - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - self.mgr.interval_task(polling_tasks.get(60)) - self.assertEqual(1, len(self.Pollster.samples)) - self.assertEqual(['discovered_1', 'discovered_2'], - self.Pollster.resources) - self.assertEqual(1, len(self.PollsterAnother.samples)) - self.assertEqual(['discovered_3', 'discovered_4'], - self.PollsterAnother.resources) - - def test_multiple_sinks_same_discoverer(self): - self.Discovery.resources = ['discovered_1', 'discovered_2'] - sources = [{'name': 'test_source_1', - 'interval': 60, - 'meters': ['test'], - 'discovery': ['testdiscovery'], - 'sinks': ['test_sink_1', 'test_sink_2']}] - sinks = [{'name': 'test_sink_1', - 'transformers': [], - 'publishers': ['test://']}, - {'name': 'test_sink_2', - 'transformers': [], - 'publishers': ['test://']}] - self.pipeline_cfg = {'sources': sources, 'sinks': sinks} - self.mgr.discovery_manager = self.create_discovery_manager() - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - self.mgr.interval_task(polling_tasks.get(60)) - self.assertEqual(1, len(self.Pollster.samples)) - self.assertEqual(['discovered_1', 'discovered_2'], - self.Pollster.resources) - - def test_discovery_partitioning(self): - self.mgr.discovery_manager = self.create_discovery_manager() - p_coord = self.mgr.partition_coordinator - self.pipeline_cfg['sources'][0]['discovery'] = [ - 'testdiscovery', 'testdiscoveryanother', - 'testdiscoverynonexistent', 'testdiscoveryexception'] - self.pipeline_cfg['sources'][0]['resources'] = [] - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - expected = [mock.call(self.mgr.construct_group_id(d.obj.group_id), - d.obj.resources) - for d in self.mgr.discovery_manager - if hasattr(d.obj, 'resources')] - self.assertEqual(len(expected), - 
len(p_coord.extract_my_subset.call_args_list)) - for c in expected: - self.assertIn(c, p_coord.extract_my_subset.call_args_list) - - def test_static_resources_partitioning(self): - p_coord = self.mgr.partition_coordinator - static_resources = ['static_1', 'static_2'] - static_resources2 = ['static_3', 'static_4'] - self.pipeline_cfg['sources'][0]['resources'] = static_resources - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline2', - 'interval': 60, - 'meters': ['test', 'test2'], - 'resources': static_resources2, - 'sinks': ['test_sink'] - }) - # have one pipeline without static resources defined - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline3', - 'interval': 60, - 'meters': ['test', 'test2'], - 'resources': [], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - # Only two groups need to be created, one for each pipeline, - # even though counter test is used twice - expected = [mock.call(self.mgr.construct_group_id( - utils.hash_of_set(resources)), - resources) - for resources in [static_resources, - static_resources2]] - self.assertEqual(len(expected), - len(p_coord.extract_my_subset.call_args_list)) - for c in expected: - self.assertIn(c, p_coord.extract_my_subset.call_args_list) - - @mock.patch('ceilometer.agent.manager.LOG') - def test_polling_and_notify_with_resources(self, LOG): - self.setup_polling() - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - polling_task.poll_and_notify() - LOG.info.assert_called_with( - 'Polling pollster %(poll)s in the context of %(src)s', - {'poll': 'test', 'src': 'test_pipeline'}) - - @mock.patch('ceilometer.agent.manager.LOG') - def test_skip_polling_and_notify_with_no_resources(self, LOG): - self.pipeline_cfg['sources'][0]['resources'] = [] - self.setup_polling() - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - pollster = list(polling_task.pollster_matches['test_pipeline'])[0] - polling_task.poll_and_notify() - LOG.info.assert_called_with( - 'Skip pollster %(name)s, no %(p_context)sresources found this ' - 'cycle', {'name': pollster.name, 'p_context': ''}) - - @mock.patch('ceilometer.agent.manager.LOG') - def test_skip_polling_polled_resources(self, LOG): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 60, - 'meters': ['test'], - 'resources': ['test://'], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - polling_task.poll_and_notify() - LOG.info.assert_called_with( - 'Skip pollster %(name)s, no %(p_context)sresources found this ' - 'cycle', {'name': 'test', 'p_context': 'new '}) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_polling_samples_timestamp(self, mock_utc): - polled_samples = [] - timestamp = '2222-11-22T00:11:22.333333' - - def fake_send_notification(samples): - polled_samples.extend(samples) - - mock_utc.return_value = datetime.datetime.strptime( - timestamp, "%Y-%m-%dT%H:%M:%S.%f") - - self.setup_polling() - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - polling_task._send_notification = mock.Mock( - side_effect=fake_send_notification) - polling_task.poll_and_notify() - self.assertEqual(timestamp, polled_samples[0]['timestamp']) diff --git a/ceilometer/tests/unit/agent/test_discovery.py b/ceilometer/tests/unit/agent/test_discovery.py deleted file mode 100644 index bf68c26b..00000000 --- a/ceilometer/tests/unit/agent/test_discovery.py +++ 
/dev/null @@ -1,108 +0,0 @@ -# -# Copyright 2014 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/central/manager.py -""" - -import mock -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.agent.discovery import endpoint -from ceilometer.agent.discovery import localnode -from ceilometer.hardware import discovery as hardware - - -class TestEndpointDiscovery(base.BaseTestCase): - - def setUp(self): - super(TestEndpointDiscovery, self).setUp() - self.discovery = endpoint.EndpointDiscovery() - self.manager = mock.MagicMock() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('interface', 'test-endpoint-type', - group='service_credentials') - self.CONF.set_override('region_name', 'test-region-name', - group='service_credentials') - self.catalog = (self.manager.keystone.session.auth.get_access. - return_value.service_catalog) - - def test_keystone_called(self): - self.discovery.discover(self.manager, param='test-service-type') - expected = [mock.call(service_type='test-service-type', - interface='test-endpoint-type', - region_name='test-region-name')] - self.assertEqual(expected, self.catalog.get_urls.call_args_list) - - def test_keystone_called_no_service_type(self): - self.discovery.discover(self.manager) - expected = [mock.call(service_type=None, - interface='test-endpoint-type', - region_name='test-region-name')] - self.assertEqual(expected, - self.catalog.get_urls - .call_args_list) - - def test_keystone_called_no_endpoints(self): - self.catalog.get_urls.return_value = [] - self.assertEqual([], self.discovery.discover(self.manager)) - - -class TestLocalnodeDiscovery(base.BaseTestCase): - def setUp(self): - super(TestLocalnodeDiscovery, self).setUp() - self.discovery = localnode.LocalNodeDiscovery() - self.manager = mock.MagicMock() - - def test_lockalnode_discovery(self): - self.assertEqual(['local_host'], self.discovery.discover(self.manager)) - - -class TestHardwareDiscovery(base.BaseTestCase): - class MockInstance(object): - addresses = {'ctlplane': [ - {'addr': '0.0.0.0', - 'OS-EXT-IPS-MAC:mac_addr': '01-23-45-67-89-ab'} - ]} - id = 'resource_id' - image = {'id': 'image_id'} - flavor = {'id': 'flavor_id'} - - expected = { - 'resource_id': 'resource_id', - 'resource_url': 'snmp://ro_snmp_user:password@0.0.0.0', - 'mac_addr': '01-23-45-67-89-ab', - 'image_id': 'image_id', - 'flavor_id': 'flavor_id', - } - - def setUp(self): - super(TestHardwareDiscovery, self).setUp() - self.discovery = hardware.NodesDiscoveryTripleO() - self.discovery.nova_cli = mock.MagicMock() - self.manager = mock.MagicMock() - - def test_hardware_discovery(self): - self.discovery.nova_cli.instance_get_all.return_value = [ - self.MockInstance()] - resources = self.discovery.discover(self.manager) - self.assertEqual(1, len(resources)) - self.assertEqual(self.expected, resources[0]) - - def test_hardware_discovery_without_flavor(self): - instance = self.MockInstance() - instance.flavor = {} - 
self.discovery.nova_cli.instance_get_all.return_value = [instance] - resources = self.discovery.discover(self.manager) - self.assertEqual(0, len(resources)) diff --git a/ceilometer/tests/unit/agent/test_manager.py b/ceilometer/tests/unit/agent/test_manager.py deleted file mode 100644 index 4a33f55c..00000000 --- a/ceilometer/tests/unit/agent/test_manager.py +++ /dev/null @@ -1,499 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer agent manager""" - -import shutil - -from keystoneclient import exceptions as ks_exceptions -import mock -from novaclient import client as novaclient -from oslo_config import fixture as fixture_config -from oslo_service import service as os_service -from oslo_utils import fileutils -from oslotest import base -from oslotest import mockpatch -import requests -import six -from stevedore import extension -import yaml - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.hardware import discovery -from ceilometer import pipeline -from ceilometer.tests.unit.agent import agentbase - - -class PollingException(Exception): - pass - - -class TestPollsterBuilder(agentbase.TestPollster): - @classmethod - def build_pollsters(cls): - return [('builder1', cls()), ('builder2', cls())] - - -@mock.patch('ceilometer.compute.pollsters.' - 'BaseComputePollster.setup_environment', - mock.Mock(return_value=None)) -class TestManager(base.BaseTestCase): - def setUp(self): - super(TestManager, self).setUp() - self.conf = self.useFixture(fixture_config.Config()).conf - self.conf(args=[]) - - @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) - def test_load_plugins(self): - mgr = manager.AgentManager() - self.assertIsNotNone(list(mgr.extensions)) - - def test_load_plugins_pollster_list(self): - mgr = manager.AgentManager(pollster_list=['disk.*']) - # currently we do have 26 disk-related pollsters - self.assertEqual(26, len(list(mgr.extensions))) - - def test_load_invalid_plugins_pollster_list(self): - # if no valid pollsters have been loaded, the ceilometer - # polling program should exit - self.assertRaisesRegexp( - manager.EmptyPollstersList, - 'No valid pollsters can be loaded with the startup parameters' - ' polling-namespaces and pollster-list.', - manager.AgentManager, - pollster_list=['aaa']) - - def test_load_plugins_no_intersection(self): - # Let's test nothing will be polled if namespace and pollsters - # list have no intersection. - parameters = dict(namespaces=['compute'], - pollster_list=['storage.*']) - self.assertRaisesRegexp( - manager.EmptyPollstersList, - 'No valid pollsters can be loaded with the startup parameters' - ' polling-namespaces and pollster-list.', - manager.AgentManager, - parameters) - - # Test plugin load behavior based on Node Manager pollsters. - # pollster_list is just a filter, so sensor pollsters under 'ipmi' - # namespace would be also instanced. Still need mock __init__ for it. 
- @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(return_value=None)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - def test_load_normal_plugins(self): - mgr = manager.AgentManager(namespaces=['ipmi'], - pollster_list=['hardware.ipmi.node.*']) - # 8 pollsters for Node Manager - self.assertEqual(8, len(mgr.extensions)) - - # Skip loading pollster upon ExtensionLoadError - @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(side_effect=plugin_base.ExtensionLoadError)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - @mock.patch('ceilometer.agent.manager.LOG') - def test_load_failed_plugins(self, LOG): - # Here we additionally check that namespaces will be converted to the - # list if param was not set as a list. - try: - manager.AgentManager(namespaces='ipmi', - pollster_list=['hardware.ipmi.node.*']) - except manager.EmptyPollstersList: - err_msg = 'Skip loading extension for hardware.ipmi.node.%s' - pollster_names = [ - 'power', 'temperature', 'outlet_temperature', - 'airflow', 'cups', 'cpu_util', 'mem_util', 'io_util'] - calls = [mock.call(err_msg % n) for n in pollster_names] - LOG.exception.assert_has_calls(calls=calls, any_order=True) - - # Skip loading pollster upon ImportError - @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(side_effect=ImportError)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - def test_import_error_in_plugin(self): - parameters = dict(namespaces=['ipmi'], - pollster_list=['hardware.ipmi.node.*']) - self.assertRaisesRegexp( - manager.EmptyPollstersList, - 'No valid pollsters can be loaded with the startup parameters' - ' polling-namespaces and pollster-list.', - manager.AgentManager, - parameters) - - # Exceptions other than ExtensionLoadError are propagated - @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(side_effect=PollingException)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - def test_load_exceptional_plugins(self): - self.assertRaises(PollingException, - manager.AgentManager, - ['ipmi'], - ['hardware.ipmi.node.*']) - - def test_load_plugins_pollster_list_forbidden(self): - manager.cfg.CONF.set_override('backend_url', 'http://', - group='coordination') - self.assertRaises(manager.PollsterListForbidden, - manager.AgentManager, - pollster_list=['disk.*']) - manager.cfg.CONF.reset() - - def test_builder(self): - @staticmethod - def fake_get_ext_mgr(namespace): - if 'builder' in namespace: - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension('builder', - None, - TestPollsterBuilder, - None), - ] - ) - else: - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension('test', - None, - None, - agentbase.TestPollster()), - ] - ) - - with mock.patch.object(manager.AgentManager, '_get_ext_mgr', - new=fake_get_ext_mgr): - mgr = manager.AgentManager(namespaces=['central']) - self.assertEqual(3, len(mgr.extensions)) - for ext in mgr.extensions: - self.assertIn(ext.name, ['builder1', 'builder2', 'test']) - self.assertIsInstance(ext.obj, agentbase.TestPollster) - - -class TestPollsterKeystone(agentbase.TestPollster): - def get_samples(self, manager, cache, resources): - # Just try to use keystone, that will raise an exception - manager.keystone.projects.list() - - -class 
TestPollsterPollingException(agentbase.TestPollster): - polling_failures = 0 - - def get_samples(self, manager, cache, resources): - func = super(TestPollsterPollingException, self).get_samples - sample = func(manager=manager, - cache=cache, - resources=resources) - - # Raise polling exception after 2 times - self.polling_failures += 1 - if self.polling_failures > 2: - raise plugin_base.PollsterPermanentError(resources) - - return sample - - -class TestRunTasks(agentbase.BaseAgentManagerTestCase): - - class PollsterKeystone(TestPollsterKeystone): - samples = [] - resources = [] - test_data = agentbase.TestSample( - name='testkeystone', - type=agentbase.default_test_data.type, - unit=agentbase.default_test_data.unit, - volume=agentbase.default_test_data.volume, - user_id=agentbase.default_test_data.user_id, - project_id=agentbase.default_test_data.project_id, - resource_id=agentbase.default_test_data.resource_id, - timestamp=agentbase.default_test_data.timestamp, - resource_metadata=agentbase.default_test_data.resource_metadata) - - class PollsterPollingException(TestPollsterPollingException): - samples = [] - resources = [] - test_data = agentbase.TestSample( - name='testpollingexception', - type=agentbase.default_test_data.type, - unit=agentbase.default_test_data.unit, - volume=agentbase.default_test_data.volume, - user_id=agentbase.default_test_data.user_id, - project_id=agentbase.default_test_data.project_id, - resource_id=agentbase.default_test_data.resource_id, - timestamp=agentbase.default_test_data.timestamp, - resource_metadata=agentbase.default_test_data.resource_metadata) - - @staticmethod - @mock.patch('ceilometer.compute.pollsters.' - 'BaseComputePollster.setup_environment', - mock.Mock(return_value=None)) - def create_manager(): - return manager.AgentManager() - - @staticmethod - def setup_pipeline_file(pipeline): - if six.PY3: - pipeline = pipeline.encode('utf-8') - - pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, - prefix="pipeline", - suffix="yaml") - return pipeline_cfg_file - - def fake_notifier_sample(self, ctxt, event_type, payload): - for m in payload['samples']: - del m['message_signature'] - self.notified_samples.append(m) - - def setUp(self): - self.notified_samples = [] - self.notifier = mock.Mock() - self.notifier.sample.side_effect = self.fake_notifier_sample - self.useFixture(mockpatch.Patch('oslo_messaging.Notifier', - return_value=self.notifier)) - self.source_resources = True - super(TestRunTasks, self).setUp() - self.useFixture(mockpatch.Patch( - 'keystoneclient.v2_0.client.Client', - return_value=mock.Mock())) - - def tearDown(self): - self.PollsterKeystone.samples = [] - self.PollsterKeystone.resources = [] - self.PollsterPollingException.samples = [] - self.PollsterPollingException.resources = [] - super(TestRunTasks, self).tearDown() - - def create_extension_list(self): - exts = super(TestRunTasks, self).create_extension_list() - exts.extend([extension.Extension('testkeystone', - None, - None, - self.PollsterKeystone(), ), - extension.Extension('testpollingexception', - None, - None, - self.PollsterPollingException(), )]) - return exts - - def test_get_sample_resources(self): - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(list(polling_tasks.values())[0]) - self.assertTrue(self.Pollster.resources) - - def test_when_keystone_fail(self): - """Test for bug 1316532.""" - self.useFixture(mockpatch.Patch( - 'keystoneclient.v2_0.client.Client', - side_effect=ks_exceptions.ClientException)) - self.pipeline_cfg = { - 
'sources': [{ - 'name': "test_keystone", - 'interval': 10, - 'meters': ['testkeystone'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(list(polling_tasks.values())[0]) - self.assertFalse(self.PollsterKeystone.samples) - self.assertFalse(self.notified_samples) - - @mock.patch('ceilometer.agent.manager.LOG') - @mock.patch('ceilometer.nova_client.LOG') - def test_hardware_discover_fail_minimize_logs(self, novalog, baselog): - self.useFixture(mockpatch.PatchObject( - novaclient.HTTPClient, - 'authenticate', - side_effect=requests.ConnectionError)) - - class PollsterHardware(agentbase.TestPollster): - discovery = 'tripleo_overcloud_nodes' - - class PollsterHardwareAnother(agentbase.TestPollster): - discovery = 'tripleo_overcloud_nodes' - - self.mgr.extensions.extend([ - extension.Extension('testhardware', - None, - None, - PollsterHardware(), ), - extension.Extension('testhardware2', - None, - None, - PollsterHardwareAnother(), ) - ]) - ext = extension.Extension('tripleo_overcloud_nodes', - None, - None, - discovery.NodesDiscoveryTripleO()) - self.mgr.discovery_manager = (extension.ExtensionManager - .make_test_instance([ext])) - - self.pipeline_cfg = { - 'sources': [{ - 'name': "test_hardware", - 'interval': 10, - 'meters': ['testhardware', 'testhardware2'], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(list(polling_tasks.values())[0]) - self.assertEqual(1, novalog.exception.call_count) - self.assertFalse(baselog.exception.called) - - @mock.patch('ceilometer.agent.manager.LOG') - def test_polling_exception(self, LOG): - source_name = 'test_pollingexception' - self.pipeline_cfg = { - 'sources': [{ - 'name': source_name, - 'interval': 10, - 'meters': ['testpollingexception'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - pollster = list(polling_task.pollster_matches[source_name])[0] - - # 2 samples after 4 pollings, as pollster got disabled upon exception - for x in range(0, 4): - self.mgr.interval_task(polling_task) - samples = self.notified_samples - self.assertEqual(2, len(samples)) - LOG.error.assert_called_once_with(( - 'Prevent pollster %(name)s for ' - 'polling source %(source)s anymore!') - % ({'name': pollster.name, 'source': source_name})) - - def test_batching_polled_samples_false(self): - self.CONF.set_override('batch_polled_samples', False) - self._batching_samples(4, 4) - - def test_batching_polled_samples_true(self): - self.CONF.set_override('batch_polled_samples', True) - self._batching_samples(4, 1) - - def test_batching_polled_samples_default(self): - self._batching_samples(4, 1) - - def _batching_samples(self, expected_samples, call_count): - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 1, - 'meters': ['testbatch'], - 'resources': ['alpha', 'beta', 'gamma', 'delta'], - 'sinks': 
['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - }) - - pipeline_cfg_file = self.setup_pipeline_file(pipeline) - - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - - self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) - self.mgr.start() - self.addCleanup(self.mgr.stop) - # Manually executes callbacks - for timer in self.mgr.pollster_timers: - timer.f(*timer.args, **timer.kw) - - samples = self.notified_samples - self.assertEqual(expected_samples, len(samples)) - self.assertEqual(call_count, self.notifier.sample.call_count) - - def test_start_with_reloadable_pipeline(self): - - self.CONF.set_override('heartbeat', 1.0, group='coordination') - self.CONF.set_override('refresh_pipeline_cfg', True) - self.CONF.set_override('pipeline_polling_interval', 2) - - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 1, - 'meters': ['test'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - }) - - pipeline_cfg_file = self.setup_pipeline_file(pipeline) - - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) - self.mgr.start() - self.addCleanup(self.mgr.stop) - - # we only got the old name of meters - for sample in self.notified_samples: - self.assertEqual('test', sample['counter_name']) - self.assertEqual(1, sample['counter_volume']) - self.assertEqual('test_run_tasks', sample['resource_id']) - - # Modify the collection targets - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 1, - 'meters': ['testanother'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - }) - - updated_pipeline_cfg_file = self.setup_pipeline_file(pipeline) - - # Move/rename the updated pipeline file to the original pipeline - # file path as recorded in oslo config - shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) - - # Flush notified samples to test only new, nothing latent on - # fake message bus. - self.notified_samples = [] - - # we only got the new name of meters - for sample in self.notified_samples: - self.assertEqual('testanother', sample['counter_name']) - self.assertEqual(1, sample['counter_volume']) - self.assertEqual('test_run_tasks', sample['resource_id']) diff --git a/ceilometer/tests/unit/agent/test_plugin.py b/ceilometer/tests/unit/agent/test_plugin.py deleted file mode 100644 index e3a30b34..00000000 --- a/ceilometer/tests/unit/agent/test_plugin.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.agent import plugin_base - - -class NotificationBaseTestCase(base.BaseTestCase): - def setUp(self): - super(NotificationBaseTestCase, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - class FakePlugin(plugin_base.NotificationBase): - event_types = ['compute.*'] - - def process_notification(self, message): - pass - - def get_targets(self, conf): - pass - - def test_plugin_info(self): - plugin = self.FakePlugin(mock.Mock()) - plugin.to_samples_and_publish = mock.Mock() - message = { - 'ctxt': {'user_id': 'fake_user_id', - 'project_id': 'fake_project_id'}, - 'publisher_id': 'fake.publisher_id', - 'event_type': 'fake.event', - 'payload': {'foo': 'bar'}, - 'metadata': {'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8', - 'timestamp': '2015-06-1909:19:35.786893'} - } - plugin.info([message]) - notification = { - 'priority': 'info', - 'event_type': 'fake.event', - 'timestamp': '2015-06-1909:19:35.786893', - '_context_user_id': 'fake_user_id', - '_context_project_id': 'fake_project_id', - 'publisher_id': 'fake.publisher_id', - 'payload': {'foo': 'bar'}, - 'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8' - } - plugin.to_samples_and_publish.assert_called_with(notification) diff --git a/ceilometer/tests/unit/api/test_hooks.py b/ceilometer/tests/unit/api/test_hooks.py deleted file mode 100644 index 96bc023b..00000000 --- a/ceilometer/tests/unit/api/test_hooks.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2015 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import fixture as fixture_config -import oslo_messaging - -from ceilometer.api import hooks -from ceilometer.tests import base - - -class TestTestNotifierHook(base.BaseTestCase): - - def setUp(self): - super(TestTestNotifierHook, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_init_notifier_with_drivers(self): - self.CONF.set_override('telemetry_driver', 'messagingv2', - group='publisher_notifier') - hook = hooks.NotifierHook() - notifier = hook.notifier - self.assertIsInstance(notifier, oslo_messaging.Notifier) - self.assertEqual(['messagingv2'], notifier._driver_names) diff --git a/ceilometer/tests/unit/api/v2/test_complex_query.py b/ceilometer/tests/unit/api/v2/test_complex_query.py deleted file mode 100644 index 363e2112..00000000 --- a/ceilometer/tests/unit/api/v2/test_complex_query.py +++ /dev/null @@ -1,363 +0,0 @@ -# -# Copyright Ericsson AB 2013. All rights reserved -# -# Authors: Ildiko Vancsa -# Balazs Gibizer -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test the methods related to complex query.""" -import datetime - -import fixtures -import jsonschema -import mock -from oslotest import base -import wsme - -from ceilometer.api.controllers.v2 import query -from ceilometer.storage import models - - -class FakeComplexQuery(query.ValidatedComplexQuery): - def __init__(self, db_model, additional_name_mapping=None, metadata=False): - super(FakeComplexQuery, self).__init__(query=None, - db_model=db_model, - additional_name_mapping=( - additional_name_mapping or - {}), - metadata_allowed=metadata) - - -sample_name_mapping = {"resource": "resource_id", - "meter": "counter_name", - "type": "counter_type", - "unit": "counter_unit", - "volume": "counter_volume"} - - -class TestComplexQuery(base.BaseTestCase): - def setUp(self): - super(TestComplexQuery, self).setUp() - self.useFixture(fixtures.MonkeyPatch( - 'pecan.response', mock.MagicMock())) - self.query = FakeComplexQuery(models.Sample, - sample_name_mapping, - True) - - def test_replace_isotime_utc(self): - filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["="]["timestamp"]) - - def test_replace_isotime_timezone_removed(self): - filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29), - filter_expr["="]["timestamp"]) - - def test_replace_isotime_wrong_syntax(self): - filter_expr = {"=": {"timestamp": "not a valid isotime string"}} - self.assertRaises(wsme.exc.ClientSideError, - self.query._replace_isotime_with_datetime, - filter_expr) - - def test_replace_isotime_in_complex_filter(self): - filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, - {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["and"][0]["="]["timestamp"]) - self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), - filter_expr["and"][1]["="]["timestamp"]) - - def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self): - subfilter = {"and": [{"=": {"project_id": 42}}, - {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} - - filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, - subfilter]} - - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["or"][0]["="]["timestamp"]) - self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), - filter_expr["or"][1]["and"][1]["="]["timestamp"]) - - def test_convert_operator_to_lower_case(self): - filter_expr = {"AND": [{"=": {"project_id": 42}}, - {"=": {"project_id": 44}}]} - self.query._convert_operator_to_lower_case(filter_expr) - self.assertEqual("and", list(filter_expr.keys())[0]) - - filter_expr = {"Or": [{"=": {"project_id": 43}}, - {"anD": [{"=": {"project_id": 44}}, - {"=": {"project_id": 42}}]}]} - self.query._convert_operator_to_lower_case(filter_expr) - 
self.assertEqual("or", list(filter_expr.keys())[0]) - self.assertEqual("and", list(filter_expr["or"][1].keys())[0]) - - def test_invalid_filter_misstyped_field_name_samples(self): - filter = {"=": {"project_id11": 42}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_invalid_complex_filter_wrong_field_names(self): - filter = {"and": - [{"=": {"non_existing_field": 42}}, - {"=": {"project_id": 42}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"or": - [{"=": {"non_existing_field": 42}}, - {"and": - [{"=": {"project_id": 44}}, - {"=": {"project_id": 42}}]}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_convert_orderby(self): - orderby = [] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([], orderby) - - orderby = [{"project_id": "DESC"}] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([{"project_id": "desc"}], orderby) - - orderby = [{"project_id": "ASC"}, {"resource_id": "DESC"}] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}], - orderby) - - def test_validate_orderby_empty_direction(self): - orderby = [{"project_id": ""}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - orderby = [{"project_id": "asc"}, {"resource_id": ""}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_order_string(self): - orderby = [{"project_id": "not a valid order"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_multiple_item_order_string(self): - orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_empty_field_name(self): - orderby = [{"": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - orderby = [{"project_id": "asc"}, {"": "desc"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_field_name(self): - orderby = [{"project_id11": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_field_name_multiple_item_orderby(self): - orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_metadata_is_not_allowed(self): - orderby = [{"metadata.display_name": "asc"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - -class TestFilterSyntaxValidation(base.BaseTestCase): - def setUp(self): - super(TestFilterSyntaxValidation, self).setUp() - self.query = FakeComplexQuery(models.Sample, - sample_name_mapping, - True) - - def test_simple_operator(self): - filter = {"=": {"project_id": "string_value"}} - self.query._validate_filter(filter) - - filter = {"=>": {"project_id": "string_value"}} - self.query._validate_filter(filter) - - def test_valid_value_types(self): - filter = {"=": {"project_id": "string_value"}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": 42}} - self.query._validate_filter(filter) - - filter = 
{"=": {"project_id": 3.14}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": True}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": False}} - self.query._validate_filter(filter) - - def test_invalid_simple_operator(self): - filter = {"==": {"project_id": "string_value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"": {"project_id": "string_value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_more_than_one_operator_is_invalid(self): - filter = {"=": {"project_id": "string_value"}, - "<": {"": ""}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_empty_expression_is_invalid(self): - filter = {} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_invalid_field_name(self): - filter = {"=": {"": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"=": {" ": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"=": {"\t": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_more_than_one_field_is_invalid(self): - filter = {"=": {"project_id": "value", "resource_id": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_missing_field_after_simple_op_is_invalid(self): - filter = {"=": {}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_and_or(self): - filter = {"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]} - self.query._validate_filter(filter) - - filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]}, - {"=": {"counter_name": "value"}}]} - self.query._validate_filter(filter) - - filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}, - {"<": {"counter_name": 42}}]}, - {"=": {"counter_name": "value"}}]} - self.query._validate_filter(filter) - - def test_complex_operator_with_in(self): - filter = {"and": [{"<": {"counter_volume": 42}}, - {">=": {"counter_volume": 36}}, - {"in": {"project_id": ["project_id1", - "project_id2", - "project_id3"]}}]} - self.query._validate_filter(filter) - - def test_invalid_complex_operator(self): - filter = {"xor": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_and_or_with_one_child_is_invalid(self): - filter = {"or": [{"=": {"project_id": "string_value"}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_complex_operator_with_zero_child_is_invalid(self): - filter = {"or": []} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_more_than_one_complex_operator_is_invalid(self): - filter = {"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}], - "or": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_not(self): - filter = {"not": {"=": {"project_id": "value"}}} - self.query._validate_filter(filter) - - filter = { 
- "not": - {"or": - [{"and": - [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}, - {"<": {"counter_name": 42}}]}, - {"=": {"counter_name": "value"}}]}} - self.query._validate_filter(filter) - - def test_not_with_zero_child_is_invalid(self): - filter = {"not": {}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_not_with_more_than_one_child_is_invalid(self): - filter = {"not": {"=": {"project_id": "value"}, - "!=": {"resource_id": "value"}}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_empty_in_query_not_passing(self): - filter = {"in": {"resource_id": []}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) diff --git a/ceilometer/tests/unit/api/v2/test_query.py b/ceilometer/tests/unit/api/v2/test_query.py index 5ed3d12c..0e57aaab 100644 --- a/ceilometer/tests/unit/api/v2/test_query.py +++ b/ceilometer/tests/unit/api/v2/test_query.py @@ -18,18 +18,12 @@ import datetime import fixtures import mock -from oslo_utils import timeutils from oslotest import base from oslotest import mockpatch import wsme from ceilometer.api.controllers.v2 import base as v2_base from ceilometer.api.controllers.v2 import events -from ceilometer.api.controllers.v2 import meters -from ceilometer.api.controllers.v2 import utils -from ceilometer import storage -from ceilometer.storage import base as storage_base -from ceilometer.tests import base as tests_base class TestQuery(base.BaseTestCase): @@ -166,237 +160,3 @@ class TestQuery(base.BaseTestCase): type='string') self.assertRaises(v2_base.ClientSideError, events._event_query_to_event_filter, [query]) - - -class TestValidateGroupByFields(base.BaseTestCase): - - def test_valid_field(self): - result = meters._validate_groupby_fields(['user_id']) - self.assertEqual(['user_id'], result) - - def test_valid_fields_multiple(self): - result = set(meters._validate_groupby_fields( - ['user_id', 'project_id', 'source'])) - self.assertEqual(set(['user_id', 'project_id', 'source']), result) - - def test_invalid_field(self): - self.assertRaises(wsme.exc.UnknownArgument, - meters._validate_groupby_fields, - ['wtf']) - - def test_invalid_field_multiple(self): - self.assertRaises(wsme.exc.UnknownArgument, - meters._validate_groupby_fields, - ['user_id', 'wtf', 'project_id', 'source']) - - def test_duplicate_fields(self): - result = set( - meters._validate_groupby_fields(['user_id', 'source', 'user_id']) - ) - self.assertEqual(set(['user_id', 'source']), result) - - -class TestQueryToKwArgs(tests_base.BaseTestCase): - def setUp(self): - super(TestQueryToKwArgs, self).setUp() - self.useFixture(mockpatch.PatchObject( - utils, 'sanitize_query', side_effect=lambda x, y, **z: x)) - self.useFixture(mockpatch.PatchObject( - utils, '_verify_query_segregation', side_effect=lambda x, **z: x)) - - def test_sample_filter_single(self): - q = [v2_base.Query(field='user_id', - op='eq', - value='uid')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertIn('user', kwargs) - self.assertEqual(1, len(kwargs)) - self.assertEqual('uid', kwargs['user']) - - def test_sample_filter_multi(self): - q = [v2_base.Query(field='user_id', - op='eq', - value='uid'), - v2_base.Query(field='project_id', - op='eq', - value='pid'), - v2_base.Query(field='resource_id', - op='eq', - value='rid'), - v2_base.Query(field='source', - op='eq', - value='source_name'), - v2_base.Query(field='meter', - op='eq', - 
value='meter_name')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(5, len(kwargs)) - self.assertEqual('uid', kwargs['user']) - self.assertEqual('pid', kwargs['project']) - self.assertEqual('rid', kwargs['resource']) - self.assertEqual('source_name', kwargs['source']) - self.assertEqual('meter_name', kwargs['meter']) - - def test_sample_filter_timestamp(self): - ts_start = timeutils.utcnow() - ts_end = ts_start + datetime.timedelta(minutes=5) - q = [v2_base.Query(field='timestamp', - op='lt', - value=str(ts_end)), - v2_base.Query(field='timestamp', - op='gt', - value=str(ts_start))] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(4, len(kwargs)) - self.assertTimestampEqual(kwargs['start_timestamp'], ts_start) - self.assertTimestampEqual(kwargs['end_timestamp'], ts_end) - self.assertEqual('gt', kwargs['start_timestamp_op']) - self.assertEqual('lt', kwargs['end_timestamp_op']) - - def test_sample_filter_meta(self): - q = [v2_base.Query(field='metadata.size', - op='eq', - value='20'), - v2_base.Query(field='resource_metadata.id', - op='eq', - value='meta_id')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(1, len(kwargs)) - self.assertEqual(2, len(kwargs['metaquery'])) - self.assertEqual(20, kwargs['metaquery']['metadata.size']) - self.assertEqual('meta_id', kwargs['metaquery']['metadata.id']) - - def test_sample_filter_non_equality_on_metadata(self): - queries = [v2_base.Query(field='resource_metadata.image_id', - op='gt', - value='image', - type='string'), - v2_base.Query(field='metadata.ramdisk_id', - op='le', - value='ramdisk', - type='string')] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, - queries, - storage.SampleFilter.__init__) - - def test_sample_filter_invalid_field(self): - q = [v2_base.Query(field='invalid', - op='eq', - value='20')] - self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_invalid_op(self): - q = [v2_base.Query(field='user_id', - op='lt', - value='20')] - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_timestamp_invalid_op(self): - ts_start = timeutils.utcnow() - q = [v2_base.Query(field='timestamp', - op='eq', - value=str(ts_start))] - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_exclude_internal(self): - queries = [v2_base.Query(field=f, - op='eq', - value='fake', - type='string') - for f in ['y', 'on_behalf_of', 'x']] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - self.assertRaises(wsme.exc.ClientSideError, - utils.query_to_kwargs, - queries, - storage.SampleFilter.__init__, - internal_keys=['on_behalf_of']) - - def test_sample_filter_self_always_excluded(self): - queries = [v2_base.Query(field='user_id', - op='eq', - value='20')] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - kwargs = utils.query_to_kwargs(queries, - storage.SampleFilter.__init__) - self.assertNotIn('self', kwargs) - - def test_sample_filter_translation(self): - queries = [v2_base.Query(field=f, - op='eq', - value='fake_%s' % f, - type='string') for f in ['user_id', - 'project_id', - 
'resource_id']] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - kwargs = utils.query_to_kwargs(queries, - storage.SampleFilter.__init__) - for o in ['user', 'project', 'resource']: - self.assertEqual('fake_%s_id' % o, kwargs.get(o)) - - def test_timestamp_validation(self): - q = [v2_base.Query(field='timestamp', - op='le', - value='123')] - - exc = self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - expected_exc = wsme.exc.InvalidInput('timestamp', '123', - 'invalid timestamp format') - self.assertEqual(str(expected_exc), str(exc)) - - def test_sample_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - valid_keys = ['message_id', 'meter', 'project', 'resource', - 'search_offset', 'source', 'timestamp', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) - - def test_get_meters_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, - q, storage_base.Connection.get_meters, ['limit', 'unique']) - valid_keys = ['project', 'resource', 'source', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) - - def test_get_resources_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, - q, storage_base.Connection.get_resources, ['limit']) - valid_keys = ['project', 'resource', - 'search_offset', 'source', 'timestamp', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) diff --git a/ceilometer/tests/unit/api/v2/test_statistics.py b/ceilometer/tests/unit/api/v2/test_statistics.py deleted file mode 100644 index d5198540..00000000 --- a/ceilometer/tests/unit/api/v2/test_statistics.py +++ /dev/null @@ -1,105 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test statistics objects.""" - -import datetime - -from oslotest import base - -from ceilometer.api.controllers.v2 import meters - - -class TestStatisticsDuration(base.BaseTestCase): - - def setUp(self): - super(TestStatisticsDuration, self).setUp() - - # Create events relative to the range and pretend - # that the intervening events exist. 
- - self.early1 = datetime.datetime(2012, 8, 27, 7, 0) - self.early2 = datetime.datetime(2012, 8, 27, 17, 0) - - self.start = datetime.datetime(2012, 8, 28, 0, 0) - - self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) - self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) - - self.end = datetime.datetime(2012, 8, 28, 23, 59) - - self.late1 = datetime.datetime(2012, 8, 29, 9, 0) - self.late2 = datetime.datetime(2012, 8, 29, 19, 0) - - def test_nulls(self): - s = meters.Statistics(duration_start=None, - duration_end=None, - start_timestamp=None, - end_timestamp=None) - self.assertIsNone(s.duration_start) - self.assertIsNone(s.duration_end) - self.assertIsNone(s.duration) - - def test_overlap_range_start(self): - s = meters.Statistics(duration_start=self.early1, - duration_end=self.middle1, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertEqual(self.start, s.duration_start) - self.assertEqual(self.middle1, s.duration_end) - self.assertEqual(8 * 60 * 60, s.duration) - - def test_within_range(self): - s = meters.Statistics(duration_start=self.middle1, - duration_end=self.middle2, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertEqual(self.middle1, s.duration_start) - self.assertEqual(self.middle2, s.duration_end) - self.assertEqual(10 * 60 * 60, s.duration) - - def test_within_range_zero_duration(self): - s = meters.Statistics(duration_start=self.middle1, - duration_end=self.middle1, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertEqual(self.middle1, s.duration_start) - self.assertEqual(self.middle1, s.duration_end) - self.assertEqual(0, s.duration) - - def test_overlap_range_end(self): - s = meters.Statistics(duration_start=self.middle2, - duration_end=self.late1, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertEqual(self.middle2, s.duration_start) - self.assertEqual(self.end, s.duration_end) - self.assertEqual(((6 * 60) - 1) * 60, s.duration) - - def test_after_range(self): - s = meters.Statistics(duration_start=self.late1, - duration_end=self.late2, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertIsNone(s.duration_start) - self.assertIsNone(s.duration_end) - self.assertIsNone(s.duration) - - def test_without_timestamp(self): - s = meters.Statistics(duration_start=self.late1, - duration_end=self.late2, - start_timestamp=None, - end_timestamp=None) - self.assertEqual(self.late1, s.duration_start) - self.assertEqual(self.late2, s.duration_end) diff --git a/ceilometer/tests/unit/compute/__init__.py b/ceilometer/tests/unit/compute/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/notifications/__init__.py b/ceilometer/tests/unit/compute/notifications/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/notifications/test_instance.py b/ceilometer/tests/unit/compute/notifications/test_instance.py deleted file mode 100644 index fdd8e512..00000000 --- a/ceilometer/tests/unit/compute/notifications/test_instance.py +++ /dev/null @@ -1,608 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for converters for producing compute counter messages from -notification events. -""" -from oslotest import base - -from ceilometer.compute.notifications import instance -from ceilometer import sample - - -INSTANCE_CREATE_END = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:23:41.425105', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'compute.instance.create.end', - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'payload': {u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'fixed_ips': [{u'address': u'10.0.0.2', - u'floating_ips': [], - u'meta': {}, - u'type': u'fixed', - u'version': 4}], - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47.985999', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -INSTANCE_DELETE_START = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:24:14.547374', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'compute.instance.delete.start', - u'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', - u'payload': {u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'deleting', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': 
u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:24:14.824743', -} - -INSTANCE_EXISTS = { - u'_context_auth_token': None, - u'_context_is_admin': True, - u'_context_project_id': None, - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': None, - u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T16:03:43.760204', - u'_context_user_id': None, - u'event_type': u'compute.instance.exists', - u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', - u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', - u'audit_period_ending': u'2012-05-08 16:00:00', - u'bandwidth': {}, - u'created_at': u'2012-05-07 22:16:18', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-07 23:01:27', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 16:03:44.122481', -} - -INSTANCE_EXISTS_METADATA_LIST = { - u'_context_auth_token': None, - u'_context_is_admin': True, - u'_context_project_id': None, - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': None, - u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T16:03:43.760204', - u'_context_user_id': None, - u'event_type': u'compute.instance.exists', - u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', - u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', - u'audit_period_ending': u'2012-05-08 16:00:00', - u'bandwidth': {}, - u'created_at': u'2012-05-07 22:16:18', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-07 23:01:27', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'metadata': [], - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - 
u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 16:03:44.122481', -} - - -INSTANCE_FINISH_RESIZE_END = { - u'_context_roles': [u'admin'], - u'_context_request_id': u'req-e3f71bb9-e9b9-418b-a9db-a5950c851b25', - u'_context_quota_class': None, - u'event_type': u'compute.instance.finish_resize.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2013-01-04 15:10:17.436974', - u'_context_is_admin': True, - u'message_id': u'a2f7770d-b85d-4797-ab10-41407a44368e', - u'_context_auth_token': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', - u'_context_timestamp': u'2013-01-04T15:08:39.162612', - u'_context_read_deleted': u'no', - u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'_context_remote_address': u'10.147.132.184', - u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', - u'payload': {u'state_description': u'', - u'availability_zone': None, - u'ephemeral_gb': 0, - u'instance_type_id': 5, - u'deleted_at': u'', - u'fixed_ips': [{u'floating_ips': [], - u'label': u'private', - u'version': 4, - u'meta': {}, - u'address': u'10.0.0.3', - u'type': u'fixed'}], - u'memory_mb': 2048, - u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'reservation_id': u'r-u3fvim06', - u'hostname': u's1', - u'state': u'resized', - u'launched_at': u'2013-01-04T15:10:14.923939', - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_Awste7', - u'metering.foo.bar': u'true'}, - u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'access_ip_v6': None, - u'disk_gb': 20, - u'access_ip_v4': None, - u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'host': u'ip-10-147-132-184.ec2.internal', - u'display_name': u's1', - u'image_ref_url': u'http://10.147.132.184:9292/images/' - 'a130b9d9-e00e-436e-9782-836ccef06e8a', - u'root_gb': 20, - u'tenant_id': u'cea4b25edb484e5392727181b7721d29', - u'created_at': u'2013-01-04T11:21:48.000000', - u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', - u'instance_type': u'm1.small', - u'vcpus': 1, - u'image_meta': {u'kernel_id': - u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'ramdisk_id': - u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'base_image_ref': - u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, - u'architecture': None, - u'os_type': None - }, - u'priority': u'INFO' -} - -INSTANCE_RESIZE_REVERT_END = { - u'_context_roles': [u'admin'], - u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a', - u'_context_quota_class': None, - u'event_type': u'compute.instance.resize.revert.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2013-01-04 15:20:32.009532', - u'_context_is_admin': True, - u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a', - u'_context_auth_token': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', - u'_context_timestamp': u'2013-01-04T15:19:51.018218', - u'_context_read_deleted': u'no', - u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'_context_remote_address': u'10.147.132.184', - u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', - u'payload': 
{u'state_description': u'resize_reverting', - u'availability_zone': None, - u'ephemeral_gb': 0, - u'instance_type_id': 2, - u'deleted_at': u'', - u'reservation_id': u'r-u3fvim06', - u'memory_mb': 512, - u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'hostname': u's1', - u'state': u'resized', - u'launched_at': u'2013-01-04T15:10:14.000000', - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_A-wste7', - u'metering.foo.bar': u'true'}, - u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'access_ip_v6': None, - u'disk_gb': 0, - u'access_ip_v4': None, - u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'host': u'ip-10-147-132-184.ec2.internal', - u'display_name': u's1', - u'image_ref_url': u'http://10.147.132.184:9292/images/' - 'a130b9d9-e00e-436e-9782-836ccef06e8a', - u'root_gb': 0, - u'tenant_id': u'cea4b25edb484e5392727181b7721d29', - u'created_at': u'2013-01-04T11:21:48.000000', - u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', - u'instance_type': u'm1.tiny', - u'vcpus': 1, - u'image_meta': {u'kernel_id': - u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'ramdisk_id': - u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'base_image_ref': - u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, - u'architecture': None, - u'os_type': None - }, - u'priority': u'INFO' -} - -INSTANCE_SCHEDULED = { - u'_context_request_id': u'req-f28a836a-32bf-4cc3-940a-3515878c181f', - u'_context_quota_class': None, - u'event_type': u'scheduler.run_instance.scheduled', - u'_context_service_catalog': [{ - u'endpoints': [{ - u'adminURL': - u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', - u'region': u'RegionOne', - u'internalURL': - u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', - u'id': u'30cb904fdc294eea9b225e06b2d0d4eb', - u'publicURL': - u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb'}], - u'endpoints_links': [], - u'type': u'volume', - u'name': u'cinder'}], - u'_context_auth_token': u'TOK', - u'_context_user_id': u'0a757cd896b64b65ba3784afef564116', - u'payload': { - 'instance_id': 'fake-uuid1-1', - u'weighted_host': {u'host': u'eglynn-f19-devstack3', u'weight': 1.0}, - u'request_spec': { - u'num_instances': 1, - u'block_device_mapping': [{ - u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', - u'guest_format': None, - u'boot_index': 0, - u'no_device': None, - u'connection_info': None, - u'volume_id': None, - u'volume_size': None, - u'device_name': None, - u'disk_bus': None, - u'image_id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', - u'source_type': u'image', - u'device_type': u'disk', - u'snapshot_id': None, - u'destination_type': u'local', - u'delete_on_termination': True}], - u'image': { - u'status': u'active', - u'name': u'cirros-0.3.1-x86_64-uec', - u'deleted': False, - u'container_format': u'ami', - u'created_at': u'2014-02-18T13:16:26.000000', - u'disk_format': u'ami', - u'updated_at': u'2014-02-18T13:16:27.000000', - u'properties': { - u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', - u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275'}, - u'min_disk': 0, - u'min_ram': 0, - u'checksum': u'f8a2eeee2dc65b3d9b6e63678955bd83', - u'owner': u'2bd766a095b44486bf07cf7f666997eb', - u'is_public': True, - u'deleted_at': None, - u'id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', - u'size': 25165824}, - u'instance_type': { - u'root_gb': 1, - u'name': u'm1.tiny', - u'ephemeral_gb': 0, - u'memory_mb': 512, - u'vcpus': 1, - u'extra_specs': {}, - u'swap': 0, - u'rxtx_factor': 1.0, - u'flavorid': u'1', - 
u'vcpu_weight': None, - u'id': 2}, - u'instance_properties': { - u'vm_state': u'building', - u'availability_zone': None, - u'terminated_at': None, - u'ephemeral_gb': 0, - u'instance_type_id': 2, - u'user_data': None, - u'cleaned': False, - u'vm_mode': None, - u'deleted_at': None, - u'reservation_id': u'r-ven5q6om', - u'id': 15, - u'security_groups': [{ - u'deleted_at': None, - u'user_id': u'0a757cd896b64b65ba3784afef564116', - u'description': u'default', - u'deleted': False, - u'created_at': u'2014-02-19T11:02:31.000000', - u'updated_at': None, - u'project_id': u'2bd766a095b44486bf07cf7f666997eb', - u'id': 1, - u'name': u'default'}], - u'disable_terminate': False, - u'root_device_name': None, - u'display_name': u'new', - u'uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', - u'default_swap_device': None, - u'info_cache': { - u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', - u'deleted': False, - u'created_at': u'2014-03-05T12:44:00.000000', - u'updated_at': None, - u'network_info': [], - u'deleted_at': None}, - u'hostname': u'new', - u'launched_on': None, - u'display_description': u'new', - u'key_data': None, - u'deleted': False, - u'config_drive': u'', - u'power_state': 0, - u'default_ephemeral_device': None, - u'progress': 0, - u'project_id': u'2bd766a095b44486bf07cf7f666997eb', - u'launched_at': None, - u'scheduled_at': None, - u'node': None, - u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275', - u'access_ip_v6': None, - u'access_ip_v4': None, - u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', - u'key_name': None, - u'updated_at': None, - u'host': None, - u'root_gb': 1, - u'user_id': u'0a757cd896b64b65ba3784afef564116', - u'system_metadata': { - u'image_kernel_id': - u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', - u'image_min_disk': u'1', - u'instance_type_memory_mb': u'512', - u'instance_type_swap': u'0', - u'instance_type_vcpu_weight': None, - u'instance_type_root_gb': u'1', - u'instance_type_name': u'm1.tiny', - u'image_ramdisk_id': - u'4999726c-545c-4a9e-bfc0-917459784275', - u'instance_type_id': u'2', - u'instance_type_ephemeral_gb': u'0', - u'instance_type_rxtx_factor': u'1.0', - u'instance_type_flavorid': u'1', - u'instance_type_vcpus': u'1', - u'image_container_format': u'ami', - u'image_min_ram': u'0', - u'image_disk_format': u'ami', - u'image_base_image_ref': - u'0560ac3f-3bcd-434d-b012-8dd7a212b73b'}, - u'task_state': u'scheduling', - u'shutdown_terminate': False, - u'cell_name': None, - u'ephemeral_key_uuid': None, - u'locked': False, - u'name': u'instance-0000000f', - u'created_at': u'2014-03-05T12:44:00.000000', - u'locked_by': None, - u'launch_index': 0, - u'memory_mb': 512, - u'vcpus': 1, - u'image_ref': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', - u'architecture': None, - u'auto_disk_config': False, - u'os_type': None, - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_Awste7', - u'metering.foo.bar': u'true'}}, - u'security_group': [u'default'], - u'instance_uuids': [u'9206baae-c3b6-41bc-96f2-2c0726ff51c8']}}, - u'priority': u'INFO', - u'_context_is_admin': True, - u'_context_timestamp': u'2014-03-05T12:44:00.135674', - u'publisher_id': u'scheduler.eglynn-f19-devstack3', - u'message_id': u'd6c1ae63-a26b-47c7-8397-8794216e09dd', - u'_context_remote_address': u'172.16.12.21', - u'_context_roles': [u'_member_', u'admin'], - u'timestamp': u'2014-03-05 12:44:00.733758', - u'_context_user': u'0a757cd896b64b65ba3784afef564116', - u'_unique_id': u'2af47cbdde604ff794bb046f3f9db1e2', - u'_context_project_name': u'admin', - 
u'_context_read_deleted': u'no', - u'_context_tenant': u'2bd766a095b44486bf07cf7f666997eb', - u'_context_instance_lock_checked': False, - u'_context_project_id': u'2bd766a095b44486bf07cf7f666997eb', - u'_context_user_name': u'admin' -} - - -class TestNotifications(base.BaseTestCase): - - def test_process_notification(self): - info = list(instance.Instance(None).process_notification( - INSTANCE_CREATE_END - ))[0] - for name, actual, expected in [ - ('counter_name', info.name, 'instance'), - ('counter_type', info.type, sample.TYPE_GAUGE), - ('counter_volume', info.volume, 1), - ('timestamp', info.timestamp, - INSTANCE_CREATE_END['timestamp']), - ('resource_id', info.resource_id, - INSTANCE_CREATE_END['payload']['instance_id']), - ('instance_type_id', - info.resource_metadata['instance_type_id'], - INSTANCE_CREATE_END['payload']['instance_type_id']), - ('host', info.resource_metadata['host'], - INSTANCE_CREATE_END['publisher_id']), - ]: - self.assertEqual(expected, actual, name) - - @staticmethod - def _find_counter(counters, name): - return filter(lambda counter: counter.name == name, counters)[0] - - def _verify_user_metadata(self, metadata): - self.assertIn('user_metadata', metadata) - user_meta = metadata['user_metadata'] - self.assertEqual('Group_A', user_meta.get('server_group')) - self.assertNotIn('AutoScalingGroupName', user_meta) - self.assertIn('foo_bar', user_meta) - self.assertNotIn('foo.bar', user_meta) - - def test_instance_create_instance(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_CREATE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(1, c.volume) - - def test_instance_exists_instance(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_EXISTS)) - self.assertEqual(1, len(counters)) - - def test_instance_exists_metadata_list(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_EXISTS_METADATA_LIST)) - self.assertEqual(1, len(counters)) - - def test_instance_delete_instance(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_DELETE_START)) - self.assertEqual(1, len(counters)) - - def test_instance_finish_resize_instance(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(1, c.volume) - self._verify_user_metadata(c.resource_metadata) - - def test_instance_resize_finish_instance(self): - ic = instance.Instance(None) - counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END)) - self.assertEqual(1, len(counters)) - c = counters[0] - self.assertEqual(1, c.volume) - self._verify_user_metadata(c.resource_metadata) - - def test_instance_scheduled(self): - ic = instance.InstanceScheduled(None) - - self.assertIn(INSTANCE_SCHEDULED['event_type'], - ic.event_types) - - counters = list(ic.process_notification(INSTANCE_SCHEDULED)) - self.assertEqual(1, len(counters)) - names = [c.name for c in counters] - self.assertEqual(['instance.scheduled'], names) - rid = [c.resource_id for c in counters] - self.assertEqual(['fake-uuid1-1'], rid) diff --git a/ceilometer/tests/unit/compute/pollsters/__init__.py b/ceilometer/tests/unit/compute/pollsters/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/pollsters/base.py b/ceilometer/tests/unit/compute/pollsters/base.py deleted file mode 100644 index 95fd86b2..00000000 --- 
a/ceilometer/tests/unit/compute/pollsters/base.py +++ /dev/null @@ -1,56 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import mockpatch - -import ceilometer.tests.base as base - - -class TestPollsterBase(base.BaseTestCase): - - def setUp(self): - super(TestPollsterBase, self).setUp() - - self.inspector = mock.Mock() - self.instance = mock.MagicMock() - self.instance.name = 'instance-00000001' - setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name', - self.instance.name) - setattr(self.instance, 'OS-EXT-STS:vm_state', - 'active') - self.instance.id = 1 - self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, - 'ram': 512, 'disk': 20, 'ephemeral': 0} - self.instance.status = 'active' - self.instance.metadata = { - 'fqdn': 'vm_fqdn', - 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', - 'project_cos': 'dev'} - - patch_virt = mockpatch.Patch( - 'ceilometer.compute.virt.inspector.get_hypervisor_inspector', - new=mock.Mock(return_value=self.inspector)) - self.useFixture(patch_virt) - - # as we're having lazy hypervisor inspector singleton object in the - # base compute pollster class, that leads to the fact that we - # need to mock all this class property to avoid context sharing between - # the tests - patch_inspector = mockpatch.Patch( - 'ceilometer.compute.pollsters.BaseComputePollster.inspector', - self.inspector) - self.useFixture(patch_inspector) diff --git a/ceilometer/tests/unit/compute/pollsters/test_cpu.py b/ceilometer/tests/unit/compute/pollsters/test_cpu.py deleted file mode 100644 index bfc3f729..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_cpu.py +++ /dev/null @@ -1,108 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import time - -import mock - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import cpu -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.tests.unit.compute.pollsters import base - - -class TestCPUPollster(base.TestPollsterBase): - - def setUp(self): - super(TestCPUPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.CPUStats(time=1 * (10 ** 6), number=2), - virt_inspector.CPUStats(time=3 * (10 ** 6), number=2), - # cpu_time resets on instance restart - virt_inspector.CPUStats(time=2 * (10 ** 6), number=2), - )) - - def inspect_cpus(name): - return next(next_value) - - self.inspector.inspect_cpus = mock.Mock(side_effect=inspect_cpus) - - mgr = manager.AgentManager() - pollster = cpu.CPUPollster() - - def _verify_cpu_metering(expected_time): - cache = {} - samples = list(pollster.get_samples(mgr, cache, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual(set(['cpu']), set([s.name for s in samples])) - self.assertEqual(expected_time, samples[0].volume) - self.assertEqual(2, samples[0].resource_metadata.get('cpu_number')) - # ensure elapsed time between polling cycles is non-zero - time.sleep(0.001) - - _verify_cpu_metering(1 * (10 ** 6)) - _verify_cpu_metering(3 * (10 ** 6)) - _verify_cpu_metering(2 * (10 ** 6)) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples_no_caching(self): - cpu_stats = virt_inspector.CPUStats(time=1 * (10 ** 6), number=2) - self.inspector.inspect_cpus = mock.Mock(return_value=cpu_stats) - - mgr = manager.AgentManager() - pollster = cpu.CPUPollster() - - cache = {} - samples = list(pollster.get_samples(mgr, cache, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual(10 ** 6, samples[0].volume) - self.assertEqual(0, len(cache)) - - -class TestCPUUtilPollster(base.TestPollsterBase): - - def setUp(self): - super(TestCPUUtilPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.CPUUtilStats(util=40), - virt_inspector.CPUUtilStats(util=60), - )) - - def inspect_cpu_util(name, duration): - return next(next_value) - - self.inspector.inspect_cpu_util = (mock. - Mock(side_effect=inspect_cpu_util)) - - mgr = manager.AgentManager() - pollster = cpu.CPUUtilPollster() - - def _verify_cpu_util_metering(expected_util): - cache = {} - samples = list(pollster.get_samples(mgr, cache, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual(set(['cpu_util']), - set([s.name for s in samples])) - self.assertEqual(expected_util, samples[0].volume) - - _verify_cpu_util_metering(40) - _verify_cpu_util_metering(60) diff --git a/ceilometer/tests/unit/compute/pollsters/test_diskio.py b/ceilometer/tests/unit/compute/pollsters/test_diskio.py deleted file mode 100644 index 0ecafc0f..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_diskio.py +++ /dev/null @@ -1,361 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# Copyright 2014 Cisco Systems, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import mock -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import disk -from ceilometer.compute.virt import inspector as virt_inspector -import ceilometer.tests.base as base - - -class TestBaseDiskIO(base.BaseTestCase): - - TYPE = 'cumulative' - - def setUp(self): - super(TestBaseDiskIO, self).setUp() - - self.inspector = mock.Mock() - self.instance = self._get_fake_instances() - patch_virt = mockpatch.Patch( - 'ceilometer.compute.virt.inspector.get_hypervisor_inspector', - new=mock.Mock(return_value=self.inspector)) - self.useFixture(patch_virt) - - # as we're having lazy hypervisor inspector singleton object in the - # base compute pollster class, that leads to the fact that we - # need to mock all this class property to avoid context sharing between - # the tests - patch_inspector = mockpatch.Patch( - 'ceilometer.compute.pollsters.BaseComputePollster.inspector', - self.inspector) - self.useFixture(patch_inspector) - - @staticmethod - def _get_fake_instances(): - instances = [] - for i in [1, 2]: - instance = mock.MagicMock() - instance.name = 'instance-%s' % i - setattr(instance, 'OS-EXT-SRV-ATTR:instance_name', - instance.name) - instance.id = i - instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, - 'ram': 512, 'disk': 20, 'ephemeral': 0} - instance.status = 'active' - instances.append(instance) - return instances - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, name, expected_count=2): - pollster = factory() - - mgr = manager.AgentManager() - cache = {} - samples = list(pollster.get_samples(mgr, cache, self.instance)) - self.assertIsNotEmpty(samples) - cache_key = getattr(pollster, self.CACHE_KEY) - self.assertIn(cache_key, cache) - for instance in self.instance: - self.assertIn(instance.id, cache[cache_key]) - self.assertEqual(set([name]), set([s.name for s in samples])) - - match = [s for s in samples if s.name == name] - self.assertEqual(len(match), expected_count, - 'missing counter %s' % name) - return match - - def _check_aggregate_samples(self, factory, name, - expected_volume, - expected_device=None): - match = self._check_get_samples(factory, name) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual(self.TYPE, match[0].type) - if expected_device is not None: - self.assertEqual(set(expected_device), - set(match[0].resource_metadata.get('device'))) - instances = [i.id for i in self.instance] - for m in match: - self.assertIn(m.resource_id, instances) - - def _check_per_device_samples(self, factory, name, - expected_volume, - expected_device=None): - match = self._check_get_samples(factory, name, expected_count=4) - match_dict = {} - for m in match: - match_dict[m.resource_id] = m - for instance in self.instance: - key = "%s-%s" % (instance.id, expected_device) - self.assertEqual(expected_volume, - match_dict[key].volume) - self.assertEqual(self.TYPE, match_dict[key].type) - - self.assertEqual(key, match_dict[key].resource_id) - - -class TestDiskPollsters(TestBaseDiskIO): - - DISKS = [ - 
(virt_inspector.Disk(device='vda1'), - virt_inspector.DiskStats(read_bytes=1, read_requests=2, - write_bytes=3, write_requests=4, - errors=-1)), - (virt_inspector.Disk(device='vda2'), - virt_inspector.DiskStats(read_bytes=2, read_requests=3, - write_bytes=5, write_requests=7, - errors=-1)), - ] - CACHE_KEY = "CACHE_KEY_DISK" - - def setUp(self): - super(TestDiskPollsters, self).setUp() - self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS) - - def test_disk_read_requests(self): - self._check_aggregate_samples(disk.ReadRequestsPollster, - 'disk.read.requests', 5, - expected_device=['vda1', 'vda2']) - - def test_disk_read_bytes(self): - self._check_aggregate_samples(disk.ReadBytesPollster, - 'disk.read.bytes', 3, - expected_device=['vda1', 'vda2']) - - def test_disk_write_requests(self): - self._check_aggregate_samples(disk.WriteRequestsPollster, - 'disk.write.requests', 11, - expected_device=['vda1', 'vda2']) - - def test_disk_write_bytes(self): - self._check_aggregate_samples(disk.WriteBytesPollster, - 'disk.write.bytes', 8, - expected_device=['vda1', 'vda2']) - - def test_per_disk_read_requests(self): - self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, - 'disk.device.read.requests', 2, - 'vda1') - self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, - 'disk.device.read.requests', 3, - 'vda2') - - def test_per_disk_write_requests(self): - self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, - 'disk.device.write.requests', 4, - 'vda1') - self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, - 'disk.device.write.requests', 7, - 'vda2') - - def test_per_disk_read_bytes(self): - self._check_per_device_samples(disk.PerDeviceReadBytesPollster, - 'disk.device.read.bytes', 1, - 'vda1') - self._check_per_device_samples(disk.PerDeviceReadBytesPollster, - 'disk.device.read.bytes', 2, - 'vda2') - - def test_per_disk_write_bytes(self): - self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, - 'disk.device.write.bytes', 3, - 'vda1') - self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, - 'disk.device.write.bytes', 5, - 'vda2') - - -class TestDiskRatePollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='disk1'), - virt_inspector.DiskRateStats(1024, 300, 5120, 700)), - - (virt_inspector.Disk(device='disk2'), - virt_inspector.DiskRateStats(2048, 400, 6144, 800)) - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_RATE" - - def setUp(self): - super(TestDiskRatePollsters, self).setUp() - self.inspector.inspect_disk_rates = mock.Mock(return_value=self.DISKS) - - def test_disk_read_bytes_rate(self): - self._check_aggregate_samples(disk.ReadBytesRatePollster, - 'disk.read.bytes.rate', 3072, - expected_device=['disk1', 'disk2']) - - def test_disk_read_requests_rate(self): - self._check_aggregate_samples(disk.ReadRequestsRatePollster, - 'disk.read.requests.rate', 700, - expected_device=['disk1', 'disk2']) - - def test_disk_write_bytes_rate(self): - self._check_aggregate_samples(disk.WriteBytesRatePollster, - 'disk.write.bytes.rate', 11264, - expected_device=['disk1', 'disk2']) - - def test_disk_write_requests_rate(self): - self._check_aggregate_samples(disk.WriteRequestsRatePollster, - 'disk.write.requests.rate', 1500, - expected_device=['disk1', 'disk2']) - - def test_per_disk_read_bytes_rate(self): - self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, - 'disk.device.read.bytes.rate', - 1024, 'disk1') - self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, - 
'disk.device.read.bytes.rate', - 2048, 'disk2') - - def test_per_disk_read_requests_rate(self): - self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, - 'disk.device.read.requests.rate', - 300, 'disk1') - self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, - 'disk.device.read.requests.rate', - 400, 'disk2') - - def test_per_disk_write_bytes_rate(self): - self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, - 'disk.device.write.bytes.rate', - 5120, 'disk1') - self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, - 'disk.device.write.bytes.rate', 6144, - 'disk2') - - def test_per_disk_write_requests_rate(self): - self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, - 'disk.device.write.requests.rate', 700, - 'disk1') - self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, - 'disk.device.write.requests.rate', 800, - 'disk2') - - -class TestDiskLatencyPollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='disk1'), - virt_inspector.DiskLatencyStats(1000)), - - (virt_inspector.Disk(device='disk2'), - virt_inspector.DiskLatencyStats(2000)) - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_LATENCY" - - def setUp(self): - super(TestDiskLatencyPollsters, self).setUp() - self.inspector.inspect_disk_latency = mock.Mock( - return_value=self.DISKS) - - def test_disk_latency(self): - self._check_aggregate_samples(disk.DiskLatencyPollster, - 'disk.latency', 3) - - def test_per_device_latency(self): - self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, - 'disk.device.latency', 1, 'disk1') - - self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, - 'disk.device.latency', 2, 'disk2') - - -class TestDiskIOPSPollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='disk1'), - virt_inspector.DiskIOPSStats(10)), - - (virt_inspector.Disk(device='disk2'), - virt_inspector.DiskIOPSStats(20)), - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_IOPS" - - def setUp(self): - super(TestDiskIOPSPollsters, self).setUp() - self.inspector.inspect_disk_iops = mock.Mock(return_value=self.DISKS) - - def test_disk_iops(self): - self._check_aggregate_samples(disk.DiskIOPSPollster, - 'disk.iops', 30) - - def test_per_device_iops(self): - self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, - 'disk.device.iops', 10, 'disk1') - - self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, - 'disk.device.iops', 20, 'disk2') - - -class TestDiskInfoPollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='vda1'), - virt_inspector.DiskInfo(capacity=3, allocation=2, physical=1)), - (virt_inspector.Disk(device='vda2'), - virt_inspector.DiskInfo(capacity=4, allocation=3, physical=2)), - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_INFO" - - def setUp(self): - super(TestDiskInfoPollsters, self).setUp() - self.inspector.inspect_disk_info = mock.Mock(return_value=self.DISKS) - - def test_disk_capacity(self): - self._check_aggregate_samples(disk.CapacityPollster, - 'disk.capacity', 7, - expected_device=['vda1', 'vda2']) - - def test_disk_allocation(self): - self._check_aggregate_samples(disk.AllocationPollster, - 'disk.allocation', 5, - expected_device=['vda1', 'vda2']) - - def test_disk_physical(self): - self._check_aggregate_samples(disk.PhysicalPollster, - 'disk.usage', 3, - expected_device=['vda1', 'vda2']) - - def test_per_disk_capacity(self): - self._check_per_device_samples(disk.PerDeviceCapacityPollster, - 'disk.device.capacity', 3, - 'vda1') - 
self._check_per_device_samples(disk.PerDeviceCapacityPollster, - 'disk.device.capacity', 4, - 'vda2') - - def test_per_disk_allocation(self): - self._check_per_device_samples(disk.PerDeviceAllocationPollster, - 'disk.device.allocation', 2, - 'vda1') - self._check_per_device_samples(disk.PerDeviceAllocationPollster, - 'disk.device.allocation', 3, - 'vda2') - - def test_per_disk_physical(self): - self._check_per_device_samples(disk.PerDevicePhysicalPollster, - 'disk.device.usage', 1, - 'vda1') - self._check_per_device_samples(disk.PerDevicePhysicalPollster, - 'disk.device.usage', 2, - 'vda2') diff --git a/ceilometer/tests/unit/compute/pollsters/test_instance.py b/ceilometer/tests/unit/compute/pollsters/test_instance.py deleted file mode 100644 index f100f543..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_instance.py +++ /dev/null @@ -1,79 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import fixture as fixture_config - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import instance as pollsters_instance -from ceilometer.tests.unit.compute.pollsters import base - - -class TestInstancePollster(base.TestPollsterBase): - - def setUp(self): - super(TestInstancePollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples_instance(self): - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual('instance', samples[0].name) - self.assertEqual(1, samples[0].resource_metadata['vcpus']) - self.assertEqual(512, samples[0].resource_metadata['memory_mb']) - self.assertEqual(20, samples[0].resource_metadata['disk_gb']) - self.assertEqual(20, samples[0].resource_metadata['root_gb']) - self.assertEqual(0, samples[0].resource_metadata['ephemeral_gb']) - self.assertEqual('active', samples[0].resource_metadata['status']) - self.assertEqual('active', samples[0].resource_metadata['state']) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_reserved_metadata_with_keys(self): - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('reserved_metadata_keys', ['fqdn']) - - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual({'fqdn': 'vm_fqdn', - 'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, - samples[0].resource_metadata['user_metadata']) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_reserved_metadata_with_namespace(self): - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual({'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, - 
samples[0].resource_metadata['user_metadata']) - - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('reserved_metadata_namespace', []) - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertNotIn('user_metadata', samples[0].resource_metadata) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_flavor_name_as_metadata_instance_type(self): - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual('m1.small', - samples[0].resource_metadata['instance_type']) diff --git a/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py b/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py deleted file mode 100644 index f557a415..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py +++ /dev/null @@ -1,120 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for the compute pollsters. -""" - -import mock -from oslotest import base -import six - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import util - - -class FauxInstance(object): - - def __init__(self, **kwds): - for name, value in kwds.items(): - setattr(self, name, value) - - def __getitem__(self, key): - return getattr(self, key) - - def get(self, key, default): - try: - return getattr(self, key) - except AttributeError: - return default - - -class TestLocationMetadata(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - self.manager = manager.AgentManager() - super(TestLocationMetadata, self).setUp() - - # Mimics an instance returned from nova api call - self.INSTANCE_PROPERTIES = {'name': 'display name', - 'id': ('234cbe81-4e09-4f64-9b2a-' - '714f6b9046e3'), - 'OS-EXT-SRV-ATTR:instance_name': - 'instance-000001', - 'OS-EXT-AZ:availability_zone': - 'foo-zone', - 'reservation_id': 'reservation id', - 'architecture': 'x86_64', - 'kernel_id': 'kernel id', - 'os_type': 'linux', - 'ramdisk_id': 'ramdisk id', - 'status': 'active', - 'ephemeral_gb': 0, - 'root_gb': 20, - 'disk_gb': 20, - 'image': {'id': 1, - 'links': [{"rel": "bookmark", - 'href': 2}]}, - 'hostId': '1234-5678', - 'OS-EXT-SRV-ATTR:host': 'host-test', - 'flavor': {'name': 'm1.tiny', - 'id': 1, - 'disk': 20, - 'ram': 512, - 'vcpus': 2, - 'ephemeral': 0}, - 'metadata': {'metering.autoscale.group': - 'X' * 512, - 'metering.ephemeral_gb': 42}} - - self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) - - def test_metadata(self): - md = util._get_metadata_from_object(self.instance) - for prop, value in six.iteritems(self.INSTANCE_PROPERTIES): - if prop not in ("metadata"): - # Special cases - if prop == 'name': - prop = 'display_name' - elif prop == 'hostId': - prop = "host" 
- elif prop == 'OS-EXT-SRV-ATTR:host': - prop = "instance_host" - elif prop == 'OS-EXT-SRV-ATTR:instance_name': - prop = 'name' - elif prop == "id": - prop = "instance_id" - self.assertEqual(value, md[prop]) - user_metadata = md['user_metadata'] - expected = self.INSTANCE_PROPERTIES[ - 'metadata']['metering.autoscale.group'][:256] - self.assertEqual(expected, user_metadata['autoscale_group']) - self.assertEqual(1, len(user_metadata)) - - def test_metadata_empty_image(self): - self.INSTANCE_PROPERTIES['image'] = None - self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) - md = util._get_metadata_from_object(self.instance) - self.assertIsNone(md['image']) - self.assertIsNone(md['image_ref']) - self.assertIsNone(md['image_ref_url']) - - def test_metadata_image_through_conductor(self): - # There should be no links here, should default to None - self.INSTANCE_PROPERTIES['image'] = {'id': 1} - self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) - md = util._get_metadata_from_object(self.instance) - self.assertEqual(1, md['image_ref']) - self.assertIsNone(md['image_ref_url']) diff --git a/ceilometer/tests/unit/compute/pollsters/test_memory.py b/ceilometer/tests/unit/compute/pollsters/test_memory.py deleted file mode 100644 index 7576e1de..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_memory.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import memory -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.tests.unit.compute.pollsters import base - - -class TestMemoryPollster(base.TestPollsterBase): - - def setUp(self): - super(TestMemoryPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.MemoryUsageStats(usage=1.0), - virt_inspector.MemoryUsageStats(usage=2.0), - virt_inspector.NoDataException(), - virt_inspector.InstanceShutOffException(), - )) - - def inspect_memory_usage(instance, duration): - value = next(next_value) - if isinstance(value, virt_inspector.MemoryUsageStats): - return value - else: - raise value - - self.inspector.inspect_memory_usage = mock.Mock( - side_effect=inspect_memory_usage) - - mgr = manager.AgentManager() - pollster = memory.MemoryUsagePollster() - - @mock.patch('ceilometer.compute.pollsters.memory.LOG') - def _verify_memory_metering(expected_count, expected_memory_mb, - expected_warnings, mylog): - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(expected_count, len(samples)) - if expected_count > 0: - self.assertEqual(set(['memory.usage']), - set([s.name for s in samples])) - self.assertEqual(expected_memory_mb, samples[0].volume) - else: - self.assertEqual(expected_warnings, mylog.warning.call_count) - self.assertEqual(0, mylog.exception.call_count) - - _verify_memory_metering(1, 1.0, 0) - _verify_memory_metering(1, 2.0, 0) - _verify_memory_metering(0, 0, 1) - _verify_memory_metering(0, 0, 0) - - -class TestResidentMemoryPollster(base.TestPollsterBase): - - def setUp(self): - super(TestResidentMemoryPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.MemoryResidentStats(resident=1.0), - virt_inspector.MemoryResidentStats(resident=2.0), - virt_inspector.NoDataException(), - virt_inspector.InstanceShutOffException(), - )) - - def inspect_memory_resident(instance, duration): - value = next(next_value) - if isinstance(value, virt_inspector.MemoryResidentStats): - return value - else: - raise value - - self.inspector.inspect_memory_resident = mock.Mock( - side_effect=inspect_memory_resident) - - mgr = manager.AgentManager() - pollster = memory.MemoryResidentPollster() - - @mock.patch('ceilometer.compute.pollsters.memory.LOG') - def _verify_resident_memory_metering(expected_count, - expected_resident_memory_mb, - expected_warnings, mylog): - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(expected_count, len(samples)) - if expected_count > 0: - self.assertEqual(set(['memory.resident']), - set([s.name for s in samples])) - self.assertEqual(expected_resident_memory_mb, - samples[0].volume) - else: - self.assertEqual(expected_warnings, mylog.warning.call_count) - self.assertEqual(0, mylog.exception.call_count) - - _verify_resident_memory_metering(1, 1.0, 0) - _verify_resident_memory_metering(1, 2.0, 0) - _verify_resident_memory_metering(0, 0, 1) - _verify_resident_memory_metering(0, 0, 0) diff --git a/ceilometer/tests/unit/compute/pollsters/test_net.py b/ceilometer/tests/unit/compute/pollsters/test_net.py deleted file mode 100644 index d78a2ec3..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_net.py +++ /dev/null @@ -1,318 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, 
Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import net -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer import sample -from ceilometer.tests.unit.compute.pollsters import base - - -class FauxInstance(object): - - def __init__(self, **kwargs): - for name, value in kwargs.items(): - setattr(self, name, value) - - def __getitem__(self, key): - return getattr(self, key) - - def get(self, key, default): - return getattr(self, key, default) - - -class TestNetPollster(base.TestPollsterBase): - - def setUp(self): - super(TestNetPollster, self).setUp() - self.vnic0 = virt_inspector.Interface( - name='vnet0', - fref='fa163e71ec6e', - mac='fa:16:3e:71:ec:6d', - parameters=dict(ip='10.0.0.2', - projmask='255.255.255.0', - projnet='proj1', - dhcp_server='10.0.0.1')) - stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, - tx_bytes=3, tx_packets=4) - self.vnic1 = virt_inspector.Interface( - name='vnet1', - fref='fa163e71ec6f', - mac='fa:16:3e:71:ec:6e', - parameters=dict(ip='192.168.0.3', - projmask='255.255.255.0', - projnet='proj2', - dhcp_server='10.0.0.2')) - stats1 = virt_inspector.InterfaceStats(rx_bytes=5, rx_packets=6, - tx_bytes=7, tx_packets=8) - self.vnic2 = virt_inspector.Interface( - name='vnet2', - fref=None, - mac='fa:18:4e:72:fc:7e', - parameters=dict(ip='192.168.0.4', - projmask='255.255.255.0', - projnet='proj3', - dhcp_server='10.0.0.3')) - stats2 = virt_inspector.InterfaceStats(rx_bytes=9, rx_packets=10, - tx_bytes=11, tx_packets=12) - - vnics = [ - (self.vnic0, stats0), - (self.vnic1, stats1), - (self.vnic2, stats2), - ] - self.inspector.inspect_vnics = mock.Mock(return_value=vnics) - - self.INSTANCE_PROPERTIES = {'name': 'display name', - 'OS-EXT-SRV-ATTR:instance_name': - 'instance-000001', - 'OS-EXT-AZ:availability_zone': 'foo-zone', - 'reservation_id': 'reservation id', - 'id': 'instance id', - 'user_id': 'user id', - 'tenant_id': 'tenant id', - 'architecture': 'x86_64', - 'kernel_id': 'kernel id', - 'os_type': 'linux', - 'ramdisk_id': 'ramdisk id', - 'status': 'active', - 'ephemeral_gb': 0, - 'root_gb': 20, - 'disk_gb': 20, - 'image': {'id': 1, - 'links': [{"rel": "bookmark", - 'href': 2}]}, - 'hostId': '1234-5678', - 'OS-EXT-SRV-ATTR:host': 'host-test', - 'flavor': {'disk': 20, - 'ram': 512, - 'name': 'tiny', - 'vcpus': 2, - 'ephemeral': 0}, - 'metadata': {'metering.autoscale.group': - 'X' * 512, - 'metering.foobar': 42}} - - self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, expected): - mgr = manager.AgentManager() - pollster = factory() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(3, len(samples)) # one for each nic - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def _verify_vnic_metering(ip, expected_volume, expected_rid): - match = [s for s in 
samples - if s.resource_metadata['parameters']['ip'] == ip - ] - self.assertEqual(len(match), 1, 'missing ip %s' % ip) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual('cumulative', match[0].type) - self.assertEqual(expected_rid, match[0].resource_id) - - for ip, volume, rid in expected: - _verify_vnic_metering(ip, volume, rid) - - def test_incoming_bytes(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.IncomingBytesPollster, - [('10.0.0.2', 1, self.vnic0.fref), - ('192.168.0.3', 5, self.vnic1.fref), - ('192.168.0.4', 9, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_outgoing_bytes(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.OutgoingBytesPollster, - [('10.0.0.2', 3, self.vnic0.fref), - ('192.168.0.3', 7, self.vnic1.fref), - ('192.168.0.4', 11, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_incoming_packets(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.IncomingPacketsPollster, - [('10.0.0.2', 2, self.vnic0.fref), - ('192.168.0.3', 6, self.vnic1.fref), - ('192.168.0.4', 10, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_outgoing_packets(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.OutgoingPacketsPollster, - [('10.0.0.2', 4, self.vnic0.fref), - ('192.168.0.3', 8, self.vnic1.fref), - ('192.168.0.4', 12, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_metadata(self): - factory = net.OutgoingBytesPollster - pollster = factory() - sm = pollster.make_vnic_sample(self.faux_instance, - name='network.outgoing.bytes', - type=sample.TYPE_CUMULATIVE, - unit='B', - volume=100, - vnic_data=self.vnic0) - - user_metadata = sm.resource_metadata['user_metadata'] - expected = self.INSTANCE_PROPERTIES[ - 'metadata']['metering.autoscale.group'][:256] - self.assertEqual(expected, user_metadata['autoscale_group']) - self.assertEqual(2, len(user_metadata)) - - -class TestNetPollsterCache(base.TestPollsterBase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples_cache(self, factory): - vnic0 = virt_inspector.Interface( - name='vnet0', - fref='fa163e71ec6e', - mac='fa:16:3e:71:ec:6d', - parameters=dict(ip='10.0.0.2', - projmask='255.255.255.0', - projnet='proj1', - dhcp_server='10.0.0.1')) - stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, - tx_bytes=3, tx_packets=4) - vnics = [(vnic0, stats0)] - - mgr = manager.AgentManager() - pollster = factory() - cache = { - pollster.CACHE_KEY_VNIC: { - self.instance.id: vnics, - }, - } - samples = list(pollster.get_samples(mgr, cache, [self.instance])) - self.assertEqual(1, len(samples)) - - def test_incoming_bytes(self): - self._check_get_samples_cache(net.IncomingBytesPollster) - - def test_outgoing_bytes(self): - self._check_get_samples_cache(net.OutgoingBytesPollster) - - def test_incoming_packets(self): - self._check_get_samples_cache(net.IncomingPacketsPollster) - - def test_outgoing_packets(self): - self._check_get_samples_cache(net.OutgoingPacketsPollster) - - -class TestNetRatesPollster(base.TestPollsterBase): - - def setUp(self): - super(TestNetRatesPollster, self).setUp() - self.vnic0 = virt_inspector.Interface( - name='vnet0', - fref='fa163e71ec6e', - 
mac='fa:16:3e:71:ec:6d', - parameters=dict(ip='10.0.0.2', - projmask='255.255.255.0', - projnet='proj1', - dhcp_server='10.0.0.1')) - stats0 = virt_inspector.InterfaceRateStats(rx_bytes_rate=1, - tx_bytes_rate=2) - self.vnic1 = virt_inspector.Interface( - name='vnet1', - fref='fa163e71ec6f', - mac='fa:16:3e:71:ec:6e', - parameters=dict(ip='192.168.0.3', - projmask='255.255.255.0', - projnet='proj2', - dhcp_server='10.0.0.2')) - stats1 = virt_inspector.InterfaceRateStats(rx_bytes_rate=3, - tx_bytes_rate=4) - self.vnic2 = virt_inspector.Interface( - name='vnet2', - fref=None, - mac='fa:18:4e:72:fc:7e', - parameters=dict(ip='192.168.0.4', - projmask='255.255.255.0', - projnet='proj3', - dhcp_server='10.0.0.3')) - stats2 = virt_inspector.InterfaceRateStats(rx_bytes_rate=5, - tx_bytes_rate=6) - - vnics = [ - (self.vnic0, stats0), - (self.vnic1, stats1), - (self.vnic2, stats2), - ] - self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, expected): - mgr = manager.AgentManager() - pollster = factory() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(3, len(samples)) # one for each nic - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def _verify_vnic_metering(ip, expected_volume, expected_rid): - match = [s for s in samples - if s.resource_metadata['parameters']['ip'] == ip - ] - self.assertEqual(1, len(match), 'missing ip %s' % ip) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual('gauge', match[0].type) - self.assertEqual(expected_rid, match[0].resource_id) - - for ip, volume, rid in expected: - _verify_vnic_metering(ip, volume, rid) - - def test_incoming_bytes_rate(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.IncomingBytesRatePollster, - [('10.0.0.2', 1, self.vnic0.fref), - ('192.168.0.3', 3, self.vnic1.fref), - ('192.168.0.4', 5, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_outgoing_bytes_rate(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.OutgoingBytesRatePollster, - [('10.0.0.2', 2, self.vnic0.fref), - ('192.168.0.3', 4, self.vnic1.fref), - ('192.168.0.4', 6, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) diff --git a/ceilometer/tests/unit/compute/test_discovery.py b/ceilometer/tests/unit/compute/test_discovery.py deleted file mode 100644 index da5b0488..00000000 --- a/ceilometer/tests/unit/compute/test_discovery.py +++ /dev/null @@ -1,99 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import datetime - -import iso8601 -import mock -from oslo_config import fixture as fixture_config -from oslotest import mockpatch - -from ceilometer.compute import discovery -import ceilometer.tests.base as base - - -class TestDiscovery(base.BaseTestCase): - - def setUp(self): - super(TestDiscovery, self).setUp() - - self.instance = mock.MagicMock() - self.instance.name = 'instance-00000001' - setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name', - self.instance.name) - setattr(self.instance, 'OS-EXT-STS:vm_state', - 'active') - self.instance.id = 1 - self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, - 'ram': 512, 'disk': 20, 'ephemeral': 0} - self.instance.status = 'active' - self.instance.metadata = { - 'fqdn': 'vm_fqdn', - 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', - 'project_cos': 'dev'} - - # as we're having lazy hypervisor inspector singleton object in the - # base compute pollster class, that leads to the fact that we - # need to mock all this class property to avoid context sharing between - # the tests - self.client = mock.MagicMock() - self.client.instance_get_all_by_host.return_value = [self.instance] - patch_client = mockpatch.Patch('ceilometer.nova_client.Client', - return_value=self.client) - self.useFixture(patch_client) - - self.utc_now = mock.MagicMock( - return_value=datetime.datetime(2016, 1, 1, - tzinfo=iso8601.iso8601.UTC)) - patch_timeutils = mockpatch.Patch('oslo_utils.timeutils.utcnow', - self.utc_now) - self.useFixture(patch_timeutils) - - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('host', 'test') - - def test_normal_discovery(self): - dsc = discovery.InstanceDiscovery() - resources = dsc.discover(mock.MagicMock()) - - self.assertEqual(1, len(resources)) - self.assertEqual(1, list(resources)[0].id) - - self.client.instance_get_all_by_host.assert_called_once_with( - 'test', None) - - resources = dsc.discover(mock.MagicMock()) - self.assertEqual(1, len(resources)) - self.assertEqual(1, list(resources)[0].id) - self.client.instance_get_all_by_host.assert_called_with( - self.CONF.host, "2016-01-01T00:00:00+00:00") - - def test_discovery_with_resource_update_interval(self): - self.CONF.set_override("resource_update_interval", 600, - group="compute") - dsc = discovery.InstanceDiscovery() - dsc.last_run = datetime.datetime(2016, 1, 1, - tzinfo=iso8601.iso8601.UTC) - - self.utc_now.return_value = datetime.datetime( - 2016, 1, 1, minute=5, tzinfo=iso8601.iso8601.UTC) - resources = dsc.discover(mock.MagicMock()) - self.assertEqual(0, len(resources)) - self.client.instance_get_all_by_host.assert_not_called() - - self.utc_now.return_value = datetime.datetime( - 2016, 1, 1, minute=20, tzinfo=iso8601.iso8601.UTC) - resources = dsc.discover(mock.MagicMock()) - self.assertEqual(1, len(resources)) - self.assertEqual(1, list(resources)[0].id) - self.client.instance_get_all_by_host.assert_called_once_with( - self.CONF.host, "2016-01-01T00:00:00+00:00") diff --git a/ceilometer/tests/unit/compute/virt/__init__.py b/ceilometer/tests/unit/compute/virt/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/virt/hyperv/__init__.py b/ceilometer/tests/unit/compute/virt/hyperv/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py b/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py deleted file mode 100644 index 7df4f2e4..00000000 --- 
a/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Hyper-V inspector. -""" - -import sys - -import mock -from os_win import exceptions as os_win_exc -from oslo_utils import units -from oslotest import base - -from ceilometer.compute.virt.hyperv import inspector as hyperv_inspector -from ceilometer.compute.virt import inspector as virt_inspector - - -class TestHyperVInspection(base.BaseTestCase): - - @mock.patch.object(hyperv_inspector, 'utilsfactory', mock.MagicMock()) - @mock.patch.object(hyperv_inspector.HyperVInspector, - '_compute_host_max_cpu_clock') - def setUp(self, mock_compute_host_cpu_clock): - self._inspector = hyperv_inspector.HyperVInspector() - self._inspector._utils = mock.MagicMock() - - super(TestHyperVInspection, self).setUp() - - def test_converted_exception(self): - self._inspector._utils.get_cpu_metrics.side_effect = ( - os_win_exc.OSWinException) - self.assertRaises(virt_inspector.InspectorException, - self._inspector.inspect_cpus, mock.sentinel.instance) - - self._inspector._utils.get_cpu_metrics.side_effect = ( - os_win_exc.HyperVException) - self.assertRaises(virt_inspector.InspectorException, - self._inspector.inspect_cpus, mock.sentinel.instance) - - self._inspector._utils.get_cpu_metrics.side_effect = ( - os_win_exc.NotFound(resource='foofoo')) - self.assertRaises(virt_inspector.InstanceNotFoundException, - self._inspector.inspect_cpus, mock.sentinel.instance) - - def test_assert_original_traceback_maintained(self): - def bar(self): - foo = "foofoo" - raise os_win_exc.NotFound(resource=foo) - - self._inspector._utils.get_cpu_metrics.side_effect = bar - try: - self._inspector.inspect_cpus(mock.sentinel.instance) - self.fail("Test expected exception, but it was not raised.") - except virt_inspector.InstanceNotFoundException: - # exception has been raised as expected. - _, _, trace = sys.exc_info() - while trace.tb_next: - # iterate until the original exception source, bar. - trace = trace.tb_next - - # original frame will contain the 'foo' variable. 
- self.assertEqual('foofoo', trace.tb_frame.f_locals['foo']) - - @mock.patch.object(hyperv_inspector, 'utilsfactory') - def test_compute_host_max_cpu_clock(self, mock_utilsfactory): - mock_cpu = {'MaxClockSpeed': 1000} - hostutils = mock_utilsfactory.get_hostutils.return_value.get_cpus_info - hostutils.return_value = [mock_cpu, mock_cpu] - - cpu_clock = self._inspector._compute_host_max_cpu_clock() - self.assertEqual(2000.0, cpu_clock) - - def test_inspect_cpus(self): - fake_instance_name = 'fake_instance_name' - fake_cpu_clock_used = 2000 - fake_cpu_count = 3000 - fake_uptime = 4000 - - self._inspector._host_max_cpu_clock = 4000.0 - fake_cpu_percent_used = (fake_cpu_clock_used / - self._inspector._host_max_cpu_clock) - fake_cpu_time = (int(fake_uptime * fake_cpu_percent_used) * - 1000) - self._inspector._utils.get_cpu_metrics.return_value = ( - fake_cpu_clock_used, fake_cpu_count, fake_uptime) - - cpu_stats = self._inspector.inspect_cpus(fake_instance_name) - - self.assertEqual(fake_cpu_count, cpu_stats.number) - self.assertEqual(fake_cpu_time, cpu_stats.time) - - def test_inspect_memory_usage(self): - fake_usage = self._inspector._utils.get_memory_metrics.return_value - usage = self._inspector.inspect_memory_usage( - mock.sentinel.FAKE_INSTANCE, mock.sentinel.FAKE_DURATION) - self.assertEqual(fake_usage, usage.usage) - - def test_inspect_vnics(self): - fake_instance_name = 'fake_instance_name' - fake_rx_mb = 1000 - fake_tx_mb = 2000 - fake_element_name = 'fake_element_name' - fake_address = 'fake_address' - - self._inspector._utils.get_vnic_metrics.return_value = [{ - 'rx_mb': fake_rx_mb, - 'tx_mb': fake_tx_mb, - 'element_name': fake_element_name, - 'address': fake_address}] - - inspected_vnics = list(self._inspector.inspect_vnics( - fake_instance_name)) - - self.assertEqual(1, len(inspected_vnics)) - self.assertEqual(2, len(inspected_vnics[0])) - - inspected_vnic, inspected_stats = inspected_vnics[0] - - self.assertEqual(fake_element_name, inspected_vnic.name) - self.assertEqual(fake_address, inspected_vnic.mac) - - self.assertEqual(fake_rx_mb * units.Mi, inspected_stats.rx_bytes) - self.assertEqual(fake_tx_mb * units.Mi, inspected_stats.tx_bytes) - - def test_inspect_disks(self): - fake_instance_name = 'fake_instance_name' - fake_read_mb = 1000 - fake_write_mb = 2000 - fake_instance_id = "fake_fake_instance_id" - fake_host_resource = "fake_host_resource" - - self._inspector._utils.get_disk_metrics.return_value = [{ - 'read_mb': fake_read_mb, - 'write_mb': fake_write_mb, - 'instance_id': fake_instance_id, - 'host_resource': fake_host_resource}] - - inspected_disks = list(self._inspector.inspect_disks( - fake_instance_name)) - - self.assertEqual(1, len(inspected_disks)) - self.assertEqual(2, len(inspected_disks[0])) - - inspected_disk, inspected_stats = inspected_disks[0] - - self.assertEqual(fake_instance_id, inspected_disk.device) - - self.assertEqual(fake_read_mb * units.Mi, inspected_stats.read_bytes) - self.assertEqual(fake_write_mb * units.Mi, inspected_stats.write_bytes) - - def test_inspect_disk_latency(self): - fake_instance_name = mock.sentinel.INSTANCE_NAME - fake_disk_latency = mock.sentinel.DISK_LATENCY - fake_instance_id = mock.sentinel.INSTANCE_ID - - self._inspector._utils.get_disk_latency_metrics.return_value = [{ - 'disk_latency': fake_disk_latency, - 'instance_id': fake_instance_id}] - - inspected_disks = list(self._inspector.inspect_disk_latency( - fake_instance_name)) - - self.assertEqual(1, len(inspected_disks)) - self.assertEqual(2, len(inspected_disks[0])) - - 
inspected_disk, inspected_stats = inspected_disks[0] - - self.assertEqual(fake_instance_id, inspected_disk.device) - self.assertEqual(fake_disk_latency, inspected_stats.disk_latency) - - def test_inspect_disk_iops_count(self): - fake_instance_name = mock.sentinel.INSTANCE_NAME - fake_disk_iops_count = mock.sentinel.DISK_IOPS_COUNT - fake_instance_id = mock.sentinel.INSTANCE_ID - - self._inspector._utils.get_disk_iops_count.return_value = [{ - 'iops_count': fake_disk_iops_count, - 'instance_id': fake_instance_id}] - - inspected_disks = list(self._inspector.inspect_disk_iops( - fake_instance_name)) - - self.assertEqual(1, len(inspected_disks)) - self.assertEqual(2, len(inspected_disks[0])) - - inspected_disk, inspected_stats = inspected_disks[0] - - self.assertEqual(fake_instance_id, inspected_disk.device) - self.assertEqual(fake_disk_iops_count, inspected_stats.iops_count) diff --git a/ceilometer/tests/unit/compute/virt/libvirt/__init__.py b/ceilometer/tests/unit/compute/virt/libvirt/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py b/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py deleted file mode 100644 index 16220cfa..00000000 --- a/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for libvirt inspector. 
-""" - -try: - import contextlib2 as contextlib # for Python < 3.3 -except ImportError: - import contextlib - -import fixtures -import mock -from oslo_utils import units -from oslotest import base - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector - - -class TestLibvirtInspection(base.BaseTestCase): - - class fakeLibvirtError(Exception): - pass - - def setUp(self): - super(TestLibvirtInspection, self).setUp() - - class VMInstance(object): - id = 'ff58e738-12f4-4c58-acde-77617b68da56' - name = 'instance-00000001' - self.instance = VMInstance - self.inspector = libvirt_inspector.LibvirtInspector() - self.inspector.connection = mock.Mock() - libvirt_inspector.libvirt = mock.Mock() - libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5 - libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError - self.domain = mock.Mock() - self.addCleanup(mock.patch.stopall) - - def test_inspect_cpus(self): - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - cpu_info = self.inspector.inspect_cpus(self.instance) - self.assertEqual(2, cpu_info.number) - self.assertEqual(999999, cpu_info.time) - - def test_inspect_cpus_with_domain_shutoff(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999)): - self.assertRaises(virt_inspector.InstanceShutOffException, - self.inspector.inspect_cpus, - self.instance) - - def test_inspect_vnics(self): - dom_xml = """ - - - - - - - -
-                 <!-- interface elements for vnet0, vnet1 and vnet2 (MAC addresses, target devices and filterref parameters) -->
- - - - """ - - interface_stats = { - 'vnet0': (1, 2, 0, 0, 3, 4, 0, 0), - 'vnet1': (5, 6, 0, 0, 7, 8, 0, 0), - 'vnet2': (9, 10, 0, 0, 11, 12, 0, 0), - } - interfaceStats = interface_stats.__getitem__ - - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, - 'interfaceStats', - side_effect=interfaceStats)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - interfaces = list(self.inspector.inspect_vnics(self.instance)) - - self.assertEqual(3, len(interfaces)) - vnic0, info0 = interfaces[0] - self.assertEqual('vnet0', vnic0.name) - self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac) - self.assertEqual('nova-instance-00000001-fa163e71ec6d', vnic0.fref) - self.assertEqual('255.255.255.0', vnic0.parameters.get('projmask')) - self.assertEqual('10.0.0.2', vnic0.parameters.get('ip')) - self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet')) - self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver')) - self.assertEqual(1, info0.rx_bytes) - self.assertEqual(2, info0.rx_packets) - self.assertEqual(3, info0.tx_bytes) - self.assertEqual(4, info0.tx_packets) - - vnic1, info1 = interfaces[1] - self.assertEqual('vnet1', vnic1.name) - self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac) - self.assertEqual('nova-instance-00000001-fa163e71ec6e', vnic1.fref) - self.assertEqual('255.255.255.0', vnic1.parameters.get('projmask')) - self.assertEqual('192.168.0.2', vnic1.parameters.get('ip')) - self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet')) - self.assertEqual('192.168.0.1', vnic1.parameters.get('dhcpserver')) - self.assertEqual(5, info1.rx_bytes) - self.assertEqual(6, info1.rx_packets) - self.assertEqual(7, info1.tx_bytes) - self.assertEqual(8, info1.tx_packets) - - vnic2, info2 = interfaces[2] - self.assertEqual('vnet2', vnic2.name) - self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac) - self.assertIsNone(vnic2.fref) - self.assertEqual(dict(), vnic2.parameters) - self.assertEqual(9, info2.rx_bytes) - self.assertEqual(10, info2.rx_packets) - self.assertEqual(11, info2.tx_bytes) - self.assertEqual(12, info2.tx_packets) - - def test_inspect_vnics_with_domain_shutoff(self): - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999))) - inspect = self.inspector.inspect_vnics - self.assertRaises(virt_inspector.InstanceShutOffException, - list, inspect(self.instance)) - - def test_inspect_disks(self): - dom_xml = """ - - - - - - - -
- - - - """ - - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockStats', - return_value=(1, 2, 3, - 4, -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - disks = list(self.inspector.inspect_disks(self.instance)) - - self.assertEqual(1, len(disks)) - disk0, info0 = disks[0] - self.assertEqual('vda', disk0.device) - self.assertEqual(1, info0.read_requests) - self.assertEqual(2, info0.read_bytes) - self.assertEqual(3, info0.write_requests) - self.assertEqual(4, info0.write_bytes) - - def test_inspect_disks_with_domain_shutoff(self): - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999))) - inspect = self.inspector.inspect_disks - self.assertRaises(virt_inspector.InstanceShutOffException, - list, inspect(self.instance)) - - def test_inspect_memory_usage(self): - fake_memory_stats = {'available': 51200, 'unused': 25600} - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(self.domain, 'memoryStats', - return_value=fake_memory_stats): - memory = self.inspector.inspect_memory_usage( - self.instance) - self.assertEqual(25600 / units.Ki, memory.usage) - - def test_inspect_disk_info(self): - dom_xml = """ - - - - - - - -
- - - - """ - - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockInfo', - return_value=(1, 2, 3, - -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - disks = list(self.inspector.inspect_disk_info(self.instance)) - - self.assertEqual(1, len(disks)) - disk0, info0 = disks[0] - self.assertEqual('vda', disk0.device) - self.assertEqual(1, info0.capacity) - self.assertEqual(2, info0.allocation) - self.assertEqual(3, info0.physical) - - def test_inspect_memory_usage_with_domain_shutoff(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999)): - self.assertRaises(virt_inspector.InstanceShutOffException, - self.inspector.inspect_memory_usage, - self.instance) - - def test_inspect_memory_usage_with_empty_stats(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(self.domain, 'memoryStats', - return_value={}): - self.assertRaises(virt_inspector.NoDataException, - self.inspector.inspect_memory_usage, - self.instance) - - -class TestLibvirtInspectionWithError(base.BaseTestCase): - - class fakeLibvirtError(Exception): - pass - - def setUp(self): - super(TestLibvirtInspectionWithError, self).setUp() - self.inspector = libvirt_inspector.LibvirtInspector() - self.useFixture(fixtures.MonkeyPatch( - 'ceilometer.compute.virt.libvirt.inspector.' - 'LibvirtInspector._get_connection', - self._dummy_get_connection)) - libvirt_inspector.libvirt = mock.Mock() - libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError - - @staticmethod - def _dummy_get_connection(*args, **kwargs): - raise Exception('dummy') - - def test_inspect_unknown_error(self): - self.assertRaises(virt_inspector.InspectorException, - self.inspector.inspect_cpus, 'foo') - - -class TestLibvirtInitWithError(base.BaseTestCase): - - def setUp(self): - super(TestLibvirtInitWithError, self).setUp() - self.inspector = libvirt_inspector.LibvirtInspector() - libvirt_inspector.libvirt = mock.Mock() - - @mock.patch('ceilometer.compute.virt.libvirt.inspector.' - 'LibvirtInspector._get_connection', - mock.Mock(return_value=None)) - def test_init_error(self): - self.assertRaises(virt_inspector.NoSanityException, - self.inspector.check_sanity) - - @mock.patch('ceilometer.compute.virt.libvirt.inspector.' - 'LibvirtInspector._get_connection', - mock.Mock(side_effect=virt_inspector.NoDataException)) - def test_init_exception(self): - self.assertRaises(virt_inspector.NoDataException, - self.inspector.check_sanity) diff --git a/ceilometer/tests/unit/compute/virt/vmware/__init__.py b/ceilometer/tests/unit/compute/virt/vmware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py b/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py deleted file mode 100644 index a6ca74a6..00000000 --- a/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for VMware vSphere inspector. -""" - -import mock -from oslo_vmware import api -from oslotest import base - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.vmware import inspector as vsphere_inspector - - -class TestVsphereInspection(base.BaseTestCase): - - def setUp(self): - api_session = api.VMwareAPISession("test_server", "test_user", - "test_password", 0, None, - create_session=False, port=7443) - vsphere_inspector.get_api_session = mock.Mock( - return_value=api_session) - self._inspector = vsphere_inspector.VsphereInspector() - self._inspector._ops = mock.MagicMock() - - super(TestVsphereInspection, self).setUp() - - def test_inspect_memory_usage(self): - fake_instance_moid = 'fake_instance_moid' - fake_instance_id = 'fake_instance_id' - fake_perf_counter_id = 'fake_perf_counter_id' - fake_memory_value = 1024.0 - fake_stat = virt_inspector.MemoryUsageStats(usage=1.0) - - def construct_mock_instance_object(fake_instance_id): - instance_object = mock.MagicMock() - instance_object.id = fake_instance_id - return instance_object - - fake_instance = construct_mock_instance_object(fake_instance_id) - self._inspector._ops.get_vm_moid.return_value = fake_instance_moid - (self._inspector._ops. - get_perf_counter_id.return_value) = fake_perf_counter_id - (self._inspector._ops.query_vm_aggregate_stats. - return_value) = fake_memory_value - memory_stat = self._inspector.inspect_memory_usage(fake_instance) - self.assertEqual(fake_stat, memory_stat) - - def test_inspect_cpu_util(self): - fake_instance_moid = 'fake_instance_moid' - fake_instance_id = 'fake_instance_id' - fake_perf_counter_id = 'fake_perf_counter_id' - fake_cpu_util_value = 60 - fake_stat = virt_inspector.CPUUtilStats(util=60) - - def construct_mock_instance_object(fake_instance_id): - instance_object = mock.MagicMock() - instance_object.id = fake_instance_id - return instance_object - - fake_instance = construct_mock_instance_object(fake_instance_id) - self._inspector._ops.get_vm_moid.return_value = fake_instance_moid - (self._inspector._ops.get_perf_counter_id. - return_value) = fake_perf_counter_id - (self._inspector._ops.query_vm_aggregate_stats. 
- return_value) = fake_cpu_util_value * 100 - cpu_util_stat = self._inspector.inspect_cpu_util(fake_instance) - self.assertEqual(fake_stat, cpu_util_stat) - - def test_inspect_vnic_rates(self): - - # construct test data - test_vm_moid = "vm-21" - vnic1 = "vnic-1" - vnic2 = "vnic-2" - counter_name_to_id_map = { - vsphere_inspector.VC_NETWORK_RX_COUNTER: 1, - vsphere_inspector.VC_NETWORK_TX_COUNTER: 2 - } - counter_id_to_stats_map = { - 1: {vnic1: 1, vnic2: 3}, - 2: {vnic1: 2, vnic2: 4}, - } - - def get_counter_id_side_effect(counter_full_name): - return counter_name_to_id_map[counter_full_name] - - def query_stat_side_effect(vm_moid, counter_id, duration): - # assert inputs - self.assertEqual(test_vm_moid, vm_moid) - self.assertIn(counter_id, counter_id_to_stats_map) - return counter_id_to_stats_map[counter_id] - - # configure vsphere operations mock with the test data - ops_mock = self._inspector._ops - ops_mock.get_vm_moid.return_value = test_vm_moid - ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect - ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect - result = self._inspector.inspect_vnic_rates(mock.MagicMock()) - - # validate result - expected_stats = { - vnic1: virt_inspector.InterfaceRateStats(1024, 2048), - vnic2: virt_inspector.InterfaceRateStats(3072, 4096) - } - - for vnic, rates_info in result: - self.assertEqual(expected_stats[vnic.name], rates_info) - - def test_inspect_disk_rates(self): - - # construct test data - test_vm_moid = "vm-21" - disk1 = "disk-1" - disk2 = "disk-2" - counter_name_to_id_map = { - vsphere_inspector.VC_DISK_READ_RATE_CNTR: 1, - vsphere_inspector.VC_DISK_READ_REQUESTS_RATE_CNTR: 2, - vsphere_inspector.VC_DISK_WRITE_RATE_CNTR: 3, - vsphere_inspector.VC_DISK_WRITE_REQUESTS_RATE_CNTR: 4 - } - counter_id_to_stats_map = { - 1: {disk1: 1, disk2: 2}, - 2: {disk1: 300, disk2: 400}, - 3: {disk1: 5, disk2: 6}, - 4: {disk1: 700}, - } - - def get_counter_id_side_effect(counter_full_name): - return counter_name_to_id_map[counter_full_name] - - def query_stat_side_effect(vm_moid, counter_id, duration): - # assert inputs - self.assertEqual(test_vm_moid, vm_moid) - self.assertIn(counter_id, counter_id_to_stats_map) - return counter_id_to_stats_map[counter_id] - - # configure vsphere operations mock with the test data - ops_mock = self._inspector._ops - ops_mock.get_vm_moid.return_value = test_vm_moid - ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect - ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect - - result = self._inspector.inspect_disk_rates(mock.MagicMock()) - - # validate result - expected_stats = { - disk1: virt_inspector.DiskRateStats(1024, 300, 5120, 700), - disk2: virt_inspector.DiskRateStats(2048, 400, 6144, 0) - } - - actual_stats = dict((disk.device, rates) for (disk, rates) in result) - self.assertEqual(expected_stats, actual_stats) diff --git a/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py b/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py deleted file mode 100644 index 1d7ba148..00000000 --- a/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_vmware import api -from oslotest import base - -from ceilometer.compute.virt.vmware import vsphere_operations - - -class VsphereOperationsTest(base.BaseTestCase): - - def setUp(self): - api_session = api.VMwareAPISession("test_server", "test_user", - "test_password", 0, None, - create_session=False) - api_session._vim = mock.MagicMock() - self._vsphere_ops = vsphere_operations.VsphereOperations(api_session, - 1000) - super(VsphereOperationsTest, self).setUp() - - def test_get_vm_moid(self): - - vm1_moid = "vm-1" - vm2_moid = "vm-2" - vm1_instance = "0a651a71-142c-4813-aaa6-42e5d5c80d85" - vm2_instance = "db1d2533-6bef-4cb2-aef3-920e109f5693" - - def construct_mock_vm_object(vm_moid, vm_instance): - vm_object = mock.MagicMock() - vm_object.obj.value = vm_moid - vm_object.propSet[0].val = vm_instance - return vm_object - - def retrieve_props_side_effect(pc, specSet, options): - # assert inputs - self.assertEqual(self._vsphere_ops._max_objects, - options.maxObjects) - self.assertEqual(vsphere_operations.VM_INSTANCE_ID_PROPERTY, - specSet[0].pathSet[0]) - - # mock return result - vm1 = construct_mock_vm_object(vm1_moid, vm1_instance) - vm2 = construct_mock_vm_object(vm2_moid, vm2_instance) - result = mock.MagicMock() - result.objects.__iter__.return_value = [vm1, vm2] - return result - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect - vim_mock.ContinueRetrievePropertiesEx.return_value = None - - vm_moid = self._vsphere_ops.get_vm_moid(vm1_instance) - self.assertEqual(vm1_moid, vm_moid) - - vm_moid = self._vsphere_ops.get_vm_moid(vm2_instance) - self.assertEqual(vm2_moid, vm_moid) - - def test_query_vm_property(self): - - vm_moid = "vm-21" - vm_property_name = "runtime.powerState" - vm_property_val = "poweredON" - - def retrieve_props_side_effect(pc, specSet, options): - # assert inputs - self.assertEqual(vm_moid, specSet[0].obj.value) - self.assertEqual(vm_property_name, specSet[0].pathSet[0]) - - # mock return result - result = mock.MagicMock() - result.objects[0].propSet[0].val = vm_property_val - return result - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect - - actual_val = self._vsphere_ops.query_vm_property(vm_moid, - vm_property_name) - self.assertEqual(vm_property_val, actual_val) - - def test_get_perf_counter_id(self): - - def construct_mock_counter_info(group_name, counter_name, rollup_type, - counter_id): - counter_info = mock.MagicMock() - counter_info.groupInfo.key = group_name - counter_info.nameInfo.key = counter_name - counter_info.rollupType = rollup_type - counter_info.key = counter_id - return counter_info - - def retrieve_props_side_effect(pc, specSet, options): - # assert inputs - self.assertEqual(vsphere_operations.PERF_COUNTER_PROPERTY, - specSet[0].pathSet[0]) - - # mock return result - counter_info1 = construct_mock_counter_info("a", "b", "c", 1) - counter_info2 = construct_mock_counter_info("x", "y", "z", 2) - result = mock.MagicMock() - 
(result.objects[0].propSet[0].val.PerfCounterInfo.__iter__. - return_value) = [counter_info1, counter_info2] - return result - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect - - counter_id = self._vsphere_ops.get_perf_counter_id("a:b:c") - self.assertEqual(1, counter_id) - - counter_id = self._vsphere_ops.get_perf_counter_id("x:y:z") - self.assertEqual(2, counter_id) - - def test_query_vm_stats(self): - - vm_moid = "vm-21" - device1 = "device-1" - device2 = "device-2" - device3 = "device-3" - counter_id = 5 - - def construct_mock_metric_series(device_name, stat_values): - metric_series = mock.MagicMock() - metric_series.value = stat_values - metric_series.id.instance = device_name - return metric_series - - def vim_query_perf_side_effect(perf_manager, querySpec): - # assert inputs - self.assertEqual(vm_moid, querySpec[0].entity.value) - self.assertEqual(counter_id, querySpec[0].metricId[0].counterId) - self.assertEqual(vsphere_operations.VC_REAL_TIME_SAMPLING_INTERVAL, - querySpec[0].intervalId) - - # mock return result - perf_stats = mock.MagicMock() - perf_stats[0].sampleInfo = ["s1", "s2", "s3"] - perf_stats[0].value.__iter__.return_value = [ - construct_mock_metric_series(None, [111, 222, 333]), - construct_mock_metric_series(device1, [100, 200, 300]), - construct_mock_metric_series(device2, [10, 20, 30]), - construct_mock_metric_series(device3, [1, 2, 3]) - ] - return perf_stats - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.QueryPerf.side_effect = vim_query_perf_side_effect - ops = self._vsphere_ops - - # test aggregate stat - stat_val = ops.query_vm_aggregate_stats(vm_moid, counter_id, 60) - self.assertEqual(222, stat_val) - - # test per-device(non-aggregate) stats - expected_device_stats = { - device1: 200, - device2: 20, - device3: 2 - } - stats = ops.query_vm_device_stats(vm_moid, counter_id, 60) - self.assertEqual(expected_device_stats, stats) diff --git a/ceilometer/tests/unit/compute/virt/xenapi/__init__.py b/ceilometer/tests/unit/compute/virt/xenapi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py b/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py deleted file mode 100644 index c5d5390f..00000000 --- a/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for xenapi inspector. 
-""" - -import mock -from oslotest import base - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.xenapi import inspector as xenapi_inspector - - -class TestSwapXapiHost(base.BaseTestCase): - - def test_swapping(self): - self.assertEqual( - "http://otherserver:8765/somepath", - xenapi_inspector.swap_xapi_host( - "http://someserver:8765/somepath", 'otherserver')) - - def test_no_port(self): - self.assertEqual( - "http://otherserver/somepath", - xenapi_inspector.swap_xapi_host( - "http://someserver/somepath", 'otherserver')) - - def test_no_path(self): - self.assertEqual( - "http://otherserver", - xenapi_inspector.swap_xapi_host( - "http://someserver", 'otherserver')) - - def test_same_hostname_path(self): - self.assertEqual( - "http://other:80/some", - xenapi_inspector.swap_xapi_host( - "http://some:80/some", 'other')) - - -class TestXenapiInspection(base.BaseTestCase): - - def setUp(self): - api_session = mock.Mock() - xenapi_inspector.get_api_session = mock.Mock(return_value=api_session) - self.inspector = xenapi_inspector.XenapiInspector() - - super(TestXenapiInspection, self).setUp() - - def test_inspect_cpu_util(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - fake_stat = virt_inspector.CPUUtilStats(util=40) - - def fake_xenapi_request(method, args): - metrics_rec = { - 'memory_actual': '536870912', - 'VCPUs_number': '1', - 'VCPUs_utilisation': {'0': 0.4, } - } - - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_metrics': - return 'metrics_ref' - elif method == 'VM_metrics.get_record': - return metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - cpu_util_stat = self.inspector.inspect_cpu_util(fake_instance) - self.assertEqual(fake_stat, cpu_util_stat) - - def test_inspect_memory_usage(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - fake_stat = virt_inspector.MemoryUsageStats(usage=128) - - def fake_xenapi_request(method, args): - metrics_rec = { - 'memory_actual': '134217728', - } - - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_metrics': - return 'metrics_ref' - elif method == 'VM_metrics.get_record': - return metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - memory_stat = self.inspector.inspect_memory_usage(fake_instance) - self.assertEqual(fake_stat, memory_stat) - - def test_inspect_vnic_rates(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - - def fake_xenapi_request(method, args): - vif_rec = { - 'metrics': 'vif_metrics_ref', - 'uuid': 'vif_uuid', - 'MAC': 'vif_mac', - } - - vif_metrics_rec = { - 'io_read_kbs': '1', - 'io_write_kbs': '2', - } - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_VIFs': - return ['vif_ref'] - elif method == 'VIF.get_record': - return vif_rec - elif method == 'VIF.get_metrics': - return 'vif_metrics_ref' - elif method == 'VIF_metrics.get_record': - return vif_metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - interfaces = list(self.inspector.inspect_vnic_rates(fake_instance)) - - 
self.assertEqual(1, len(interfaces)) - vnic0, info0 = interfaces[0] - self.assertEqual('vif_uuid', vnic0.name) - self.assertEqual('vif_mac', vnic0.mac) - self.assertEqual(1024, info0.rx_bytes_rate) - self.assertEqual(2048, info0.tx_bytes_rate) - - def test_inspect_disk_rates(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - - def fake_xenapi_request(method, args): - vbd_rec = { - 'device': 'xvdd' - } - - vbd_metrics_rec = { - 'io_read_kbs': '1', - 'io_write_kbs': '2' - } - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_VBDs': - return ['vbd_ref'] - elif method == 'VBD.get_record': - return vbd_rec - elif method == 'VBD.get_metrics': - return 'vbd_metrics_ref' - elif method == 'VBD_metrics.get_record': - return vbd_metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - disks = list(self.inspector.inspect_disk_rates(fake_instance)) - - self.assertEqual(1, len(disks)) - disk0, info0 = disks[0] - self.assertEqual('xvdd', disk0.device) - self.assertEqual(1024, info0.read_bytes_rate) - self.assertEqual(2048, info0.write_bytes_rate) diff --git a/ceilometer/tests/unit/dispatcher/test_db.py b/ceilometer/tests/unit/dispatcher/test_db.py index a10c4c3e..4fa1f689 100644 --- a/ceilometer/tests/unit/dispatcher/test_db.py +++ b/ceilometer/tests/unit/dispatcher/test_db.py @@ -21,7 +21,6 @@ from oslotest import base from ceilometer.dispatcher import database from ceilometer.event.storage import models as event_models -from ceilometer.publisher import utils class TestDispatcherDB(base.BaseTestCase): @@ -36,84 +35,8 @@ class TestDispatcherDB(base.BaseTestCase): def test_event_conn(self): event = event_models.Event(uuid.uuid4(), 'test', datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}) - event = utils.message_from_event(event, - self.CONF.publisher.telemetry_secret) + [], {}).serialize() with mock.patch.object(self.dispatcher.event_conn, 'record_events') as record_events: self.dispatcher.record_events(event) self.assertEqual(1, len(record_events.call_args_list[0][0][0])) - - def test_valid_message(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - with mock.patch.object(self.dispatcher.meter_conn, - 'record_metering_data') as record_metering_data: - self.dispatcher.verify_and_record_metering_data(msg) - - record_metering_data.assert_called_once_with(msg) - - def test_invalid_message(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - 'message_signature': 'invalid-signature'} - - class ErrorConnection(object): - - called = False - - def record_metering_data(self, data): - self.called = True - - self.dispatcher._meter_conn = ErrorConnection() - - self.dispatcher.verify_and_record_metering_data(msg) - - if self.dispatcher.meter_conn.called: - self.fail('Should not have called the storage connection') - - def test_timestamp_conversion(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - 'timestamp': '2012-07-02T13:53:40Z', - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - expected = msg.copy() - expected['timestamp'] = datetime.datetime(2012, 7, 2, 13, 53, 40) - - with mock.patch.object(self.dispatcher.meter_conn, - 
'record_metering_data') as record_metering_data: - self.dispatcher.verify_and_record_metering_data(msg) - - record_metering_data.assert_called_once_with(expected) - - def test_timestamp_tzinfo_conversion(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - 'timestamp': '2012-09-30T15:31:50.262-08:00', - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - expected = msg.copy() - expected['timestamp'] = datetime.datetime(2012, 9, 30, 23, - 31, 50, 262000) - - with mock.patch.object(self.dispatcher.meter_conn, - 'record_metering_data') as record_metering_data: - self.dispatcher.verify_and_record_metering_data(msg) - - record_metering_data.assert_called_once_with(expected) diff --git a/ceilometer/tests/unit/dispatcher/test_dispatcher.py b/ceilometer/tests/unit/dispatcher/test_dispatcher.py deleted file mode 100644 index 780c3128..00000000 --- a/ceilometer/tests/unit/dispatcher/test_dispatcher.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import fixture -from oslotest import mockpatch - -from ceilometer import dispatcher -from ceilometer.tests import base - - -class FakeDispatcherSample(dispatcher.MeterDispatcherBase): - def record_metering_data(self, data): - pass - - -class FakeDispatcher(dispatcher.MeterDispatcherBase, - dispatcher.EventDispatcherBase): - def record_metering_data(self, data): - pass - - def record_events(self, events): - pass - - -class TestDispatchManager(base.BaseTestCase): - def setUp(self): - super(TestDispatchManager, self).setUp() - self.conf = self.useFixture(fixture.Config()) - self.conf.config(meter_dispatchers=['database', 'gnocchi'], - event_dispatchers=['database']) - self.useFixture(mockpatch.Patch( - 'ceilometer.dispatcher.gnocchi.GnocchiDispatcher', - new=FakeDispatcherSample)) - self.useFixture(mockpatch.Patch( - 'ceilometer.dispatcher.database.DatabaseDispatcher', - new=FakeDispatcher)) - - def test_load(self): - sample_mg, event_mg = dispatcher.load_dispatcher_manager() - self.assertEqual(2, len(list(sample_mg))) - self.assertEqual(1, len(list(event_mg))) diff --git a/ceilometer/tests/unit/dispatcher/test_file.py b/ceilometer/tests/unit/dispatcher/test_file.py deleted file mode 100644 index ab54e42b..00000000 --- a/ceilometer/tests/unit/dispatcher/test_file.py +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import logging.handlers -import os -import tempfile - -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.dispatcher import file -from ceilometer.publisher import utils - - -class TestDispatcherFile(base.BaseTestCase): - - def setUp(self): - super(TestDispatcherFile, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_file_dispatcher_with_all_config(self): - # Create a temporaryFile to get a file name - tf = tempfile.NamedTemporaryFile('r') - filename = tf.name - tf.close() - - self.CONF.dispatcher_file.file_path = filename - self.CONF.dispatcher_file.max_bytes = 50 - self.CONF.dispatcher_file.backup_count = 5 - dispatcher = file.FileDispatcher(self.CONF) - - # The number of the handlers should be 1 - self.assertEqual(1, len(dispatcher.log.handlers)) - # The handler should be RotatingFileHandler - handler = dispatcher.log.handlers[0] - self.assertIsInstance(handler, - logging.handlers.RotatingFileHandler) - - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - # The record_metering_data method should exist - # and not produce errors. - dispatcher.verify_and_record_metering_data(msg) - # After the method call above, the file should have been created. - self.assertTrue(os.path.exists(handler.baseFilename)) - - def test_file_dispatcher_with_path_only(self): - # Create a temporaryFile to get a file name - tf = tempfile.NamedTemporaryFile('r') - filename = tf.name - tf.close() - - self.CONF.dispatcher_file.file_path = filename - self.CONF.dispatcher_file.max_bytes = 0 - self.CONF.dispatcher_file.backup_count = 0 - dispatcher = file.FileDispatcher(self.CONF) - - # The number of the handlers should be 1 - self.assertEqual(1, len(dispatcher.log.handlers)) - # The handler should be RotatingFileHandler - handler = dispatcher.log.handlers[0] - self.assertIsInstance(handler, - logging.FileHandler) - - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - # The record_metering_data method should exist and not produce errors. - dispatcher.verify_and_record_metering_data(msg) - # After the method call above, the file should have been created. - self.assertTrue(os.path.exists(handler.baseFilename)) - - def test_file_dispatcher_with_no_path(self): - self.CONF.dispatcher_file.file_path = None - dispatcher = file.FileDispatcher(self.CONF) - - # The log should be None - self.assertIsNone(dispatcher.log) diff --git a/ceilometer/tests/unit/dispatcher/test_gnocchi.py b/ceilometer/tests/unit/dispatcher/test_gnocchi.py deleted file mode 100644 index 9039b8d2..00000000 --- a/ceilometer/tests/unit/dispatcher/test_gnocchi.py +++ /dev/null @@ -1,445 +0,0 @@ -# -# Copyright 2014 eNovance -# -# Authors: Mehdi Abaakouk -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import uuid - -from gnocchiclient import exceptions as gnocchi_exc -from gnocchiclient import utils as gnocchi_utils -from keystoneauth1 import exceptions as ka_exceptions -import mock -from oslo_config import fixture as config_fixture -from oslo_utils import fileutils -from oslotest import mockpatch -import requests -import six -import testscenarios - -from ceilometer.dispatcher import gnocchi -from ceilometer.publisher import utils -from ceilometer import service as ceilometer_service -from ceilometer.tests import base - -load_tests = testscenarios.load_tests_apply_scenarios - - -@mock.patch('gnocchiclient.v1.client.Client', mock.Mock()) -class DispatcherTest(base.BaseTestCase): - - def setUp(self): - super(DispatcherTest, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.conf.config( - resources_definition_file=self.path_get( - 'etc/ceilometer/gnocchi_resources.yaml'), - group="dispatcher_gnocchi" - ) - self.resource_id = str(uuid.uuid4()) - self.samples = [{ - 'counter_name': 'disk.root.size', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_id': self.resource_id, - 'resource_metadata': { - 'host': 'foo', - 'image_ref': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }, - { - 'counter_name': 'disk.root.size', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2014-05-08 20:23:48.028195', - 'resource_id': self.resource_id, - 'resource_metadata': { - 'host': 'foo', - 'image_ref': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }] - for sample in self.samples: - sample['message_signature'] = utils.compute_signature( - sample, self.conf.conf.publisher.telemetry_secret) - - ks_client = mock.Mock(auth_token='fake_token') - ks_client.projects.find.return_value = mock.Mock( - name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') - self.useFixture(mockpatch.Patch( - 'ceilometer.keystone_client.get_client', - return_value=ks_client)) - self.ks_client = ks_client - self.conf.conf.dispatcher_gnocchi.filter_service_activity = True - - def test_config_load(self): - self.conf.config(filter_service_activity=False, - group='dispatcher_gnocchi') - d = gnocchi.GnocchiDispatcher(self.conf.conf) - names = [rd.cfg['resource_type'] for rd in d.resources_definition] - self.assertIn('instance', names) - self.assertIn('volume', names) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - def test_broken_config_load(self, mylog): - contents = [("---\n" - "resources:\n" - " - resource_type: foobar\n"), - ("---\n" - "resources:\n" - " - resource_type: 0\n"), - ("---\n" - "resources:\n" - " - sample_types: ['foo', 'bar']\n"), - ("---\n" - "resources:\n" - " - sample_types: foobar\n" - " - resource_type: foobar\n"), - ] - - for content in contents: - if six.PY3: - content = content.encode('utf-8') - - temp = fileutils.write_to_tempfile(content=content, - prefix='gnocchi_resources', - suffix='.yaml') - self.addCleanup(os.remove, temp) - self.conf.config(filter_service_activity=False, - resources_definition_file=temp, - group='dispatcher_gnocchi') - d = gnocchi.GnocchiDispatcher(self.conf.conf) - self.assertTrue(mylog.error.called) - self.assertEqual(0, 
len(d.resources_definition)) - - @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' - '._if_not_cached') - @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' - '.batch_measures') - def _do_test_activity_filter(self, expected_measures, fake_batch, __): - - d = gnocchi.GnocchiDispatcher(self.conf.conf) - d.verify_and_record_metering_data(self.samples) - fake_batch.assert_called_with( - mock.ANY, mock.ANY, - {'metrics': 1, 'resources': 1, 'measures': expected_measures}) - - def test_activity_filter_match_project_id(self): - self.samples[0]['project_id'] = ( - 'a2d42c23-d518-46b6-96ab-3fba2e146859') - self._do_test_activity_filter(1) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - def test_activity_gnocchi_project_not_found(self, logger): - self.ks_client.projects.find.side_effect = ka_exceptions.NotFound - self._do_test_activity_filter(2) - logger.warning.assert_called_with('gnocchi project not found in ' - 'keystone, ignoring the ' - 'filter_service_activity option') - - def test_activity_filter_match_swift_event(self): - self.samples[0]['counter_name'] = 'storage.api.request' - self.samples[0]['resource_id'] = 'a2d42c23-d518-46b6-96ab-3fba2e146859' - self._do_test_activity_filter(1) - - def test_activity_filter_nomatch(self): - self._do_test_activity_filter(2) - - -class MockResponse(mock.NonCallableMock): - def __init__(self, code): - text = {500: 'Internal Server Error', - 404: 'Not Found', - 204: 'Created', - 409: 'Conflict', - }.get(code) - super(MockResponse, self).__init__(spec=requests.Response, - status_code=code, - text=text) - - -class DispatcherWorkflowTest(base.BaseTestCase, - testscenarios.TestWithScenarios): - - sample_scenarios = [ - ('disk.root.size', dict( - sample={ - 'counter_name': 'disk.root.size', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_metadata': { - 'host': 'foo', - 'image_ref': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }, - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': '2' - }], - postable_attributes={ - 'user_id': 'test_user', - 'project_id': 'test_project', - }, - patchable_attributes={ - 'host': 'foo', - 'image_ref': 'imageref!', - 'flavor_id': 1234, - 'display_name': 'myinstance', - }, - metric_names=[ - 'instance', 'disk.root.size', 'disk.ephemeral.size', - 'memory', 'vcpus', 'memory.usage', 'memory.resident', - 'cpu', 'cpu.delta', 'cpu_util', 'vcpus', 'disk.read.requests', - 'disk.read.requests.rate', 'disk.write.requests', - 'disk.write.requests.rate', 'disk.read.bytes', - 'disk.read.bytes.rate', 'disk.write.bytes', - 'disk.write.bytes.rate', 'disk.latency', 'disk.iops', - 'disk.capacity', 'disk.allocation', 'disk.usage'], - resource_type='instance')), - ('hardware.ipmi.node.power', dict( - sample={ - 'counter_name': 'hardware.ipmi.node.power', - 'counter_unit': 'W', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_metadata': { - 'useless': 'not_used', - } - }, - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': '2' - }], - postable_attributes={ - 'user_id': 'test_user', - 'project_id': 'test_project', - }, - patchable_attributes={ - }, - metric_names=[ - 'hardware.ipmi.node.power', 'hardware.ipmi.node.temperature', - 
'hardware.ipmi.node.inlet_temperature', - 'hardware.ipmi.node.outlet_temperature', - 'hardware.ipmi.node.fan', 'hardware.ipmi.node.current', - 'hardware.ipmi.node.voltage', 'hardware.ipmi.node.airflow', - 'hardware.ipmi.node.cups', 'hardware.ipmi.node.cpu_util', - 'hardware.ipmi.node.mem_util', 'hardware.ipmi.node.io_util' - ], - resource_type='ipmi')), - ] - - default_workflow = dict(resource_exists=True, - metric_exists=True, - post_measure_fail=False, - create_resource_fail=False, - create_metric_fail=False, - update_resource_fail=False, - retry_post_measures_fail=False) - workflow_scenarios = [ - ('normal_workflow', {}), - ('new_resource', dict(resource_exists=False)), - ('new_resource_fail', dict(resource_exists=False, - create_resource_fail=True)), - ('resource_update_fail', dict(update_resource_fail=True)), - ('new_metric', dict(metric_exists=False)), - ('new_metric_fail', dict(metric_exists=False, - create_metric_fail=True)), - ('retry_fail', dict(resource_exists=False, - retry_post_measures_fail=True)), - ('measure_fail', dict(post_measure_fail=True)), - ] - - @classmethod - def generate_scenarios(cls): - workflow_scenarios = [] - for name, wf_change in cls.workflow_scenarios: - wf = cls.default_workflow.copy() - wf.update(wf_change) - workflow_scenarios.append((name, wf)) - cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, - workflow_scenarios) - - def setUp(self): - super(DispatcherWorkflowTest, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - # Set this explicitly to avoid conflicts with any existing - # configuration. - self.conf.config(url='http://localhost:8041', - group='dispatcher_gnocchi') - ks_client = mock.Mock() - ks_client.projects.find.return_value = mock.Mock( - name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') - self.useFixture(mockpatch.Patch( - 'ceilometer.keystone_client.get_client', - return_value=ks_client)) - self.ks_client = ks_client - - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.conf.config( - resources_definition_file=self.path_get( - 'etc/ceilometer/gnocchi_resources.yaml'), - group="dispatcher_gnocchi" - ) - - self.sample['resource_id'] = str(uuid.uuid4()) + "/foobar" - self.sample['message_signature'] = utils.compute_signature( - self.sample, self.conf.conf.publisher.telemetry_secret) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - @mock.patch('gnocchiclient.v1.client.Client') - def test_workflow(self, fakeclient_cls, logger): - self.dispatcher = gnocchi.GnocchiDispatcher(self.conf.conf) - - fakeclient = fakeclient_cls.return_value - - # FIXME(sileht): we don't use urlparse.quote here - # to ensure / is converted in %2F - # temporary disabled until we find a solution - # on gnocchi side. 
Current gnocchiclient doesn't - # encode the resource_id - resource_id = self.sample['resource_id'] # .replace("/", "%2F"), - metric_name = self.sample['counter_name'] - gnocchi_id = gnocchi_utils.encode_resource_id(resource_id) - - expected_calls = [ - mock.call.capabilities.list(), - mock.call.metric.batch_resources_metrics_measures( - {gnocchi_id: {metric_name: self.measures_attributes}}) - ] - expected_debug = [ - mock.call('gnocchi project found: %s', - 'a2d42c23-d518-46b6-96ab-3fba2e146859'), - ] - - measures_posted = False - batch_side_effect = [] - if self.post_measure_fail: - batch_side_effect += [Exception('boom!')] - elif not self.resource_exists or not self.metric_exists: - batch_side_effect += [ - gnocchi_exc.BadRequest( - 400, "Unknown metrics: %s/%s" % (gnocchi_id, - metric_name))] - attributes = self.postable_attributes.copy() - attributes.update(self.patchable_attributes) - attributes['id'] = self.sample['resource_id'] - attributes['metrics'] = dict((metric_name, {}) - for metric_name in self.metric_names) - for k, v in six.iteritems(attributes['metrics']): - if k == 'disk.root.size': - v['unit'] = 'GB' - continue - if k == 'hardware.ipmi.node.power': - v['unit'] = 'W' - continue - expected_calls.append(mock.call.resource.create( - self.resource_type, attributes)) - - if self.create_resource_fail: - fakeclient.resource.create.side_effect = [Exception('boom!')] - elif self.resource_exists: - fakeclient.resource.create.side_effect = [ - gnocchi_exc.ResourceAlreadyExists(409)] - - expected_calls.append(mock.call.metric.create({ - 'name': self.sample['counter_name'], - 'unit': self.sample['counter_unit'], - 'resource_id': resource_id})) - if self.create_metric_fail: - fakeclient.metric.create.side_effect = [Exception('boom!')] - elif self.metric_exists: - fakeclient.metric.create.side_effect = [ - gnocchi_exc.NamedMetricAreadyExists(409)] - else: - fakeclient.metric.create.side_effect = [None] - - else: # not resource_exists - expected_debug.append(mock.call( - 'Resource %s created', self.sample['resource_id'])) - - if not self.create_resource_fail and not self.create_metric_fail: - expected_calls.append( - mock.call.metric.batch_resources_metrics_measures( - {gnocchi_id: {metric_name: self.measures_attributes}}) - ) - - if self.retry_post_measures_fail: - batch_side_effect += [Exception('boom!')] - else: - measures_posted = True - - else: - measures_posted = True - - if measures_posted: - batch_side_effect += [None] - expected_debug.append( - mock.call("%(measures)d measures posted against %(metrics)d " - "metrics through %(resources)d resources", dict( - measures=len(self.measures_attributes), - metrics=1, resources=1)) - ) - - if self.patchable_attributes: - expected_calls.append(mock.call.resource.update( - self.resource_type, resource_id, - self.patchable_attributes)) - if self.update_resource_fail: - fakeclient.resource.update.side_effect = [Exception('boom!')] - else: - expected_debug.append(mock.call( - 'Resource %s updated', self.sample['resource_id'])) - - batch = fakeclient.metric.batch_resources_metrics_measures - batch.side_effect = batch_side_effect - - self.dispatcher.verify_and_record_metering_data([self.sample]) - - # Check that the last log message is the expected one - if (self.post_measure_fail or self.create_metric_fail - or self.create_resource_fail - or self.retry_post_measures_fail - or (self.update_resource_fail and self.patchable_attributes)): - logger.error.assert_called_with('boom!', exc_info=True) - else: - self.assertEqual(0, 
logger.error.call_count) - self.assertEqual(expected_calls, fakeclient.mock_calls) - self.assertEqual(expected_debug, logger.debug.mock_calls) - -DispatcherWorkflowTest.generate_scenarios() diff --git a/ceilometer/tests/unit/dispatcher/test_http.py b/ceilometer/tests/unit/dispatcher/test_http.py deleted file mode 100644 index 8e74f056..00000000 --- a/ceilometer/tests/unit/dispatcher/test_http.py +++ /dev/null @@ -1,121 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -import mock -from oslo_config import fixture as fixture_config -from oslotest import base -import requests - -from ceilometer.dispatcher import http -from ceilometer.event.storage import models as event_models -from ceilometer.publisher import utils - - -class TestDispatcherHttp(base.BaseTestCase): - - def setUp(self): - super(TestDispatcherHttp, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - self.msg['message_signature'] = utils.compute_signature( - self.msg, self.CONF.publisher.telemetry_secret, - ) - - def test_http_dispatcher_config_options(self): - self.CONF.dispatcher_http.target = 'fake' - self.CONF.dispatcher_http.timeout = 2 - dispatcher = http.HttpDispatcher(self.CONF) - - self.assertEqual('fake', dispatcher.target) - self.assertEqual(2, dispatcher.timeout) - - def test_http_dispatcher_with_no_target(self): - self.CONF.dispatcher_http.target = '' - dispatcher = http.HttpDispatcher(self.CONF) - - # The target should be None - self.assertEqual('', dispatcher.target) - - with mock.patch.object(requests, 'post') as post: - dispatcher.verify_and_record_metering_data(self.msg) - - # Since the target is not set, no http post should occur, thus the - # call_count should be zero. 
- self.assertEqual(0, post.call_count) - - def test_http_dispatcher_with_no_metadata(self): - self.CONF.dispatcher_http.target = 'fake' - dispatcher = http.HttpDispatcher(self.CONF) - - with mock.patch.object(requests, 'post') as post: - dispatcher.verify_and_record_metering_data(self.msg) - - self.assertEqual(1, post.call_count) - - -class TestEventDispatcherHttp(base.BaseTestCase): - - def setUp(self): - super(TestEventDispatcherHttp, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_http_dispatcher(self): - self.CONF.dispatcher_http.event_target = 'fake' - dispatcher = http.HttpDispatcher(self.CONF) - - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}) - event = utils.message_from_event(event, - self.CONF.publisher.telemetry_secret) - - with mock.patch.object(requests, 'post') as post: - dispatcher.record_events(event) - - self.assertEqual(1, post.call_count) - - def test_http_dispatcher_bad(self): - self.CONF.dispatcher_http.event_target = '' - dispatcher = http.HttpDispatcher(self.CONF) - - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}) - event = utils.message_from_event(event, - self.CONF.publisher.telemetry_secret) - with mock.patch('ceilometer.dispatcher.http.LOG', - mock.MagicMock()) as LOG: - dispatcher.record_events(event) - self.assertTrue(LOG.exception.called) - - def test_http_dispatcher_share_target(self): - self.CONF.dispatcher_http.target = 'fake' - dispatcher = http.HttpDispatcher(self.CONF) - - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}) - event = utils.message_from_event(event, - self.CONF.publisher.telemetry_secret) - with mock.patch.object(requests, 'post') as post: - dispatcher.record_events(event) - - self.assertEqual('fake', post.call_args[0][0]) diff --git a/ceilometer/tests/unit/energy/__init__.py b/ceilometer/tests/unit/energy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/energy/test_kwapi.py b/ceilometer/tests/unit/energy/test_kwapi.py deleted file mode 100644 index eaf9dc0c..00000000 --- a/ceilometer/tests/unit/energy/test_kwapi.py +++ /dev/null @@ -1,135 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from keystoneauth1 import exceptions -import mock -from oslotest import base -from oslotest import mockpatch -import six - -from ceilometer.agent import manager -from ceilometer.energy import kwapi - - -PROBE_DICT = { - "probes": { - "A": { - "timestamp": 1357730232.68754, - "w": 107.3, - "kwh": 0.001058255421506034 - }, - "B": { - "timestamp": 1357730232.048158, - "w": 15.0, - "kwh": 0.029019045026169896 - }, - "C": { - "timestamp": 1357730232.223375, - "w": 95.0, - "kwh": 0.17361822634312918 - } - } -} - -ENDPOINT = 'end://point' - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self._keystone = mock.Mock() - - -class _BaseTestCase(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestCase, self).setUp() - self.manager = TestManager() - - -class TestKwapi(_BaseTestCase): - - @staticmethod - def fake_get_kwapi_client(ksclient, endpoint): - raise exceptions.EndpointNotFound("fake keystone exception") - - def test_endpoint_not_exist(self): - with mockpatch.PatchObject(kwapi._Base, 'get_kwapi_client', - side_effect=self.fake_get_kwapi_client): - pollster = kwapi.EnergyPollster() - samples = list(pollster.get_samples(self.manager, {}, - [ENDPOINT])) - - self.assertEqual(0, len(samples)) - - -class TestEnergyPollster(_BaseTestCase): - pollster_cls = kwapi.EnergyPollster - unit = 'kwh' - - def setUp(self): - super(TestEnergyPollster, self).setUp() - self.useFixture(mockpatch.PatchObject( - kwapi._Base, '_iter_probes', side_effect=self.fake_iter_probes)) - - @staticmethod - def fake_iter_probes(ksclient, cache, endpoint): - probes = PROBE_DICT['probes'] - for key, value in six.iteritems(probes): - probe_dict = value - probe_dict['id'] = key - yield probe_dict - - def test_default_discovery(self): - pollster = kwapi.EnergyPollster() - self.assertEqual('endpoint:energy', pollster.default_discovery) - - def test_sample(self): - cache = {} - samples = list(self.pollster_cls().get_samples(self.manager, cache, - [ENDPOINT])) - self.assertEqual(len(PROBE_DICT['probes']), len(samples)) - samples_by_name = dict((s.resource_id, s) for s in samples) - for name, probe in PROBE_DICT['probes'].items(): - sample = samples_by_name[name] - self.assertEqual(probe[self.unit], sample.volume) - - -class TestPowerPollster(TestEnergyPollster): - pollster_cls = kwapi.PowerPollster - unit = 'w' - - -class TestEnergyPollsterCache(_BaseTestCase): - pollster_cls = kwapi.EnergyPollster - - def test_get_samples_cached(self): - probe = {'id': 'A'} - probe.update(PROBE_DICT['probes']['A']) - cache = { - '%s-%s' % (ENDPOINT, self.pollster_cls.CACHE_KEY_PROBE): [probe], - } - self.manager._keystone = mock.Mock() - pollster = self.pollster_cls() - with mock.patch.object(pollster, '_get_probes') as do_not_call: - do_not_call.side_effect = AssertionError('should not be called') - samples = list(pollster.get_samples(self.manager, cache, - [ENDPOINT])) - self.assertEqual(1, len(samples)) - - -class TestPowerPollsterCache(TestEnergyPollsterCache): - pollster_cls = kwapi.PowerPollster diff --git a/ceilometer/tests/unit/event/test_converter.py b/ceilometer/tests/unit/event/test_converter.py deleted file mode 100644 index 37ae6702..00000000 --- a/ceilometer/tests/unit/event/test_converter.py +++ /dev/null @@ -1,781 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import jsonpath_rw_ext -import mock -from oslo_config import fixture as fixture_config -import six - -from ceilometer import declarative -from ceilometer.event import converter -from ceilometer.event.storage import models -from ceilometer import service as ceilometer_service -from ceilometer.tests import base - - -class ConverterBase(base.BaseTestCase): - @staticmethod - def _create_test_notification(event_type, message_id, **kw): - return dict(event_type=event_type, - message_id=message_id, - priority="INFO", - publisher_id="compute.host-1-2-3", - timestamp="2013-08-08 21:06:37.803826", - payload=kw, - ) - - def assertIsValidEvent(self, event, notification): - self.assertIsNot( - None, event, - "Notification dropped unexpectedly:" - " %s" % str(notification)) - self.assertIsInstance(event, models.Event) - - def assertIsNotValidEvent(self, event, notification): - self.assertIs( - None, event, - "Notification NOT dropped when expected to be dropped:" - " %s" % str(notification)) - - def assertHasTrait(self, event, name, value=None, dtype=None): - traits = [trait for trait in event.traits if trait.name == name] - self.assertTrue( - len(traits) > 0, - "Trait %s not found in event %s" % (name, event)) - trait = traits[0] - if value is not None: - self.assertEqual(value, trait.value) - if dtype is not None: - self.assertEqual(dtype, trait.dtype) - if dtype == models.Trait.INT_TYPE: - self.assertIsInstance(trait.value, int) - elif dtype == models.Trait.FLOAT_TYPE: - self.assertIsInstance(trait.value, float) - elif dtype == models.Trait.DATETIME_TYPE: - self.assertIsInstance(trait.value, datetime.datetime) - elif dtype == models.Trait.TEXT_TYPE: - self.assertIsInstance(trait.value, six.string_types) - - def assertDoesNotHaveTrait(self, event, name): - traits = [trait for trait in event.traits if trait.name == name] - self.assertEqual( - len(traits), 0, - "Extra Trait %s found in event %s" % (name, event)) - - def assertHasDefaultTraits(self, event): - text = models.Trait.TEXT_TYPE - self.assertHasTrait(event, 'service', dtype=text) - - def _cmp_tree(self, this, other): - if hasattr(this, 'right') and hasattr(other, 'right'): - return (self._cmp_tree(this.right, other.right) and - self._cmp_tree(this.left, other.left)) - if not hasattr(this, 'right') and not hasattr(other, 'right'): - return this == other - return False - - def assertPathsEqual(self, path1, path2): - self.assertTrue(self._cmp_tree(path1, path2), - 'JSONPaths not equivalent %s %s' % (path1, path2)) - - -class TestTraitDefinition(ConverterBase): - - def setUp(self): - super(TestTraitDefinition, self).setUp() - self.n1 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0001", - instance_uuid="uuid-for-instance-0001", - instance_id="id-for-instance-0001", - instance_uuid2=None, - instance_id2=None, - host='host-1-2-3', - bogus_date='', - image_meta=dict( - disk_gb='20', - thing='whatzit'), - foobar=50) - - self.ext1 = mock.MagicMock(name='mock_test_plugin') - self.test_plugin_class = self.ext1.plugin - self.test_plugin = self.test_plugin_class() - 
self.test_plugin.trait_values.return_value = ['foobar'] - self.ext1.reset_mock() - - self.ext2 = mock.MagicMock(name='mock_nothing_plugin') - self.nothing_plugin_class = self.ext2.plugin - self.nothing_plugin = self.nothing_plugin_class() - self.nothing_plugin.trait_values.return_value = [None] - self.ext2.reset_mock() - - self.fake_plugin_mgr = dict(test=self.ext1, nothing=self.ext2) - - def test_to_trait_with_plugin(self): - cfg = dict(type='text', - fields=['payload.instance_id', 'payload.instance_uuid'], - plugin=dict(name='test')) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('foobar', t.value) - self.test_plugin_class.assert_called_once_with() - self.test_plugin.trait_values.assert_called_once_with([ - ('payload.instance_id', 'id-for-instance-0001'), - ('payload.instance_uuid', 'uuid-for-instance-0001')]) - - def test_to_trait_null_match_with_plugin(self): - cfg = dict(type='text', - fields=['payload.nothere', 'payload.bogus'], - plugin=dict(name='test')) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('foobar', t.value) - self.test_plugin_class.assert_called_once_with() - self.test_plugin.trait_values.assert_called_once_with([]) - - def test_to_trait_with_plugin_null(self): - cfg = dict(type='text', - fields=['payload.instance_id', 'payload.instance_uuid'], - plugin=dict(name='nothing')) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - self.nothing_plugin_class.assert_called_once_with() - self.nothing_plugin.trait_values.assert_called_once_with([ - ('payload.instance_id', 'id-for-instance-0001'), - ('payload.instance_uuid', 'uuid-for-instance-0001')]) - - def test_to_trait_with_plugin_with_parameters(self): - cfg = dict(type='text', - fields=['payload.instance_id', 'payload.instance_uuid'], - plugin=dict(name='test', parameters=dict(a=1, b='foo'))) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('foobar', t.value) - self.test_plugin_class.assert_called_once_with(a=1, b='foo') - self.test_plugin.trait_values.assert_called_once_with([ - ('payload.instance_id', 'id-for-instance-0001'), - ('payload.instance_uuid', 'uuid-for-instance-0001')]) - - def test_to_trait(self): - cfg = dict(type='text', fields='payload.instance_id') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('id-for-instance-0001', t.value) - - cfg = dict(type='int', fields='payload.image_meta.disk_gb') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.INT_TYPE, t.dtype) - self.assertEqual(20, t.value) - - def 
test_to_trait_multiple(self): - cfg = dict(type='text', fields=['payload.instance_id', - 'payload.instance_uuid']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('id-for-instance-0001', t.value) - - cfg = dict(type='text', fields=['payload.instance_uuid', - 'payload.instance_id']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('uuid-for-instance-0001', t.value) - - def test_to_trait_multiple_different_nesting(self): - cfg = dict(type='int', fields=['payload.foobar', - 'payload.image_meta.disk_gb']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual(50, t.value) - - cfg = dict(type='int', fields=['payload.image_meta.disk_gb', - 'payload.foobar']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual(20, t.value) - - def test_to_trait_some_null_multiple(self): - cfg = dict(type='text', fields=['payload.instance_id2', - 'payload.instance_uuid']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('uuid-for-instance-0001', t.value) - - def test_to_trait_some_missing_multiple(self): - cfg = dict(type='text', fields=['payload.not_here_boss', - 'payload.instance_uuid']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('uuid-for-instance-0001', t.value) - - def test_to_trait_missing(self): - cfg = dict(type='text', fields='payload.not_here_boss') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_to_trait_null(self): - cfg = dict(type='text', fields='payload.instance_id2') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_to_trait_empty_nontext(self): - cfg = dict(type='datetime', fields='payload.bogus_date') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_to_trait_multiple_null_missing(self): - cfg = dict(type='text', fields=['payload.not_here_boss', - 'payload.instance_id2']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_missing_fields_config(self): - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'bogus_trait', - dict(), - self.fake_plugin_mgr) - - def test_string_fields_config(self): - cfg = dict(fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertPathsEqual(t.getter.__self__, - jsonpath_rw_ext.parse('payload.test')) - - def test_list_fields_config(self): - cfg = dict(fields=['payload.test', 'payload.other']) - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertPathsEqual( - t.getter.__self__, - jsonpath_rw_ext.parse('(payload.test)|(payload.other)')) - - def test_invalid_path_config(self): - # test 
invalid jsonpath... - cfg = dict(fields='payload.bogus(') - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'bogus_trait', - cfg, - self.fake_plugin_mgr) - - def test_invalid_plugin_config(self): - # test invalid jsonpath... - cfg = dict(fields='payload.test', plugin=dict(bogus="true")) - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'test_trait', - cfg, - self.fake_plugin_mgr) - - def test_unknown_plugin(self): - # test invalid jsonpath... - cfg = dict(fields='payload.test', plugin=dict(name='bogus')) - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'test_trait', - cfg, - self.fake_plugin_mgr) - - def test_type_config(self): - cfg = dict(type='text', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.TEXT_TYPE, t.trait_type) - - cfg = dict(type='int', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.INT_TYPE, t.trait_type) - - cfg = dict(type='float', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.FLOAT_TYPE, t.trait_type) - - cfg = dict(type='datetime', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type) - - def test_invalid_type_config(self): - # test invalid jsonpath... - cfg = dict(type='bogus', fields='payload.test') - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'bogus_trait', - cfg, - self.fake_plugin_mgr) - - -class TestEventDefinition(ConverterBase): - - def setUp(self): - super(TestEventDefinition, self).setUp() - - self.traits_cfg = { - 'instance_id': { - 'type': 'text', - 'fields': ['payload.instance_uuid', - 'payload.instance_id'], - }, - 'host': { - 'type': 'text', - 'fields': 'payload.host', - }, - } - - self.test_notification1 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0001", - instance_id="uuid-for-instance-0001", - host='host-1-2-3') - - self.test_notification2 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0002", - instance_id="uuid-for-instance-0002") - - self.test_notification3 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0003", - instance_id="uuid-for-instance-0003", - host=None) - self.fake_plugin_mgr = {} - - def test_to_event(self): - dtype = models.Trait.TEXT_TYPE - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - - e = edef.to_event(self.test_notification1) - self.assertEqual('test.thing', e.event_type) - self.assertEqual(datetime.datetime(2013, 8, 8, 21, 6, 37, 803826), - e.generated) - - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'host', value='host-1-2-3', dtype=dtype) - self.assertHasTrait(e, 'instance_id', - value='uuid-for-instance-0001', - dtype=dtype) - - def test_to_event_missing_trait(self): - dtype = models.Trait.TEXT_TYPE - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - - e = edef.to_event(self.test_notification2) - - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id', - value='uuid-for-instance-0002', - dtype=dtype) - self.assertDoesNotHaveTrait(e, 'host') - - def test_to_event_null_trait(self): - dtype = 
models.Trait.TEXT_TYPE - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - - e = edef.to_event(self.test_notification3) - - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id', - value='uuid-for-instance-0003', - dtype=dtype) - self.assertDoesNotHaveTrait(e, 'host') - - def test_bogus_cfg_no_traits(self): - bogus = dict(event_type='test.foo') - self.assertRaises(declarative.DefinitionException, - converter.EventDefinition, - bogus, - self.fake_plugin_mgr) - - def test_bogus_cfg_no_type(self): - bogus = dict(traits=self.traits_cfg) - self.assertRaises(declarative.DefinitionException, - converter.EventDefinition, - bogus, - self.fake_plugin_mgr) - - def test_included_type_string(self): - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual('test.thing', edef._included_types[0]) - self.assertEqual(0, len(edef._excluded_types)) - self.assertTrue(edef.included_type('test.thing')) - self.assertFalse(edef.excluded_type('test.thing')) - self.assertTrue(edef.match_type('test.thing')) - self.assertFalse(edef.match_type('random.thing')) - - def test_included_type_list(self): - cfg = dict(event_type=['test.thing', 'other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(2, len(edef._included_types)) - self.assertEqual(0, len(edef._excluded_types)) - self.assertTrue(edef.included_type('test.thing')) - self.assertTrue(edef.included_type('other.thing')) - self.assertFalse(edef.excluded_type('test.thing')) - self.assertTrue(edef.match_type('test.thing')) - self.assertTrue(edef.match_type('other.thing')) - self.assertFalse(edef.match_type('random.thing')) - - def test_excluded_type_string(self): - cfg = dict(event_type='!test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual('*', edef._included_types[0]) - self.assertEqual('test.thing', edef._excluded_types[0]) - self.assertEqual(1, len(edef._excluded_types)) - self.assertEqual('test.thing', edef._excluded_types[0]) - self.assertTrue(edef.excluded_type('test.thing')) - self.assertTrue(edef.included_type('random.thing')) - self.assertFalse(edef.match_type('test.thing')) - self.assertTrue(edef.match_type('random.thing')) - - def test_excluded_type_list(self): - cfg = dict(event_type=['!test.thing', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual(2, len(edef._excluded_types)) - self.assertTrue(edef.excluded_type('test.thing')) - self.assertTrue(edef.excluded_type('other.thing')) - self.assertFalse(edef.excluded_type('random.thing')) - self.assertFalse(edef.match_type('test.thing')) - self.assertFalse(edef.match_type('other.thing')) - self.assertTrue(edef.match_type('random.thing')) - - def test_mixed_type_list(self): - cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual(2, len(edef._excluded_types)) - self.assertTrue(edef.excluded_type('test.thing')) - self.assertTrue(edef.excluded_type('other.thing')) - self.assertFalse(edef.excluded_type('random.thing')) - 
self.assertFalse(edef.match_type('test.thing')) - self.assertFalse(edef.match_type('other.thing')) - self.assertFalse(edef.match_type('random.whatzit')) - self.assertTrue(edef.match_type('random.thing')) - - def test_catchall(self): - cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['*', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['*'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertTrue(edef.is_catchall) - - cfg = dict(event_type=['*', 'foo'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertTrue(edef.is_catchall) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_extract_when(self, mock_utcnow): - now = datetime.datetime.utcnow() - modified = now + datetime.timedelta(minutes=1) - mock_utcnow.return_value = now - - body = {"timestamp": str(modified)} - when = converter.EventDefinition._extract_when(body) - self.assertTimestampEqual(modified, when) - - body = {"_context_timestamp": str(modified)} - when = converter.EventDefinition._extract_when(body) - self.assertTimestampEqual(modified, when) - - then = now + datetime.timedelta(hours=1) - body = {"timestamp": str(modified), "_context_timestamp": str(then)} - when = converter.EventDefinition._extract_when(body) - self.assertTimestampEqual(modified, when) - - when = converter.EventDefinition._extract_when({}) - self.assertTimestampEqual(now, when) - - def test_default_traits(self): - cfg = dict(event_type='test.thing', traits={}) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() - traits = set(edef.traits.keys()) - for dt in default_traits: - self.assertIn(dt, traits) - self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS), - len(edef.traits)) - - def test_traits(self): - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() - traits = set(edef.traits.keys()) - for dt in default_traits: - self.assertIn(dt, traits) - self.assertIn('host', traits) - self.assertIn('instance_id', traits) - self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS) + 2, - len(edef.traits)) - - -class TestNotificationConverter(ConverterBase): - - def setUp(self): - super(TestNotificationConverter, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.valid_event_def1 = [{ - 'event_type': 'compute.instance.create.*', - 'traits': { - 'instance_id': { - 'type': 'text', - 'fields': ['payload.instance_uuid', - 'payload.instance_id'], - }, - 'host': { - 'type': 'text', - 'fields': 'payload.host', - }, - }, - }] - - self.test_notification1 = self._create_test_notification( - "compute.instance.create.start", - 
"uuid-for-notif-0001", - instance_id="uuid-for-instance-0001", - host='host-1-2-3') - self.test_notification2 = self._create_test_notification( - "bogus.notification.from.mars", - "uuid-for-notif-0002", - weird='true', - host='cydonia') - self.fake_plugin_mgr = {} - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_converter_missing_keys(self, mock_utcnow): - # test a malformed notification - now = datetime.datetime.utcnow() - mock_utcnow.return_value = now - c = converter.NotificationEventsConverter( - [], - self.fake_plugin_mgr, - add_catchall=True) - message = {'event_type': "foo", - 'message_id': "abc", - 'publisher_id': "1"} - e = c.to_event(message) - self.assertIsValidEvent(e, message) - self.assertEqual(1, len(e.traits)) - self.assertEqual("foo", e.event_type) - self.assertEqual(now, e.generated) - - def test_converter_with_catchall(self): - c = converter.NotificationEventsConverter( - self.valid_event_def1, - self.fake_plugin_mgr, - add_catchall=True) - self.assertEqual(2, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsValidEvent(e, self.test_notification1) - self.assertEqual(3, len(e.traits)) - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id') - self.assertHasTrait(e, 'host') - - e = c.to_event(self.test_notification2) - self.assertIsValidEvent(e, self.test_notification2) - self.assertEqual(1, len(e.traits)) - self.assertHasDefaultTraits(e) - self.assertDoesNotHaveTrait(e, 'instance_id') - self.assertDoesNotHaveTrait(e, 'host') - - def test_converter_without_catchall(self): - c = converter.NotificationEventsConverter( - self.valid_event_def1, - self.fake_plugin_mgr, - add_catchall=False) - self.assertEqual(1, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsValidEvent(e, self.test_notification1) - self.assertEqual(3, len(e.traits)) - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id') - self.assertHasTrait(e, 'host') - - e = c.to_event(self.test_notification2) - self.assertIsNotValidEvent(e, self.test_notification2) - - def test_converter_empty_cfg_with_catchall(self): - c = converter.NotificationEventsConverter( - [], - self.fake_plugin_mgr, - add_catchall=True) - self.assertEqual(1, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsValidEvent(e, self.test_notification1) - self.assertEqual(1, len(e.traits)) - self.assertHasDefaultTraits(e) - - e = c.to_event(self.test_notification2) - self.assertIsValidEvent(e, self.test_notification2) - self.assertEqual(1, len(e.traits)) - self.assertHasDefaultTraits(e) - - def test_converter_empty_cfg_without_catchall(self): - c = converter.NotificationEventsConverter( - [], - self.fake_plugin_mgr, - add_catchall=False) - self.assertEqual(0, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsNotValidEvent(e, self.test_notification1) - - e = c.to_event(self.test_notification2) - self.assertIsNotValidEvent(e, self.test_notification2) - - @staticmethod - def _convert_message(convert, level): - message = {'priority': level, 'event_type': "foo", - 'message_id': "abc", 'publisher_id': "1"} - return convert.to_event(message) - - def test_store_raw_all(self): - self.CONF.event.store_raw = ['info', 'error'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertTrue(self._convert_message(c, 'error').raw) - - def test_store_raw_info_only(self): - self.CONF.event.store_raw = ['info'] - c = 
converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_error_only(self): - self.CONF.event.store_raw = ['error'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertFalse(self._convert_message(c, 'info').raw) - self.assertTrue(self._convert_message(c, 'error').raw) - - def test_store_raw_skip_all(self): - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertFalse(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_info_only_no_case(self): - self.CONF.event.store_raw = ['INFO'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_bad_skip_all(self): - self.CONF.event.store_raw = ['unknown'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertFalse(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_bad_and_good(self): - self.CONF.event.store_raw = ['info', 'unknown'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_setup_events_default_config(self): - self.CONF.set_override('definitions_cfg_file', - '/not/existing/file', group='event') - self.CONF.set_override('drop_unmatched_notifications', - False, group='event') - - c = converter.setup_events(self.fake_plugin_mgr) - self.assertIsInstance(c, converter.NotificationEventsConverter) - self.assertEqual(1, len(c.definitions)) - self.assertTrue(c.definitions[0].is_catchall) - - self.CONF.set_override('drop_unmatched_notifications', - True, group='event') - - c = converter.setup_events(self.fake_plugin_mgr) - self.assertIsInstance(c, converter.NotificationEventsConverter) - self.assertEqual(0, len(c.definitions)) diff --git a/ceilometer/tests/unit/event/test_endpoint.py b/ceilometer/tests/unit/event/test_endpoint.py deleted file mode 100644 index c79d3b22..00000000 --- a/ceilometer/tests/unit/event/test_endpoint.py +++ /dev/null @@ -1,200 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for Ceilometer notify daemon.""" - -import mock -from oslo_config import cfg -from oslo_config import fixture as fixture_config -import oslo_messaging -from oslo_utils import fileutils -from oslotest import mockpatch -import six -import yaml - -from ceilometer.event import endpoint as event_endpoint -from ceilometer import pipeline -from ceilometer import publisher -from ceilometer.publisher import test -from ceilometer.tests import base as tests_base - - -TEST_NOTICE_CTXT = { - u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'is_admin': True, - u'project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'quota_class': None, - u'read_deleted': u'no', - u'remote_address': u'10.0.2.15', - u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'roles': [u'admin'], - u'timestamp': u'2012-05-08T20:23:41.425105', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', -} - -TEST_NOTICE_METADATA = { - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -TEST_NOTICE_PAYLOAD = { - u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'fixed_ips': [{u'address': u'10.0.0.2', - u'floating_ips': [], - u'meta': {}, - u'type': u'fixed', - u'version': 4}], - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47.985999', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', -} - - -cfg.CONF.import_opt('store_events', 'ceilometer.notification', - group='notification') - - -class TestEventEndpoint(tests_base.BaseTestCase): - - def get_publisher(self, url, namespace=''): - fake_drivers = {'test://': test.TestPublisher, - 'except://': test.TestPublisher} - return fake_drivers[url](url) - - def _setup_pipeline(self, publishers): - ev_pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_event', - 'events': ['test.test'], - 'sinks': ['test_sink'] - }], - 'sinks': [{ - 'name': 'test_sink', - 'publishers': publishers - }] - }) - - if six.PY3: - ev_pipeline = ev_pipeline.encode('utf-8') - ev_pipeline_cfg_file = fileutils.write_to_tempfile( - content=ev_pipeline, prefix="event_pipeline", suffix="yaml") - self.CONF.set_override('event_pipeline_cfg_file', - ev_pipeline_cfg_file) - - ev_pipeline_mgr = pipeline.setup_event_pipeline() - return ev_pipeline_mgr - - def _setup_endpoint(self, publishers): - ev_pipeline_mgr = self._setup_pipeline(publishers) - self.endpoint = event_endpoint.EventsNotificationEndpoint( - ev_pipeline_mgr) - - self.endpoint.event_converter = mock.MagicMock() - self.endpoint.event_converter.to_event.return_value = mock.MagicMock( - event_type='test.test') - - def setUp(self): - super(TestEventEndpoint, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF([]) - self.CONF.set_override("connection", "log://", group='database') - self.CONF.set_override("store_events", True, 
group="notification") - self.setup_messaging(self.CONF) - - self.useFixture(mockpatch.PatchObject(publisher, 'get_publisher', - side_effect=self.get_publisher)) - self.fake_publisher = mock.Mock() - self.useFixture(mockpatch.Patch( - 'ceilometer.publisher.test.TestPublisher', - return_value=self.fake_publisher)) - - def test_message_to_event(self): - self._setup_endpoint(['test://']) - self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, - 'publisher_id': 'compute.vagrant-precise', - 'event_type': 'compute.instance.create.end', - 'payload': TEST_NOTICE_PAYLOAD, - 'metadata': TEST_NOTICE_METADATA}]) - - def test_bad_event_non_ack_and_requeue(self): - self._setup_endpoint(['test://']) - self.fake_publisher.publish_events.side_effect = Exception - self.CONF.set_override("ack_on_event_error", False, - group="notification") - ret = self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, - 'publisher_id': 'compute.vagrant-precise', - 'event_type': 'compute.instance.create.end', - 'payload': TEST_NOTICE_PAYLOAD, - 'metadata': TEST_NOTICE_METADATA}]) - - self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) - - def test_message_to_event_bad_event(self): - self._setup_endpoint(['test://']) - self.fake_publisher.publish_events.side_effect = Exception - self.CONF.set_override("ack_on_event_error", False, - group="notification") - - message = { - 'payload': {'event_type': "foo", 'message_id': "abc"}, - 'metadata': {}, - 'ctxt': {} - } - with mock.patch("ceilometer.pipeline.LOG") as mock_logger: - ret = self.endpoint.process_notification('info', [message]) - self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) - exception_mock = mock_logger.exception - self.assertIn('Exit after error from publisher', - exception_mock.call_args_list[0][0][0]) - - def test_message_to_event_bad_event_multi_publish(self): - - self._setup_endpoint(['test://', 'except://']) - - self.fake_publisher.publish_events.side_effect = Exception - self.CONF.set_override("ack_on_event_error", False, - group="notification") - - message = { - 'payload': {'event_type': "foo", 'message_id': "abc"}, - 'metadata': {}, - 'ctxt': {} - } - with mock.patch("ceilometer.pipeline.LOG") as mock_logger: - ret = self.endpoint.process_notification('info', [message]) - self.assertEqual(oslo_messaging.NotificationResult.HANDLED, ret) - exception_mock = mock_logger.exception - self.assertIn('Continue after error from publisher', - exception_mock.call_args_list[0][0][0]) diff --git a/ceilometer/tests/unit/event/test_trait_plugins.py b/ceilometer/tests/unit/event/test_trait_plugins.py deleted file mode 100644 index 6f8fe167..00000000 --- a/ceilometer/tests/unit/event/test_trait_plugins.py +++ /dev/null @@ -1,115 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslotest import base - -from ceilometer.event import trait_plugins - - -class TestSplitterPlugin(base.BaseTestCase): - - def setUp(self): - super(TestSplitterPlugin, self).setUp() - self.pclass = trait_plugins.SplitterTraitPlugin - - def test_split(self): - param = dict(separator='-', segment=0) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_values(match_list)[0] - self.assertEqual('test', value) - - param = dict(separator='-', segment=1) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_values(match_list)[0] - self.assertEqual('foobar', value) - - param = dict(separator='-', segment=1, max_split=1) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_values(match_list)[0] - self.assertEqual('foobar-baz', value) - - def test_no_sep(self): - param = dict(separator='-', segment=0) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test.foobar.baz')] - value = plugin.trait_values(match_list)[0] - self.assertEqual('test.foobar.baz', value) - - def test_no_segment(self): - param = dict(separator='-', segment=5) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_values(match_list)[0] - self.assertIs(None, value) - - def test_no_match(self): - param = dict(separator='-', segment=0) - plugin = self.pclass(**param) - match_list = [] - value = plugin.trait_values(match_list) - self.assertEqual([], value) - - -class TestBitfieldPlugin(base.BaseTestCase): - - def setUp(self): - super(TestBitfieldPlugin, self).setUp() - self.pclass = trait_plugins.BitfieldTraitPlugin - self.init = 0 - self.params = dict(initial_bitfield=self.init, - flags=[dict(path='payload.foo', bit=0, value=42), - dict(path='payload.foo', bit=1, value=12), - dict(path='payload.thud', bit=1, value=23), - dict(path='thingy.boink', bit=4), - dict(path='thingy.quux', bit=6, - value="wokka"), - dict(path='payload.bar', bit=10, - value='test')]) - - def test_bitfield(self): - match_list = [('payload.foo', 12), - ('payload.bar', 'test'), - ('thingy.boink', 'testagain')] - - plugin = self.pclass(**self.params) - value = plugin.trait_values(match_list) - self.assertEqual(0x412, value[0]) - - def test_initial(self): - match_list = [('payload.foo', 12), - ('payload.bar', 'test'), - ('thingy.boink', 'testagain')] - self.params['initial_bitfield'] = 0x2000 - plugin = self.pclass(**self.params) - value = plugin.trait_values(match_list) - self.assertEqual(0x2412, value[0]) - - def test_no_match(self): - match_list = [] - plugin = self.pclass(**self.params) - value = plugin.trait_values(match_list) - self.assertEqual(self.init, value[0]) - - def test_multi(self): - match_list = [('payload.foo', 12), - ('payload.thud', 23), - ('payload.bar', 'test'), - ('thingy.boink', 'testagain')] - - plugin = self.pclass(**self.params) - value = plugin.trait_values(match_list) - self.assertEqual(0x412, value[0]) diff --git a/ceilometer/tests/unit/hardware/__init__.py b/ceilometer/tests/unit/hardware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/hardware/inspector/__init__.py b/ceilometer/tests/unit/hardware/inspector/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/hardware/inspector/test_inspector.py b/ceilometer/tests/unit/hardware/inspector/test_inspector.py deleted file mode 100644 index 22a245c4..00000000 --- 
a/ceilometer/tests/unit/hardware/inspector/test_inspector.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Copyright 2014 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_utils import netutils - -from ceilometer.hardware import inspector -from ceilometer.tests import base - - -class TestHardwareInspector(base.BaseTestCase): - def test_get_inspector(self): - url = netutils.urlsplit("snmp://") - driver = inspector.get_inspector(url) - self.assertTrue(driver) - - def test_get_inspector_illegal(self): - url = netutils.urlsplit("illegal://") - self.assertRaises(RuntimeError, - inspector.get_inspector, - url) diff --git a/ceilometer/tests/unit/hardware/inspector/test_snmp.py b/ceilometer/tests/unit/hardware/inspector/test_snmp.py deleted file mode 100644 index 71b94c02..00000000 --- a/ceilometer/tests/unit/hardware/inspector/test_snmp.py +++ /dev/null @@ -1,209 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for ceilometer/hardware/inspector/snmp/inspector.py -""" -from oslo_utils import netutils -from oslotest import mockpatch - -from ceilometer.hardware.inspector import snmp -from ceilometer.tests import base as test_base - -ins = snmp.SNMPInspector - - -class FakeObjectName(object): - def __init__(self, name): - self.name = name - - def __str__(self): - return str(self.name) - - -def faux_getCmd_new(authData, transportTarget, *oids, **kwargs): - varBinds = [(FakeObjectName(oid), - int(oid.split('.')[-1])) for oid in oids] - return (None, None, 0, varBinds) - - -def faux_bulkCmd_new(authData, transportTarget, nonRepeaters, maxRepetitions, - *oids, **kwargs): - varBindTable = [ - [(FakeObjectName(oid + ".%d" % i), i) for i in range(1, 3)] - for oid in oids - ] - return (None, None, 0, varBindTable) - - -class TestSNMPInspector(test_base.BaseTestCase): - mapping = { - 'test_exact': { - 'matching_type': snmp.EXACT, - 'metric_oid': ('1.3.6.1.4.1.2021.10.1.3.1', int), - 'metadata': { - 'meta': ('1.3.6.1.4.1.2021.10.1.3.8', int) - }, - 'post_op': '_fake_post_op', - }, - 'test_prefix': { - 'matching_type': snmp.PREFIX, - 'metric_oid': ('1.3.6.1.4.1.2021.9.1.8', int), - 'metadata': { - 'meta': ('1.3.6.1.4.1.2021.9.1.3', int) - }, - 'post_op': None, - }, - } - - def setUp(self): - super(TestSNMPInspector, self).setUp() - self.inspector = snmp.SNMPInspector() - self.host = netutils.urlsplit("snmp://localhost") - self.useFixture(mockpatch.PatchObject( - self.inspector._cmdGen, 'getCmd', new=faux_getCmd_new)) - self.useFixture(mockpatch.PatchObject( - self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) - - def test_snmp_error(self): - def get_list(func, *args, **kwargs): - return list(func(*args, **kwargs)) - - def faux_parse(ret, is_bulk): - return (True, 'forced error') - - self.useFixture(mockpatch.PatchObject( - snmp, 'parse_snmp_return', new=faux_parse)) - - self.assertRaises(snmp.SNMPException, - get_list, - self.inspector.inspect_generic, - host=self.host, - cache={}, - extra_metadata={}, - param=self.mapping['test_exact']) - - @staticmethod - def _fake_post_op(host, cache, meter_def, value, metadata, extra, suffix): - metadata.update(post_op_meta=4) - extra.update(project_id=2) - return value - - def test_inspect_generic_exact(self): - self.inspector._fake_post_op = self._fake_post_op - cache = {} - ret = list(self.inspector.inspect_generic(self.host, - cache, - {}, - self.mapping['test_exact'])) - keys = cache[ins._CACHE_KEY_OID].keys() - self.assertIn('1.3.6.1.4.1.2021.10.1.3.1', keys) - self.assertIn('1.3.6.1.4.1.2021.10.1.3.8', keys) - self.assertEqual(1, len(ret)) - self.assertEqual(1, ret[0][0]) - self.assertEqual(8, ret[0][1]['meta']) - self.assertEqual(4, ret[0][1]['post_op_meta']) - self.assertEqual(2, ret[0][2]['project_id']) - - def test_inspect_generic_prefix(self): - cache = {} - ret = list(self.inspector.inspect_generic(self.host, - cache, - {}, - self.mapping['test_prefix'])) - keys = cache[ins._CACHE_KEY_OID].keys() - self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.1', keys) - self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.2', keys) - self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.1', keys) - self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.2', keys) - self.assertEqual(2, len(ret)) - self.assertIn(ret[0][0], (1, 2)) - self.assertEqual(ret[0][0], ret[0][1]['meta']) - - def test_post_op_net(self): - self.useFixture(mockpatch.PatchObject( - self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) - cache = {} - metadata = dict(name='lo', - speed=0, - mac='ba21e43302fe') - extra = {} - 
ret = self.inspector._post_op_net(self.host, cache, None, - value=8, - metadata=metadata, - extra=extra, - suffix=".2") - self.assertEqual(8, ret) - self.assertIn('ip', metadata) - self.assertIn("2", metadata['ip']) - self.assertIn('resource_id', extra) - self.assertEqual("localhost.lo", extra['resource_id']) - - def test_post_op_disk(self): - cache = {} - metadata = dict(device='/dev/sda1', - path='/') - extra = {} - ret = self.inspector._post_op_disk(self.host, cache, None, - value=8, - metadata=metadata, - extra=extra, - suffix=None) - self.assertEqual(8, ret) - self.assertIn('resource_id', extra) - self.assertEqual("localhost./dev/sda1", extra['resource_id']) - - def test_prepare_params(self): - param = {'post_op': '_post_op_disk', - 'oid': '1.3.6.1.4.1.2021.9.1.6', - 'type': 'int', - 'matching_type': 'type_prefix', - 'metadata': { - 'device': {'oid': '1.3.6.1.4.1.2021.9.1.3', - 'type': 'str'}, - 'path': {'oid': '1.3.6.1.4.1.2021.9.1.2', - 'type': "lambda x: str(x)"}}} - processed = self.inspector.prepare_params(param) - self.assertEqual('_post_op_disk', processed['post_op']) - self.assertEqual('1.3.6.1.4.1.2021.9.1.6', processed['metric_oid'][0]) - self.assertEqual(int, processed['metric_oid'][1]) - self.assertEqual(snmp.PREFIX, processed['matching_type']) - self.assertEqual(2, len(processed['metadata'].keys())) - self.assertEqual('1.3.6.1.4.1.2021.9.1.2', - processed['metadata']['path'][0]) - self.assertEqual("4", - processed['metadata']['path'][1](4)) - - def test_pysnmp_ver43(self): - # Test pysnmp version >=4.3 compatibility of ObjectIdentifier - from distutils.version import StrictVersion - import pysnmp - - has43 = StrictVersion(pysnmp.__version__) >= StrictVersion('4.3.0') - oid = '1.3.6.4.1.2021.11.57.0' - - if has43: - from pysnmp.entity import engine - from pysnmp.smi import rfc1902 - from pysnmp.smi import view - snmp_engine = engine.SnmpEngine() - mvc = view.MibViewController(snmp_engine.getMibBuilder()) - name = rfc1902.ObjectIdentity(oid) - name.resolveWithMib(mvc) - else: - from pysnmp.proto import rfc1902 - name = rfc1902.ObjectName(oid) - - self.assertEqual(oid, str(name)) diff --git a/ceilometer/tests/unit/hardware/pollsters/__init__.py b/ceilometer/tests/unit/hardware/pollsters/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/hardware/pollsters/test_generic.py b/ceilometer/tests/unit/hardware/pollsters/test_generic.py deleted file mode 100644 index 35d31727..00000000 --- a/ceilometer/tests/unit/hardware/pollsters/test_generic.py +++ /dev/null @@ -1,185 +0,0 @@ -# -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import six -import yaml - -from oslo_config import fixture as fixture_config -from oslo_utils import fileutils -from oslotest import mockpatch - -from ceilometer import declarative -from ceilometer.hardware.inspector import base as inspector_base -from ceilometer.hardware.pollsters import generic -from ceilometer import sample -from ceilometer.tests import base as test_base - - -class TestMeterDefinition(test_base.BaseTestCase): - def test_config_definition(self): - cfg = dict(name='test', - type='gauge', - unit='B', - snmp_inspector={}) - definition = generic.MeterDefinition(cfg) - self.assertEqual('test', definition.name) - self.assertEqual('gauge', definition.type) - self.assertEqual('B', definition.unit) - self.assertEqual({}, definition.snmp_inspector) - - def test_config_missing_field(self): - cfg = dict(name='test', type='gauge') - try: - generic.MeterDefinition(cfg) - except declarative.MeterDefinitionException as e: - self.assertEqual("Missing field unit", e.brief_message) - - def test_config_invalid_field(self): - cfg = dict(name='test', - type='gauge', - unit='B', - invalid={}) - definition = generic.MeterDefinition(cfg) - self.assertEqual("foobar", getattr(definition, 'invalid', 'foobar')) - - def test_config_invalid_type_field(self): - cfg = dict(name='test', - type='invalid', - unit='B', - snmp_inspector={}) - try: - generic.MeterDefinition(cfg) - except declarative.MeterDefinitionException as e: - self.assertEqual("Unrecognized type value invalid", - e.brief_message) - - @mock.patch('ceilometer.hardware.pollsters.generic.LOG') - def test_bad_metric_skip(self, LOG): - cfg = {'metric': [dict(name='test1', - type='gauge', - unit='B', - snmp_inspector={}), - dict(name='test_bad', - type='invalid', - unit='B', - snmp_inspector={}), - dict(name='test2', - type='gauge', - unit='B', - snmp_inspector={})]} - data = generic.load_definition(cfg) - self.assertEqual(2, len(data)) - LOG.error.assert_called_with( - "Error loading meter definition: %s", - "Unrecognized type value invalid") - - -class FakeInspector(inspector_base.Inspector): - net_metadata = dict(name='test.teest', - mac='001122334455', - ip='10.0.0.2', - speed=1000) - DATA = { - 'test': (0.99, {}, {}), - 'test2': (90, net_metadata, {}), - } - - def inspect_generic(self, host, cache, - extra_metadata=None, param=None): - yield self.DATA[host.hostname] - - -class TestGenericPollsters(test_base.BaseTestCase): - @staticmethod - def faux_get_inspector(url, namespace=None): - return FakeInspector() - - def setUp(self): - super(TestGenericPollsters, self).setUp() - self.conf = self.useFixture(fixture_config.Config()).conf - self.resources = ["snmp://test", "snmp://test2"] - self.useFixture(mockpatch.Patch( - 'ceilometer.hardware.inspector.get_inspector', - self.faux_get_inspector)) - self.conf(args=[]) - self.pollster = generic.GenericHardwareDeclarativePollster() - - def _setup_meter_def_file(self, cfg): - if six.PY3: - cfg = cfg.encode('utf-8') - meter_cfg_file = fileutils.write_to_tempfile(content=cfg, - prefix="snmp", - suffix="yaml") - self.conf.set_override( - 'meter_definitions_file', - meter_cfg_file, group='hardware') - cfg = declarative.load_definitions( - {}, self.conf.hardware.meter_definitions_file) - return cfg - - def _check_get_samples(self, name, definition, - expected_value, expected_type, expected_unit=None): - self.pollster._update_meter_definition(definition) - cache = {} - samples = list(self.pollster.get_samples(None, cache, - self.resources)) - self.assertTrue(samples) - 
self.assertIn(self.pollster.CACHE_KEY, cache) - for resource in self.resources: - self.assertIn(resource, cache[self.pollster.CACHE_KEY]) - - self.assertEqual(set([name]), - set([s.name for s in samples])) - match = [s for s in samples if s.name == name] - self.assertEqual(expected_value, match[0].volume) - self.assertEqual(expected_type, match[0].type) - if expected_unit: - self.assertEqual(expected_unit, match[0].unit) - - def test_get_samples(self): - param = dict(matching_type='type_exact', - oid='1.3.6.1.4.1.2021.10.1.3.1', - type='lambda x: float(str(x))') - meter_def = generic.MeterDefinition(dict(type='gauge', - name='hardware.test1', - unit='process', - snmp_inspector=param)) - self._check_get_samples('hardware.test1', - meter_def, - 0.99, sample.TYPE_GAUGE, - expected_unit='process') - - def test_get_pollsters_extensions(self): - param = dict(matching_type='type_exact', - oid='1.3.6.1.4.1.2021.10.1.3.1', - type='lambda x: float(str(x))') - meter_cfg = yaml.dump( - {'metric': [dict(type='gauge', - name='hardware.test1', - unit='process', - snmp_inspector=param), - dict(type='gauge', - name='hardware.test2.abc', - unit='process', - snmp_inspector=param)]}) - self._setup_meter_def_file(meter_cfg) - pollster = generic.GenericHardwareDeclarativePollster - # Clear cached mapping - pollster.mapping = None - exts = pollster.get_pollsters_extensions() - self.assertEqual(2, len(exts)) - self.assertIn(exts[0].name, ['hardware.test1', 'hardware.test2.abc']) - self.assertIn(exts[1].name, ['hardware.test1', 'hardware.test2.abc']) diff --git a/ceilometer/tests/unit/hardware/pollsters/test_util.py b/ceilometer/tests/unit/hardware/pollsters/test_util.py deleted file mode 100644 index a31cdbb3..00000000 --- a/ceilometer/tests/unit/hardware/pollsters/test_util.py +++ /dev/null @@ -1,61 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import netutils - -from ceilometer.hardware.pollsters import util -from ceilometer import sample -from ceilometer.tests import base as test_base - - -class TestPollsterUtils(test_base.BaseTestCase): - def setUp(self): - super(TestPollsterUtils, self).setUp() - self.host_url = netutils.urlsplit("snmp://127.0.0.1:161") - - def test_make_sample(self): - s = util.make_sample_from_host(self.host_url, - name='test', - sample_type=sample.TYPE_GAUGE, - unit='B', - volume=1, - res_metadata={ - 'metakey': 'metaval', - }) - self.assertEqual('127.0.0.1', s.resource_id) - self.assertIn('snmp://127.0.0.1:161', s.resource_metadata.values()) - self.assertIn('metakey', s.resource_metadata.keys()) - - def test_make_sample_extra(self): - extra = { - 'project_id': 'project', - 'resource_id': 'resource' - } - s = util.make_sample_from_host(self.host_url, - name='test', - sample_type=sample.TYPE_GAUGE, - unit='B', - volume=1, - extra=extra) - self.assertIsNone(s.user_id) - self.assertEqual('project', s.project_id) - self.assertEqual('resource', s.resource_id) - self.assertEqual({'resource_url': 'snmp://127.0.0.1:161', - 'project_id': 'project', - 'resource_id': - 'resource'}, - s.resource_metadata) diff --git a/ceilometer/tests/unit/image/__init__.py b/ceilometer/tests/unit/image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/image/test_glance.py b/ceilometer/tests/unit/image/test_glance.py deleted file mode 100644 index c9a16cb9..00000000 --- a/ceilometer/tests/unit/image/test_glance.py +++ /dev/null @@ -1,227 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_config import fixture as fixture_config -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.image import glance - -IMAGE_LIST = [ - type('Image', (object,), - {u'status': u'queued', - u'name': "some name", - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:29:46', - u'disk_format': None, - u'updated_at': u'2012-09-18T16:29:46', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'1d21a8d0-25f4-4e0a-b4ec-85f40237676b', - u'location': None, - u'checksum': None, - u'owner': u'4c8364fc20184ed7971b76602aa96184', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 2048}), - type('Image', (object,), - {u'status': u'active', - u'name': "hello world", - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:27:41', - u'disk_format': None, - u'updated_at': u'2012-09-18T16:27:41', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'22be9f90-864d-494c-aa74-8035fd535989', - u'location': None, - u'checksum': None, - u'owner': u'9e4f98287a0246daa42eaf4025db99d4', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 0}), - type('Image', (object,), - {u'status': u'queued', - u'name': None, - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:23:27', - u'disk_format': "raw", - u'updated_at': u'2012-09-18T16:23:27', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'8d133f6c-38a8-403c-b02c-7071b69b432d', - u'location': None, - u'checksum': None, - u'owner': u'5f8806a76aa34ee8b8fc8397bd154319', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 1024}), - type('Image', (object,), - {u'status': u'queued', - u'name': "some name", - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:29:46', - u'disk_format': None, - u'updated_at': u'2012-09-18T16:29:46', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'e753b196-49b4-48e8-8ca5-09ebd9805f40', - u'location': None, - u'checksum': None, - u'owner': u'4c8364fc20184ed7971b76602aa96184', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 2048}), -] - -ENDPOINT = 'end://point' - - -class _BaseObject(object): - pass - - -class FakeGlanceClient(object): - class images(object): - pass - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self._keystone = mock.Mock() - access = self._keystone.session.auth.get_access.return_value - access.service_catalog.get_endpoints = mock.Mock( - return_value={'image': mock.ANY}) - - -class TestImagePollsterPageSize(base.BaseTestCase): - - @staticmethod - def fake_get_glance_client(ksclient, endpoint): - glanceclient = FakeGlanceClient() - glanceclient.images.list = mock.MagicMock(return_value=IMAGE_LIST) - return glanceclient - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestImagePollsterPageSize, self).setUp() - self.manager = TestManager() - self.useFixture(mockpatch.PatchObject( - glance._Base, 'get_glance_client', - side_effect=self.fake_get_glance_client)) - self.CONF = self.useFixture(fixture_config.Config()).conf - - def _do_test_iter_images(self, page_size=0, length=0): - self.CONF.set_override("glance_page_size", page_size) - images = list(glance.ImagePollster(). 
- _iter_images(self.manager.keystone, {}, ENDPOINT)) - kwargs = {} - if page_size > 0: - kwargs['page_size'] = page_size - FakeGlanceClient.images.list.assert_called_with( - filters={'is_public': None}, **kwargs) - self.assertEqual(length, len(images)) - - def test_page_size(self): - self._do_test_iter_images(100, 4) - - def test_page_size_default(self): - self._do_test_iter_images(length=4) - - def test_page_size_negative_number(self): - self._do_test_iter_images(-1, 4) - - -class TestImagePollster(base.BaseTestCase): - - @staticmethod - def fake_get_glance_client(ksclient, endpoint): - glanceclient = _BaseObject() - setattr(glanceclient, "images", _BaseObject()) - setattr(glanceclient.images, - "list", lambda *args, **kwargs: iter(IMAGE_LIST)) - return glanceclient - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestImagePollster, self).setUp() - self.manager = TestManager() - self.useFixture(mockpatch.PatchObject( - glance._Base, 'get_glance_client', - side_effect=self.fake_get_glance_client)) - - def test_default_discovery(self): - pollster = glance.ImagePollster() - self.assertEqual('endpoint:image', pollster.default_discovery) - - def test_iter_images(self): - # Tests whether the iter_images method returns a unique image - # list when there is nothing in the cache - images = list(glance.ImagePollster(). - _iter_images(self.manager.keystone, {}, ENDPOINT)) - self.assertEqual(len(set(image.id for image in images)), len(images)) - - def test_iter_images_cached(self): - # Tests whether the iter_images method returns the values from - # the cache - cache = {'%s-images' % ENDPOINT: []} - images = list(glance.ImagePollster(). - _iter_images(self.manager.keystone, cache, - ENDPOINT)) - self.assertEqual([], images) - - def test_image(self): - samples = list(glance.ImagePollster().get_samples(self.manager, {}, - [ENDPOINT])) - self.assertEqual(4, len(samples)) - for sample in samples: - self.assertEqual(1, sample.volume) - - def test_image_size(self): - samples = list(glance.ImageSizePollster().get_samples(self.manager, - {}, - [ENDPOINT])) - self.assertEqual(4, len(samples)) - for image in IMAGE_LIST: - self.assertTrue( - any(map(lambda sample: sample.volume == image.size, - samples))) - - def test_image_get_sample_names(self): - samples = list(glance.ImagePollster().get_samples(self.manager, {}, - [ENDPOINT])) - self.assertEqual(set(['image']), set([s.name for s in samples])) - - def test_image_size_get_sample_names(self): - samples = list(glance.ImageSizePollster().get_samples(self.manager, - {}, - [ENDPOINT])) - self.assertEqual(set(['image.size']), set([s.name for s in samples])) diff --git a/ceilometer/tests/unit/ipmi/__init__.py b/ceilometer/tests/unit/ipmi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/ipmi/notifications/__init__.py b/ceilometer/tests/unit/ipmi/notifications/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py b/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py deleted file mode 100644 index 583219fe..00000000 --- a/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py +++ /dev/null @@ -1,795 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Sample data for test_ipmi. - -This data is provided as a sample of the data expected from the ipmitool -driver in the Ironic project, which is the publisher of the notifications -being tested. -""" - - -TEMPERATURE_DATA = { - 'DIMM GH VR Temp (0x3b)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '26 (+/- 0.500) degrees C', - 'Entity ID': '20.6 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM GH VR Temp (0x3b)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'CPU1 VR Temp (0x36)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '32 (+/- 0.500) degrees C', - 'Entity ID': '20.1 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'CPU1 VR Temp (0x36)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'DIMM EF VR Temp (0x3a)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '26 (+/- 0.500) degrees C', - 'Entity ID': '20.5 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM EF VR Temp (0x3a)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'CPU2 VR Temp (0x37)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '31 (+/- 0.500) degrees C', - 'Entity ID': '20.2 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 
'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'CPU2 VR Temp (0x37)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'Ambient Temp (0x32)': { - 'Status': 'ok', - 'Sensor Reading': '25 (+/- 0) degrees C', - 'Entity ID': '12.1 (Front Panel Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Upper non-critical': '43.000', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Upper non-recoverable': '50.000', - 'Positive Hysteresis': '4.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '46.000', - 'Sensor ID': 'Ambient Temp (0x32)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '25.000' - }, - 'Mezz Card Temp (0x35)': { - 'Status': 'Disabled', - 'Sensor Reading': 'Disabled', - 'Entity ID': '44.1 (I/O Module)', - 'Event Message Control': 'Per-threshold', - 'Upper non-critical': '70.000', - 'Upper non-recoverable': '85.000', - 'Positive Hysteresis': '4.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'Mezz Card Temp (0x35)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '25.000' - }, - 'PCH Temp (0x3c)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '46 (+/- 0.500) degrees C', - 'Entity ID': '45.1 (Processor/IO Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '93.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '103.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '98.000', - 'Sensor ID': 'PCH Temp (0x3c)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'DIMM CD VR Temp (0x39)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '27 (+/- 0.500) degrees C', - 'Entity ID': '20.4 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM CD VR Temp (0x39)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'PCI Riser 2 Temp (0x34)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '30 (+/- 0) degrees C', - 'Entity ID': '16.2 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ 
ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 2 Temp (0x34)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'DIMM AB VR Temp (0x38)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '28 (+/- 0.500) degrees C', - 'Entity ID': '20.3 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM AB VR Temp (0x38)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'PCI Riser 1 Temp (0x33)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '38 (+/- 0) degrees C', - 'Entity ID': '16.1 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, -} - - -CURRENT_DATA = { - 'Avg Power (0x2e)': { - 'Status': 'ok', - 'Sensor Reading': '130 (+/- 0) Watts', - 'Entity ID': '21.0 (Power Management)', - 'Assertions Enabled': '', - 'Event Message Control': 'Per-threshold', - 'Readable Thresholds': 'No Thresholds', - 'Positive Hysteresis': 'Unspecified', - 'Sensor Type (Analog)': 'Current', - 'Negative Hysteresis': 'Unspecified', - 'Maximum sensor range': 'Unspecified', - 'Sensor ID': 'Avg Power (0x2e)', - 'Assertion Events': '', - 'Minimum sensor range': '2550.000', - 'Settable Thresholds': 'No Thresholds' - } -} - - -FAN_DATA = { - 'Fan 4A Tach (0x46)': { - 'Status': 'ok', - 'Sensor Reading': '6900 (+/- 0) RPM', - 'Entity ID': '29.4 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 4A Tach (0x46)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 5A Tach (0x48)': { - 
'Status': 'ok', - 'Sensor Reading': '7140 (+/- 0) RPM', - 'Entity ID': '29.5 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 5A Tach (0x48)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 3A Tach (0x44)': { - 'Status': 'ok', - 'Sensor Reading': '6900 (+/- 0) RPM', - 'Entity ID': '29.3 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 3A Tach (0x44)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 1A Tach (0x40)': { - 'Status': 'ok', - 'Sensor Reading': '6960 (+/- 0) RPM', - 'Entity ID': '29.1 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 1A Tach (0x40)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 3B Tach (0x45)': { - 'Status': 'ok', - 'Sensor Reading': '7104 (+/- 0) RPM', - 'Entity ID': '29.3 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 3B Tach (0x45)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 2A Tach (0x42)': { - 'Status': 'ok', - 'Sensor Reading': '7080 (+/- 0) RPM', - 'Entity ID': '29.2 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 2A Tach (0x42)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 4B Tach (0x47)': { - 'Status': 'ok', - 
'Sensor Reading': '7488 (+/- 0) RPM', - 'Entity ID': '29.4 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 4B Tach (0x47)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 2B Tach (0x43)': { - 'Status': 'ok', - 'Sensor Reading': '7168 (+/- 0) RPM', - 'Entity ID': '29.2 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 2B Tach (0x43)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 5B Tach (0x49)': { - 'Status': 'ok', - 'Sensor Reading': '7296 (+/- 0) RPM', - 'Entity ID': '29.5 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 5B Tach (0x49)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 1B Tach (0x41)': { - 'Status': 'ok', - 'Sensor Reading': '7296 (+/- 0) RPM', - 'Entity ID': '29.1 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 1B Tach (0x41)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 6B Tach (0x4b)': { - 'Status': 'ok', - 'Sensor Reading': '7616 (+/- 0) RPM', - 'Entity ID': '29.6 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 6B Tach (0x4b)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 6A Tach (0x4a)': { - 'Status': 'ok', - 'Sensor Reading': 
'7080 (+/- 0) RPM', - 'Entity ID': '29.6 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 6A Tach (0x4a)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - } -} - - -VOLTAGE_DATA = { - 'Planar 12V (0x18)': { - 'Status': 'ok', - 'Sensor Reading': '12.312 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lcr- ucr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Maximum sensor range': 'Unspecified', - 'Positive Hysteresis': '0.108', - 'Deassertions Enabled': 'lcr- ucr+', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '10.692', - 'Negative Hysteresis': '0.108', - 'Threshold Read Mask': 'lcr ucr', - 'Upper critical': '13.446', - 'Readable Thresholds': 'lcr ucr', - 'Sensor ID': 'Planar 12V (0x18)', - 'Settable Thresholds': 'lcr ucr', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '12.042' - }, - 'Planar 3.3V (0x16)': { - 'Status': 'ok', - 'Sensor Reading': '3.309 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lcr- ucr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Maximum sensor range': 'Unspecified', - 'Positive Hysteresis': '0.028', - 'Deassertions Enabled': 'lcr- ucr+', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '3.039', - 'Negative Hysteresis': '0.028', - 'Threshold Read Mask': 'lcr ucr', - 'Upper critical': '3.564', - 'Readable Thresholds': 'lcr ucr', - 'Sensor ID': 'Planar 3.3V (0x16)', - 'Settable Thresholds': 'lcr ucr', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3.309' - }, - 'Planar VBAT (0x1c)': { - 'Status': 'ok', - 'Sensor Reading': '3.137 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lnc- lcr-', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Readable Thresholds': 'lcr lnc', - 'Positive Hysteresis': '0.025', - 'Deassertions Enabled': 'lnc- lcr-', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '2.095', - 'Negative Hysteresis': '0.025', - 'Lower non-critical': '2.248', - 'Maximum sensor range': 'Unspecified', - 'Sensor ID': 'Planar VBAT (0x1c)', - 'Settable Thresholds': 'lcr lnc', - 'Threshold Read Mask': 'lcr lnc', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3.010' - }, - 'Planar 5V (0x17)': { - 'Status': 'ok', - 'Sensor Reading': '5.062 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lcr- ucr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Maximum sensor range': 'Unspecified', - 'Positive Hysteresis': '0.045', - 'Deassertions Enabled': 'lcr- ucr+', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '4.475', - 'Negative Hysteresis': '0.045', - 'Threshold Read Mask': 'lcr ucr', - 'Upper critical': '5.582', - 'Readable Thresholds': 'lcr ucr', - 'Sensor ID': 'Planar 5V (0x17)', - 'Settable Thresholds': 'lcr ucr', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4.995' - } -} - - -SENSOR_DATA = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 
'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': TEMPERATURE_DATA, - 'Current': CURRENT_DATA, - 'Fan': FAN_DATA, - 'Voltage': VOLTAGE_DATA - } - } -} - - -EMPTY_PAYLOAD = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - } - } -} - - -MISSING_SENSOR = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Entity ID': '16.1 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - } - } - } -} - - -BAD_SENSOR = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': 'some bad stuff', - 'Entity ID': '16.1 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - } - } - } -} - - -NO_SENSOR_ID = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 
'Sensor Reading': '26 C', - }, - } - } -} - - -NO_NODE_ID = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Sensor Reading': '26 C', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - }, - } - } - } -} diff --git a/ceilometer/tests/unit/ipmi/notifications/test_ironic.py b/ceilometer/tests/unit/ipmi/notifications/test_ironic.py deleted file mode 100644 index 432e0b19..00000000 --- a/ceilometer/tests/unit/ipmi/notifications/test_ironic.py +++ /dev/null @@ -1,214 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for producing IPMI sample messages from notification events. -""" - -import mock -from oslotest import base - -from ceilometer.ipmi.notifications import ironic as ipmi -from ceilometer import sample -from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data - - -class TestNotifications(base.BaseTestCase): - - def test_ipmi_temperature_notification(self): - """Test IPMI Temperature sensor data. - - Based on the above ipmi_test_data the expected sample for a single - temperature reading has:: - - * a resource_id composed from the node_uuid and Sensor ID - * a name composed from 'hardware.ipmi.' and 'temperature' - * a volume from the first chunk of the Sensor Reading - * a unit from the last chunk of the Sensor Reading - * some readings are skipped if the value is 'Disabled' - * metadata with the node id - """ - processor = ipmi.TemperatureSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(10, len(counters), - 'expected 10 temperature readings') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)' - ) - test_counter = counters[resource_id] - self.assertEqual(26.0, test_counter.volume) - self.assertEqual('C', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.temperature', test_counter.name) - self.assertEqual('hardware.ipmi.metrics.update', - test_counter.resource_metadata['event_type']) - self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - test_counter.resource_metadata['node']) - - def test_ipmi_current_notification(self): - """Test IPMI Current sensor data. - - A single current reading is effectively the same as temperature, - modulo "current".
- """ - processor = ipmi.CurrentSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(1, len(counters), 'expected 1 current reading') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-avg_power_(0x2e)' - ) - test_counter = counters[resource_id] - self.assertEqual(130.0, test_counter.volume) - self.assertEqual('W', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.current', test_counter.name) - - def test_ipmi_fan_notification(self): - """Test IPMI Fan sensor data. - - A single fan reading is effectively the same as temperature, - modulo "fan". - """ - processor = ipmi.FanSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(12, len(counters), 'expected 12 fan readings') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)' - ) - test_counter = counters[resource_id] - self.assertEqual(6900.0, test_counter.volume) - self.assertEqual('RPM', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.fan', test_counter.name) - - def test_ipmi_voltage_notification(self): - """Test IPMI Voltage sensor data. - - A single voltage reading is effectively the same as temperature, - modulo "voltage". - """ - processor = ipmi.VoltageSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(4, len(counters), 'expected 4 volate readings') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)' - ) - test_counter = counters[resource_id] - self.assertEqual(3.137, test_counter.volume) - self.assertEqual('V', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.voltage', test_counter.name) - - def test_disabed_skips_metric(self): - """Test that a meter which a disabled volume is skipped.""" - processor = ipmi.TemperatureSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(10, len(counters), - 'expected 10 temperature readings') - - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)' - ) - - self.assertNotIn(resource_id, counters) - - def test_empty_payload_no_metrics_success(self): - processor = ipmi.TemperatureSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.EMPTY_PAYLOAD)]) - - self.assertEqual(0, len(counters), 'expected 0 readings') - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_missing_sensor_data(self, mylog): - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warning = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.MISSING_SENSOR)) - - self.assertEqual( - 'invalid sensor data for ' - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' - "missing 'Sensor Reading' in payload", - messages[0] - ) - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_sensor_data_malformed(self, mylog): - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - 
mylog.warning = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.BAD_SENSOR)) - - self.assertEqual( - 'invalid sensor data for ' - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' - 'unable to parse sensor reading: some bad stuff', - messages[0] - ) - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_missing_node_uuid(self, mylog): - """Test for desired error message when 'node_uuid' missing. - - Presumably this will never happen given the way the data - is created, but better defensive than dead. - """ - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warning = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.NO_NODE_ID)) - - self.assertEqual( - 'invalid sensor data for missing id: missing key in payload: ' - "'node_uuid'", - messages[0] - ) - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_missing_sensor_id(self, mylog): - """Test for desired error message when 'Sensor ID' missing.""" - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warning = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.NO_SENSOR_ID)) - - self.assertEqual( - 'invalid sensor data for missing id: missing key in payload: ' - "'Sensor ID'", - messages[0] - ) diff --git a/ceilometer/tests/unit/ipmi/platform/__init__.py b/ceilometer/tests/unit/ipmi/platform/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/ipmi/platform/fake_utils.py b/ceilometer/tests/unit/ipmi/platform/fake_utils.py deleted file mode 100644 index a8bed725..00000000 --- a/ceilometer/tests/unit/ipmi/platform/fake_utils.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import binascii - -from ceilometer.ipmi.platform import exception as nmexcept -from ceilometer.ipmi.platform import intel_node_manager as node_manager -from ceilometer.tests.unit.ipmi.platform import ipmitool_test_data as test_data - - -def get_sensor_status_init(parameter=''): - return (' 01\n', '') - - -def get_sensor_status_uninit(parameter=''): - return (' 00\n', '') - - -def init_sensor_agent(parameter=''): - return (' 00\n', '') - - -def get_nm_version_v2(parameter=''): - return test_data.nm_version_v2 - - -def get_nm_version_v3(parameter=''): - return test_data.nm_version_v3 - - -def sdr_dump(data_file=''): - if data_file == '': - raise ValueError("No file specified for ipmitool sdr dump") - fake_slave_address = '2c' - fake_channel = '60' - hexstr = node_manager.INTEL_PREFIX + fake_slave_address + fake_channel - data = binascii.unhexlify(hexstr) - with open(data_file, 'wb') as bin_fp: - bin_fp.write(data) - - return ('', '') - - -def _execute(funcs, *cmd, **kwargs): - - datas = { - test_data.device_id_cmd: test_data.device_id, - test_data.nm_device_id_cmd: test_data.nm_device_id, - test_data.get_power_cmd: test_data.power_data, - test_data.get_inlet_temp_cmd: test_data.inlet_temperature_data, - test_data.get_outlet_temp_cmd: test_data.outlet_temperature_data, - test_data.get_airflow_cmd: test_data.airflow_data, - test_data.get_cups_index_cmd: test_data.cups_index_data, - test_data.get_cups_util_cmd: test_data.cups_util_data, - test_data.sdr_info_cmd: test_data.sdr_info, - test_data.read_sensor_temperature_cmd: test_data.sensor_temperature, - test_data.read_sensor_voltage_cmd: test_data.sensor_voltage, - test_data.read_sensor_current_cmd: test_data.sensor_current, - test_data.read_sensor_fan_cmd: test_data.sensor_fan, - } - - if cmd[1] == 'sdr' and cmd[2] == 'dump': - # ipmitool sdr dump /tmp/XXXX - cmd_str = "".join(cmd[:3]) - par_str = cmd[3] - else: - cmd_str = "".join(cmd) - par_str = '' - - try: - return datas[cmd_str] - except KeyError: - return funcs[cmd_str](par_str) - - -def execute_with_nm_v3(*cmd, **kwargs): - """test version of execute on Node Manager V3.0 platform.""" - - funcs = {test_data.sensor_status_cmd: get_sensor_status_init, - test_data.init_sensor_cmd: init_sensor_agent, - test_data.sdr_dump_cmd: sdr_dump, - test_data.nm_version_cmd: get_nm_version_v3} - - return _execute(funcs, *cmd, **kwargs) - - -def execute_with_nm_v2(*cmd, **kwargs): - """test version of execute on Node Manager V2.0 platform.""" - - funcs = {test_data.sensor_status_cmd: get_sensor_status_init, - test_data.init_sensor_cmd: init_sensor_agent, - test_data.sdr_dump_cmd: sdr_dump, - test_data.nm_version_cmd: get_nm_version_v2} - - return _execute(funcs, *cmd, **kwargs) - - -def execute_without_nm(*cmd, **kwargs): - """test version of execute on Non-Node Manager platform.""" - - funcs = {test_data.sensor_status_cmd: get_sensor_status_uninit, - test_data.init_sensor_cmd: init_sensor_agent, - test_data.sdr_dump_cmd: sdr_dump} - - return _execute(funcs, *cmd, **kwargs) - - -def execute_without_ipmi(*cmd, **kwargs): - raise nmexcept.IPMIException diff --git a/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py b/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py deleted file mode 100644 index 7504aba3..00000000 --- a/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Sample data for test_intel_node_manager and test_ipmi_sensor. - -This data is provided as a sample of the data expected from the ipmitool -binary, which produce Node Manager/IPMI raw data -""" - -sensor_temperature_data = """Sensor ID : SSB Therm Trip (0xd) - Entity ID : 7.1 (System Board) - Sensor Type (Discrete): Temperature - Assertions Enabled : Digital State - [State Asserted] - Deassertions Enabled : Digital State - [State Asserted] - -Sensor ID : BB P1 VR Temp (0x20) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Temperature - Sensor Reading : 25 (+/- 0) degrees C - Status : ok - Nominal Reading : 58.000 - Normal Minimum : 10.000 - Normal Maximum : 105.000 - Upper critical : 115.000 - Upper non-critical : 110.000 - Lower critical : 0.000 - Lower non-critical : 5.000 - Positive Hysteresis : 2.000 - Negative Hysteresis : 2.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : Front Panel Temp (0x21) - Entity ID : 12.1 (Front Panel Board) - Sensor Type (Analog) : Temperature - Sensor Reading : 23 (+/- 0) degrees C - Status : ok - Nominal Reading : 28.000 - Normal Minimum : 10.000 - Normal Maximum : 45.000 - Upper critical : 55.000 - Upper non-critical : 50.000 - Lower critical : 0.000 - Lower non-critical : 5.000 - Positive Hysteresis : 2.000 - Negative Hysteresis : 2.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : SSB Temp (0x22) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Temperature - Sensor Reading : 43 (+/- 0) degrees C - Status : ok - Nominal Reading : 52.000 - Normal Minimum : 10.000 - Normal Maximum : 93.000 - Upper critical : 103.000 - Upper non-critical : 98.000 - Lower critical : 0.000 - Lower non-critical : 5.000 - Positive Hysteresis : 2.000 - Negative Hysteresis : 2.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -""" - -sensor_voltage_data = """Sensor ID : VR Watchdog (0xb) - Entity ID : 7.1 (System Board) - Sensor Type (Discrete): Voltage - Assertions Enabled : Digital State - [State Asserted] - Deassertions Enabled : Digital State - [State Asserted] - -Sensor ID : BB +12.0V (0xd0) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Voltage - Sensor Reading : 11.831 (+/- 0) Volts - Status : ok - Nominal Reading : 11.935 
- Normal Minimum : 11.363 - Normal Maximum : 12.559 - Upper critical : 13.391 - Upper non-critical : 13.027 - Lower critical : 10.635 - Lower non-critical : 10.947 - Positive Hysteresis : 0.052 - Negative Hysteresis : 0.052 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : BB +1.35 P1LV AB (0xe4) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Voltage - Sensor Reading : Disabled - Status : Disabled - Nominal Reading : 1.342 - Normal Minimum : 1.275 - Normal Maximum : 1.409 - Upper critical : 1.488 - Upper non-critical : 1.445 - Lower critical : 1.201 - Lower non-critical : 1.244 - Positive Hysteresis : 0.006 - Negative Hysteresis : 0.006 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Event Status : Unavailable - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : BB +5.0V (0xd1) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Voltage - Sensor Reading : 4.959 (+/- 0) Volts - Status : ok - Nominal Reading : 4.981 - Normal Minimum : 4.742 - Normal Maximum : 5.241 - Upper critical : 5.566 - Upper non-critical : 5.415 - Lower critical : 4.416 - Lower non-critical : 4.546 - Positive Hysteresis : 0.022 - Negative Hysteresis : 0.022 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -""" - -sensor_current_data = """Sensor ID : PS1 Curr Out % (0x58) - Entity ID : 10.1 (Power Supply) - Sensor Type (Analog) : Current - Sensor Reading : 11 (+/- 0) unspecified - Status : ok - Nominal Reading : 50.000 - Normal Minimum : 0.000 - Normal Maximum : 100.000 - Upper critical : 118.000 - Upper non-critical : 100.000 - Positive Hysteresis : Unspecified - Negative Hysteresis : Unspecified - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : unc ucr - Settable Thresholds : unc ucr - Threshold Read Mask : unc ucr - Assertion Events : - Assertions Enabled : unc+ ucr+ - Deassertions Enabled : unc+ ucr+ - -Sensor ID : PS2 Curr Out % (0x59) - Entity ID : 10.2 (Power Supply) - Sensor Type (Analog) : Current - Sensor Reading : 0 (+/- 0) unspecified - Status : ok - Nominal Reading : 50.000 - Normal Minimum : 0.000 - Normal Maximum : 100.000 - Upper critical : 118.000 - Upper non-critical : 100.000 - Positive Hysteresis : Unspecified - Negative Hysteresis : Unspecified - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : unc ucr - Settable Thresholds : unc ucr - Threshold Read Mask : unc ucr - Assertion Events : - Assertions Enabled : unc+ ucr+ - Deassertions Enabled : unc+ ucr+ - -""" - -sensor_fan_data = """Sensor ID : System Fan 1 (0x30) - Entity ID : 29.1 (Fan Device) - Sensor Type (Analog) : Fan - Sensor 
Reading : 4704 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -Sensor ID : System Fan 2 (0x32) - Entity ID : 29.2 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4704 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -Sensor ID : System Fan 3 (0x34) - Entity ID : 29.3 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4704 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -Sensor ID : System Fan 4 (0x36) - Entity ID : 29.4 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4606 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -""" - - -sensor_status_cmd = 'ipmitoolraw0x0a0x2c0x00' -init_sensor_cmd = 'ipmitoolraw0x0a0x2c0x01' -sdr_dump_cmd = 'ipmitoolsdrdump' -sdr_info_cmd = 'ipmitoolsdrinfo' - -read_sensor_all_cmd = 'ipmitoolsdr-v' -read_sensor_temperature_cmd = 'ipmitoolsdr-vtypeTemperature' -read_sensor_voltage_cmd = 'ipmitoolsdr-vtypeVoltage' -read_sensor_current_cmd = 'ipmitoolsdr-vtypeCurrent' -read_sensor_fan_cmd = 'ipmitoolsdr-vtypeFan' - -device_id_cmd = 'ipmitoolraw0x060x01' -nm_device_id_cmd = 'ipmitool-b0x6-t0x2craw0x060x01' -nm_version_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xca0x570x010x00' -get_power_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x010x000x00' -get_inlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x020x000x00' -get_outlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x050x000x00' -get_airflow_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x040x000x00' -get_cups_index_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x01' -get_cups_util_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x05' - - 
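The command keys above are simply ipmitool argument vectors concatenated without separators; the removed fake_utils._execute (shown earlier in this diff) builds the same key with "".join(cmd) and uses it to pick a canned (stdout, stderr) reply or a handler function. A simplified sketch of that lookup, reusing the device-ID command and reply from this test-data file (fake_execute is an illustrative name, not the original API):

# Simplified sketch of the canned-response dispatch in the removed
# fake_utils._execute.
CANNED = {
    'ipmitoolraw0x060x01': (' 21 01 01 04 02 bf 57 01 00 49 00 01 07 50 0b', ''),
}

def fake_execute(*cmd, **kwargs):
    # e.g. ('ipmitool', 'raw', '0x06', '0x01') -> 'ipmitoolraw0x060x01'
    key = ''.join(cmd)
    try:
        return CANNED[key]  # same (stdout, stderr) shape as utils.execute
    except KeyError:
        raise AssertionError('unexpected ipmitool invocation: %r' % (cmd,))

stdout, stderr = fake_execute('ipmitool', 'raw', '0x06', '0x01')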
-device_id = (' 21 01 01 04 02 bf 57 01 00 49 00 01 07 50 0b', '') -nm_device_id = (' 50 01 02 15 02 21 57 01 00 02 0b 02 09 10 01', '') - -nm_version_v2 = (' 57 01 00 03 02 00 02 15', '') -nm_version_v3 = (' 57 01 00 05 03 00 03 06', '') - -# start from byte 3, get cur- 57 00(87), min- 03 00(3) -# max- 37 02(567), avg- 5c 00(92) -power_data = (' 57 01 00 57 00 03 00 37 02 5c 00 cc 37 f4 53 ce\n' - ' 9b 12 01 50\n', '') - -# start from byte 3, get cur- 17 00(23), min- 16 00(22) -# max- 18 00(24), avg- 17 00(23) -inlet_temperature_data = (' 57 01 00 17 00 16 00 18 00 17 00 f3 6f fe 53 85\n' - ' b7 02 00 50\n', '') - -# start from byte 3, get cur- 19 00(25), min- 18 00(24) -# max- 1b 00(27), avg- 19 00(25) -outlet_temperature_data = (' 57 01 00 19 00 18 00 1b 00 19 00 f3 6f fe 53 85\n' - ' b7 02 00 50\n', '') - -# start from byte 3, get cur- be 00(190), min- 96 00(150) -# max- 26 02(550), avg- cb 00(203) -airflow_data = (' 57 01 00 be 00 96 00 26 02 cb 00 e1 65 c1 54 db\n' - ' b7 02 00 50\n', '') - -# start from byte 3, cups index 2e 00 (46) -cups_index_data = (' 57 01 00 2e 00\n', '') - -# start from byte 3, get cup_util - 33 00 ...(51), mem_util - 05 00 ...(5) -# io_util - 00 00 ...(0) -cups_util_data = (' 57 01 00 33 00 00 00 00 00 00 00 05 00 00 00 00\n' - ' 00 00 00 00 00 00 00 00 00 00 00\n', '') - -sdr_info = ('', '') - -sensor_temperature = (sensor_temperature_data, '') -sensor_voltage = (sensor_voltage_data, '') -sensor_current = (sensor_current_data, '') -sensor_fan = (sensor_fan_data, '') diff --git a/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py b/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py deleted file mode 100644 index 0383958c..00000000 --- a/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
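To make the byte-offset comments above concrete: each statistic in the raw Node Manager replies is a 16-bit little-endian value, and the data begins at byte 3 of the hex dump. The following self-contained sketch (an illustration, not the NodeManager parser itself) decodes the current, minimum, maximum and average readings from the canned power_data reply and recovers exactly the values noted in the comments.

# Illustrative decoding of the canned power_data response defined above.
raw = (' 57 01 00 57 00 03 00 37 02 5c 00 cc 37 f4 53 ce\n'
       ' 9b 12 01 50\n')
data = [int(byte, 16) for byte in raw.split()]

def le16(offset):
    # Two consecutive bytes interpreted as a little-endian 16-bit integer.
    return data[offset] | (data[offset + 1] << 8)

cur, minimum, maximum, avg = le16(3), le16(5), le16(7), le16(9)
assert (cur, minimum, maximum, avg) == (87, 3, 567, 92)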
- -import abc -import tempfile - -import mock -from oslotest import base -import six - -from ceilometer.ipmi.platform import intel_node_manager as node_manager -from ceilometer.tests.unit.ipmi.platform import fake_utils -from ceilometer import utils - - -@six.add_metaclass(abc.ABCMeta) -class _Base(base.BaseTestCase): - - @abc.abstractmethod - def init_test_engine(self): - """Prepare specific ipmitool as engine for different NM version.""" - - def setUp(self): - super(_Base, self).setUp() - self.init_test_engine() - self.nm = node_manager.NodeManager() - - @classmethod - def tearDownClass(cls): - # reset inited to force an initialization of singleton for next test - node_manager.NodeManager()._inited = False - super(_Base, cls).tearDownClass() - - -class TestNodeManagerV3(_Base): - - def init_test_engine(self): - utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v3) - - def test_read_airflow(self): - airflow = self.nm.read_airflow() - avg_val = node_manager._hex(airflow["Average_value"]) - max_val = node_manager._hex(airflow["Maximum_value"]) - min_val = node_manager._hex(airflow["Minimum_value"]) - cur_val = node_manager._hex(airflow["Current_value"]) - - # get NM 3.0 - self.assertEqual(5, self.nm.nm_version) - - # see ipmi_test_data.py for raw data - self.assertEqual(190, cur_val) - self.assertEqual(150, min_val) - self.assertEqual(550, max_val) - self.assertEqual(203, avg_val) - - def test_read_outlet_temperature(self): - temperature = self.nm.read_outlet_temperature() - avg_val = node_manager._hex(temperature["Average_value"]) - max_val = node_manager._hex(temperature["Maximum_value"]) - min_val = node_manager._hex(temperature["Minimum_value"]) - cur_val = node_manager._hex(temperature["Current_value"]) - - # get NM 3.0 - self.assertEqual(5, self.nm.nm_version) - - # see ipmi_test_data.py for raw data - self.assertEqual(25, cur_val) - self.assertEqual(24, min_val) - self.assertEqual(27, max_val) - self.assertEqual(25, avg_val) - - def test_read_cups_utilization(self): - cups_util = self.nm.read_cups_utilization() - cpu_util = node_manager._hex(cups_util["CPU_Utilization"]) - mem_util = node_manager._hex(cups_util["Mem_Utilization"]) - io_util = node_manager._hex(cups_util["IO_Utilization"]) - - # see ipmi_test_data.py for raw data - self.assertEqual(51, cpu_util) - self.assertEqual(5, mem_util) - self.assertEqual(0, io_util) - - def test_read_cups_index(self): - cups_index = self.nm.read_cups_index() - index = node_manager._hex(cups_index["CUPS_Index"]) - self.assertEqual(46, index) - - -class TestNodeManager(_Base): - - def init_test_engine(self): - utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) - - def test_read_power_all(self): - power = self.nm.read_power_all() - - avg_val = node_manager._hex(power["Average_value"]) - max_val = node_manager._hex(power["Maximum_value"]) - min_val = node_manager._hex(power["Minimum_value"]) - cur_val = node_manager._hex(power["Current_value"]) - - # get NM 2.0 - self.assertEqual(3, self.nm.nm_version) - # see ipmi_test_data.py for raw data - self.assertEqual(87, cur_val) - self.assertEqual(3, min_val) - self.assertEqual(567, max_val) - self.assertEqual(92, avg_val) - - def test_read_inlet_temperature(self): - temperature = self.nm.read_inlet_temperature() - - avg_val = node_manager._hex(temperature["Average_value"]) - max_val = node_manager._hex(temperature["Maximum_value"]) - min_val = node_manager._hex(temperature["Minimum_value"]) - cur_val = node_manager._hex(temperature["Current_value"]) - - # see 
ipmi_test_data.py for raw data - self.assertEqual(23, cur_val) - self.assertEqual(22, min_val) - self.assertEqual(24, max_val) - self.assertEqual(23, avg_val) - - def test_read_airflow(self): - airflow = self.nm.read_airflow() - self.assertEqual({}, airflow) - - def test_read_outlet_temperature(self): - temperature = self.nm.read_outlet_temperature() - self.assertEqual({}, temperature) - - def test_read_cups_utilization(self): - cups_util = self.nm.read_cups_utilization() - self.assertEqual({}, cups_util) - - def test_read_cups_index(self): - cups_index = self.nm.read_cups_index() - self.assertEqual({}, cups_index) - - -class TestNonNodeManager(_Base): - - def init_test_engine(self): - utils.execute = mock.Mock(side_effect=fake_utils.execute_without_nm) - - def test_read_power_all(self): - # no NM support - self.assertEqual(0, self.nm.nm_version) - power = self.nm.read_power_all() - - # Non-Node Manager platform return empty data - self.assertEqual({}, power) - - def test_read_inlet_temperature(self): - temperature = self.nm.read_inlet_temperature() - - # Non-Node Manager platform return empty data - self.assertEqual({}, temperature) - - -class ParseSDRFileTestCase(base.BaseTestCase): - - def setUp(self): - super(ParseSDRFileTestCase, self).setUp() - self.temp_file = tempfile.NamedTemporaryFile().name - - def test_parsing_found(self): - data = b'\x00\xFF\x00\xFF\x57\x01\x00\x0D\x01\x0A\xB2\x00\xFF' - with open(self.temp_file, 'wb') as f: - f.write(data) - result = node_manager.NodeManager._parse_slave_and_channel( - self.temp_file) - self.assertEqual(('0a', 'b'), result) - - def test_parsing_not_found(self): - data = b'\x00\xFF\x00\xFF\x52\x01\x80\x0D\x01\x6A\xB7\x00\xFF' - with open(self.temp_file, 'wb') as f: - f.write(data) - result = node_manager.NodeManager._parse_slave_and_channel( - self.temp_file) - self.assertIsNone(result) diff --git a/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py b/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py deleted file mode 100644 index e6eaddea..00000000 --- a/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base - -from ceilometer.ipmi.platform import ipmi_sensor -from ceilometer.tests.unit.ipmi.platform import fake_utils -from ceilometer import utils - - -class TestIPMISensor(base.BaseTestCase): - - def setUp(self): - super(TestIPMISensor, self).setUp() - - utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) - self.ipmi = ipmi_sensor.IPMISensor() - - @classmethod - def tearDownClass(cls): - # reset inited to force an initialization of singleton for next test - ipmi_sensor.IPMISensor()._inited = False - super(TestIPMISensor, cls).tearDownClass() - - def test_read_sensor_temperature(self): - sensors = self.ipmi.read_sensor_any('Temperature') - - self.assertTrue(self.ipmi.ipmi_support) - # only temperature data returned. 
- self.assertIn('Temperature', sensors) - self.assertEqual(1, len(sensors)) - - # 4 sensor data in total, ignore 1 without 'Sensor Reading'. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(3, len(sensors['Temperature'])) - sensor = sensors['Temperature']['BB P1 VR Temp (0x20)'] - self.assertEqual('25 (+/- 0) degrees C', sensor['Sensor Reading']) - - def test_read_sensor_voltage(self): - sensors = self.ipmi.read_sensor_any('Voltage') - - # only voltage data returned. - self.assertIn('Voltage', sensors) - self.assertEqual(1, len(sensors)) - - # 4 sensor data in total, ignore 1 without 'Sensor Reading'. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(3, len(sensors['Voltage'])) - sensor = sensors['Voltage']['BB +5.0V (0xd1)'] - self.assertEqual('4.959 (+/- 0) Volts', sensor['Sensor Reading']) - - def test_read_sensor_current(self): - sensors = self.ipmi.read_sensor_any('Current') - - # only Current data returned. - self.assertIn('Current', sensors) - self.assertEqual(1, len(sensors)) - - # 2 sensor data in total. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(2, len(sensors['Current'])) - sensor = sensors['Current']['PS1 Curr Out % (0x58)'] - self.assertEqual('11 (+/- 0) unspecified', sensor['Sensor Reading']) - - def test_read_sensor_fan(self): - sensors = self.ipmi.read_sensor_any('Fan') - - # only Fan data returned. - self.assertIn('Fan', sensors) - self.assertEqual(1, len(sensors)) - - # 2 sensor data in total. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(4, len(sensors['Fan'])) - sensor = sensors['Fan']['System Fan 2 (0x32)'] - self.assertEqual('4704 (+/- 0) RPM', sensor['Sensor Reading']) - - -class TestNonIPMISensor(base.BaseTestCase): - - def setUp(self): - super(TestNonIPMISensor, self).setUp() - - utils.execute = mock.Mock(side_effect=fake_utils.execute_without_ipmi) - self.ipmi = ipmi_sensor.IPMISensor() - - @classmethod - def tearDownClass(cls): - # reset inited to force an initialization of singleton for next test - ipmi_sensor.IPMISensor()._inited = False - super(TestNonIPMISensor, cls).tearDownClass() - - def test_read_sensor_temperature(self): - sensors = self.ipmi.read_sensor_any('Temperature') - - self.assertFalse(self.ipmi.ipmi_support) - # Non-IPMI platform return empty data - self.assertEqual({}, sensors) - - def test_read_sensor_voltage(self): - sensors = self.ipmi.read_sensor_any('Voltage') - - # Non-IPMI platform return empty data - self.assertEqual({}, sensors) - - def test_read_sensor_current(self): - sensors = self.ipmi.read_sensor_any('Current') - - # Non-IPMI platform return empty data - self.assertEqual({}, sensors) - - def test_read_sensor_fan(self): - sensors = self.ipmi.read_sensor_any('Fan') - - # Non-IPMI platform return empty data - self.assertEqual({}, sensors) diff --git a/ceilometer/tests/unit/ipmi/pollsters/__init__.py b/ceilometer/tests/unit/ipmi/pollsters/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/ipmi/pollsters/base.py b/ceilometer/tests/unit/ipmi/pollsters/base.py deleted file mode 100644 index 6b8023d4..00000000 --- a/ceilometer/tests/unit/ipmi/pollsters/base.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import mock -from oslotest import mockpatch -import six - -from ceilometer.agent import manager -from ceilometer.tests import base - - -@six.add_metaclass(abc.ABCMeta) -class TestPollsterBase(base.BaseTestCase): - - def fake_data(self): - """Fake data used for test.""" - return None - - def fake_sensor_data(self, sensor_type): - """Fake sensor data used for test.""" - return None - - @abc.abstractmethod - def make_pollster(self): - """Produce right pollster for test.""" - - def _test_get_samples(self): - nm = mock.Mock() - nm.read_inlet_temperature.side_effect = self.fake_data - nm.read_outlet_temperature.side_effect = self.fake_data - nm.read_power_all.side_effect = self.fake_data - nm.read_airflow.side_effect = self.fake_data - nm.read_cups_index.side_effect = self.fake_data - nm.read_cups_utilization.side_effect = self.fake_data - nm.read_sensor_any.side_effect = self.fake_sensor_data - # We should mock the pollster first before initialize the Manager - # so that we don't trigger the sudo in pollsters' __init__(). - self.useFixture(mockpatch.Patch( - 'ceilometer.ipmi.platform.intel_node_manager.NodeManager', - return_value=nm)) - - self.useFixture(mockpatch.Patch( - 'ceilometer.ipmi.platform.ipmi_sensor.IPMISensor', - return_value=nm)) - - self.mgr = manager.AgentManager(['ipmi']) - - self.pollster = self.make_pollster() - - def _verify_metering(self, length, expected_vol=None, node=None): - cache = {} - resources = ['local_host'] - - samples = list(self.pollster.get_samples(self.mgr, cache, resources)) - self.assertEqual(length, len(samples)) - - if expected_vol: - self.assertTrue(any(s.volume == expected_vol for s in samples)) - if node: - self.assertTrue(any(s.resource_metadata['node'] == node - for s in samples)) diff --git a/ceilometer/tests/unit/ipmi/pollsters/test_node.py b/ceilometer/tests/unit/ipmi/pollsters/test_node.py deleted file mode 100644 index 4b3e7c6a..00000000 --- a/ceilometer/tests/unit/ipmi/pollsters/test_node.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
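Note that _test_get_samples above wires every read_* method of the fake NodeManager through side_effect=self.fake_data rather than return_value, so the subclass-provided fake_data() is re-evaluated on every poll. A small standalone illustration of that distinction, with a value chosen to mirror the node pollster tests below:

```python
import mock

calls = []


def fake_data():
    # Re-evaluated on every call when attached via side_effect.
    calls.append(1)
    return {"Current_value": ['13', '00']}


nm = mock.Mock()
nm.read_power_all.side_effect = fake_data    # called each time
nm.read_power_all()
nm.read_power_all()
assert len(calls) == 2

nm.read_airflow.return_value = fake_data()   # evaluated once, right here
assert len(calls) == 3
```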
- -import mock -from oslo_config import cfg - -from ceilometer.ipmi.pollsters import node -from ceilometer.tests.unit.ipmi.pollsters import base - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') - - -class TestPowerPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['13', '00']} - - def make_pollster(self): - return node.PowerPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 19(0x13 as current_value) - self._verify_metering(1, 19, CONF.host) - - -class TestInletTemperaturePollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['23', '00']} - - def make_pollster(self): - return node.InletTemperaturePollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 35(0x23 as current_value) - self._verify_metering(1, 35, CONF.host) - - -class TestOutletTemperaturePollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['25', '00']} - - def make_pollster(self): - return node.OutletTemperaturePollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 37(0x25 as current_value) - self._verify_metering(1, 37, CONF.host) - - -class TestAirflowPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['be', '00']} - - def make_pollster(self): - return node.AirflowPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 190(0xbe as current_value) - self._verify_metering(1, 190, CONF.host) - - -class TestCUPSIndexPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"CUPS_Index": ['2e', '00']} - - def make_pollster(self): - return node.CUPSIndexPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 190(0xbe) - self._verify_metering(1, 46, CONF.host) - - -class CPUUtilPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"CPU_Utilization": - ['33', '00', '00', '00', '00', '00', '00', '00']} - - def make_pollster(self): - return node.CPUUtilPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 190(0xbe) - self._verify_metering(1, 51, CONF.host) - - -class MemUtilPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Mem_Utilization": - ['05', '00', '00', '00', '00', '00', '00', '00']} - - def make_pollster(self): - return node.MemUtilPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 5(0x05) - self._verify_metering(1, 5, CONF.host) - - -class 
IOUtilPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"IO_Utilization": - ['00', '00', '00', '00', '00', '00', '00', '00']} - - def make_pollster(self): - return node.IOUtilPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 0(0x00) - self._verify_metering(1, 0, CONF.host) diff --git a/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py b/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py deleted file mode 100644 index 96f5a3f4..00000000 --- a/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg - -from ceilometer.ipmi.pollsters import sensor -from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data -from ceilometer.tests.unit.ipmi.pollsters import base - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') - -TEMPERATURE_SENSOR_DATA = { - 'Temperature': ipmi_test_data.TEMPERATURE_DATA -} - -CURRENT_SENSOR_DATA = { - 'Current': ipmi_test_data.CURRENT_DATA -} - -FAN_SENSOR_DATA = { - 'Fan': ipmi_test_data.FAN_DATA -} - -VOLTAGE_SENSOR_DATA = { - 'Voltage': ipmi_test_data.VOLTAGE_DATA -} - -MISSING_SENSOR_DATA = ipmi_test_data.MISSING_SENSOR['payload']['payload'] -MALFORMED_SENSOR_DATA = ipmi_test_data.BAD_SENSOR['payload']['payload'] -MISSING_ID_SENSOR_DATA = ipmi_test_data.NO_SENSOR_ID['payload']['payload'] - - -class TestTemperatureSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return TEMPERATURE_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(10, float(32), CONF.host) - - -class TestMissingSensorData(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return MISSING_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - self._verify_metering(0) - - -class TestMalformedSensorData(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return MALFORMED_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - self._verify_metering(0) - - -class TestMissingSensorId(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return MISSING_ID_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - 
self._test_get_samples() - self._verify_metering(0) - - -class TestFanSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return FAN_SENSOR_DATA - - def make_pollster(self): - return sensor.FanSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(12, float(7140), CONF.host) - - -class TestCurrentSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return CURRENT_SENSOR_DATA - - def make_pollster(self): - return sensor.CurrentSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(1, float(130), CONF.host) - - -class TestVoltageSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return VOLTAGE_SENSOR_DATA - - def make_pollster(self): - return sensor.VoltageSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(4, float(3.309), CONF.host) diff --git a/ceilometer/tests/unit/meter/test_meter_plugins.py b/ceilometer/tests/unit/meter/test_meter_plugins.py deleted file mode 100644 index 4d8d04d9..00000000 --- a/ceilometer/tests/unit/meter/test_meter_plugins.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright 2016 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
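The sensor pollster tests above expect plain numeric volumes (float(32), float(7140), float(130), float(3.309)) to be derived from 'Sensor Reading' strings such as '4704 (+/- 0) RPM'. A hedged sketch of that extraction, assuming the production code simply takes the leading token of the reading (the actual parsing lives in the ipmi sensor/notification modules, which are not part of this hunk):

```python
def reading_to_volume(sensor_reading):
    # '4704 (+/- 0) RPM' -> 4704.0; assumes the numeric value is always the
    # first whitespace-separated token of the ipmitool 'Sensor Reading' field.
    return float(sensor_reading.split(' ', 1)[0])


assert reading_to_volume('4704 (+/- 0) RPM') == 4704.0
assert reading_to_volume('25 (+/- 0) degrees C') == 25.0
assert reading_to_volume('4.959 (+/- 0) Volts') == 4.959
```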
-import mock -from oslotest import base - -from ceilometer.event import trait_plugins - - -class TestTimedeltaPlugin(base.BaseTestCase): - - def setUp(self): - super(TestTimedeltaPlugin, self).setUp() - self.plugin = trait_plugins.TimedeltaPlugin() - - def test_timedelta_transformation(self): - match_list = [('test.timestamp1', '2016-03-02T15:04:32'), - ('test.timestamp2', '2016-03-02T16:04:32')] - value = self.plugin.trait_value(match_list) - self.assertEqual(3600, value) - - def test_timedelta_missing_field(self): - match_list = [('test.timestamp1', '2016-03-02T15:04:32')] - with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: - self.assertIsNone(self.plugin.trait_value(match_list)) - log.warning.assert_called_once_with( - 'Timedelta plugin is required two timestamp fields to create ' - 'timedelta value.') - - def test_timedelta_exceed_field(self): - match_list = [('test.timestamp1', '2016-03-02T15:04:32'), - ('test.timestamp2', '2016-03-02T16:04:32'), - ('test.timestamp3', '2016-03-02T16:10:32')] - with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: - self.assertIsNone(self.plugin.trait_value(match_list)) - log.warning.assert_called_once_with( - 'Timedelta plugin is required two timestamp fields to create ' - 'timedelta value.') - - def test_timedelta_invalid_timestamp(self): - match_list = [('test.timestamp1', '2016-03-02T15:04:32'), - ('test.timestamp2', '2016-03-02T15:004:32')] - with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: - self.assertIsNone(self.plugin.trait_value(match_list)) - msg = log.warning._mock_call_args[0][0] - self.assertTrue(msg.startswith('Failed to parse date from set ' - 'fields, both fields ') - ) - - def test_timedelta_reverse_timestamp_order(self): - match_list = [('test.timestamp1', '2016-03-02T15:15:32'), - ('test.timestamp2', '2016-03-02T15:10:32')] - value = self.plugin.trait_value(match_list) - self.assertEqual(300, value) - - def test_timedelta_precise_difference(self): - match_list = [('test.timestamp1', '2016-03-02T15:10:32.786893'), - ('test.timestamp2', '2016-03-02T15:10:32.786899')] - value = self.plugin.trait_value(match_list) - self.assertEqual(0.000006, value) diff --git a/ceilometer/tests/unit/meter/test_notifications.py b/ceilometer/tests/unit/meter/test_notifications.py deleted file mode 100644 index 7da56432..00000000 --- a/ceilometer/tests/unit/meter/test_notifications.py +++ /dev/null @@ -1,714 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
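The TimedeltaPlugin tests above pin down the plugin's contract: given exactly two (field, ISO-8601 timestamp) pairs it returns the absolute difference in seconds (3600, 300, or 0.000006 in the cases shown); with any other number of fields, or an unparsable timestamp, it returns None and logs a warning. A hedged, self-contained sketch of that behaviour (the real trait_plugins implementation parses and logs via oslo libraries; datetime.fromisoformat is used here only for brevity):

```python
from datetime import datetime


def timedelta_seconds(match_list):
    # Sketch of TimedeltaPlugin.trait_value: needs exactly two
    # (field, timestamp) pairs; returns abs(delta) in seconds, else None.
    if len(match_list) != 2:
        return None          # the real plugin also warns here
    try:
        (_, first), (_, second) = match_list
        delta = datetime.fromisoformat(second) - datetime.fromisoformat(first)
    except ValueError:
        return None          # unparsable timestamp -> warning + None
    return abs(delta.total_seconds())


assert timedelta_seconds([('test.timestamp1', '2016-03-02T15:04:32'),
                          ('test.timestamp2', '2016-03-02T16:04:32')]) == 3600.0
assert timedelta_seconds([('test.timestamp1', '2016-03-02T15:15:32'),
                          ('test.timestamp2', '2016-03-02T15:10:32')]) == 300.0
```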
-"""Tests for ceilometer.meter.notifications -""" -import copy -import mock -import os -import six -import yaml - -from oslo_config import fixture as fixture_config -from oslo_utils import encodeutils -from oslo_utils import fileutils - -import ceilometer -from ceilometer import declarative -from ceilometer.meter import notifications -from ceilometer import service as ceilometer_service -from ceilometer.tests import base as test - -NOTIFICATION = { - 'event_type': u'test.create', - 'timestamp': u'2015-06-1909: 19: 35.786893', - 'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23', - u'timestamp': u'2015-06-19T09:19:35.785330', - u'created_at': u'2015-06-19T09:25:35.785330', - u'launched_at': u'2015-06-19T09:25:40.785330', - u'message_signature': u'fake_signature1', - u'resource_metadata': {u'foo': u'bar'}, - u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', - u'volume': 1.0, - u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', - }, - u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', - u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', - u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e', - 'publisher_id': "foo123" -} - -MIDDLEWARE_EVENT = { - u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', - u'_context_quota_class': None, - u'event_type': u'objectstore.http.request', - u'_context_service_catalog': [], - u'_context_auth_token': None, - u'_context_user_id': None, - u'priority': u'INFO', - u'_context_is_admin': True, - u'_context_user': None, - u'publisher_id': u'ceilometermiddleware', - u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', - u'_context_remote_address': None, - u'_context_roles': [], - u'timestamp': u'2013-07-29 06:51:34.474815', - u'_context_timestamp': u'2013-07-29T06:51:34.348091', - u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', - u'_context_project_name': None, - u'_context_read_deleted': u'no', - u'_context_tenant': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': None, - u'_context_user_name': None, - u'payload': { - 'typeURI': 'http: //schemas.dmtf.org/cloud/audit/1.0/event', - 'eventTime': '2015-01-30T16: 38: 43.233621', - 'target': { - 'action': 'get', - 'typeURI': 'service/storage/object', - 'id': 'account', - 'metadata': { - 'path': '/1.0/CUSTOM_account/container/obj', - 'version': '1.0', - 'container': 'container', - 'object': 'obj' - } - }, - 'observer': { - 'id': 'target' - }, - 'eventType': 'activity', - 'measurements': [ - { - 'metric': { - 'metricId': 'openstack: uuid', - 'name': 'storage.objects.outgoing.bytes', - 'unit': 'B' - }, - 'result': 28 - }, - { - 'metric': { - 'metricId': 'openstack: uuid2', - 'name': 'storage.objects.incoming.bytes', - 'unit': 'B' - }, - 'result': 1 - } - ], - 'initiator': { - 'typeURI': 'service/security/account/user', - 'project_id': None, - 'id': 'openstack: 288f6260-bf37-4737-a178-5038c84ba244' - }, - 'action': 'read', - 'outcome': 'success', - 'id': 'openstack: 69972bb6-14dd-46e4-bdaf-3148014363dc' - } -} - -FULL_MULTI_MSG = { - u'_context_domain': None, - u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', - 'event_type': u'full.sample', - 'timestamp': u'2015-06-1909: 19: 35.786893', - u'_context_auth_token': None, - u'_context_read_only': False, - 'payload': [{ - u'counter_name': u'instance1', - u'user_id': u'user1', - u'resource_id': u'res1', - u'counter_unit': u'ns', - u'counter_volume': 28.0, - 
u'project_id': u'proj1', - u'counter_type': u'gauge' - }, - { - u'counter_name': u'instance2', - u'user_id': u'user2', - u'resource_id': u'res2', - u'counter_unit': u'%', - u'counter_volume': 1.0, - u'project_id': u'proj2', - u'counter_type': u'delta' - }], - u'_context_resource_uuid': None, - u'_context_user_identity': u'fake_user_identity---', - u'_context_show_deleted': False, - u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', - 'priority': 'info', - u'_context_is_admin': True, - u'_context_project_domain': None, - u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'_context_user_domain': None, - 'publisher_id': u'ceilometer.api', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' -} - -METRICS_UPDATE = { - u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', - u'_context_quota_class': None, - u'event_type': u'compute.metrics.update', - u'_context_service_catalog': [], - u'_context_auth_token': None, - u'_context_user_id': None, - u'payload': { - u'metrics': [ - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.frequency', 'value': 1600, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.user.time', 'value': 17421440000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.kernel.time', 'value': 7852600000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.idle.time', 'value': 1307374400000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.iowait.time', 'value': 11697470000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.user.percent', 'value': 0.012959045637294348, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.kernel.percent', 'value': 0.005841204961898534, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.idle.percent', 'value': 0.9724985141658965, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.iowait.percent', 'value': 0.008701235234910634, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.percent', 'value': 0.027501485834103515, - 'source': 'libvirt.LibvirtDriver'}], - u'nodename': u'tianst.sh.intel.com', - u'host': u'tianst', - u'host_id': u'10.0.1.1'}, - u'priority': u'INFO', - u'_context_is_admin': True, - u'_context_user': None, - u'publisher_id': u'compute.tianst.sh.intel.com', - u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', - u'_context_remote_address': None, - u'_context_roles': [], - u'timestamp': u'2013-07-29 06:51:34.474815', - u'_context_timestamp': u'2013-07-29T06:51:34.348091', - u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', - u'_context_project_name': None, - u'_context_read_deleted': u'no', - u'_context_tenant': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': None, - u'_context_user_name': None -} - - -class TestMeterDefinition(test.BaseTestCase): - - def test_config_definition(self): - cfg = dict(name="test", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id") - handler = notifications.MeterDefinition(cfg, mock.Mock()) - self.assertTrue(handler.match_type("test.create")) - sample = 
list(handler.to_samples(NOTIFICATION))[0] - self.assertEqual(1.0, sample["volume"]) - self.assertEqual("bea70e51c7340cb9d555b15cbfcaec23", - sample["resource_id"]) - self.assertEqual("30be1fc9a03c4e94ab05c403a8a377f2", - sample["project_id"]) - - def test_config_required_missing_fields(self): - cfg = dict() - try: - notifications.MeterDefinition(cfg, mock.Mock()) - except declarative.DefinitionException as e: - self.assertIn("Required fields ['name', 'type', 'event_type'," - " 'unit', 'volume', 'resource_id']" - " not specified", - encodeutils.exception_to_unicode(e)) - - def test_bad_type_cfg_definition(self): - cfg = dict(name="test", type="foo", event_type="bar.create", - unit="foo", volume="bar", - resource_id="bea70e51c7340cb9d555b15cbfcaec23") - try: - notifications.MeterDefinition(cfg, mock.Mock()) - except declarative.DefinitionException as e: - self.assertIn("Invalid type foo specified", - encodeutils.exception_to_unicode(e)) - - -class TestMeterProcessing(test.BaseTestCase): - - def setUp(self): - super(TestMeterProcessing, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.handler = notifications.ProcessMeterNotifications(mock.Mock()) - - def test_fallback_meter_path(self): - self.CONF.set_override('meter_definitions_cfg_file', - '/not/existing/path', group='meter') - with mock.patch('ceilometer.declarative.open', - mock.mock_open(read_data='---\nmetric: []'), - create=True) as mock_open: - self.handler._load_definitions() - if six.PY3: - path = os.path.dirname(ceilometer.__file__) - else: - path = "ceilometer" - mock_open.assert_called_with(path + "/meter/data/meters.yaml") - - def _load_meter_def_file(self, cfg): - if six.PY3: - cfg = cfg.encode('utf-8') - meter_cfg_file = fileutils.write_to_tempfile(content=cfg, - prefix="meters", - suffix="yaml") - self.CONF.set_override('meter_definitions_cfg_file', - meter_cfg_file, group='meter') - self.handler.definitions = self.handler._load_definitions() - - @mock.patch('ceilometer.meter.notifications.LOG') - def test_bad_meter_definition_skip(self, LOG): - cfg = yaml.dump( - {'metric': [dict(name="good_test_1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id"), - dict(name="bad_test_2", type="bad_type", - event_type="bar.create", - unit="foo", volume="bar", - resource_id="bea70e51c7340cb9d555b15cbfcaec23"), - dict(name="good_test_3", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - self.assertEqual(2, len(self.handler.definitions)) - args, kwargs = LOG.error.call_args_list[0] - self.assertEqual("Error loading meter definition: %s", args[0]) - self.assertTrue(args[1].endswith("Invalid type bad_type specified")) - - def test_jsonpath_values_parsed(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('test1', s1['name']) - self.assertEqual(1.0, s1['volume']) - self.assertEqual('bea70e51c7340cb9d555b15cbfcaec23', s1['resource_id']) - 
self.assertEqual('30be1fc9a03c4e94ab05c403a8a377f2', s1['project_id']) - - def test_multiple_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id"), - dict(name="test2", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - data = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(2, len(data)) - expected_names = ['test1', 'test2'] - for s in data: - self.assertIn(s.as_dict()['name'], expected_names) - - def test_unmatched_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.update", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(0, len(c)) - - def test_regex_match_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.*", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - - def test_default_timestamp(self): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][1] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - multi="name")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual(MIDDLEWARE_EVENT['timestamp'], s1['timestamp']) - - def test_custom_timestamp(self): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][1] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - multi="name", - timestamp='$.payload.eventTime')]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual(MIDDLEWARE_EVENT['payload']['eventTime'], - s1['timestamp']) - - def test_custom_timestamp_expr_meter(self): - cfg = yaml.dump( - {'metric': [dict(name='compute.node.cpu.frequency', - event_type="compute.metrics.update", - type='gauge', - unit="ns", - volume="$.payload.metrics[?(@.name='cpu.frequency')]" - ".value", - resource_id="'prefix-' + $.payload.nodename", - timestamp="$.payload.metrics" - "[?(@.name='cpu.frequency')].timestamp")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(METRICS_UPDATE)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('compute.node.cpu.frequency', s1['name']) - self.assertEqual("2013-07-29T06:51:34.472416", s1['timestamp']) - - def 
test_default_metadata(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.*", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - meta = NOTIFICATION['payload'].copy() - meta['host'] = NOTIFICATION['publisher_id'] - meta['event_type'] = NOTIFICATION['event_type'] - self.assertEqual(meta, s1['resource_metadata']) - - def test_datetime_plugin(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.*", - type="gauge", - unit="sec", - volume={"fields": ["$.payload.created_at", - "$.payload.launched_at"], - "plugin": "timedelta"}, - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual(5.0, s1['volume']) - - def test_custom_metadata(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.*", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id", - metadata={'proj': '$.payload.project_id', - 'dict': '$.payload.resource_metadata'})]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - meta = {'proj': s1['project_id'], - 'dict': NOTIFICATION['payload']['resource_metadata']} - self.assertEqual(meta, s1['resource_metadata']) - - def test_multi_match_event_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id"), - dict(name="test2", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(2, len(c)) - - def test_multi_meter_payload(self): - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - lookup=["name", "volume", "unit"])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(MIDDLEWARE_EVENT)) - self.assertEqual(2, len(c)) - s1 = c[0].as_dict() - self.assertEqual('storage.objects.outgoing.bytes', s1['name']) - self.assertEqual(28, s1['volume']) - self.assertEqual('B', s1['unit']) - s2 = c[1].as_dict() - self.assertEqual('storage.objects.incoming.bytes', s2['name']) - self.assertEqual(1, s2['volume']) - self.assertEqual('B', s2['unit']) - - def test_multi_meter_payload_single(self): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][1] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - 
project_id="$.payload.initiator.project_id", - lookup=["name", "unit"])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('storage.objects.outgoing.bytes', s1['name']) - self.assertEqual(28, s1['volume']) - self.assertEqual('B', s1['unit']) - - def test_multi_meter_payload_none(self): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - lookup="name")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(0, len(c)) - - def test_multi_meter_payload_all_multi(self): - cfg = yaml.dump( - {'metric': [dict(name="$.payload.[*].counter_name", - event_type="full.sample", - type="$.payload.[*].counter_type", - unit="$.payload.[*].counter_unit", - volume="$.payload.[*].counter_volume", - resource_id="$.payload.[*].resource_id", - project_id="$.payload.[*].project_id", - user_id="$.payload.[*].user_id", - lookup=['name', 'type', 'unit', 'volume', - 'resource_id', 'project_id', 'user_id'])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(FULL_MULTI_MSG)) - self.assertEqual(2, len(c)) - msg = FULL_MULTI_MSG['payload'] - for idx, val in enumerate(c): - s1 = val.as_dict() - self.assertEqual(msg[idx]['counter_name'], s1['name']) - self.assertEqual(msg[idx]['counter_volume'], s1['volume']) - self.assertEqual(msg[idx]['counter_unit'], s1['unit']) - self.assertEqual(msg[idx]['counter_type'], s1['type']) - self.assertEqual(msg[idx]['resource_id'], s1['resource_id']) - self.assertEqual(msg[idx]['project_id'], s1['project_id']) - self.assertEqual(msg[idx]['user_id'], s1['user_id']) - - @mock.patch('ceilometer.meter.notifications.LOG') - def test_multi_meter_payload_invalid_missing(self, LOG): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][0]['result'] - del event['payload']['measurements'][1]['result'] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - lookup=["name", "unit", "volume"])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(0, len(c)) - LOG.warning.assert_called_with('Only 0 fetched meters contain ' - '"volume" field instead of 2.') - - @mock.patch('ceilometer.meter.notifications.LOG') - def test_multi_meter_payload_invalid_short(self, LOG): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][0]['result'] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - lookup=["name", "unit", "volume"])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - 
self.assertEqual(0, len(c)) - LOG.warning.assert_called_with('Only 1 fetched meters contain ' - '"volume" field instead of 2.') - - def test_arithmetic_expr_meter(self): - cfg = yaml.dump( - {'metric': [dict(name='compute.node.cpu.percent', - event_type="compute.metrics.update", - type='gauge', - unit="percent", - volume="$.payload.metrics[" - "?(@.name='cpu.percent')].value" - " * 100", - resource_id="$.payload.host + '_'" - " + $.payload.nodename")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(METRICS_UPDATE)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('compute.node.cpu.percent', s1['name']) - self.assertEqual(2.7501485834103514, s1['volume']) - self.assertEqual("tianst_tianst.sh.intel.com", - s1['resource_id']) - - def test_string_expr_meter(self): - cfg = yaml.dump( - {'metric': [dict(name='compute.node.cpu.frequency', - event_type="compute.metrics.update", - type='gauge', - unit="ns", - volume="$.payload.metrics[?(@.name='cpu.frequency')]" - ".value", - resource_id="$.payload.host + '_'" - " + $.payload.nodename")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(METRICS_UPDATE)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('compute.node.cpu.frequency', s1['name']) - self.assertEqual(1600, s1['volume']) - self.assertEqual("tianst_tianst.sh.intel.com", - s1['resource_id']) - - def test_prefix_expr_meter(self): - cfg = yaml.dump( - {'metric': [dict(name='compute.node.cpu.frequency', - event_type="compute.metrics.update", - type='gauge', - unit="ns", - volume="$.payload.metrics[?(@.name='cpu.frequency')]" - ".value", - resource_id="'prefix-' + $.payload.nodename")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(METRICS_UPDATE)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('compute.node.cpu.frequency', s1['name']) - self.assertEqual(1600, s1['volume']) - self.assertEqual("prefix-tianst.sh.intel.com", - s1['resource_id']) - - def test_duplicate_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id"), - dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) diff --git a/ceilometer/tests/unit/network/__init__.py b/ceilometer/tests/unit/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/network/services/__init__.py b/ceilometer/tests/unit/network/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/network/services/test_fwaas.py b/ceilometer/tests/unit/network/services/test_fwaas.py deleted file mode 100644 index bf300303..00000000 --- a/ceilometer/tests/unit/network/services/test_fwaas.py +++ /dev/null @@ -1,169 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import fwaas - - -class _BaseTestFWPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestFWPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - catalog = (plugin_base._get_keystone.session.auth.get_access. - return_value.service_catalog) - catalog.get_endpoints = mock.MagicMock( - return_value={'network': mock.ANY}) - - -class TestFirewallPollster(_BaseTestFWPollster): - - def setUp(self): - super(TestFirewallPollster, self).setUp() - self.pollster = fwaas.FirewallPollster() - fake_fw = self.fake_get_fw_service() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'firewall_get_all', - return_value=fake_fw)) - - @staticmethod - def fake_get_fw_service(): - return [{'status': 'ACTIVE', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - {'status': 'INACTIVE', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - {'status': 'PENDING_CREATE', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - {'status': 'error', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - ] - - def test_fw_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_service())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_fw_service()[0][field], - samples[0].resource_metadata[field]) - - def test_vpn_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_service())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_vpn_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_service())) - self.assertEqual(set(['network.services.firewall']), - set([s.name for s in samples])) - - def test_vpn_discovery(self): - discovered_fws = discovery.FirewallDiscovery().discover(self.manager) - self.assertEqual(3, len(discovered_fws)) - - for vpn in self.fake_get_fw_service(): - if 
vpn['status'] == 'error': - self.assertNotIn(vpn, discovered_fws) - else: - self.assertIn(vpn, discovered_fws) - - -class TestIPSecConnectionsPollster(_BaseTestFWPollster): - - def setUp(self): - super(TestIPSecConnectionsPollster, self).setUp() - self.pollster = fwaas.FirewallPolicyPollster() - fake_fw_policy = self.fake_get_fw_policy() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'fw_policy_get_all', - return_value=fake_fw_policy)) - - @staticmethod - def fake_get_fw_policy(): - return [{'name': 'my_fw_policy', - 'description': 'fw_policy', - 'admin_state_up': True, - 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', - 'firewall_rules': [{'enabled': True, - 'action': 'allow', - 'ip_version': 4, - 'protocol': 'tcp', - 'destination_port': '80', - 'source_ip_address': '10.24.4.2'}, - {'enabled': True, - 'action': 'deny', - 'ip_version': 4, - 'protocol': 'tcp', - 'destination_port': '22'}], - 'shared': True, - 'audited': True, - 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} - ] - - def test_policy_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_policy())) - self.assertEqual(1, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_fw_policy()[0][field], - samples[0].resource_metadata[field]) - - def test_get_policy_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_policy())) - self.assertEqual(set(['network.services.firewall.policy']), - set([s.name for s in samples])) - - def test_fw_policy_discovery(self): - discovered_policy = discovery.FirewallPolicyDiscovery().discover( - self.manager) - self.assertEqual(1, len(discovered_policy)) - self.assertEqual(self.fake_get_fw_policy(), discovered_policy) diff --git a/ceilometer/tests/unit/network/services/test_lbaas.py b/ceilometer/tests/unit/network/services/test_lbaas.py deleted file mode 100644 index c705d611..00000000 --- a/ceilometer/tests/unit/network/services/test_lbaas.py +++ /dev/null @@ -1,506 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslo_config import cfg -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import lbaas - - -class _BaseTestLBPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestLBPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.manager = manager.AgentManager() - cfg.CONF.set_override('neutron_lbaas_version', - 'v1', - group='service_types') - plugin_base._get_keystone = mock.Mock() - catalog = (plugin_base._get_keystone.session.auth.get_access. 
- return_value.service_catalog) - catalog.get_endpoints = mock.MagicMock( - return_value={'network': mock.ANY}) - - -class TestLBPoolPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBPoolPollster, self).setUp() - self.pollster = lbaas.LBPoolPollster() - fake_pools = self.fake_get_pools() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'pool_get_all', - return_value=fake_pools)) - - @staticmethod - def fake_get_pools(): - return [{'status': 'ACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'INACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb02', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'PENDING_CREATE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb03', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'UNKNOWN', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb03', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'error', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb_error', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - ] - - def test_pool_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_pools())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_pools()[0][field], - samples[0].resource_metadata[field]) - - def test_pool_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_pools())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_pool_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - 
resources=self.fake_get_pools())) - self.assertEqual(set(['network.services.lb.pool']), - set([s.name for s in samples])) - - def test_pool_discovery(self): - discovered_pools = discovery.LBPoolsDiscovery().discover(self.manager) - self.assertEqual(4, len(discovered_pools)) - for pool in self.fake_get_pools(): - if pool['status'] == 'error': - self.assertNotIn(pool, discovered_pools) - else: - self.assertIn(pool, discovered_pools) - - -class TestLBVipPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBVipPollster, self).setUp() - self.pollster = lbaas.LBVipPollster() - fake_vips = self.fake_get_vips() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'vip_get_all', - return_value=fake_vips)) - - @staticmethod - def fake_get_vips(): - return [{'status': 'ACTIVE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.2', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip'}, - {'status': 'INACTIVE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.3', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip02'}, - {'status': 'PENDING_CREATE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.4', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip03'}, - {'status': 'UNKNOWN', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.8', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip03'}, - {'status': 'error', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.8', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip_error'}, - ] - - def test_vip_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vips())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: 
- self.assertEqual(self.fake_get_vips()[0][field], - samples[0].resource_metadata[field]) - - def test_pool_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vips())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_vip_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vips())) - self.assertEqual(set(['network.services.lb.vip']), - set([s.name for s in samples])) - - def test_vip_discovery(self): - discovered_vips = discovery.LBVipsDiscovery().discover(self.manager) - self.assertEqual(4, len(discovered_vips)) - for pool in self.fake_get_vips(): - if pool['status'] == 'error': - self.assertNotIn(pool, discovered_vips) - else: - self.assertIn(pool, discovered_vips) - - -class TestLBMemberPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBMemberPollster, self).setUp() - self.pollster = lbaas.LBMemberPollster() - fake_members = self.fake_get_members() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'member_get_all', - return_value=fake_members)) - - @staticmethod - def fake_get_members(): - return [{'status': 'ACTIVE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.3', - 'status_description': None, - 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'INACTIVE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.5', - 'status_description': None, - 'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'PENDING_CREATE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.6', - 'status_description': None, - 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'UNKNOWN', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.6', - 'status_description': None, - 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'error', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.6', - 'status_description': None, - 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, - ] - - def test_get_samples_not_empty(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_members())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_members()[0][field], - samples[0].resource_metadata[field]) - - def test_pool_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_members())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_members())) - self.assertEqual(set(['network.services.lb.member']), - set([s.name for s in samples])) - - def test_members_discovery(self): - 
discovered_members = discovery.LBMembersDiscovery().discover( - self.manager) - self.assertEqual(4, len(discovered_members)) - for pool in self.fake_get_members(): - if pool['status'] == 'error': - self.assertNotIn(pool, discovered_members) - else: - self.assertIn(pool, discovered_members) - - -class TestLBHealthProbePollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBHealthProbePollster, self).setUp() - self.pollster = lbaas.LBHealthMonitorPollster() - fake_health_monitor = self.fake_get_health_monitor() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'health_monitor_get_all', - return_value=fake_health_monitor)) - - @staticmethod - def fake_get_health_monitor(): - return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', - 'admin_state_up': True, - 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", - 'delay': 2, - 'max_retries': 5, - 'timeout': 5, - 'pools': [], - 'type': 'PING', - }] - - def test_get_samples_not_empty(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_health_monitor())) - self.assertEqual(1, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_health_monitor()[0][field], - samples[0].resource_metadata[field]) - - def test_get_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_health_monitor())) - self.assertEqual(set(['network.services.lb.health_monitor']), - set([s.name for s in samples])) - - def test_probes_discovery(self): - discovered_probes = discovery.LBHealthMonitorsDiscovery().discover( - self.manager) - self.assertEqual(discovered_probes, self.fake_get_health_monitor()) - - -class TestLBStatsPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBStatsPollster, self).setUp() - fake_pool_stats = self.fake_pool_stats() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'pool_stats', - return_value=fake_pool_stats)) - - fake_pools = self.fake_get_pools() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'pool_get_all', - return_value=fake_pools)) - - @staticmethod - def fake_get_pools(): - return [{'status': 'ACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - ] - - @staticmethod - def fake_pool_stats(): - return {'stats': {'active_connections': 2, - 'bytes_in': 1, - 'bytes_out': 3, - 'total_connections': 4 - } - } - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, sample_name, expected_volume, - expected_type): - pollster = factory() - cache = {} - samples = list(pollster.get_samples(self.manager, cache, - self.fake_get_pools())) - self.assertEqual(1, len(samples)) - self.assertIsNotNone(samples) - self.assertIn('lbstats', cache) - self.assertEqual(set([sample_name]), set([s.name for s in samples])) - - match = [s for s in samples if s.name == sample_name] - self.assertEqual(1, len(match), 'missing counter %s' % sample_name) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual(expected_type, match[0].type) - - def test_lb_total_connections(self): - self._check_get_samples(lbaas.LBTotalConnectionsPollster, - 'network.services.lb.total.connections', - 4, 'cumulative') - - def test_lb_active_connections(self): - self._check_get_samples(lbaas.LBActiveConnectionsPollster, - 'network.services.lb.active.connections', - 2, 'gauge') - - def test_lb_incoming_bytes(self): - self._check_get_samples(lbaas.LBBytesInPollster, - 'network.services.lb.incoming.bytes', - 1, 'gauge') - - def test_lb_outgoing_bytes(self): - self._check_get_samples(lbaas.LBBytesOutPollster, - 'network.services.lb.outgoing.bytes', - 3, 'gauge') diff --git a/ceilometer/tests/unit/network/services/test_lbaas_v2.py b/ceilometer/tests/unit/network/services/test_lbaas_v2.py deleted file mode 100644 index 42fc73ca..00000000 --- a/ceilometer/tests/unit/network/services/test_lbaas_v2.py +++ /dev/null @@ -1,303 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslo_config import cfg -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import lbaas - - -class _BaseTestLBPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestLBPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - catalog = (plugin_base._get_keystone.session.auth.get_access. 
- return_value.service_catalog) - catalog.get_endpoints = mock.MagicMock( - return_value={'network': mock.ANY}) - - -class TestLBListenerPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBListenerPollster, self).setUp() - self.pollster = lbaas.LBListenerPollster() - self.pollster.lb_version = 'v2' - fake_listeners = self.fake_list_listeners() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'list_listener', - return_value=fake_listeners)) - - @staticmethod - def fake_list_listeners(): - return [{'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'loadbalancers': [ - {'id': 'a9729389-6147-41a3-ab22-a24aed8692b2'}], - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'name': 'mylistener_online', - 'admin_state_up': True, - 'connection_limit': 100, - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'protocol_port': 80, - 'operating_status': 'ONLINE'}, - {'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'loadbalancers': [ - {'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a'}], - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylistener_offline', - 'admin_state_up': True, - 'connection_limit': 100, - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'protocol_port': 80, - 'operating_status': 'OFFLINE'}, - {'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'loadbalancers': [ - {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], - 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'name': 'mylistener_error', - 'admin_state_up': True, - 'connection_limit': 100, - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'protocol_port': 80, - 'operating_status': 'ERROR'}, - {'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'loadbalancers': [ - {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], - 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'name': 'mylistener_pending_create', - 'admin_state_up': True, - 'connection_limit': 100, - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'protocol_port': 80, - 'operating_status': 'PENDING_CREATE'} - ] - - def test_listener_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_listeners())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_list_listeners()[0][field], - samples[0].resource_metadata[field]) - - def test_listener_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_listeners())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(4, samples[2].volume) - - def test_list_listener_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_listeners())) - self.assertEqual(set(['network.services.lb.listener']), - set([s.name for s in samples])) - - def test_listener_discovery(self): - discovered_listeners = discovery.LBListenersDiscovery().discover( - self.manager) - self.assertEqual(4, len(discovered_listeners)) - for listener in self.fake_list_listeners(): - if listener['operating_status'] == 'pending_create': - self.assertNotIn(listener, discovered_listeners) - else: - self.assertIn(listener, discovered_listeners) - - -class TestLBLoadBalancerPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBLoadBalancerPollster, self).setUp() - self.pollster = lbaas.LBLoadBalancerPollster() - self.pollster.lb_version = 'v2' - fake_loadbalancers = self.fake_list_loadbalancers() - 
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'list_loadbalancer', - return_value=fake_loadbalancers)) - - @staticmethod - def fake_list_loadbalancers(): - return [{'operating_status': 'ONLINE', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'ACTIVE', - 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], - 'vip_address': '10.0.0.2', - 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'loadbalancer_online'}, - {'operating_status': 'OFFLINE', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'INACTIVE', - 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a'}], - 'vip_address': '10.0.0.3', - 'vip_subnet_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'name': 'loadbalancer_offline'}, - {'operating_status': 'ERROR', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'INACTIVE', - 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d8b'}], - 'vip_address': '10.0.0.4', - 'vip_subnet_id': '213d3059-87a4-45a5-91e9-d721068df0b2', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'loadbalancer_error'}, - {'operating_status': 'PENDING_CREATE', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'INACTIVE', - 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d4ed7c'}], - 'vip_address': '10.0.0.5', - 'vip_subnet_id': '123d3059-87a4-45a5-91e9-d721068ae0c3', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395763b2', - 'name': 'loadbalancer_pending_create'} - ] - - def test_loadbalancer_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_loadbalancers())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_list_loadbalancers()[0][field], - samples[0].resource_metadata[field]) - - def test_loadbalancer_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_loadbalancers())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(4, samples[2].volume) - - def test_list_loadbalancer_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_loadbalancers())) - self.assertEqual(set(['network.services.lb.loadbalancer']), - set([s.name for s in samples])) - - def test_loadbalancer_discovery(self): - discovered_loadbalancers = \ - discovery.LBLoadBalancersDiscovery().discover(self.manager) - self.assertEqual(4, len(discovered_loadbalancers)) - for loadbalancer in self.fake_list_loadbalancers(): - if loadbalancer['operating_status'] == 'pending_create': - self.assertNotIn(loadbalancer, discovered_loadbalancers) - else: - self.assertIn(loadbalancer, discovered_loadbalancers) - - -class TestLBStatsPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBStatsPollster, self).setUp() - fake_balancer_stats = self.fake_balancer_stats() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'get_loadbalancer_stats', - return_value=fake_balancer_stats)) - - fake_loadbalancers = self.fake_list_loadbalancers() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'list_loadbalancer', - return_value=fake_loadbalancers)) - cfg.CONF.set_override('neutron_lbaas_version', - 'v2', - group='service_types') - - @staticmethod - def fake_list_loadbalancers(): - return [{'operating_status': 'ONLINE', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'ACTIVE', - 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], - 'vip_address': '10.0.0.2', - 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'loadbalancer_online'}, - ] - - @staticmethod - def fake_balancer_stats(): - return {'active_connections': 2, - 'bytes_in': 1, - 'bytes_out': 3, - 'total_connections': 4} - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, sample_name, expected_volume, - expected_type): - pollster = factory() - - cache = {} - samples = list(pollster.get_samples(self.manager, cache, - self.fake_list_loadbalancers())) - self.assertEqual(1, len(samples)) - self.assertIsNotNone(samples) - self.assertIn('lbstats', cache) - self.assertEqual(set([sample_name]), set([s.name for s in samples])) - - match = [s for s in samples if s.name == sample_name] - self.assertEqual(1, len(match), 'missing counter %s' % sample_name) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual(expected_type, match[0].type) - - def test_lb_total_connections(self): - self._check_get_samples(lbaas.LBTotalConnectionsPollster, - 'network.services.lb.total.connections', - 4, 'cumulative') - - def test_lb_active_connections(self): - self._check_get_samples(lbaas.LBActiveConnectionsPollster, - 'network.services.lb.active.connections', - 2, 'gauge') - - def test_lb_incoming_bytes(self): - self._check_get_samples(lbaas.LBBytesInPollster, - 'network.services.lb.incoming.bytes', - 1, 'gauge') - - def test_lb_outgoing_bytes(self): - self._check_get_samples(lbaas.LBBytesOutPollster, - 'network.services.lb.outgoing.bytes', - 3, 'gauge') diff --git a/ceilometer/tests/unit/network/services/test_vpnaas.py b/ceilometer/tests/unit/network/services/test_vpnaas.py deleted file mode 100644 index 399ff225..00000000 --- a/ceilometer/tests/unit/network/services/test_vpnaas.py +++ /dev/null @@ -1,176 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import vpnaas - - -class _BaseTestVPNPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestVPNPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - catalog = (plugin_base._get_keystone.session.auth.get_access. 
- return_value.service_catalog) - catalog.get_endpoints = mock.MagicMock( - return_value={'network': mock.ANY}) - - -class TestVPNServicesPollster(_BaseTestVPNPollster): - - def setUp(self): - super(TestVPNServicesPollster, self).setUp() - self.pollster = vpnaas.VPNServicesPollster() - fake_vpn = self.fake_get_vpn_service() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'vpn_get_all', - return_value=fake_vpn)) - - @staticmethod - def fake_get_vpn_service(): - return [{'status': 'ACTIVE', - 'name': 'myvpn', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - {'status': 'INACTIVE', - 'name': 'myvpn', - 'description': '', - 'admin_state_up': True, - 'id': 'cdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - {'status': 'PENDING_CREATE', - 'name': 'myvpn', - 'description': '', - 'id': 'bdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - {'status': 'error', - 'name': 'myvpn', - 'description': '', - 'id': 'edde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'admin_state_up': False, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - ] - - def test_vpn_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vpn_service())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_vpn_service()[0][field], - samples[0].resource_metadata[field]) - - def test_vpn_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vpn_service())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_vpn_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vpn_service())) - self.assertEqual(set(['network.services.vpn']), - set([s.name for s in samples])) - - def test_vpn_discovery(self): - discovered_vpns = discovery.VPNServicesDiscovery().discover( - self.manager) - self.assertEqual(3, len(discovered_vpns)) - - for vpn in self.fake_get_vpn_service(): - if vpn['status'] == 'error': - self.assertNotIn(vpn, discovered_vpns) - else: - self.assertIn(vpn, discovered_vpns) - - -class TestIPSecConnectionsPollster(_BaseTestVPNPollster): - - def setUp(self): - super(TestIPSecConnectionsPollster, self).setUp() - self.pollster = vpnaas.IPSecConnectionsPollster() - fake_conns = self.fake_get_ipsec_connections() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'ipsec_site_connections_get_all', - return_value=fake_conns)) - - @staticmethod - def fake_get_ipsec_connections(): - return [{'name': 'connection1', - 'description': 'Remote-connection1', - 'peer_address': '192.168.1.10', - 'peer_id': '192.168.1.10', - 'peer_cidrs': ['192.168.2.0/24', - '192.168.3.0/24'], - 'mtu': 1500, - 'psk': 'abcd', - 'initiator': 'bi-directional', - 'dpd': { - 'action': 'hold', - 'interval': 30, - 'timeout': 120}, - 'ikepolicy_id': 'ade3d818-fdcb-fg4b-de7f-4550dc8a9d7a', - 'ipsecpolicy_id': 'fce3d818-fdcb-fg4b-de7f-7850dc8a9d7a', - 'vpnservice_id': 'dce3d818-fdcb-fg4b-de7f-5650dc8a9d7a', - 'admin_state_up': True, - 'status': 'ACTIVE', - 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', - 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} - ] - - def test_conns_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_ipsec_connections())) - self.assertEqual(1, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_ipsec_connections()[0][field], - samples[0].resource_metadata[field]) - - def test_get_conns_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_ipsec_connections())) - self.assertEqual(set(['network.services.vpn.connections']), - set([s.name for s in samples])) - - def test_conns_discovery(self): - discovered_conns = discovery.IPSecConnectionsDiscovery().discover( - self.manager) - self.assertEqual(1, len(discovered_conns)) - self.assertEqual(self.fake_get_ipsec_connections(), discovered_conns) diff --git a/ceilometer/tests/unit/network/statistics/__init__.py b/ceilometer/tests/unit/network/statistics/__init__.py deleted file mode 100644 index 8602c6a8..00000000 --- a/ceilometer/tests/unit/network/statistics/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslotest import base - - -class _PollsterTestBase(base.BaseTestCase): - - def _test_pollster(self, pollster_class, meter_name, - meter_type, meter_unit): - - pollster = pollster_class() - - self.assertEqual(pollster.meter_name, meter_name) - self.assertEqual(pollster.meter_type, meter_type) - self.assertEqual(pollster.meter_unit, meter_unit) diff --git a/ceilometer/tests/unit/network/statistics/opencontrail/__init__.py b/ceilometer/tests/unit/network/statistics/opencontrail/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py b/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py deleted file mode 100644 index fdee69c9..00000000 --- a/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import fixture as config_fixture -from oslotest import base - -from ceilometer.network.statistics.opencontrail import client -from ceilometer import service as ceilometer_service - - -class TestOpencontrailClient(base.BaseTestCase): - - def setUp(self): - super(TestOpencontrailClient, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.client = client.Client('http://127.0.0.1:8081', {'arg1': 'aaa'}) - - self.get_resp = mock.MagicMock() - self.get = mock.patch('requests.get', - return_value=self.get_resp).start() - self.get_resp.raw.version = 1.1 - self.get_resp.status_code = 200 - self.get_resp.reason = 'OK' - self.get_resp.content = '' - - def test_vm_statistics(self): - self.client.networks.get_vm_statistics('bbb') - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - expected_url = ('http://127.0.0.1:8081/analytics/' - 'uves/virtual-machine/bbb') - self.assertEqual(expected_url, call_args[0]) - - data = call_kwargs.get('data') - - expected_data = {'arg1': 'aaa'} - self.assertEqual(expected_data, data) - - def test_vm_statistics_params(self): - self.client.networks.get_vm_statistics('bbb', - {'resource': 'fip_stats_list', - 'virtual_network': 'ccc'}) - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - expected_url = ('http://127.0.0.1:8081/analytics/' - 'uves/virtual-machine/bbb') - self.assertEqual(expected_url, call_args[0]) - - data = call_kwargs.get('data') - - expected_data = {'arg1': 'aaa', - 'resource': 'fip_stats_list', - 'virtual_network': 'ccc'} - self.assertEqual(expected_data, data) diff --git a/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py b/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py deleted file mode 100644 index c308f446..00000000 --- a/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslotest import base -from six.moves.urllib import parse as urlparse - -from ceilometer.network.statistics.opencontrail import driver - - -class TestOpencontrailDriver(base.BaseTestCase): - - def setUp(self): - super(TestOpencontrailDriver, self).setUp() - - self.nc_ports = mock.patch('ceilometer.neutron_client' - '.Client.port_get_all', - return_value=self.fake_ports()) - self.nc_ports.start() - - self.driver = driver.OpencontrailDriver() - self.parse_url = urlparse.ParseResult('opencontrail', - '127.0.0.1:8143', - '/', None, None, None) - self.params = {'password': ['admin'], - 'scheme': ['http'], - 'username': ['admin'], - 'verify_ssl': ['false'], - 'resource': ['if_stats_list']} - - @staticmethod - def fake_ports(): - return [{'admin_state_up': True, - 'device_owner': 'compute:None', - 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', - 'extra_dhcp_opts': [], - 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', - 'mac_address': 'fa:16:3e:c5:35:93', - 'name': '', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'status': 'ACTIVE', - 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}] - - @staticmethod - def fake_port_stats(): - return {"value": [{ - "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", - "value": { - "UveVirtualMachineAgent": { - "if_stats_list": [{ - "out_bytes": 22, - "in_bandwidth_usage": 0, - "in_bytes": 23, - "out_bandwidth_usage": 0, - "out_pkts": 5, - "in_pkts": 6, - "name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442") - }], - "fip_stats_list": [{ - "in_bytes": 33, - "iface_name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442"), - "out_bytes": 44, - "out_pkts": 10, - "virtual_network": "default-domain:openstack:public", - "in_pkts": 11, - "ip_address": "1.1.1.1" - }] - }}}]} - - @staticmethod - def fake_port_stats_with_node(): - return {"value": [{ - "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", - "value": { - "UveVirtualMachineAgent": { - "if_stats_list": [ - [[{ - "out_bytes": 22, - "in_bandwidth_usage": 0, - "in_bytes": 23, - "out_bandwidth_usage": 0, - "out_pkts": 5, - "in_pkts": 6, - "name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442") - }], 'node1'], - [[{ - "out_bytes": 22, - "in_bandwidth_usage": 0, - "in_bytes": 23, - "out_bandwidth_usage": 0, - "out_pkts": 4, - "in_pkts": 13, - "name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442")}], - 'node2'] - ] - }}}]} - - def _test_meter(self, meter_name, expected, fake_port_stats=None): - if not fake_port_stats: - fake_port_stats = self.fake_port_stats() - with mock.patch('ceilometer.network.' - 'statistics.opencontrail.' - 'client.NetworksAPIClient.' 
- 'get_vm_statistics', - return_value=fake_port_stats) as port_stats: - - samples = self.driver.get_sample_data(meter_name, self.parse_url, - self.params, {}) - - self.assertEqual(expected, [s for s in samples]) - - port_stats.assert_called_with('*') - - def test_switch_port_receive_packets_with_node(self): - expected = [(6, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},), - (13, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.receive.packets', expected, - self.fake_port_stats_with_node()) - - def test_switch_port_receive_packets(self): - expected = [(6, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.receive.packets', expected) - - def test_switch_port_transmit_packets(self): - expected = [(5, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.transmit.packets', expected) - - def test_switch_port_receive_bytes(self): - expected = [(23, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.receive.bytes', expected) - - def test_switch_port_transmit_bytes(self): - expected = [(22, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.transmit.bytes', expected) - - def test_switch_port_receive_packets_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(11, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'},)] - self._test_meter('switch.port.receive.packets', expected) - - def test_switch_port_transmit_packets_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(10, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': 
'89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'},)] - self._test_meter('switch.port.transmit.packets', expected) - - def test_switch_port_receive_bytes_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(33, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'},)] - self._test_meter('switch.port.receive.bytes', expected) - - def test_switch_port_transmit_bytes_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(44, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'},)] - self._test_meter('switch.port.transmit.bytes', expected) - - def test_switch_port_transmit_bytes_non_existing_network(self): - self.params['virtual_network'] = ['aaa'] - self.params['resource'] = ['fip_stats_list'] - self._test_meter('switch.port.transmit.bytes', []) diff --git a/ceilometer/tests/unit/network/statistics/opendaylight/__init__.py b/ceilometer/tests/unit/network/statistics/opendaylight/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py b/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py deleted file mode 100644 index 7b2250ee..00000000 --- a/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py +++ /dev/null @@ -1,176 +0,0 @@ -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import mock -from oslo_config import fixture as config_fixture -from oslotest import base -from requests import auth as req_auth -import six -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _ -from ceilometer.network.statistics.opendaylight import client -from ceilometer import service as ceilometer_service - - -class TestClientHTTPBasicAuth(base.BaseTestCase): - - auth_way = 'basic' - scheme = 'http' - - def setUp(self): - super(TestClientHTTPBasicAuth, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.parsed_url = urlparse.urlparse( - 'http://127.0.0.1:8080/controller/nb/v2?container_name=default&' - 'container_name=egg&auth=%s&user=admin&password=admin_pass&' - 'scheme=%s' % (self.auth_way, self.scheme)) - self.params = urlparse.parse_qs(self.parsed_url.query) - self.endpoint = urlparse.urlunparse( - urlparse.ParseResult(self.scheme, - self.parsed_url.netloc, - self.parsed_url.path, - None, None, None)) - odl_params = {'auth': self.params.get('auth')[0], - 'user': self.params.get('user')[0], - 'password': self.params.get('password')[0]} - self.client = client.Client(self.endpoint, odl_params) - - self.resp = mock.MagicMock() - self.get = mock.patch('requests.get', - return_value=self.resp).start() - - self.resp.raw.version = 1.1 - self.resp.status_code = 200 - self.resp.reason = 'OK' - self.resp.headers = {} - self.resp.content = 'dummy' - - def _test_request(self, method, url): - data = method('default') - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - # check url - real_url = url % {'container_name': 'default', - 'scheme': self.scheme} - self.assertEqual(real_url, call_args[0]) - - # check auth parameters - auth = call_kwargs.get('auth') - if self.auth_way == 'digest': - self.assertIsInstance(auth, req_auth.HTTPDigestAuth) - else: - self.assertIsInstance(auth, req_auth.HTTPBasicAuth) - self.assertEqual('admin', auth.username) - self.assertEqual('admin_pass', auth.password) - - # check header - self.assertEqual( - {'Accept': 'application/json'}, - call_kwargs['headers']) - - # check return value - self.assertEqual(self.get().json(), data) - - def test_flow_statistics(self): - self._test_request( - self.client.statistics.get_flow_statistics, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/statistics/%(container_name)s/flow') - - def test_port_statistics(self): - self._test_request( - self.client.statistics.get_port_statistics, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/statistics/%(container_name)s/port') - - def test_table_statistics(self): - self._test_request( - self.client.statistics.get_table_statistics, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/statistics/%(container_name)s/table') - - def test_topology(self): - self._test_request( - self.client.topology.get_topology, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/topology/%(container_name)s') - - def test_user_links(self): - self._test_request( - self.client.topology.get_user_links, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/topology/%(container_name)s/userLinks') - - def test_switch(self): - self._test_request( - self.client.switch_manager.get_nodes, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/switchmanager/%(container_name)s/nodes') - - def test_active_hosts(self): - self._test_request( - self.client.host_tracker.get_active_hosts, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - 
'/hosttracker/%(container_name)s/hosts/active') - - def test_inactive_hosts(self): - self._test_request( - self.client.host_tracker.get_inactive_hosts, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/hosttracker/%(container_name)s/hosts/inactive') - - def test_http_error(self): - self.resp.status_code = 404 - self.resp.reason = 'Not Found' - - try: - self.client.statistics.get_flow_statistics('default') - self.fail('') - except client.OpenDaylightRESTAPIFailed as e: - self.assertEqual( - _('OpenDaylitght API returned %(status)s %(reason)s') % - {'status': self.resp.status_code, - 'reason': self.resp.reason}, - six.text_type(e)) - - def test_other_error(self): - - class _Exception(Exception): - pass - - self.get = mock.patch('requests.get', - side_effect=_Exception).start() - - self.assertRaises(_Exception, - self.client.statistics.get_flow_statistics, - 'default') - - -class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth): - - auth_way = 'digest' - - -class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth): - - scheme = 'https' - - -class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth): - - scheme = 'https' diff --git a/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py b/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py deleted file mode 100644 index 891b3a1d..00000000 --- a/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py +++ /dev/null @@ -1,1705 +0,0 @@ -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import abc - -import mock -from oslotest import base -import six -from six import moves -from six.moves.urllib import parse as url_parse - -from ceilometer.network.statistics.opendaylight import driver - - -@six.add_metaclass(abc.ABCMeta) -class _Base(base.BaseTestCase): - - @abc.abstractproperty - def flow_data(self): - pass - - @abc.abstractproperty - def port_data(self): - pass - - @abc.abstractproperty - def table_data(self): - pass - - @abc.abstractproperty - def topology_data(self): - pass - - @abc.abstractproperty - def switch_data(self): - pass - - @abc.abstractproperty - def user_links_data(self): - pass - - @abc.abstractproperty - def active_hosts_data(self): - pass - - @abc.abstractproperty - def inactive_hosts_data(self): - pass - - fake_odl_url = url_parse.ParseResult('opendaylight', - 'localhost:8080', - 'controller/nb/v2', - None, - None, - None) - - fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&' - 'container_name=default&auth=basic') - - fake_params_multi_container = ( - url_parse.parse_qs('user=admin&password=admin&scheme=http&' - 'container_name=first&container_name=second&' - 'auth=basic')) - - def setUp(self): - super(_Base, self).setUp() - self.addCleanup(mock.patch.stopall) - - self.driver = driver.OpenDayLightDriver() - - self.get_flow_statistics = mock.patch( - 'ceilometer.network.statistics.opendaylight.client.' 
- 'StatisticsAPIClient.get_flow_statistics', - return_value=self.flow_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'StatisticsAPIClient.get_table_statistics', - return_value=self.table_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'StatisticsAPIClient.get_port_statistics', - return_value=self.port_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'TopologyAPIClient.get_topology', - return_value=self.topology_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'TopologyAPIClient.get_user_links', - return_value=self.user_links_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'SwitchManagerAPIClient.get_nodes', - return_value=self.switch_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'HostTrackerAPIClient.get_active_hosts', - return_value=self.active_hosts_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'HostTrackerAPIClient.get_inactive_hosts', - return_value=self.inactive_hosts_data).start() - - def _test_for_meter(self, meter_name, expected_data): - sample_data = self.driver.get_sample_data(meter_name, - self.fake_odl_url, - self.fake_params, - {}) - - for sample, expected in moves.zip(sample_data, expected_data): - self.assertEqual(expected[0], sample[0]) # check volume - self.assertEqual(expected[1], sample[1]) # check resource id - self.assertEqual(expected[2], sample[2]) # check resource metadata - - -class TestOpenDayLightDriverSpecial(_Base): - - flow_data = {"flowStatistics": []} - port_data = {"portStatistics": []} - table_data = {"tableStatistics": []} - topology_data = {"edgeProperties": []} - switch_data = {"nodeProperties": []} - user_links_data = {"userLinks": []} - active_hosts_data = {"hostConfig": []} - inactive_hosts_data = {"hostConfig": []} - - def test_not_implemented_meter(self): - sample_data = self.driver.get_sample_data('egg', - self.fake_odl_url, - self.fake_params, - {}) - self.assertIsNone(sample_data) - - sample_data = self.driver.get_sample_data('switch.table.egg', - self.fake_odl_url, - self.fake_params, - {}) - self.assertIsNone(sample_data) - - def test_cache(self): - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.assertEqual(1, self.get_flow_statistics.call_count) - - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.assertEqual(2, self.get_flow_statistics.call_count) - - def test_multi_container(self): - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params_multi_container, - cache) - self.assertEqual(2, self.get_flow_statistics.call_count) - - self.assertIn('network.statistics.opendaylight', cache) - - odl_data = cache['network.statistics.opendaylight'] - - self.assertIn('first', odl_data) - self.assertIn('second', odl_data) - - def test_http_error(self): - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'StatisticsAPIClient.get_flow_statistics', - side_effect=Exception()).start() - - sample_data = self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - {}) - - self.assertEqual(0, len(sample_data)) - - mock.patch('ceilometer.network.statistics.opendaylight.client.' 
- 'StatisticsAPIClient.get_flow_statistics', - side_effect=[Exception(), self.flow_data]).start() - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params_multi_container, - cache) - - self.assertIn('network.statistics.opendaylight', cache) - - odl_data = cache['network.statistics.opendaylight'] - - self.assertIn('second', odl_data) - - -class TestOpenDayLightDriverSimple(_Base): - - flow_data = { - "flowStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "flowStatistic": [ - { - "flow": { - "match": { - "matchField": [ - { - "type": "DL_TYPE", - "value": "2048" - }, - { - "mask": "255.255.255.255", - "type": "NW_DST", - "value": "1.1.1.1" - } - ] - }, - "actions": { - "@type": "output", - "port": { - "id": "3", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - } - }, - "hardTimeout": "0", - "id": "0", - "idleTimeout": "0", - "priority": "1" - }, - "byteCount": "0", - "durationNanoseconds": "397000000", - "durationSeconds": "1828", - "packetCount": "0", - "tableId": "0" - }, - ] - } - ] - } - port_data = { - "portStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "portStatistic": [ - { - "nodeConnector": { - "id": "4", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - }, - ] - } - ] - } - table_data = { - "tableStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "tableStatistic": [ - { - "activeCount": "11", - "lookupCount": "816", - "matchedCount": "220", - "nodeTable": { - "id": "0", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - } - } - }, - ] - } - ] - } - topology_data = {"edgeProperties": []} - switch_data = { - "nodeProperties": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "properties": { - "actions": { - "value": "4095" - }, - "timeStamp": { - "name": "connectedSince", - "value": "1377291227877" - } - } - }, - ] - } - user_links_data = {"userLinks": []} - active_hosts_data = {"hostConfig": []} - inactive_hosts_data = {"hostConfig": []} - - def test_meter_switch(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - "properties_actions": "4095", - "properties_timeStamp_connectedSince": "1377291227877" - }), - ] - - self._test_for_meter('switch', expected_data) - - def test_meter_switch_port(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4', - }), - ] - self._test_for_meter('switch.port', expected_data) - - def test_meter_switch_port_receive_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.packets', expected_data) - - def test_meter_switch_port_transmit_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.packets', expected_data) - - def test_meter_switch_port_receive_bytes(self): - expected_data = [ 
- (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.bytes', expected_data) - - def test_meter_switch_port_transmit_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.bytes', expected_data) - - def test_meter_switch_port_receive_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.drops', expected_data) - - def test_meter_switch_port_transmit_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.drops', expected_data) - - def test_meter_switch_port_receive_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.errors', expected_data) - - def test_meter_switch_port_transmit_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.errors', expected_data) - - def test_meter_switch_port_receive_frame_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.frame_error', expected_data) - - def test_meter_switch_port_receive_overrun_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.overrun_error', - expected_data) - - def test_meter_switch_port_receive_crc_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.crc_error', expected_data) - - def test_meter_switch_port_collision_count(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.collision.count', expected_data) - - def test_meter_switch_table(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table', expected_data) - - def test_meter_switch_table_active_entries(self): - expected_data = [ - (11, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table.active.entries', expected_data) - - def test_meter_switch_table_lookup_packets(self): - expected_data = [ - (816, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table.lookup.packets', expected_data) - - def test_meter_switch_table_matched_packets(self): - expected_data = [ - (220, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table.matched.packets', expected_data) - - def 
test_meter_switch_flow(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1" - }), - ] - self._test_for_meter('switch.flow', expected_data) - - def test_meter_switch_flow_duration_seconds(self): - expected_data = [ - (1828, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_seconds', expected_data) - - def test_meter_switch_flow_duration_nanoseconds(self): - expected_data = [ - (397000000, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) - - def test_meter_switch_flow_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.packets', expected_data) - - def test_meter_switch_flow_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": 
"1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.bytes', expected_data) - - -class TestOpenDayLightDriverComplex(_Base): - - flow_data = { - "flowStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "flowStatistic": [ - { - "flow": { - "match": { - "matchField": [ - { - "type": "DL_TYPE", - "value": "2048" - }, - { - "mask": "255.255.255.255", - "type": "NW_DST", - "value": "1.1.1.1" - } - ] - }, - "actions": { - "@type": "output", - "port": { - "id": "3", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - } - }, - "hardTimeout": "0", - "id": "0", - "idleTimeout": "0", - "priority": "1" - }, - "byteCount": "0", - "durationNanoseconds": "397000000", - "durationSeconds": "1828", - "packetCount": "0", - "tableId": "0" - }, - { - "flow": { - "match": { - "matchField": [ - { - "type": "DL_TYPE", - "value": "2048" - }, - { - "mask": "255.255.255.255", - "type": "NW_DST", - "value": "1.1.1.2" - } - ] - }, - "actions": { - "@type": "output", - "port": { - "id": "4", - "node": { - "id": "00:00:00:00:00:00:00:03", - "type": "OF" - }, - "type": "OF" - } - }, - "hardTimeout": "0", - "id": "0", - "idleTimeout": "0", - "priority": "1" - }, - "byteCount": "89", - "durationNanoseconds": "200000", - "durationSeconds": "5648", - "packetCount": "30", - "tableId": "1" - } - ] - } - ] - } - port_data = { - "portStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "portStatistic": [ - { - "nodeConnector": { - "id": "4", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - }, - { - "nodeConnector": { - "id": "3", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "12740", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "182", - "transmitBytes": "12110", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "173" - }, - { - "nodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "12180", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "174", - "transmitBytes": "12670", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "181" - }, - { - "nodeConnector": { - "id": "1", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - }, - { - "nodeConnector": { - "id": "0", 
- "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - } - ] - } - ] - } - table_data = { - "tableStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "tableStatistic": [ - { - "activeCount": "11", - "lookupCount": "816", - "matchedCount": "220", - "nodeTable": { - "id": "0", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - } - } - }, - { - "activeCount": "20", - "lookupCount": "10", - "matchedCount": "5", - "nodeTable": { - "id": "1", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - } - } - } - ] - } - ] - } - topology_data = { - "edgeProperties": [ - { - "edge": { - "headNodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:03", - "type": "OF" - }, - "type": "OF" - }, - "tailNodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - } - }, - "properties": { - "bandwidth": { - "value": 10000000000 - }, - "config": { - "value": 1 - }, - "name": { - "value": "s2-eth3" - }, - "state": { - "value": 1 - }, - "timeStamp": { - "name": "creation", - "value": 1379527162648 - } - } - }, - { - "edge": { - "headNodeConnector": { - "id": "5", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "tailNodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:04", - "type": "OF" - }, - "type": "OF" - } - }, - "properties": { - "timeStamp": { - "name": "creation", - "value": 1379527162648 - } - } - } - ] - } - switch_data = { - "nodeProperties": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "properties": { - "actions": { - "value": "4095" - }, - "buffers": { - "value": "256" - }, - "capabilities": { - "value": "199" - }, - "description": { - "value": "None" - }, - "macAddress": { - "value": "00:00:00:00:00:02" - }, - "tables": { - "value": "-1" - }, - "timeStamp": { - "name": "connectedSince", - "value": "1377291227877" - } - } - }, - { - "node": { - "id": "00:00:00:00:00:00:00:03", - "type": "OF" - }, - "properties": { - "actions": { - "value": "1024" - }, - "buffers": { - "value": "512" - }, - "capabilities": { - "value": "1000" - }, - "description": { - "value": "Foo Bar" - }, - "macAddress": { - "value": "00:00:00:00:00:03" - }, - "tables": { - "value": "10" - }, - "timeStamp": { - "name": "connectedSince", - "value": "1377291228000" - } - } - } - ] - } - user_links_data = { - "userLinks": [ - { - "dstNodeConnector": "OF|5@OF|00:00:00:00:00:00:00:05", - "name": "link1", - "srcNodeConnector": "OF|3@OF|00:00:00:00:00:00:00:02", - "status": "Success" - } - ] - } - active_hosts_data = { - "hostConfig": [ - { - "dataLayerAddress": "00:00:00:00:01:01", - "networkAddress": "1.1.1.1", - "nodeConnectorId": "9", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:01", - "nodeType": "OF", - "staticHost": "false", - "vlan": "0" - }, - { - "dataLayerAddress": "00:00:00:00:02:02", - "networkAddress": "2.2.2.2", - "nodeConnectorId": "1", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:02", - "nodeType": "OF", - "staticHost": "true", - "vlan": "0" - } - ] - } - inactive_hosts_data = { - "hostConfig": [ - { - "dataLayerAddress": 
"00:00:00:01:01:01", - "networkAddress": "1.1.1.3", - "nodeConnectorId": "8", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:01", - "nodeType": "OF", - "staticHost": "false", - "vlan": "0" - }, - { - "dataLayerAddress": "00:00:00:01:02:02", - "networkAddress": "2.2.2.4", - "nodeConnectorId": "0", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:02", - "nodeType": "OF", - "staticHost": "false", - "vlan": "1" - } - ] - } - - def test_meter_switch(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - "properties_actions": "4095", - "properties_buffers": "256", - "properties_capabilities": "199", - "properties_description": "None", - "properties_macAddress": "00:00:00:00:00:02", - "properties_tables": "-1", - "properties_timeStamp_connectedSince": "1377291227877" - }), - (1, "00:00:00:00:00:00:00:03", { - 'controller': 'OpenDaylight', - 'container': 'default', - "properties_actions": "1024", - "properties_buffers": "512", - "properties_capabilities": "1000", - "properties_description": "Foo Bar", - "properties_macAddress": "00:00:00:00:00:03", - "properties_tables": "10", - "properties_timeStamp_connectedSince": "1377291228000" - }), - ] - - self._test_for_meter('switch', expected_data) - - def test_meter_switch_port(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4', - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3', - 'user_link_node_id': '00:00:00:00:00:00:00:05', - 'user_link_node_port': '5', - 'user_link_status': 'Success', - 'user_link_name': 'link1', - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2', - 'topology_node_id': '00:00:00:00:00:00:00:03', - 'topology_node_port': '2', - "topology_bandwidth": 10000000000, - "topology_config": 1, - "topology_name": "s2-eth3", - "topology_state": 1, - "topology_timeStamp_creation": 1379527162648 - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1', - 'host_status': 'active', - 'host_dataLayerAddress': '00:00:00:00:02:02', - 'host_networkAddress': '2.2.2.2', - 'host_staticHost': 'true', - 'host_vlan': '0', - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0', - 'host_status': 'inactive', - 'host_dataLayerAddress': '00:00:00:01:02:02', - 'host_networkAddress': '2.2.2.4', - 'host_staticHost': 'false', - 'host_vlan': '1', - }), - ] - self._test_for_meter('switch.port', expected_data) - - def test_meter_switch_port_receive_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (182, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (174, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.packets', expected_data) - - def test_meter_switch_port_transmit_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 
'default', - 'port': '4'}), - (173, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (181, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.packets', expected_data) - - def test_meter_switch_port_receive_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (12740, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (12180, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.bytes', expected_data) - - def test_meter_switch_port_transmit_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (12110, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (12670, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.bytes', expected_data) - - def test_meter_switch_port_receive_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.drops', expected_data) - - def test_meter_switch_port_transmit_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.drops', expected_data) - - def test_meter_switch_port_receive_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", 
{ - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.errors', expected_data) - - def test_meter_switch_port_transmit_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.errors', expected_data) - - def test_meter_switch_port_receive_frame_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.frame_error', expected_data) - - def test_meter_switch_port_receive_overrun_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.overrun_error', - expected_data) - - def test_meter_switch_port_receive_crc_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.crc_error', expected_data) - - def test_meter_switch_port_collision_count(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 
'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.collision.count', expected_data) - - def test_meter_switch_table(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table', expected_data) - - def test_meter_switch_table_active_entries(self): - expected_data = [ - (11, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (20, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table.active.entries', expected_data) - - def test_meter_switch_table_lookup_packets(self): - expected_data = [ - (816, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (10, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table.lookup.packets', expected_data) - - def test_meter_switch_table_matched_packets(self): - expected_data = [ - (220, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (5, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table.matched.packets', expected_data) - - def test_meter_switch_flow(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1" - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1" - }), - ] - self._test_for_meter('switch.flow', expected_data) - - def test_meter_switch_flow_duration_seconds(self): - expected_data = [ - (1828, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - 
"flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (5648, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_seconds', expected_data) - - def test_meter_switch_flow_duration_nanoseconds(self): - expected_data = [ - (397000000, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (200000, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) - - def test_meter_switch_flow_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (30, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - 
"flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.packets', expected_data) - - def test_meter_switch_flow_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (89, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.bytes', expected_data) diff --git a/ceilometer/tests/unit/network/statistics/test_driver.py b/ceilometer/tests/unit/network/statistics/test_driver.py deleted file mode 100644 index 9964b7f4..00000000 --- a/ceilometer/tests/unit/network/statistics/test_driver.py +++ /dev/null @@ -1,37 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslotest import base - -from ceilometer.network.statistics import driver - - -class TestDriver(base.BaseTestCase): - - @staticmethod - def test_driver_ok(): - - class OkDriver(driver.Driver): - - def get_sample_data(self, meter_name, resources, cache): - pass - - OkDriver() - - def test_driver_ng(self): - - class NgDriver(driver.Driver): - """get_sample_data method is lost.""" - - self.assertRaises(TypeError, NgDriver) diff --git a/ceilometer/tests/unit/network/statistics/test_flow.py b/ceilometer/tests/unit/network/statistics/test_flow.py deleted file mode 100644 index e25b559f..00000000 --- a/ceilometer/tests/unit/network/statistics/test_flow.py +++ /dev/null @@ -1,56 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.network.statistics import flow -from ceilometer import sample -from ceilometer.tests.unit.network import statistics - - -class TestFlowPollsters(statistics._PollsterTestBase): - - def test_flow_pollster(self): - self._test_pollster( - flow.FlowPollster, - 'switch.flow', - sample.TYPE_GAUGE, - 'flow') - - def test_flow_pollster_duration_seconds(self): - self._test_pollster( - flow.FlowPollsterDurationSeconds, - 'switch.flow.duration_seconds', - sample.TYPE_GAUGE, - 's') - - def test_flow_pollster_duration_nanoseconds(self): - self._test_pollster( - flow.FlowPollsterDurationNanoseconds, - 'switch.flow.duration_nanoseconds', - sample.TYPE_GAUGE, - 'ns') - - def test_flow_pollster_packets(self): - self._test_pollster( - flow.FlowPollsterPackets, - 'switch.flow.packets', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_flow_pollster_bytes(self): - self._test_pollster( - flow.FlowPollsterBytes, - 'switch.flow.bytes', - sample.TYPE_CUMULATIVE, - 'B') diff --git a/ceilometer/tests/unit/network/statistics/test_port.py b/ceilometer/tests/unit/network/statistics/test_port.py deleted file mode 100644 index d05f9127..00000000 --- a/ceilometer/tests/unit/network/statistics/test_port.py +++ /dev/null @@ -1,112 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from ceilometer.network.statistics import port -from ceilometer import sample -from ceilometer.tests.unit.network import statistics - - -class TestPortPollsters(statistics._PollsterTestBase): - - def test_port_pollster(self): - self._test_pollster( - port.PortPollster, - 'switch.port', - sample.TYPE_GAUGE, - 'port') - - def test_port_pollster_receive_packets(self): - self._test_pollster( - port.PortPollsterReceivePackets, - 'switch.port.receive.packets', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_transmit_packets(self): - self._test_pollster( - port.PortPollsterTransmitPackets, - 'switch.port.transmit.packets', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_bytes(self): - self._test_pollster( - port.PortPollsterReceiveBytes, - 'switch.port.receive.bytes', - sample.TYPE_CUMULATIVE, - 'B') - - def test_port_pollster_transmit_bytes(self): - self._test_pollster( - port.PortPollsterTransmitBytes, - 'switch.port.transmit.bytes', - sample.TYPE_CUMULATIVE, - 'B') - - def test_port_pollster_receive_drops(self): - self._test_pollster( - port.PortPollsterReceiveDrops, - 'switch.port.receive.drops', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_transmit_drops(self): - self._test_pollster( - port.PortPollsterTransmitDrops, - 'switch.port.transmit.drops', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_errors(self): - self._test_pollster( - port.PortPollsterReceiveErrors, - 'switch.port.receive.errors', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_transmit_errors(self): - self._test_pollster( - port.PortPollsterTransmitErrors, - 'switch.port.transmit.errors', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_frame_errors(self): - self._test_pollster( - port.PortPollsterReceiveFrameErrors, - 'switch.port.receive.frame_error', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_overrun_errors(self): - self._test_pollster( - port.PortPollsterReceiveOverrunErrors, - 'switch.port.receive.overrun_error', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_receive_crc_errors(self): - self._test_pollster( - port.PortPollsterReceiveCRCErrors, - 'switch.port.receive.crc_error', - sample.TYPE_CUMULATIVE, - 'packet') - - def test_port_pollster_collision_count(self): - self._test_pollster( - port.PortPollsterCollisionCount, - 'switch.port.collision.count', - sample.TYPE_CUMULATIVE, - 'packet') diff --git a/ceilometer/tests/unit/network/statistics/test_statistics.py b/ceilometer/tests/unit/network/statistics/test_statistics.py deleted file mode 100644 index 5afdd790..00000000 --- a/ceilometer/tests/unit/network/statistics/test_statistics.py +++ /dev/null @@ -1,185 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -from oslo_utils import timeutils -from oslotest import base - -from ceilometer.network import statistics -from ceilometer.network.statistics import driver -from ceilometer import sample - - -class TestBase(base.BaseTestCase): - - @staticmethod - def test_subclass_ok(): - - class OkSubclass(statistics._Base): - - meter_name = 'foo' - meter_type = sample.TYPE_GAUGE - meter_unit = 'B' - - OkSubclass() - - def test_subclass_ng(self): - - class NgSubclass1(statistics._Base): - """meter_name is lost.""" - - meter_type = sample.TYPE_GAUGE - meter_unit = 'B' - - class NgSubclass2(statistics._Base): - """meter_type is lost.""" - - meter_name = 'foo' - meter_unit = 'B' - - class NgSubclass3(statistics._Base): - """meter_unit is lost.""" - - meter_name = 'foo' - meter_type = sample.TYPE_GAUGE - - self.assertRaises(TypeError, NgSubclass1) - self.assertRaises(TypeError, NgSubclass2) - self.assertRaises(TypeError, NgSubclass3) - - -class TestBaseGetSamples(base.BaseTestCase): - - def setUp(self): - super(TestBaseGetSamples, self).setUp() - - class FakePollster(statistics._Base): - meter_name = 'foo' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'bar' - - self.pollster = FakePollster() - - def tearDown(self): - statistics._Base.drivers = {} - super(TestBaseGetSamples, self).tearDown() - - @staticmethod - def _setup_ext_mgr(**drivers): - statistics._Base.drivers = drivers - - def _make_fake_driver(self, *return_values): - class FakeDriver(driver.Driver): - - def __init__(self): - self.index = 0 - - def get_sample_data(self, meter_name, parse_url, params, cache): - if self.index >= len(return_values): - yield None - retval = return_values[self.index] - self.index += 1 - yield retval - return FakeDriver - - @staticmethod - def _make_timestamps(count): - now = timeutils.utcnow() - return [(now + datetime.timedelta(seconds=i)).isoformat() - for i in range(count)] - - def _get_samples(self, *resources): - - return [v for v in self.pollster.get_samples(self, {}, resources)] - - def _assert_sample(self, s, volume, resource_id, resource_metadata): - self.assertEqual('foo', s.name) - self.assertEqual(sample.TYPE_CUMULATIVE, s.type) - self.assertEqual('bar', s.unit) - self.assertEqual(volume, s.volume) - self.assertIsNone(s.user_id) - self.assertIsNone(s.project_id) - self.assertEqual(resource_id, s.resource_id) - self.assertEqual(resource_metadata, s.resource_metadata) - - def test_get_samples_one_driver_one_resource(self): - fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'},), - (2, 'b', None)) - - self._setup_ext_mgr(http=fake_driver()) - - samples = self._get_samples('http://foo') - - self.assertEqual(1, len(samples)) - self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}) - - def test_get_samples_one_driver_two_resource(self): - fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'},), - (2, 'b', None), - (3, 'c', None)) - - self._setup_ext_mgr(http=fake_driver()) - - samples = self._get_samples('http://foo', 'http://bar') - - self.assertEqual(2, len(samples)) - self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}) - self._assert_sample(samples[1], 2, 'b', {}) - - def test_get_samples_two_driver_one_resource(self): - fake_driver1 = self._make_fake_driver((1, 'a', {'spam': 'egg'},), - (2, 'b', None)) - - fake_driver2 = self._make_fake_driver((11, 'A', None), - (12, 'B', None)) - - self._setup_ext_mgr(http=fake_driver1(), https=fake_driver2()) - - samples = self._get_samples('http://foo') - - self.assertEqual(1, len(samples)) - self._assert_sample(samples[0], 
1, 'a', {'spam': 'egg'}) - - def test_get_samples_multi_samples(self): - fake_driver = self._make_fake_driver([(1, 'a', {'spam': 'egg'},), - (2, 'b', None)]) - - self._setup_ext_mgr(http=fake_driver()) - - samples = self._get_samples('http://foo') - - self.assertEqual(2, len(samples)) - self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}) - self._assert_sample(samples[1], 2, 'b', {}) - - def test_get_samples_return_none(self): - fake_driver = self._make_fake_driver(None) - - self._setup_ext_mgr(http=fake_driver()) - - samples = self._get_samples('http://foo') - - self.assertEqual(0, len(samples)) - - def test_get_samples_return_no_generator(self): - class NoneFakeDriver(driver.Driver): - - def get_sample_data(self, meter_name, parse_url, params, cache): - return None - - self._setup_ext_mgr(http=NoneFakeDriver()) - samples = self._get_samples('http://foo') - self.assertFalse(samples) diff --git a/ceilometer/tests/unit/network/statistics/test_switch.py b/ceilometer/tests/unit/network/statistics/test_switch.py deleted file mode 100644 index c532a3f0..00000000 --- a/ceilometer/tests/unit/network/statistics/test_switch.py +++ /dev/null @@ -1,28 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.network.statistics import switch -from ceilometer import sample -from ceilometer.tests.unit.network import statistics - - -class TestSwitchPollster(statistics._PollsterTestBase): - - def test_table_pollster(self): - self._test_pollster( - switch.SWPollster, - 'switch', - sample.TYPE_GAUGE, - 'switch') diff --git a/ceilometer/tests/unit/network/statistics/test_table.py b/ceilometer/tests/unit/network/statistics/test_table.py deleted file mode 100644 index 533e2a61..00000000 --- a/ceilometer/tests/unit/network/statistics/test_table.py +++ /dev/null @@ -1,49 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from ceilometer.network.statistics import table -from ceilometer import sample -from ceilometer.tests.unit.network import statistics - - -class TestTablePollsters(statistics._PollsterTestBase): - - def test_table_pollster(self): - self._test_pollster( - table.TablePollster, - 'switch.table', - sample.TYPE_GAUGE, - 'table') - - def test_table_pollster_active_entries(self): - self._test_pollster( - table.TablePollsterActiveEntries, - 'switch.table.active.entries', - sample.TYPE_GAUGE, - 'entry') - - def test_table_pollster_lookup_packets(self): - self._test_pollster( - table.TablePollsterLookupPackets, - 'switch.table.lookup.packets', - sample.TYPE_GAUGE, - 'packet') - - def test_table_pollster_matched_packets(self): - self._test_pollster( - table.TablePollsterMatchedPackets, - 'switch.table.matched.packets', - sample.TYPE_GAUGE, - 'packet') diff --git a/ceilometer/tests/unit/network/test_floating_ip.py b/ceilometer/tests/unit/network/test_floating_ip.py deleted file mode 100644 index 5e8f7e72..00000000 --- a/ceilometer/tests/unit/network/test_floating_ip.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2016 Sungard Availability Services -# Copyright 2016 Red Hat -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network import floatingip -from ceilometer.network.services import discovery - - -class _BaseTestFloatingIPPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestFloatingIPPollster, self).setUp() - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - - -class TestFloatingIPPollster(_BaseTestFloatingIPPollster): - - def setUp(self): - super(TestFloatingIPPollster, self).setUp() - self.pollster = floatingip.FloatingIPPollster() - fake_fip = self.fake_get_fip_service() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'fip_get_all', - return_value=fake_fip)) - - @staticmethod - def fake_get_fip_service(): - return [{'router_id': 'e24f8a37-1bb7-49e4-833c-049bb21986d2', - 'status': 'ACTIVE', - 'tenant_id': '54a00c50ee4c4396b2f8dc220a2bed57', - 'floating_network_id': - 'f41f399e-d63e-47c6-9a19-21c4e4fbbba0', - 'fixed_ip_address': '10.0.0.6', - 'floating_ip_address': '65.79.162.11', - 'port_id': '93a0d2c7-a397-444c-9d75-d2ac89b6f209', - 'id': '18ca27bf-72bc-40c8-9c13-414d564ea367'}, - {'router_id': 'astf8a37-1bb7-49e4-833c-049bb21986d2', - 'status': 'DOWN', - 'tenant_id': '34a00c50ee4c4396b2f8dc220a2bed57', - 'floating_network_id': - 'gh1f399e-d63e-47c6-9a19-21c4e4fbbba0', - 'fixed_ip_address': '10.0.0.7', - 'floating_ip_address': '65.79.162.12', - 'port_id': '453a0d2c7-a397-444c-9d75-d2ac89b6f209', - 'id': 'jkca27bf-72bc-40c8-9c13-414d564ea367'}, - {'router_id': 'e2478937-1bb7-49e4-833c-049bb21986d2', - 'status': 'error', - 'tenant_id': '54a0gggg50ee4c4396b2f8dc220a2bed57', - 'floating_network_id': - 'po1f399e-d63e-47c6-9a19-21c4e4fbbba0', - 'fixed_ip_address': '10.0.0.8', - 'floating_ip_address': '65.79.162.13', - 'port_id': '67a0d2c7-a397-444c-9d75-d2ac89b6f209', - 'id': '90ca27bf-72bc-40c8-9c13-414d564ea367'}] - - def test_fip_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fip_service())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_fip_service()[0][field], - samples[0].resource_metadata[field]) - - def test_fip_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fip_service())) - self.assertEqual(1, samples[0].volume) - - def test_get_fip_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fip_service())) - self.assertEqual(set(['ip.floating']), - set([s.name for s in samples])) - - def test_fip_discovery(self): - discovered_fips = discovery.FloatingIPDiscovery().discover( - self.manager) - self.assertEqual(3, len(discovered_fips)) diff --git a/ceilometer/tests/unit/network/test_notifications.py b/ceilometer/tests/unit/network/test_notifications.py deleted file mode 100644 index 80eda675..00000000 --- a/ceilometer/tests/unit/network/test_notifications.py +++ /dev/null @@ -1,1480 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for ceilometer.network.notifications -""" - -import mock - -from ceilometer.network import notifications -from ceilometer.tests import base as test - -NOTIFICATION_NETWORK_CREATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'network.create.end', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'network': - {u'status': u'ACTIVE', - u'subnets': [], - u'name': u'abcedf', - u'router:external': False, - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'admin_state_up': True, - u'shared': False, - u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - -NOTIFICATION_BULK_NETWORK_CREATE = { - '_context_roles': [u'_member_', - u'heat_stack_owner', - u'admin'], - u'_context_request_id': u'req-a2dfdefd-b773-4400-9d52-5e146e119950', - u'_context_read_deleted': u'no', - u'event_type': u'network.create.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2014-05-1510: 24: 56.335612', - u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_tenant_name': u'admin', - u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', - u'message_id': u'914eb601-9390-4a72-8629-f013a4c84467', - u'priority': 'info', - u'_context_is_admin': True, - u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_timestamp': u'2014-05-1510: 24: 56.285975', - u'_context_user': u'7520940056d54cceb25cbce888300bea', - u'_context_user_id': u'7520940056d54cceb25cbce888300bea', - u'publisher_id': u'network.devstack', - u'payload': { - u'networks': [{u'status': u'ACTIVE', - u'subnets': [], - u'name': u'test2', - u'provider: physical_network': None, - u'admin_state_up': True, - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'provider: network_type': u'local', - u'shared': False, - u'id': u'7cbc7a66-bbd0-41fc-a186-81c3da5c9843', - u'provider: segmentation_id': None}, - {u'status': u'ACTIVE', - u'subnets': [], - u'name': u'test3', - u'provider: physical_network': None, - u'admin_state_up': True, - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'provider: network_type': u'local', - u'shared': False, - u'id': u'5a7cb86f-1638-4cc1-8dcc-8bbbc8c7510d', - u'provider: segmentation_id': None}] - } -} - -NOTIFICATION_SUBNET_CREATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'subnet.create.end', - u'timestamp': u'2012-09-27 14:11:27.426620', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': { - u'subnet': { - u'name': u'mysubnet', - u'enable_dhcp': True, - u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'dns_nameservers': [], - u'allocation_pools': [{u'start': u'192.168.42.2', - u'end': u'192.168.42.254'}], - u'host_routes': [], - u'ip_version': 4, - u'gateway_ip': u'192.168.42.1', - u'cidr': u'192.168.42.0/24', - u'id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:27.214490', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - 
u'message_id': u'd86dfc66-d3c3-4aea-b06d-bf37253e6116'} - -NOTIFICATION_BULK_SUBNET_CREATE = { - '_context_roles': [u'_member_', - u'heat_stack_owner', - u'admin'], - u'_context_request_id': u'req-b77e278a-0cce-4987-9f82-15957b234768', - u'_context_read_deleted': u'no', - u'event_type': u'subnet.create.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2014-05-1510: 47: 08.133888', - u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_tenant_name': u'admin', - u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', - u'message_id': u'c7e6f9fd-ead2-415f-8493-b95bedf72e43', - u'priority': u'info', - u'_context_is_admin': True, - u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_timestamp': u'2014-05-1510: 47: 07.970043', - u'_context_user': u'7520940056d54cceb25cbce888300bea', - u'_context_user_id': u'7520940056d54cceb25cbce888300bea', - u'publisher_id': u'network.devstack', - u'payload': { - u'subnets': [{u'name': u'', - u'enable_dhcp': True, - u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'dns_nameservers': [], - u'ipv6_ra_mode': None, - u'allocation_pools': [{u'start': u'10.0.4.2', - u'end': u'10.0.4.254'}], - u'host_routes': [], - u'ipv6_address_mode': None, - u'ip_version': 4, - u'gateway_ip': u'10.0.4.1', - u'cidr': u'10.0.4.0/24', - u'id': u'14020d7b-6dd7-4349-bb8e-8f954c919022'}, - {u'name': u'', - u'enable_dhcp': True, - u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'dns_nameservers': [], - u'ipv6_ra_mode': None, - u'allocation_pools': [{u'start': u'10.0.5.2', - u'end': u'10.0.5.254'}], - u'host_routes': [], - u'ipv6_address_mode': None, - u'ip_version': 4, - u'gateway_ip': u'10.0.5.1', - u'cidr': u'10.0.5.0/24', - u'id': u'a080991b-a32a-4bf7-a558-96c4b77d075c'}] - } -} - -NOTIFICATION_PORT_CREATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'port.create.end', - u'timestamp': u'2012-09-27 14:28:31.536370', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': { - u'port': { - u'status': u'ACTIVE', - u'name': u'', - u'admin_state_up': True, - u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'device_owner': u'', - u'mac_address': u'fa:16:3e:75:0c:49', - u'fixed_ips': [{ - u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', - u'ip_address': u'192.168.42.3'}], - u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', - u'device_id': u''}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:28:31.438919', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'7135b8ab-e13c-4ac8-bc31-75e7f756622a'} - -NOTIFICATION_BULK_PORT_CREATE = { - u'_context_roles': [u'_member_', - u'SwiftOperator'], - u'_context_request_id': u'req-678be9ad-c399-475a-b3e8-8da0c06375aa', - u'_context_read_deleted': u'no', - u'event_type': u'port.create.end', - u'_context_project_name': u'demo', - u'timestamp': u'2014-05-0909: 19: 58.317548', - u'_context_tenant_id': u'133087d90fc149528b501dd8b75ea965', - u'_context_timestamp': u'2014-05-0909: 19: 58.160011', - u'_context_tenant': u'133087d90fc149528b501dd8b75ea965', - u'payload': { - u'ports': [{u'status': u'DOWN', - u'name': u'port--1501135095', - u'allowed_address_pairs': [], - 
u'admin_state_up': True, - u'network_id': u'acf63fdc-b43b-475d-8cca-9429b843d5e8', - u'tenant_id': u'133087d90fc149528b501dd8b75ea965', - u'binding: vnic_type': u'normal', - u'device_owner': u'', - u'mac_address': u'fa: 16: 3e: 37: 10: 39', - u'fixed_ips': [], - u'id': u'296c2c9f-14e9-48da-979d-78b213454c59', - u'security_groups': [ - u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], - u'device_id': u''}, - {u'status': u'DOWN', - u'name': u'', - u'allowed_address_pairs': [], - u'admin_state_up': False, - u'network_id': u'0a8eea59-0146-425c-b470-e9ddfa99ec61', - u'tenant_id': u'133087d90fc149528b501dd8b75ea965', - u'binding: vnic_type': u'normal', - u'device_owner': u'', - u'mac_address': u'fa: 16: 3e: 8e: 6e: 53', - u'fixed_ips': [], - u'id': u'd8bb667f-5cd3-4eca-a984-268e25b1b7a5', - u'security_groups': [ - u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], - u'device_id': u''}] - }, - u'_unique_id': u'60b1650f17fc4fa59492f447321fb26c', - u'_context_is_admin': False, - u'_context_project_id': u'133087d90fc149528b501dd8b75ea965', - u'_context_tenant_name': u'demo', - u'_context_user': u'b1eb48f9c54741f4adc1b4ea512d400c', - u'_context_user_name': u'demo', - u'publisher_id': u'network.os-ci-test12', - u'message_id': u'04aa45e1-3c30-4c69-8638-e7ff8621e9bc', - u'_context_user_id': u'b1eb48f9c54741f4adc1b4ea512d400c', - u'priority': u'INFO' -} - -NOTIFICATION_PORT_UPDATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'port.update.end', - u'timestamp': u'2012-09-27 14:35:09.514052', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': { - u'port': { - u'status': u'ACTIVE', - u'name': u'bonjour', - u'admin_state_up': True, - u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'device_owner': u'', - u'mac_address': u'fa:16:3e:75:0c:49', - u'fixed_ips': [{ - u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', - u'ip_address': u'192.168.42.3'}], - u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', - u'device_id': u''}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:35:09.447682', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'07b0a3a1-c0b5-40ab-a09c-28dee6bf48f4'} - - -NOTIFICATION_NETWORK_EXISTS = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'network.exists', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'network': - {u'status': u'ACTIVE', - u'subnets': [], - u'name': u'abcedf', - u'router:external': False, - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'admin_state_up': True, - u'shared': False, - u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - - -NOTIFICATION_ROUTER_EXISTS = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'router.exists', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'router': - {'status': u'ACTIVE', - 'external_gateway_info': - {'network_id': 
u'89d55642-4dec-43a4-a617-6cec051393b5'}, - 'name': u'router1', - 'admin_state_up': True, - 'tenant_id': u'bb04a2b769c94917b57ba49df7783cfd', - 'id': u'ab8bb3ed-df23-4ca0-8f03-b887abcd5c23'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - - -NOTIFICATION_FLOATINGIP_EXISTS = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'floatingip.exists', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'floatingip': - {'router_id': None, - 'tenant_id': u'6e5f9df9b3a249ab834f25fe1b1b81fd', - 'floating_network_id': - u'001400f7-1710-4245-98c3-39ba131cc39a', - 'fixed_ip_address': None, - 'floating_ip_address': u'172.24.4.227', - 'port_id': None, - 'id': u'2b7cc28c-6f78-4735-9246-257168405de6'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - - -NOTIFICATION_FLOATINGIP_UPDATE_START = { - '_context_roles': [u'_member_', - u'admin', - u'heat_stack_owner'], - '_context_request_id': u'req-bd5ed336-242f-4705-836e-8e8f3d0d1ced', - '_context_read_deleted': u'no', - 'event_type': u'floatingip.update.start', - '_context_user_name': u'admin', - '_context_project_name': u'admin', - 'timestamp': u'2014-05-3107: 19: 43.463101', - '_context_tenant_id': u'9fc714821a3747c8bc4e3a9bfbe82732', - '_context_tenant_name': u'admin', - '_context_tenant': u'9fc714821a3747c8bc4e3a9bfbe82732', - 'message_id': u'0ab6d71f-ba0a-4501-86fe-6cc20521ef5a', - 'priority': 'info', - '_context_is_admin': True, - '_context_project_id': u'9fc714821a3747c8bc4e3a9bfbe82732', - '_context_timestamp': u'2014-05-3107: 19: 43.460767', - '_context_user': u'6ca7b13b33e4425cae0b85e2cf93d9a1', - '_context_user_id': u'6ca7b13b33e4425cae0b85e2cf93d9a1', - 'publisher_id': u'network.devstack', - 'payload': { - u'id': u'64262b2a-8f5d-4ade-9405-0cbdd03c1555', - u'floatingip': { - u'fixed_ip_address': u'172.24.4.227', - u'port_id': u'8ab815c8-03cc-4b45-a673-79bdd0c258f2' - } - } -} - - -NOTIFICATION_POOL_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-10715057-7590-4529-8020-b994295ee6f4", - "event_type": "pool.create.end", - "timestamp": "2014-09-15 17:20:50.687649", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "ce255443233748ce9cc71b480974df28", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "pool": { - "status": "ACTIVE", - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP", "description": "", - "health_monitors": [], - "members": [], - "status_description": None, - "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "vip_id": None, - "name": "my_pool", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "health_monitors_status": [], - "provider": "haproxy"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": 
"a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:20:49.600299", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} - - -NOTIFICATION_VIP_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vip.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vip": { - "status": "ACTIVE", - "protocol": "HTTP", - "description": "", - "address": "10.0.0.2", - "protocol_port": 80, - "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", - "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", - "status_description": None, - "name": "my_vip", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "connection_limit": -1, - "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "session_persistence": {"type": "SOURCE_IP"}}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} - - -NOTIFICATION_HEALTH_MONITORS_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "health_monitor.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "health_monitor": { - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "delay": 10, - "max_retries": 10, - "timeout": 10, - "pools": [], - "type": "PING", - "id": "6dea2d01-c3af-4696-9192-6c938f391f01"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_MEMBERS_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "member.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": 
"1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "member": {"admin_state_up": True, - "status": "ACTIVE", - "status_description": None, - "weight": 1, - "address": "10.0.0.3", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "protocol_port": 80, - "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", - "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_FIREWALL_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall": { - "status": "ACTIVE", - "name": "my_firewall", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_RULE_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_rule.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall_rule": { - "protocol": "tcp", - "description": "", - "source_port": 80, - "source_ip_address": '192.168.255.10', - "destination_ip_address": '10.10.10.1', - "firewall_policy_id": '', - "position": None, - "destination_port": 80, - "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", - "name": "rule_01", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "enabled": True, - "action": "allow", - "ip_version": 4, - "shared": False}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - 
-NOTIFICATION_FIREWALL_POLICY_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_policy.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall_policy": {"name": "my_policy", - "firewall_rules": [], - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "audited": False, - "shared": False, - "id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_VPNSERVICE_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vpnservice.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", - "status": "ACTIVE", - "name": "my_vpn", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_IPSEC_POLICY_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsecpolicy.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsecpolicy": {"encapsulation_mode": "tunnel", - "encryption_algorithm": "aes-128", - "pfs": "group5", - "lifetime": { - "units": "seconds", - "value": 3600}, - "name": "my_ipsec_polixy", - "transform_protocol": "esp", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": 
"e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IKE_POLICY_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ikepolicy.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ikepolicy": {"encryption_algorithm": "aes-128", - "pfs": "group5", - "name": "my_ike_policy", - "phase1_negotiation_mode": "main", - "lifetime": {"units": "seconds", - "value": 3600}, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ike_version": "v1", - "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IPSEC_SITE_CONN_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsec_site_connection.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsec_site_connection": { - "status": "ACTIVE", - "psk": "test", - "initiator": "bi-directional", - "name": "my_ipsec_connection", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], - "mtu": 1500, - "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "dpd": {"action": "hold", - "interval": 30, - "timeout": 120}, - "route_mode": "static", - "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "peer_address": "10.0.0.1", - "peer_id": "10.0.0.254", - "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_POOL_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": 
"req-10715057-7590-4529-8020-b994295ee6f4", - "event_type": "pool.update.end", - "timestamp": "2014-09-15 17:20:50.687649", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "ce255443233748ce9cc71b480974df28", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "pool": { - "status": "ACTIVE", - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP", "description": "", - "health_monitors": [], - "members": [], - "status_description": None, - "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "vip_id": None, - "name": "my_pool", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "health_monitors_status": [], - "provider": "haproxy"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:20:49.600299", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} - - -NOTIFICATION_VIP_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vip.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vip": { - "status": "ACTIVE", - "protocol": "HTTP", - "description": "", - "address": "10.0.0.2", - "protocol_port": 80, - "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", - "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", - "status_description": None, - "name": "my_vip", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "connection_limit": -1, - "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "session_persistence": {"type": "SOURCE_IP"}}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} - - -NOTIFICATION_HEALTH_MONITORS_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "health_monitor.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "health_monitor": { - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "delay": 10, - "max_retries": 10, - "timeout": 10, - "pools": [], - "type": "PING", - "id": 
"6dea2d01-c3af-4696-9192-6c938f391f01"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_MEMBERS_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "member.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "member": {"admin_state_up": True, - "status": "ACTIVE", - "status_description": None, - "weight": 1, - "address": "10.0.0.3", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "protocol_port": 80, - "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", - "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_FIREWALL_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall": { - "status": "ACTIVE", - "name": "my_firewall", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_RULE_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_rule.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - 
"firewall_rule": { - "protocol": "tcp", - "description": "", - "source_port": 80, - "source_ip_address": '192.168.255.10', - "destination_ip_address": '10.10.10.1', - "firewall_policy_id": '', - "position": None, - "destination_port": 80, - "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", - "name": "rule_01", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "enabled": True, - "action": "allow", - "ip_version": 4, - "shared": False}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_POLICY_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_policy.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall_policy": {"name": "my_policy", - "firewall_rules": [], - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "audited": False, - "shared": False, - "id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_VPNSERVICE_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vpnservice.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", - "status": "ACTIVE", - "name": "my_vpn", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_IPSEC_POLICY_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": 
"req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsecpolicy.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsecpolicy": {"encapsulation_mode": "tunnel", - "encryption_algorithm": "aes-128", - "pfs": "group5", - "lifetime": { - "units": "seconds", - "value": 3600}, - "name": "my_ipsec_polixy", - "transform_protocol": "esp", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IKE_POLICY_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ikepolicy.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ikepolicy": {"encryption_algorithm": "aes-128", - "pfs": "group5", - "name": "my_ike_policy", - "phase1_negotiation_mode": "main", - "lifetime": {"units": "seconds", - "value": 3600}, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ike_version": "v1", - "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IPSEC_SITE_CONN_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsec_site_connection.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsec_site_connection": { - "status": "ACTIVE", - "psk": "test", - "initiator": "bi-directional", - "name": "my_ipsec_connection", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], - "mtu": 1500, - "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "dpd": {"action": "hold", - "interval": 30, 
- "timeout": 120}, - "route_mode": "static", - "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "peer_address": "10.0.0.1", - "peer_id": "10.0.0.254", - "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - -NOTIFICATION_EMPTY_PAYLOAD = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "health_monitor.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "health_monitor": {}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -class TestNotifications(test.BaseTestCase): - def test_network_create(self): - v = notifications.Network(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_NETWORK_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.create", samples[1].name) - - def test_bulk_network_create(self): - v = notifications.Network(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_BULK_NETWORK_CREATE)) - self.assertEqual(4, len(samples)) - self.assertEqual("network", samples[0].name) - self.assertEqual("network.create", samples[1].name) - self.assertEqual("network", samples[2].name) - self.assertEqual("network.create", samples[3].name) - - def test_subnet_create(self): - v = notifications.Subnet(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_SUBNET_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("subnet.create", samples[1].name) - - def test_bulk_subnet_create(self): - v = notifications.Subnet(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_BULK_SUBNET_CREATE)) - self.assertEqual(4, len(samples)) - self.assertEqual("subnet", samples[0].name) - self.assertEqual("subnet.create", samples[1].name) - self.assertEqual("subnet", samples[2].name) - self.assertEqual("subnet.create", samples[3].name) - - def test_port_create(self): - v = notifications.Port(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_PORT_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("port.create", samples[1].name) - - def test_bulk_port_create(self): - v = notifications.Port(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_BULK_PORT_CREATE)) - self.assertEqual(4, len(samples)) - self.assertEqual("port", samples[0].name) - self.assertEqual("port.create", samples[1].name) - 
self.assertEqual("port", samples[2].name) - self.assertEqual("port.create", samples[3].name) - - def test_port_update(self): - v = notifications.Port(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_PORT_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("port.update", samples[1].name) - - def test_network_exists(self): - v = notifications.Network(mock.Mock()) - samples = v.process_notification(NOTIFICATION_NETWORK_EXISTS) - self.assertEqual(1, len(list(samples))) - - def test_router_exists(self): - v = notifications.Router(mock.Mock()) - samples = v.process_notification(NOTIFICATION_ROUTER_EXISTS) - self.assertEqual(1, len(list(samples))) - - def test_floatingip_exists(self): - v = notifications.FloatingIP(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_FLOATINGIP_EXISTS)) - self.assertEqual(1, len(samples)) - self.assertEqual("ip.floating", samples[0].name) - - def test_floatingip_update(self): - v = notifications.FloatingIP(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FLOATINGIP_UPDATE_START)) - self.assertEqual(len(samples), 2) - self.assertEqual("ip.floating", samples[0].name) - - def test_pool_create(self): - v = notifications.Pool(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_POOL_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.pool", samples[0].name) - - def test_vip_create(self): - v = notifications.Vip(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_VIP_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.vip", samples[0].name) - - def test_member_create(self): - v = notifications.Member(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_MEMBERS_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.member", samples[0].name) - - def test_health_monitor_create(self): - v = notifications.HealthMonitor(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_HEALTH_MONITORS_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.health_monitor", samples[0].name) - - def test_firewall_create(self): - v = notifications.Firewall(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_FIREWALL_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall", samples[0].name) - - def test_vpnservice_create(self): - v = notifications.VPNService(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn", samples[0].name) - - def test_ipsec_connection_create(self): - v = notifications.IPSecSiteConnection(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IPSEC_SITE_CONN_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.connections", samples[0].name) - - def test_firewall_policy_create(self): - v = notifications.FirewallPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FIREWALL_POLICY_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall.policy", samples[0].name) - - def test_firewall_rule_create(self): - v = notifications.FirewallRule(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FIREWALL_RULE_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall.rule", samples[0].name) - - def test_ipsec_policy_create(self): - v = 
notifications.IPSecPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IPSEC_POLICY_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) - - def test_ike_policy_create(self): - v = notifications.IKEPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IKE_POLICY_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) - - def test_pool_update(self): - v = notifications.Pool(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_POOL_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.pool", samples[0].name) - - def test_vip_update(self): - v = notifications.Vip(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_VIP_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.vip", samples[0].name) - - def test_member_update(self): - v = notifications.Member(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_MEMBERS_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.member", samples[0].name) - - def test_health_monitor_update(self): - v = notifications.HealthMonitor(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_HEALTH_MONITORS_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.health_monitor", samples[0].name) - - def test_firewall_update(self): - v = notifications.Firewall(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_FIREWALL_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall", samples[0].name) - - def test_vpnservice_update(self): - v = notifications.VPNService(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn", samples[0].name) - - def test_ipsec_connection_update(self): - v = notifications.IPSecSiteConnection(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IPSEC_SITE_CONN_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.connections", samples[0].name) - - def test_firewall_policy_update(self): - v = notifications.FirewallPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FIREWALL_POLICY_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall.policy", samples[0].name) - - def test_firewall_rule_update(self): - v = notifications.FirewallRule(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FIREWALL_RULE_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall.rule", samples[0].name) - - def test_ipsec_policy_update(self): - v = notifications.IPSecPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IPSEC_POLICY_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) - - def test_ike_policy_update(self): - v = notifications.IKEPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IKE_POLICY_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) - - def test_empty_event_payload(self): - v = notifications.HealthMonitor(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_EMPTY_PAYLOAD)) - self.assertEqual(0, len(samples)) - 
- -class TestEventTypes(test.BaseTestCase): - - def test_network(self): - v = notifications.Network(mock.Mock()) - events = v.event_types - self.assertIsNotEmpty(events) - - def test_subnet(self): - v = notifications.Subnet(mock.Mock()) - events = v.event_types - self.assertIsNotEmpty(events) - - def test_port(self): - v = notifications.Port(mock.Mock()) - events = v.event_types - self.assertIsNotEmpty(events) - - def test_router(self): - self.assertTrue(notifications.Router(mock.Mock()).event_types) - - def test_floatingip(self): - self.assertTrue(notifications.FloatingIP(mock.Mock()).event_types) - - def test_pool(self): - self.assertTrue(notifications.Pool(mock.Mock()).event_types) - - def test_vip(self): - self.assertTrue(notifications.Vip(mock.Mock()).event_types) - - def test_member(self): - self.assertTrue(notifications.Member(mock.Mock()).event_types) - - def test_health_monitor(self): - self.assertTrue(notifications.HealthMonitor(mock.Mock()).event_types) - - def test_firewall(self): - self.assertTrue(notifications.Firewall(mock.Mock()).event_types) - - def test_vpnservice(self): - self.assertTrue(notifications.VPNService(mock.Mock()).event_types) - - def test_ipsec_connection(self): - self.assertTrue(notifications.IPSecSiteConnection( - mock.Mock()).event_types) - - def test_firewall_policy(self): - self.assertTrue(notifications.FirewallPolicy(mock.Mock()).event_types) - - def test_firewall_rule(self): - self.assertTrue(notifications.FirewallRule(mock.Mock()).event_types) - - def test_ipsec_policy(self): - self.assertTrue(notifications.IPSecPolicy(mock.Mock()).event_types) - - def test_ike_policy(self): - self.assertTrue(notifications.IKEPolicy(mock.Mock()).event_types) diff --git a/ceilometer/tests/unit/objectstore/__init__.py b/ceilometer/tests/unit/objectstore/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/objectstore/test_rgw.py b/ceilometer/tests/unit/objectstore/test_rgw.py deleted file mode 100644 index 5e0600d5..00000000 --- a/ceilometer/tests/unit/objectstore/test_rgw.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2015 Reliance Jio Infocomm Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections - -from keystoneauth1 import exceptions -import mock -from oslotest import base -from oslotest import mockpatch -import testscenarios.testcase - -from ceilometer.agent import manager -from ceilometer.objectstore import rgw -from ceilometer.objectstore.rgw_client import RGWAdminClient as rgw_client - -bucket_list1 = [rgw_client.Bucket('somefoo1', 10, 7)] -bucket_list2 = [rgw_client.Bucket('somefoo2', 2, 9)] -bucket_list3 = [rgw_client.Bucket('unlisted', 100, 100)] - -GET_BUCKETS = [('tenant-000', {'num_buckets': 2, 'size': 1042, - 'num_objects': 1001, 'buckets': bucket_list1}), - ('tenant-001', {'num_buckets': 2, 'size': 1042, - 'num_objects': 1001, 'buckets': bucket_list2}), - ('tenant-002-ignored', {'num_buckets': 2, 'size': 1042, - 'num_objects': 1001, - 'buckets': bucket_list3})] - -GET_USAGE = [('tenant-000', 10), - ('tenant-001', 11), - ('tenant-002-ignored', 12)] - -Tenant = collections.namedtuple('Tenant', 'id') -ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self._keystone = mock.Mock() - self._catalog = (self._keystone.session.auth.get_access. - return_value.service_catalog) - self._catalog.url_for.return_value = 'http://foobar/endpoint' - - -class TestRgwPollster(testscenarios.testcase.WithScenarios, - base.BaseTestCase): - - # Define scenarios to run all of the tests against all of the - # pollsters. - scenarios = [ - ('radosgw.objects', - {'factory': rgw.ObjectsPollster}), - ('radosgw.objects.size', - {'factory': rgw.ObjectsSizePollster}), - ('radosgw.objects.containers', - {'factory': rgw.ObjectsContainersPollster}), - ('radosgw.containers.objects', - {'factory': rgw.ContainersObjectsPollster}), - ('radosgw.containers.objects.size', - {'factory': rgw.ContainersSizePollster}), - ('radosgw.api.request', - {'factory': rgw.UsagePollster}), - ] - - @staticmethod - def fake_ks_service_catalog_url_for(*args, **kwargs): - raise exceptions.EndpointNotFound("Fake keystone exception") - - def fake_iter_accounts(self, ksclient, cache, tenants): - tenant_ids = [t.id for t in tenants] - for i in self.ACCOUNTS: - if i[0] in tenant_ids: - yield i - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestRgwPollster, self).setUp() - self.pollster = self.factory() - self.manager = TestManager() - - if self.pollster.CACHE_KEY_METHOD == 'rgw.get_bucket': - self.ACCOUNTS = GET_BUCKETS - else: - self.ACCOUNTS = GET_USAGE - - def tearDown(self): - super(TestRgwPollster, self).tearDown() - rgw._Base._ENDPOINT = None - - def test_iter_accounts_no_cache(self): - cache = {} - with mockpatch.PatchObject(self.factory, '_get_account_info', - return_value=[]): - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - - self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) - self.assertEqual([], data) - - def test_iter_accounts_cached(self): - # Verify that if a method has already been called, _iter_accounts - # uses the cached version and doesn't call rgw_clinet. 
- mock_method = mock.Mock() - mock_method.side_effect = AssertionError( - 'should not be called', - ) - - api_method = 'get_%s' % self.pollster.METHOD - - with mockpatch.PatchObject(rgw_client, api_method, new=mock_method): - cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - self.assertEqual([self.ACCOUNTS[0]], data) - - def test_metering(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(2, len(samples), self.pollster.__class__) - - def test_get_meter_names(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def test_only_poll_assigned(self): - mock_method = mock.MagicMock() - endpoint = 'http://127.0.0.1:8000/admin' - api_method = 'get_%s' % self.pollster.METHOD - with mockpatch.PatchObject(rgw_client, api_method, new=mock_method): - with mockpatch.PatchObject( - self.manager._catalog, 'url_for', - return_value=endpoint): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - expected = [mock.call(t.id) - for t in ASSIGNED_TENANTS] - self.assertEqual(expected, mock_method.call_args_list) - - def test_get_endpoint_only_once(self): - mock_url_for = mock.MagicMock() - mock_url_for.return_value = '/endpoint' - api_method = 'get_%s' % self.pollster.METHOD - with mockpatch.PatchObject(rgw_client, api_method, - new=mock.MagicMock()): - with mockpatch.PatchObject( - self.manager._catalog, 'url_for', - new=mock_url_for): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - self.assertEqual(1, mock_url_for.call_count) - - def test_endpoint_notfound(self): - with mockpatch.PatchObject( - self.manager._catalog, 'url_for', - side_effect=self.fake_ks_service_catalog_url_for): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(0, len(samples)) diff --git a/ceilometer/tests/unit/objectstore/test_rgw_client.py b/ceilometer/tests/unit/objectstore/test_rgw_client.py deleted file mode 100644 index f2d1ef60..00000000 --- a/ceilometer/tests/unit/objectstore/test_rgw_client.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (C) 2015 Reliance Jio Infocomm Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json - -import mock -from oslotest import base - -from ceilometer.objectstore.rgw_client import RGWAdminAPIFailed -from ceilometer.objectstore.rgw_client import RGWAdminClient - - -RGW_ADMIN_BUCKETS = ''' -[ - { - "max_marker": "", - "ver": 2001, - "usage": { - "rgw.main": { - "size_kb_actual": 16000, - "num_objects": 1000, - "size_kb": 1000 - } - }, - "bucket": "somefoo", - "owner": "admin", - "master_ver": 0, - "mtime": 1420176126, - "marker": "default.4126.1", - "bucket_quota": { - "max_objects": -1, - "enabled": false, - "max_size_kb": -1 - }, - "id": "default.4126.1", - "pool": ".rgw.buckets", - "index_pool": ".rgw.buckets.index" - }, - { - "max_marker": "", - "ver": 3, - "usage": { - "rgw.main": { - "size_kb_actual": 43, - "num_objects": 1, - "size_kb": 42 - } - }, - "bucket": "somefoo31", - "owner": "admin", - "master_ver": 0, - "mtime": 1420176134, - "marker": "default.4126.5", - "bucket_quota": { - "max_objects": -1, - "enabled": false, - "max_size_kb": -1 - }, - "id": "default.4126.5", - "pool": ".rgw.buckets", - "index_pool": ".rgw.buckets.index" - } -]''' - -RGW_ADMIN_USAGE = ''' -{ "entries": [ - { "owner": "5f7fe2d5352e466f948f49341e33d107", - "buckets": [ - { "bucket": "", - "time": "2015-01-23 09:00:00.000000Z", - "epoch": 1422003600, - "categories": [ - { "category": "list_buckets", - "bytes_sent": 46, - "bytes_received": 0, - "ops": 3, - "successful_ops": 3}, - { "category": "stat_account", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 1}]}, - { "bucket": "foodsgh", - "time": "2015-01-23 09:00:00.000000Z", - "epoch": 1422003600, - "categories": [ - { "category": "create_bucket", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 1}, - { "category": "get_obj", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 0}, - { "category": "put_obj", - "bytes_sent": 0, - "bytes_received": 238, - "ops": 1, - "successful_ops": 1}]}]}], - "summary": [ - { "user": "5f7fe2d5352e466f948f49341e33d107", - "categories": [ - { "category": "create_bucket", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 1}, - { "category": "get_obj", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 0}, - { "category": "list_buckets", - "bytes_sent": 46, - "bytes_received": 0, - "ops": 3, - "successful_ops": 3}, - { "category": "put_obj", - "bytes_sent": 0, - "bytes_received": 238, - "ops": 1, - "successful_ops": 1}, - { "category": "stat_account", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 1}], - "total": { "bytes_sent": 46, - "bytes_received": 238, - "ops": 7, - "successful_ops": 6}}]} -''' - -buckets_json = json.loads(RGW_ADMIN_BUCKETS) -usage_json = json.loads(RGW_ADMIN_USAGE) - - -class TestRGWAdminClient(base.BaseTestCase): - - def setUp(self): - super(TestRGWAdminClient, self).setUp() - self.client = RGWAdminClient('http://127.0.0.1:8080/admin', - 'abcde', 'secret') - self.get_resp = mock.MagicMock() - self.get = mock.patch('requests.get', - return_value=self.get_resp).start() - - def test_make_request_exception(self): - self.get_resp.status_code = 403 - self.assertRaises(RGWAdminAPIFailed, self.client._make_request, - *('foo', {})) - - def test_make_request(self): - self.get_resp.status_code = 200 - self.get_resp.json.return_value = buckets_json - actual = self.client._make_request('foo', []) - self.assertEqual(buckets_json, actual) - - def test_get_buckets(self): - self.get_resp.status_code = 200 - self.get_resp.json.return_value = 
buckets_json - actual = self.client.get_bucket('foo') - bucket_list = [RGWAdminClient.Bucket('somefoo', 1000, 1000), - RGWAdminClient.Bucket('somefoo31', 1, 42), - ] - expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, - 'buckets': bucket_list} - self.assertEqual(expected, actual) - - def test_get_usage(self): - self.get_resp.status_code = 200 - self.get_resp.json.return_value = usage_json - actual = self.client.get_usage('foo') - expected = 7 - self.assertEqual(expected, actual) diff --git a/ceilometer/tests/unit/objectstore/test_swift.py b/ceilometer/tests/unit/objectstore/test_swift.py deleted file mode 100644 index 318dd874..00000000 --- a/ceilometer/tests/unit/objectstore/test_swift.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright 2012 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections - -from keystoneauth1 import exceptions -import mock -from oslotest import base -from oslotest import mockpatch -from swiftclient import client as swift_client -import testscenarios.testcase - -from ceilometer.agent import manager -from ceilometer.objectstore import swift - -HEAD_ACCOUNTS = [('tenant-000', {'x-account-object-count': 12, - 'x-account-bytes-used': 321321321, - 'x-account-container-count': 7, - }), - ('tenant-001', {'x-account-object-count': 34, - 'x-account-bytes-used': 9898989898, - 'x-account-container-count': 17, - }), - ('tenant-002-ignored', {'x-account-object-count': 34, - 'x-account-bytes-used': 9898989898, - 'x-account-container-count': 17, - })] - -GET_ACCOUNTS = [('tenant-000', ({'x-account-object-count': 10, - 'x-account-bytes-used': 123123, - 'x-account-container-count': 2, - }, - [{'count': 10, - 'bytes': 123123, - 'name': 'my_container'}, - {'count': 0, - 'bytes': 0, - 'name': 'new_container' - }])), - ('tenant-001', ({'x-account-object-count': 0, - 'x-account-bytes-used': 0, - 'x-account-container-count': 0, - }, [])), - ('tenant-002-ignored', ({'x-account-object-count': 0, - 'x-account-bytes-used': 0, - 'x-account-container-count': 0, - }, []))] - -Tenant = collections.namedtuple('Tenant', 'id') -ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self._keystone = mock.MagicMock() - self._keystone_last_exception = None - self._service_catalog = (self._keystone.session.auth. - get_access.return_value.service_catalog) - self._auth_token = (self._keystone.session.auth. - get_access.return_value.auth_token) - - -class TestSwiftPollster(testscenarios.testcase.WithScenarios, - base.BaseTestCase): - - # Define scenarios to run all of the tests against all of the - # pollsters. 
- scenarios = [ - ('storage.objects', - {'factory': swift.ObjectsPollster}), - ('storage.objects.size', - {'factory': swift.ObjectsSizePollster}), - ('storage.objects.containers', - {'factory': swift.ObjectsContainersPollster}), - ('storage.containers.objects', - {'factory': swift.ContainersObjectsPollster}), - ('storage.containers.objects.size', - {'factory': swift.ContainersSizePollster}), - ] - - @staticmethod - def fake_ks_service_catalog_url_for(*args, **kwargs): - raise exceptions.EndpointNotFound("Fake keystone exception") - - def fake_iter_accounts(self, ksclient, cache, tenants): - tenant_ids = [t.id for t in tenants] - for i in self.ACCOUNTS: - if i[0] in tenant_ids: - yield i - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestSwiftPollster, self).setUp() - self.pollster = self.factory() - self.manager = TestManager() - - if self.pollster.CACHE_KEY_METHOD == 'swift.head_account': - self.ACCOUNTS = HEAD_ACCOUNTS - else: - self.ACCOUNTS = GET_ACCOUNTS - - def tearDown(self): - super(TestSwiftPollster, self).tearDown() - swift._Base._ENDPOINT = None - - def test_iter_accounts_no_cache(self): - cache = {} - with mockpatch.PatchObject(self.factory, '_get_account_info', - return_value=[]): - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - - self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) - self.assertEqual([], data) - - def test_iter_accounts_cached(self): - # Verify that if a method has already been called, _iter_accounts - # uses the cached version and doesn't call swiftclient. - mock_method = mock.Mock() - mock_method.side_effect = AssertionError( - 'should not be called', - ) - - api_method = '%s_account' % self.pollster.METHOD - with mockpatch.PatchObject(swift_client, api_method, new=mock_method): - with mockpatch.PatchObject(self.factory, '_neaten_url'): - cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - self.assertEqual([self.ACCOUNTS[0]], data) - - def test_neaten_url(self): - test_endpoints = ['http://127.0.0.1:8080', - 'http://127.0.0.1:8080/swift'] - test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b' - for test_endpoint in test_endpoints: - standard_url = test_endpoint + '/v1/AUTH_' + test_tenant_id - - url = swift._Base._neaten_url(test_endpoint, test_tenant_id) - self.assertEqual(standard_url, url) - url = swift._Base._neaten_url(test_endpoint + '/', test_tenant_id) - self.assertEqual(standard_url, url) - url = swift._Base._neaten_url(test_endpoint + '/v1', - test_tenant_id) - self.assertEqual(standard_url, url) - url = swift._Base._neaten_url(standard_url, test_tenant_id) - self.assertEqual(standard_url, url) - - def test_metering(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(2, len(samples), self.pollster.__class__) - - def test_get_meter_names(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def test_only_poll_assigned(self): - mock_method = mock.MagicMock() - endpoint = 'end://point/' - api_method = '%s_account' % self.pollster.METHOD - with mockpatch.PatchObject(swift_client, api_method, 
new=mock_method): - with mockpatch.PatchObject( - self.manager._service_catalog, 'url_for', - return_value=endpoint): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - expected = [mock.call(self.pollster._neaten_url(endpoint, t.id), - self.manager._auth_token) - for t in ASSIGNED_TENANTS] - self.assertEqual(expected, mock_method.call_args_list) - - def test_get_endpoint_only_once(self): - endpoint = 'end://point/' - mock_url_for = mock.MagicMock(return_value=endpoint) - api_method = '%s_account' % self.pollster.METHOD - with mockpatch.PatchObject(swift_client, api_method, - new=mock.MagicMock()): - with mockpatch.PatchObject( - self.manager._service_catalog, 'url_for', - new=mock_url_for): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - self.assertEqual(1, mock_url_for.call_count) - - def test_endpoint_notfound(self): - with mockpatch.PatchObject( - self.manager._service_catalog, 'url_for', - side_effect=self.fake_ks_service_catalog_url_for): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(0, len(samples)) diff --git a/ceilometer/tests/unit/publisher/__init__.py b/ceilometer/tests/unit/publisher/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/publisher/test_file.py b/ceilometer/tests/unit/publisher/test_file.py deleted file mode 100644 index 9857b1a3..00000000 --- a/ceilometer/tests/unit/publisher/test_file.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for ceilometer/publisher/file.py -""" - -import datetime -import logging.handlers -import os -import tempfile - -from oslo_utils import netutils -from oslotest import base - -from ceilometer.publisher import file -from ceilometer import sample - - -class TestFilePublisher(base.BaseTestCase): - - test_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def test_file_publisher_maxbytes(self): - # Test valid configurations - tempdir = tempfile.mkdtemp() - name = '%s/log_file' % tempdir - parsed_url = netutils.urlsplit('file://%s?max_bytes=50&backup_count=3' - % name) - publisher = file.FilePublisher(parsed_url) - publisher.publish_samples(self.test_data) - - handler = publisher.publisher_logger.handlers[0] - self.assertIsInstance(handler, - logging.handlers.RotatingFileHandler) - self.assertEqual([50, name, 3], [handler.maxBytes, - handler.baseFilename, - handler.backupCount]) - # The rotating file gets created since only allow 50 bytes. - self.assertTrue(os.path.exists('%s.1' % name)) - - def test_file_publisher(self): - # Test missing max bytes, backup count configurations - tempdir = tempfile.mkdtemp() - name = '%s/log_file_plain' % tempdir - parsed_url = netutils.urlsplit('file://%s' % name) - publisher = file.FilePublisher(parsed_url) - publisher.publish_samples(self.test_data) - - handler = publisher.publisher_logger.handlers[0] - self.assertIsInstance(handler, - logging.handlers.RotatingFileHandler) - self.assertEqual([0, name, 0], [handler.maxBytes, - handler.baseFilename, - handler.backupCount]) - # Test the content is corrected saved in the file - self.assertTrue(os.path.exists(name)) - with open(name, 'r') as f: - content = f.read() - for sample_item in self.test_data: - self.assertIn(sample_item.id, content) - self.assertIn(sample_item.timestamp, content) - - def test_file_publisher_invalid(self): - # Test invalid max bytes, backup count configurations - tempdir = tempfile.mkdtemp() - parsed_url = netutils.urlsplit( - 'file://%s/log_file_bad' - '?max_bytes=yus&backup_count=5y' % tempdir) - publisher = file.FilePublisher(parsed_url) - publisher.publish_samples(self.test_data) - - self.assertIsNone(publisher.publisher_logger) diff --git a/ceilometer/tests/unit/publisher/test_http.py b/ceilometer/tests/unit/publisher/test_http.py deleted file mode 100644 index 78ceb987..00000000 --- a/ceilometer/tests/unit/publisher/test_http.py +++ /dev/null @@ -1,170 +0,0 @@ -# -# Copyright 2016 IBM -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/http.py -""" - -import datetime -import mock -from oslotest import base -from requests import Session -from six.moves.urllib import parse as urlparse -import uuid - -from ceilometer.event.storage import models as event -from ceilometer.publisher import http -from ceilometer import sample - - -class TestHttpPublisher(base.BaseTestCase): - - resource_id = str(uuid.uuid4()) - - sample_data = [ - sample.Sample( - name='alpha', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='beta', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='gamma', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.now().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - event_data = [event.Event( - message_id=str(uuid.uuid4()), event_type='event_%d' % i, - generated=datetime.datetime.utcnow().isoformat(), - traits=[], raw={'payload': {'some': 'aa'}}) for i in range(0, 2)] - - empty_event_data = [event.Event( - message_id=str(uuid.uuid4()), event_type='event_%d' % i, - generated=datetime.datetime.utcnow().isoformat(), - traits=[], raw={'payload': {}}) for i in range(0, 2)] - - def test_http_publisher_config(self): - """Test publisher config parameters.""" - # invalid hostname, the given url, results in an empty hostname - parsed_url = urlparse.urlparse('http:/aaa.bb/path') - self.assertRaises(ValueError, http.HttpPublisher, - parsed_url) - - # invalid port - parsed_url = urlparse.urlparse('http://aaa:bb/path') - self.assertRaises(ValueError, http.HttpPublisher, - parsed_url) - - parsed_url = urlparse.urlparse('http://localhost:90/path1') - publisher = http.HttpPublisher(parsed_url) - # By default, timeout and retry_count should be set to 1000 and 2 - # respectively - self.assertEqual(1, publisher.timeout) - self.assertEqual(2, publisher.max_retries) - - parsed_url = urlparse.urlparse('http://localhost:90/path1?' - 'timeout=19&max_retries=4') - publisher = http.HttpPublisher(parsed_url) - self.assertEqual(19, publisher.timeout) - self.assertEqual(4, publisher.max_retries) - - parsed_url = urlparse.urlparse('http://localhost:90/path1?' - 'timeout=19') - publisher = http.HttpPublisher(parsed_url) - self.assertEqual(19, publisher.timeout) - self.assertEqual(2, publisher.max_retries) - - parsed_url = urlparse.urlparse('http://localhost:90/path1?' 
- 'max_retries=6') - publisher = http.HttpPublisher(parsed_url) - self.assertEqual(1, publisher.timeout) - self.assertEqual(6, publisher.max_retries) - - @mock.patch('ceilometer.publisher.http.LOG') - def test_http_post_samples(self, thelog): - """Test publisher post.""" - parsed_url = urlparse.urlparse('http://localhost:90/path1') - publisher = http.HttpPublisher(parsed_url) - - res = mock.Mock() - res.status_code = 200 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_samples(self.sample_data) - - self.assertEqual(1, m_req.call_count) - self.assertFalse(thelog.error.called) - - res.status_code = 401 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_samples(self.sample_data) - - self.assertEqual(1, m_req.call_count) - self.assertTrue(thelog.error.called) - - @mock.patch('ceilometer.publisher.http.LOG') - def test_http_post_events(self, thelog): - """Test publisher post.""" - parsed_url = urlparse.urlparse('http://localhost:90/path1') - publisher = http.HttpPublisher(parsed_url) - - res = mock.Mock() - res.status_code = 200 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_events(self.event_data) - - self.assertEqual(1, m_req.call_count) - self.assertFalse(thelog.error.called) - - res.status_code = 401 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_samples(self.event_data) - - self.assertEqual(1, m_req.call_count) - self.assertTrue(thelog.error.called) - - @mock.patch('ceilometer.publisher.http.LOG') - def test_http_post_empty_data(self, thelog): - parsed_url = urlparse.urlparse('http://localhost:90/path1') - publisher = http.HttpPublisher(parsed_url) - - res = mock.Mock() - res.status_code = 200 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_events(self.empty_event_data) - - self.assertEqual(0, m_req.call_count) - self.assertTrue(thelog.debug.called) diff --git a/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py b/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py deleted file mode 100644 index aec30d84..00000000 --- a/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py +++ /dev/null @@ -1,210 +0,0 @@ -# -# Copyright 2015 Cisco Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for ceilometer/publisher/kafka_broker.py -""" -import datetime -import uuid - -import mock -from oslo_utils import netutils - -from ceilometer.event.storage import models as event -from ceilometer.publisher import kafka_broker as kafka -from ceilometer.publisher import messaging as msg_publisher -from ceilometer import sample -from ceilometer.tests import base as tests_base - - -@mock.patch('ceilometer.publisher.kafka_broker.LOG', mock.Mock()) -@mock.patch('ceilometer.publisher.kafka_broker.kafka.KafkaClient', - mock.Mock()) -class TestKafkaPublisher(tests_base.BaseTestCase): - test_event_data = [ - event.Event(message_id=uuid.uuid4(), - event_type='event_%d' % i, - generated=datetime.datetime.utcnow(), - traits=[], raw={}) - for i in range(0, 5) - ] - - test_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test3', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def test_publish(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - publisher.publish_samples(self.test_data) - self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_without_options(self): - publisher = kafka.KafkaBrokerPublisher( - netutils.urlsplit('kafka://127.0.0.1:9092')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - publisher.publish_samples(self.test_data) - self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_to_host_without_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer')) - self.assertEqual('default', publisher.policy) - - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=test')) - self.assertEqual('default', publisher.policy) - - def test_publish_to_host_with_default_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=default')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = TypeError - self.assertRaises(msg_publisher.DeliveryFailure, - 
publisher.publish_samples, - self.test_data) - self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_to_host_with_drop_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=drop')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - publisher.publish_samples(self.test_data) - self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_to_host_with_queue_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - publisher.publish_samples(self.test_data) - self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(1, len(publisher.local_queue)) - - def test_publish_to_down_host_with_default_queue_size(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - - for i in range(0, 2000): - for s in self.test_data: - s.name = 'test-%d' % i - publisher.publish_samples(self.test_data) - - self.assertEqual(1024, len(publisher.local_queue)) - self.assertEqual('test-976', - publisher.local_queue[0][1][0]['counter_name']) - self.assertEqual('test-1999', - publisher.local_queue[1023][1][0]['counter_name']) - - def test_publish_to_host_from_down_to_up_with_queue(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - for i in range(0, 16): - for s in self.test_data: - s.name = 'test-%d' % i - publisher.publish_samples(self.test_data) - - self.assertEqual(16, len(publisher.local_queue)) - - fake_producer.send_messages.side_effect = None - for s in self.test_data: - s.name = 'test-%d' % 16 - publisher.publish_samples(self.test_data) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_event_with_default_policy(self): - publisher = kafka.KafkaBrokerPublisher( - netutils.urlsplit('kafka://127.0.0.1:9092?topic=ceilometer')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - publisher.publish_events(self.test_event_data) - self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - self.assertRaises(msg_publisher.DeliveryFailure, - publisher.publish_events, - self.test_event_data) - self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) diff --git a/ceilometer/tests/unit/publisher/test_messaging_publisher.py b/ceilometer/tests/unit/publisher/test_messaging_publisher.py deleted file mode 100644 index 203424e2..00000000 --- a/ceilometer/tests/unit/publisher/test_messaging_publisher.py +++ /dev/null @@ -1,290 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may 
-# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/messaging.py -""" -import datetime -import uuid - -import mock -from oslo_config import fixture as fixture_config -from oslo_utils import netutils -import testscenarios.testcase - -from ceilometer.event.storage import models as event -from ceilometer.publisher import messaging as msg_publisher -from ceilometer import sample -from ceilometer.tests import base as tests_base - - -class BasePublisherTestCase(tests_base.BaseTestCase): - test_event_data = [ - event.Event(message_id=uuid.uuid4(), - event_type='event_%d' % i, - generated=datetime.datetime.utcnow(), - traits=[], raw={}) - for i in range(0, 5) - ] - - test_sample_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test3', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def setUp(self): - super(BasePublisherTestCase, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - - -class NotifierOnlyPublisherTest(BasePublisherTestCase): - - @mock.patch('oslo_messaging.Notifier') - def test_publish_topic_override(self, notifier): - msg_publisher.SampleNotifierPublisher( - netutils.urlsplit('notifier://?topic=custom_topic')) - notifier.assert_called_with(mock.ANY, topics=['custom_topic'], - driver=mock.ANY, retry=mock.ANY, - publisher_id=mock.ANY) - - msg_publisher.EventNotifierPublisher( - netutils.urlsplit('notifier://?topic=custom_event_topic')) - notifier.assert_called_with(mock.ANY, topics=['custom_event_topic'], - driver=mock.ANY, retry=mock.ANY, - publisher_id=mock.ANY) - - -class TestPublisher(testscenarios.testcase.WithScenarios, - BasePublisherTestCase): - scenarios = [ - ('notifier', - dict(protocol="notifier", - publisher_cls=msg_publisher.SampleNotifierPublisher, - test_data=BasePublisherTestCase.test_sample_data, - pub_func='publish_samples', attr='source')), - ('event_notifier', - 
dict(protocol="notifier", - publisher_cls=msg_publisher.EventNotifierPublisher, - test_data=BasePublisherTestCase.test_event_data, - pub_func='publish_events', attr='event_type')), - ] - - def setUp(self): - super(TestPublisher, self).setUp() - self.topic = (self.CONF.publisher_notifier.event_topic - if self.pub_func == 'publish_events' else - self.CONF.publisher_notifier.metering_topic) - - -class TestPublisherPolicy(TestPublisher): - @mock.patch('ceilometer.publisher.messaging.LOG') - def test_published_with_no_policy(self, mylog): - publisher = self.publisher_cls( - netutils.urlsplit('%s://' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - self.assertRaises( - msg_publisher.DeliveryFailure, - getattr(publisher, self.pub_func), - self.test_data) - self.assertTrue(mylog.info.called) - self.assertEqual('default', publisher.policy) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - @mock.patch('ceilometer.publisher.messaging.LOG') - def test_published_with_policy_block(self, mylog): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=default' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - self.assertRaises( - msg_publisher.DeliveryFailure, - getattr(publisher, self.pub_func), - self.test_data) - self.assertTrue(mylog.info.called) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - @mock.patch('ceilometer.publisher.messaging.LOG') - def test_published_with_policy_incorrect(self, mylog): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=notexist' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - self.assertRaises( - msg_publisher.DeliveryFailure, - getattr(publisher, self.pub_func), - self.test_data) - self.assertTrue(mylog.warning.called) - self.assertEqual('default', publisher.policy) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - -@mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock()) -class TestPublisherPolicyReactions(TestPublisher): - - def test_published_with_policy_drop_and_rpc_down(self): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=drop' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - getattr(publisher, self.pub_func)(self.test_data) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - def test_published_with_policy_queue_and_rpc_down(self): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=queue' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - - getattr(publisher, self.pub_func)(self.test_data) - self.assertEqual(1, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - def test_published_with_policy_queue_and_rpc_down_up(self): - self.rpc_unreachable = True - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=queue' % 
self.protocol)) - - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - getattr(publisher, self.pub_func)(self.test_data) - - self.assertEqual(1, len(publisher.local_queue)) - - fake_send.side_effect = mock.MagicMock() - getattr(publisher, self.pub_func)(self.test_data) - - self.assertEqual(0, len(publisher.local_queue)) - - topic = self.topic - expected = [mock.call(topic, mock.ANY), - mock.call(topic, mock.ANY), - mock.call(topic, mock.ANY)] - self.assertEqual(expected, fake_send.mock_calls) - - def test_published_with_policy_sized_queue_and_rpc_down(self): - publisher = self.publisher_cls(netutils.urlsplit( - '%s://?policy=queue&max_queue_length=3' % self.protocol)) - - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - for i in range(0, 5): - for s in self.test_data: - setattr(s, self.attr, 'test-%d' % i) - getattr(publisher, self.pub_func)(self.test_data) - - self.assertEqual(3, len(publisher.local_queue)) - self.assertEqual( - 'test-2', - publisher.local_queue[0][1][0][self.attr] - ) - self.assertEqual( - 'test-3', - publisher.local_queue[1][1][0][self.attr] - ) - self.assertEqual( - 'test-4', - publisher.local_queue[2][1][0][self.attr] - ) - - def test_published_with_policy_default_sized_queue_and_rpc_down(self): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=queue' % self.protocol)) - - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - for i in range(0, 2000): - for s in self.test_data: - setattr(s, self.attr, 'test-%d' % i) - getattr(publisher, self.pub_func)(self.test_data) - - self.assertEqual(1024, len(publisher.local_queue)) - self.assertEqual( - 'test-976', - publisher.local_queue[0][1][0][self.attr] - ) - self.assertEqual( - 'test-1999', - publisher.local_queue[1023][1][0][self.attr] - ) diff --git a/ceilometer/tests/unit/publisher/test_udp.py b/ceilometer/tests/unit/publisher/test_udp.py deleted file mode 100644 index ada303eb..00000000 --- a/ceilometer/tests/unit/publisher/test_udp.py +++ /dev/null @@ -1,174 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for ceilometer/publisher/udp.py -""" - -import datetime -import socket - -import mock -import msgpack -from oslo_config import fixture as fixture_config -from oslo_utils import netutils -from oslotest import base - -from ceilometer.publisher import udp -from ceilometer.publisher import utils -from ceilometer import sample - - -COUNTER_SOURCE = 'testsource' - - -class TestUDPPublisher(base.BaseTestCase): - test_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test3', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - ] - - @staticmethod - def _make_fake_socket(published): - def _fake_socket_socket(family, type): - def record_data(msg, dest): - published.append((msg, dest)) - - udp_socket = mock.Mock() - udp_socket.sendto = record_data - return udp_socket - - return _fake_socket_socket - - def setUp(self): - super(TestUDPPublisher, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.publisher.telemetry_secret = 'not-so-secret' - - def _check_udp_socket(self, url, expected_addr_family): - with mock.patch.object(socket, 'socket') as mock_socket: - udp.UDPPublisher(netutils.urlsplit(url)) - mock_socket.assert_called_with(expected_addr_family, - socket.SOCK_DGRAM) - - def test_publisher_udp_socket_ipv4(self): - self._check_udp_socket('udp://127.0.0.1:4952', - socket.AF_INET) - - def test_publisher_udp_socket_ipv6(self): - self._check_udp_socket('udp://[::1]:4952', - socket.AF_INET6) - - def test_published(self): - self.data_sent = [] - with mock.patch('socket.socket', - self._make_fake_socket(self.data_sent)): - publisher = udp.UDPPublisher( - netutils.urlsplit('udp://somehost')) - publisher.publish_samples(self.test_data) - - self.assertEqual(5, len(self.data_sent)) - - sent_counters = [] - - for data, dest in self.data_sent: - counter = msgpack.loads(data, encoding="utf-8") - sent_counters.append(counter) - - # Check destination - self.assertEqual(('somehost', - self.CONF.collector.udp_port), dest) - - # Check that counters are equal - def sort_func(counter): - return counter['counter_name'] - - counters = [utils.meter_message_from_counter(d, "not-so-secret") - for d in self.test_data] - counters.sort(key=sort_func) - 
sent_counters.sort(key=sort_func) - self.assertEqual(counters, sent_counters) - - @staticmethod - def _raise_ioerror(*args): - raise IOError - - def _make_broken_socket(self, family, type): - udp_socket = mock.Mock() - udp_socket.sendto = self._raise_ioerror - return udp_socket - - def test_publish_error(self): - with mock.patch('socket.socket', - self._make_broken_socket): - publisher = udp.UDPPublisher( - netutils.urlsplit('udp://localhost')) - publisher.publish_samples(self.test_data) diff --git a/ceilometer/tests/unit/publisher/test_utils.py b/ceilometer/tests/unit/publisher/test_utils.py deleted file mode 100644 index 5b5f6736..00000000 --- a/ceilometer/tests/unit/publisher/test_utils.py +++ /dev/null @@ -1,135 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/utils.py -""" -from oslo_serialization import jsonutils -from oslotest import base - -from ceilometer.publisher import utils - - -class TestSignature(base.BaseTestCase): - def test_compute_signature_change_key(self): - sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - sig2 = utils.compute_signature({'A': 'A', 'b': 'B'}, - 'not-so-secret') - self.assertNotEqual(sig1, sig2) - - def test_compute_signature_change_value(self): - sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - sig2 = utils.compute_signature({'a': 'a', 'b': 'B'}, - 'not-so-secret') - self.assertNotEqual(sig1, sig2) - - def test_compute_signature_same(self): - sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - sig2 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - self.assertEqual(sig1, sig2) - - def test_compute_signature_signed(self): - data = {'a': 'A', 'b': 'B'} - sig1 = utils.compute_signature(data, 'not-so-secret') - data['message_signature'] = sig1 - sig2 = utils.compute_signature(data, 'not-so-secret') - self.assertEqual(sig1, sig2) - - def test_compute_signature_use_configured_secret(self): - data = {'a': 'A', 'b': 'B'} - sig1 = utils.compute_signature(data, 'not-so-secret') - sig2 = utils.compute_signature(data, 'different-value') - self.assertNotEqual(sig1, sig2) - - def test_verify_signature_signed(self): - data = {'a': 'A', 'b': 'B'} - sig1 = utils.compute_signature(data, 'not-so-secret') - data['message_signature'] = sig1 - self.assertTrue(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_unsigned(self): - data = {'a': 'A', 'b': 'B'} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_incorrect(self): - data = {'a': 'A', 'b': 'B', - 'message_signature': 'Not the same'} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_invalid_encoding(self): - data = {'a': 'A', 'b': 'B', - 'message_signature': ''} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_unicode(self): - data = {'a': 'A', 'b': 'B', - 'message_signature': 
u''} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_nested(self): - data = {'a': 'A', - 'b': 'B', - 'nested': {'a': 'A', - 'b': 'B', - }, - } - data['message_signature'] = utils.compute_signature( - data, - 'not-so-secret') - self.assertTrue(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_nested_json(self): - data = {'a': 'A', - 'b': 'B', - 'nested': {'a': 'A', - 'b': 'B', - 'c': ('c',), - 'd': ['d'] - }, - } - data['message_signature'] = utils.compute_signature( - data, - 'not-so-secret') - jsondata = jsonutils.loads(jsonutils.dumps(data)) - self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) - - def test_verify_unicode_symbols(self): - data = {u'a\xe9\u0437': 'A', - 'b': u'B\xe9\u0437' - } - data['message_signature'] = utils.compute_signature( - data, - 'not-so-secret') - jsondata = jsonutils.loads(jsonutils.dumps(data)) - self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) - - def test_besteffort_compare_digest(self): - hash1 = "f5ac3fe42b80b80f979825d177191bc5" - hash2 = "f5ac3fe42b80b80f979825d177191bc5" - hash3 = "1dece7821bf3fd70fe1309eaa37d52a2" - hash4 = b"f5ac3fe42b80b80f979825d177191bc5" - hash5 = b"f5ac3fe42b80b80f979825d177191bc5" - hash6 = b"1dece7821bf3fd70fe1309eaa37d52a2" - - self.assertTrue(utils.besteffort_compare_digest(hash1, hash2)) - self.assertFalse(utils.besteffort_compare_digest(hash1, hash3)) - self.assertTrue(utils.besteffort_compare_digest(hash4, hash5)) - self.assertFalse(utils.besteffort_compare_digest(hash4, hash6)) - - def test_verify_no_secret(self): - data = {'a': 'A', 'b': 'B'} - self.assertTrue(utils.verify_signature(data, '')) diff --git a/ceilometer/tests/unit/storage/test_base.py b/ceilometer/tests/unit/storage/test_base.py deleted file mode 100644 index f6b3e989..00000000 --- a/ceilometer/tests/unit/storage/test_base.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import datetime -import math - -from oslotest import base as testbase - -from ceilometer.storage import base - - -class BaseTest(testbase.BaseTestCase): - - def test_iter_period(self): - times = list(base.iter_period( - datetime.datetime(2013, 1, 1, 12, 0), - datetime.datetime(2013, 1, 1, 13, 0), - 60)) - self.assertEqual(60, len(times)) - self.assertEqual((datetime.datetime(2013, 1, 1, 12, 10), - datetime.datetime(2013, 1, 1, 12, 11)), times[10]) - self.assertEqual((datetime.datetime(2013, 1, 1, 12, 21), - datetime.datetime(2013, 1, 1, 12, 22)), times[21]) - - def test_iter_period_bis(self): - times = list(base.iter_period( - datetime.datetime(2013, 1, 2, 13, 0), - datetime.datetime(2013, 1, 2, 14, 0), - 55)) - self.assertEqual(math.ceil(3600 / 55.0), len(times)) - self.assertEqual((datetime.datetime(2013, 1, 2, 13, 9, 10), - datetime.datetime(2013, 1, 2, 13, 10, 5)), - times[10]) - self.assertEqual((datetime.datetime(2013, 1, 2, 13, 19, 15), - datetime.datetime(2013, 1, 2, 13, 20, 10)), - times[21]) - - def test_handle_sort_key(self): - sort_keys_meter = base._handle_sort_key('meter', 'foo') - self.assertEqual(['foo', 'user_id', 'project_id'], sort_keys_meter) - - sort_keys_resource = base._handle_sort_key('resource', 'project_id') - self.assertEqual(['project_id', 'user_id', 'timestamp'], - sort_keys_resource) diff --git a/ceilometer/tests/unit/storage/test_get_connection.py b/ceilometer/tests/unit/storage/test_get_connection.py index 4eb094e9..4adee6dd 100644 --- a/ceilometer/tests/unit/storage/test_get_connection.py +++ b/ceilometer/tests/unit/storage/test_get_connection.py @@ -14,34 +14,27 @@ # under the License. """Tests for ceilometer/storage/ """ -import unittest import mock from oslo_config import fixture as fixture_config from oslotest import base import retrying -try: - from ceilometer.event.storage import impl_hbase as impl_hbase_event -except ImportError: - impl_hbase_event = None +from ceilometer.event.storage import impl_log +from ceilometer.event.storage import impl_sqlalchemy from ceilometer import storage -from ceilometer.storage import impl_log -from ceilometer.storage import impl_sqlalchemy import six class EngineTest(base.BaseTestCase): def test_get_connection(self): - engine = storage.get_connection('log://localhost', - 'ceilometer.metering.storage') + engine = storage.get_connection('log://localhost') self.assertIsInstance(engine, impl_log.Connection) def test_get_connection_no_such_engine(self): try: - storage.get_connection('no-such-engine://localhost', - 'ceilometer.metering.storage') + storage.get_connection('no-such-engine://localhost') except RuntimeError as err: self.assertIn('no-such-engine', six.text_type(err)) @@ -74,44 +67,15 @@ class ConnectionConfigTest(base.BaseTestCase): self.CONF.set_override("connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_log.Connection) def test_two_urls(self): - self.CONF.set_override("connection", "log://", group="database") + self.CONF.set_override("connection", "sqlite://", group="database") + self.CONF.set_override("event_connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_log.Connection) - - @unittest.skipUnless(impl_hbase_event, 'need hbase 
implementation') - def test_three_urls(self): - self.CONF.set_override("connection", "log://", group="database") - self.CONF.set_override("event_connection", "hbase://__test__", - group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'event') - self.assertIsInstance(conn, impl_hbase_event.Connection) - - @unittest.skipUnless(impl_hbase_event, 'need hbase implementation') - def test_three_urls_no_default(self): - self.CONF.set_override("connection", None, group="database") - self.CONF.set_override("metering_connection", "log://", - group="database") - self.CONF.set_override("event_connection", "hbase://__test__", - group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'event') - self.assertIsInstance(conn, impl_hbase_event.Connection) def test_sqlalchemy_driver(self): self.CONF.set_override("connection", "sqlite+pysqlite://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_sqlalchemy.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_sqlalchemy.Connection) diff --git a/ceilometer/tests/unit/storage/test_models.py b/ceilometer/tests/unit/storage/test_models.py deleted file mode 100644 index 9790d241..00000000 --- a/ceilometer/tests/unit/storage/test_models.py +++ /dev/null @@ -1,94 +0,0 @@ -# -# Copyright 2013 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -from oslotest import base as testbase -import six - -from ceilometer.event.storage import models as event_models -from ceilometer.storage import base -from ceilometer.storage import models - - -class FakeModel(base.Model): - def __init__(self, arg1, arg2): - base.Model.__init__(self, arg1=arg1, arg2=arg2) - - -class ModelTest(testbase.BaseTestCase): - - def test_create_attributes(self): - m = FakeModel(1, 2) - self.assertEqual(1, m.arg1) - self.assertEqual(2, m.arg2) - - def test_as_dict(self): - m = FakeModel(1, 2) - d = m.as_dict() - self.assertEqual({'arg1': 1, 'arg2': 2}, d) - - def test_as_dict_recursive(self): - m = FakeModel(1, FakeModel('a', 'b')) - d = m.as_dict() - self.assertEqual({'arg1': 1, - 'arg2': {'arg1': 'a', - 'arg2': 'b'}}, - d) - - def test_as_dict_recursive_list(self): - m = FakeModel(1, [FakeModel('a', 'b')]) - d = m.as_dict() - self.assertEqual({'arg1': 1, - 'arg2': [{'arg1': 'a', - 'arg2': 'b'}]}, - d) - - def test_event_repr_no_traits(self): - x = event_models.Event("1", "name", "now", None, {}) - self.assertEqual("", repr(x)) - - def test_get_field_names_of_sample(self): - sample_fields = ["source", "counter_name", "counter_type", - "counter_unit", "counter_volume", "user_id", - "project_id", "resource_id", "timestamp", - "resource_metadata", "message_id", - "message_signature", "recorded_at"] - - self.assertEqual(set(sample_fields), - set(models.Sample.get_field_names())) - - -class TestTraitModel(testbase.BaseTestCase): - - def test_convert_value(self): - v = event_models.Trait.convert_value( - event_models.Trait.INT_TYPE, '10') - self.assertEqual(10, v) - self.assertIsInstance(v, int) - v = event_models.Trait.convert_value( - event_models.Trait.FLOAT_TYPE, '10') - self.assertEqual(10.0, v) - self.assertIsInstance(v, float) - - v = event_models.Trait.convert_value( - event_models.Trait.DATETIME_TYPE, '2013-08-08 21:05:37.123456') - self.assertEqual(datetime.datetime(2013, 8, 8, 21, 5, 37, 123456), v) - self.assertIsInstance(v, datetime.datetime) - - v = event_models.Trait.convert_value( - event_models.Trait.TEXT_TYPE, 10) - self.assertEqual("10", v) - self.assertIsInstance(v, six.text_type) diff --git a/ceilometer/tests/unit/telemetry/__init__.py b/ceilometer/tests/unit/telemetry/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/telemetry/test_notifications.py b/ceilometer/tests/unit/telemetry/test_notifications.py deleted file mode 100644 index 292c7cd0..00000000 --- a/ceilometer/tests/unit/telemetry/test_notifications.py +++ /dev/null @@ -1,81 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslotest import base - -from ceilometer.telemetry import notifications - -NOTIFICATION = { - u'_context_domain': None, - u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', - 'event_type': u'telemetry.api', - 'timestamp': u'2015-06-1909: 19: 35.786893', - u'_context_auth_token': None, - u'_context_read_only': False, - 'payload': {'samples': - [{'counter_name': u'instance100', - u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'resource_id': u'instance', - u'timestamp': u'2015-06-19T09: 19: 35.785330', - u'message_signature': u'fake_signature1', - u'resource_metadata': {u'foo': u'bar'}, - u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', - u'counter_unit': u'instance', - u'counter_volume': 1.0, - u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', - u'message_id': u'4d865c6e-1664-11e5-9d41-0819a6cff905', - u'counter_type': u'gauge'}, - {u'counter_name': u'instance100', - u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'resource_id': u'instance', - u'timestamp': u'2015-06-19T09: 19: 35.785330', - u'message_signature': u'fake_signature12', - u'resource_metadata': {u'foo': u'bar'}, - u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', - u'counter_unit': u'instance', - u'counter_volume': 1.0, - u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', - u'message_id': u'4d866da8-1664-11e5-9d41-0819a6cff905', - u'counter_type': u'gauge'}]}, - u'_context_resource_uuid': None, - u'_context_user_identity': u'fake_user_identity---', - u'_context_show_deleted': False, - u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', - 'priority': 'info', - u'_context_is_admin': True, - u'_context_project_domain': None, - u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'_context_user_domain': None, - 'publisher_id': u'ceilometer.api', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' -} - - -class TelemetryIpcTestCase(base.BaseTestCase): - - def test_process_notification(self): - sample_creation = notifications.TelemetryIpc(None) - samples = list(sample_creation.process_notification(NOTIFICATION)) - self.assertEqual(2, len(samples)) - payload = NOTIFICATION["payload"]['samples'] - for index, sample in enumerate(samples): - self.assertEqual(payload[index]["user_id"], sample.user_id) - self.assertEqual(payload[index]["counter_name"], sample.name) - self.assertEqual(payload[index]["resource_id"], sample.resource_id) - self.assertEqual(payload[index]["timestamp"], sample.timestamp) - self.assertEqual(payload[index]["resource_metadata"], - sample.resource_metadata) - self.assertEqual(payload[index]["counter_volume"], sample.volume) - self.assertEqual(payload[index]["source"], sample.source) - self.assertEqual(payload[index]["counter_type"], sample.type) - self.assertEqual(payload[index]["message_id"], sample.id) - self.assertEqual(payload[index]["counter_unit"], sample.unit) diff --git a/ceilometer/tests/unit/test_coordination.py b/ceilometer/tests/unit/test_coordination.py deleted file mode 100644 index 966946b1..00000000 --- a/ceilometer/tests/unit/test_coordination.py +++ /dev/null @@ -1,283 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -import mock -from oslo_config import fixture as fixture_config -import tooz.coordination - -from ceilometer import coordination -from ceilometer.tests import base -from ceilometer import utils - - -class MockToozCoordinator(object): - def __init__(self, member_id, shared_storage): - self._member_id = member_id - self._groups = shared_storage - self.is_started = False - - def start(self): - self.is_started = True - - def stop(self): - pass - - def heartbeat(self): - pass - - def create_group(self, group_id): - if group_id in self._groups: - return MockAsyncError( - tooz.coordination.GroupAlreadyExist(group_id)) - self._groups[group_id] = {} - return MockAsyncResult(None) - - def join_group(self, group_id, capabilities=b''): - if group_id not in self._groups: - return MockAsyncError( - tooz.coordination.GroupNotCreated(group_id)) - if self._member_id in self._groups[group_id]: - return MockAsyncError( - tooz.coordination.MemberAlreadyExist(group_id, - self._member_id)) - self._groups[group_id][self._member_id] = { - "capabilities": capabilities, - } - return MockAsyncResult(None) - - def leave_group(self, group_id): - return MockAsyncResult(None) - - def get_members(self, group_id): - if group_id not in self._groups: - return MockAsyncError( - tooz.coordination.GroupNotCreated(group_id)) - return MockAsyncResult(self._groups[group_id]) - - -class MockToozCoordExceptionRaiser(MockToozCoordinator): - def start(self): - raise tooz.coordination.ToozError('error') - - def heartbeat(self): - raise tooz.coordination.ToozError('error') - - def join_group(self, group_id, capabilities=b''): - raise tooz.coordination.ToozError('error') - - def get_members(self, group_id): - raise tooz.coordination.ToozError('error') - - -class MockToozCoordExceptionOnJoinRaiser(MockToozCoordinator): - def __init__(self, member_id, shared_storage, retry_count=None): - super(MockToozCoordExceptionOnJoinRaiser, - self).__init__(member_id, shared_storage) - self.tooz_error_count = retry_count - self.count = 0 - - def join_group(self, group_id, capabilities=b''): - if self.count == self.tooz_error_count: - return MockAsyncResult(None) - else: - self.count += 1 - raise tooz.coordination.ToozError('error') - - -class MockAsyncResult(tooz.coordination.CoordAsyncResult): - def __init__(self, result): - self.result = result - - def get(self, timeout=0): - return self.result - - @staticmethod - def done(): - return True - - -class MockAsyncError(tooz.coordination.CoordAsyncResult): - def __init__(self, error): - self.error = error - - def get(self, timeout=0): - raise self.error - - @staticmethod - def done(): - return True - - -class MockLoggingHandler(logging.Handler): - """Mock logging handler to check for expected logs.""" - - def __init__(self, *args, **kwargs): - self.reset() - logging.Handler.__init__(self, *args, **kwargs) - - def emit(self, record): - self.messages[record.levelname.lower()].append(record.getMessage()) - - def reset(self): - self.messages = {'debug': [], - 'info': [], - 'warning': [], - 'error': [], - 'critical': []} - - -class TestPartitioning(base.BaseTestCase): - - def 
setUp(self): - super(TestPartitioning, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.str_handler = MockLoggingHandler() - coordination.LOG.logger.addHandler(self.str_handler) - self.shared_storage = {} - - def _get_new_started_coordinator(self, shared_storage, agent_id=None, - coordinator_cls=None, retry_count=None, - cleanup_stop=True): - coordinator_cls = coordinator_cls or MockToozCoordinator - self.CONF.set_override('backend_url', 'xxx://yyy', - group='coordination') - with mock.patch('tooz.coordination.get_coordinator', - lambda _, member_id: - coordinator_cls(member_id, shared_storage, - retry_count) if retry_count else - coordinator_cls(member_id, shared_storage)): - pc = coordination.PartitionCoordinator(agent_id) - pc.start() - if cleanup_stop: - self.addCleanup(pc.stop) - return pc - - def _usage_simulation(self, *agents_kwargs): - partition_coordinators = [] - for kwargs in agents_kwargs: - partition_coordinator = self._get_new_started_coordinator( - self.shared_storage, kwargs['agent_id'], kwargs.get( - 'coordinator_cls')) - partition_coordinator.join_group(kwargs['group_id']) - partition_coordinators.append(partition_coordinator) - - for i, kwargs in enumerate(agents_kwargs): - all_resources = kwargs.get('all_resources', []) - expected_resources = kwargs.get('expected_resources', []) - actual_resources = partition_coordinators[i].extract_my_subset( - kwargs['group_id'], all_resources) - self.assertEqual(expected_resources, actual_resources) - - def test_single_group(self): - agents = [dict(agent_id='agent1', group_id='group'), - dict(agent_id='agent2', group_id='group')] - self._usage_simulation(*agents) - - self.assertEqual(['group'], sorted(self.shared_storage.keys())) - self.assertEqual(['agent1', 'agent2'], - sorted(self.shared_storage['group'].keys())) - - def test_multiple_groups(self): - agents = [dict(agent_id='agent1', group_id='group1'), - dict(agent_id='agent2', group_id='group2')] - self._usage_simulation(*agents) - - self.assertEqual(['group1', 'group2'], - sorted(self.shared_storage.keys())) - - def test_partitioning(self): - all_resources = ['resource_%s' % i for i in range(1000)] - agents = ['agent_%s' % i for i in range(10)] - - expected_resources = [list() for _ in range(len(agents))] - hr = utils.HashRing(agents) - for r in all_resources: - key = agents.index(hr.get_node(r)) - expected_resources[key].append(r) - - agents_kwargs = [] - for i, agent in enumerate(agents): - agents_kwargs.append(dict(agent_id=agent, - group_id='group', - all_resources=all_resources, - expected_resources=expected_resources[i])) - self._usage_simulation(*agents_kwargs) - - def test_coordination_backend_offline(self): - agents = [dict(agent_id='agent1', - group_id='group', - all_resources=['res1', 'res2'], - expected_resources=[], - coordinator_cls=MockToozCoordExceptionRaiser)] - self._usage_simulation(*agents) - expected_errors = ['Error getting group membership info from ' - 'coordination backend.', - 'Error connecting to coordination backend.'] - for e in expected_errors: - self.assertIn(e, self.str_handler.messages['error']) - - def test_coordination_backend_connection_fail_on_join(self): - coord = self._get_new_started_coordinator( - {'group'}, 'agent1', MockToozCoordExceptionOnJoinRaiser, - retry_count=2) - with mock.patch('tooz.coordination.get_coordinator', - return_value=MockToozCoordExceptionOnJoinRaiser): - coord.join_group(group_id='group') - - expected_errors = ['Error joining partitioning group group,' - ' re-trying', - 'Error 
joining partitioning group group,' - ' re-trying'] - self.assertEqual(expected_errors, self.str_handler.messages['error']) - - def test_reconnect(self): - coord = self._get_new_started_coordinator({}, 'a', - MockToozCoordExceptionRaiser) - with mock.patch('tooz.coordination.get_coordinator', - return_value=MockToozCoordExceptionRaiser('a', {})): - coord.heartbeat() - expected_errors = ['Error connecting to coordination backend.', - 'Error sending a heartbeat to coordination ' - 'backend.'] - for e in expected_errors: - self.assertIn(e, self.str_handler.messages['error']) - - self.str_handler.messages['error'] = [] - with mock.patch('tooz.coordination.get_coordinator', - return_value=MockToozCoordinator('a', {})): - coord.heartbeat() - for e in expected_errors: - self.assertNotIn(e, self.str_handler.messages['error']) - - def test_group_id_none(self): - coord = self._get_new_started_coordinator({}, 'a') - self.assertTrue(coord._coordinator.is_started) - - with mock.patch.object(coord._coordinator, 'join_group') as mocked: - coord.join_group(None) - self.assertEqual(0, mocked.call_count) - with mock.patch.object(coord._coordinator, 'leave_group') as mocked: - coord.leave_group(None) - self.assertEqual(0, mocked.call_count) - - def test_stop(self): - coord = self._get_new_started_coordinator({}, 'a', cleanup_stop=False) - self.assertTrue(coord._coordinator.is_started) - coord.join_group("123") - coord.stop() - self.assertIsEmpty(coord._groups) - self.assertIsNone(coord._coordinator) diff --git a/ceilometer/tests/unit/test_declarative.py b/ceilometer/tests/unit/test_declarative.py deleted file mode 100644 index 03b1e396..00000000 --- a/ceilometer/tests/unit/test_declarative.py +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright 2016 Mirantis, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslotest import mockpatch - -from ceilometer import declarative -from ceilometer.tests import base - - -class TestDefinition(base.BaseTestCase): - - def setUp(self): - super(TestDefinition, self).setUp() - self.configs = [ - "_field1", - "_field2|_field3", - {'fields': 'field4.`split(., 1, 1)`'}, - {'fields': ['field5.arg', 'field6'], 'type': 'text'} - ] - self.parser = mock.MagicMock() - parser_patch = mockpatch.Patch( - "jsonpath_rw_ext.parser.ExtentedJsonPathParser.parse", - new=self.parser) - self.useFixture(parser_patch) - - def test_caching_parsers(self): - for config in self.configs * 2: - declarative.Definition("test", config, mock.MagicMock()) - self.assertEqual(4, self.parser.call_count) - self.parser.assert_has_calls([ - mock.call("_field1"), - mock.call("_field2|_field3"), - mock.call("field4.`split(., 1, 1)`"), - mock.call("(field5.arg)|(field6)"), - ]) diff --git a/ceilometer/tests/unit/test_decoupled_pipeline.py b/ceilometer/tests/unit/test_decoupled_pipeline.py deleted file mode 100644 index 283144af..00000000 --- a/ceilometer/tests/unit/test_decoupled_pipeline.py +++ /dev/null @@ -1,296 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import yaml - -from ceilometer import pipeline -from ceilometer import sample -from ceilometer.tests import pipeline_base - - -class TestDecoupledPipeline(pipeline_base.BasePipelineTestCase): - def _setup_pipeline_cfg(self): - source = {'name': 'test_source', - 'interval': 5, - 'counters': ['a'], - 'resources': [], - 'sinks': ['test_sink']} - sink = {'name': 'test_sink', - 'transformers': [{'name': 'update', 'parameters': {}}], - 'publishers': ['test://']} - self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} - - def _augment_pipeline_cfg(self): - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['second_sink'] - }) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'transformers': [{ - 'name': 'update', - 'parameters': - { - 'append_name': '_new', - } - }], - 'publishers': ['new'], - }) - - def _break_pipeline_cfg(self): - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['second_sink'] - }) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'transformers': [{ - 'name': 'update', - 'parameters': - { - 'append_name': '_new', - } - }], - 'publishers': ['except'], - }) - - def _dup_pipeline_name_cfg(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['test_sink'] - }) - - def _set_pipeline_cfg(self, field, value): - if field in self.pipeline_cfg['sources'][0]: - self.pipeline_cfg['sources'][0][field] = value - else: - self.pipeline_cfg['sinks'][0][field] = value - - def _extend_pipeline_cfg(self, field, value): - if field in self.pipeline_cfg['sources'][0]: - 
self.pipeline_cfg['sources'][0][field].extend(value) - else: - self.pipeline_cfg['sinks'][0][field].extend(value) - - def _unset_pipeline_cfg(self, field): - if field in self.pipeline_cfg['sources'][0]: - del self.pipeline_cfg['sources'][0][field] - else: - del self.pipeline_cfg['sinks'][0][field] - - def test_source_no_sink(self): - del self.pipeline_cfg['sinks'] - self._exception_create_pipelinemanager() - - def test_source_no_meters_or_counters(self): - del self.pipeline_cfg['sources'][0]['counters'] - self._exception_create_pipelinemanager() - - def test_source_dangling_sink(self): - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['second_sink'] - }) - self._exception_create_pipelinemanager() - - def test_sink_no_source(self): - del self.pipeline_cfg['sources'] - self._exception_create_pipelinemanager() - - def test_source_with_multiple_sinks(self): - counter_cfg = ['a', 'b'] - self._set_pipeline_cfg('counters', counter_cfg) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'transformers': [{ - 'name': 'update', - 'parameters': - { - 'append_name': '_new', - } - }], - 'publishers': ['new'], - }) - self.pipeline_cfg['sources'][0]['sinks'].append('second_sink') - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(2, len(pipeline_manager.pipelines)) - self.assertEqual('test_source:test_sink', - str(pipeline_manager.pipelines[0])) - self.assertEqual('test_source:second_sink', - str(pipeline_manager.pipelines[1])) - test_publisher = pipeline_manager.pipelines[0].publishers[0] - new_publisher = pipeline_manager.pipelines[1].publishers[0] - for publisher, sfx in [(test_publisher, '_update'), - (new_publisher, '_new')]: - self.assertEqual(2, len(publisher.samples)) - self.assertEqual(2, publisher.calls) - self.assertEqual('a' + sfx, getattr(publisher.samples[0], "name")) - self.assertEqual('b' + sfx, getattr(publisher.samples[1], "name")) - - def test_multiple_sources_with_single_sink(self): - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['test_sink'] - }) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(2, len(pipeline_manager.pipelines)) - self.assertEqual('test_source:test_sink', - str(pipeline_manager.pipelines[0])) - self.assertEqual('second_source:test_sink', - 
str(pipeline_manager.pipelines[1])) - test_publisher = pipeline_manager.pipelines[0].publishers[0] - another_publisher = pipeline_manager.pipelines[1].publishers[0] - for publisher in [test_publisher, another_publisher]: - self.assertEqual(2, len(publisher.samples)) - self.assertEqual(2, publisher.calls) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual('b_update', getattr(publisher.samples[1], "name")) - - transformed_samples = self.TransformerClass.samples - self.assertEqual(2, len(transformed_samples)) - self.assertEqual(['a', 'b'], - [getattr(s, 'name') for s in transformed_samples]) - - def _do_test_rate_of_change_in_boilerplate_pipeline_cfg(self, index, - meters, units): - with open('etc/ceilometer/pipeline.yaml') as fap: - data = fap.read() - pipeline_cfg = yaml.safe_load(data) - for s in pipeline_cfg['sinks']: - s['publishers'] = ['test://'] - pipeline_manager = pipeline.PipelineManager(pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[index] - self._do_test_rate_of_change_mapping(pipe, meters, units) - - def test_rate_of_change_boilerplate_disk_read_cfg(self): - meters = ('disk.read.bytes', 'disk.read.requests') - units = ('B', 'request') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, - meters, - units) - - def test_rate_of_change_boilerplate_disk_write_cfg(self): - meters = ('disk.write.bytes', 'disk.write.requests') - units = ('B', 'request') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, - meters, - units) - - def test_rate_of_change_boilerplate_network_incoming_cfg(self): - meters = ('network.incoming.bytes', 'network.incoming.packets') - units = ('B', 'packet') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4, - meters, - units) - - def test_rate_of_change_boilerplate_per_disk_device_read_cfg(self): - meters = ('disk.device.read.bytes', 'disk.device.read.requests') - units = ('B', 'request') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, - meters, - units) - - def test_rate_of_change_boilerplate_per_disk_device_write_cfg(self): - meters = ('disk.device.write.bytes', 'disk.device.write.requests') - units = ('B', 'request') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, - meters, - units) - - def test_rate_of_change_boilerplate_network_outgoing_cfg(self): - meters = ('network.outgoing.bytes', 'network.outgoing.packets') - units = ('B', 'packet') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4, - meters, - units) - - def test_duplicated_sinks_names(self): - self.pipeline_cfg['sinks'].append({ - 'name': 'test_sink', - 'publishers': ['except'], - }) - self.assertRaises(pipeline.PipelineException, - pipeline.PipelineManager, - self.pipeline_cfg, - self.transformer_manager) - - def test_duplicated_source_names(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_source', - 'interval': 5, - 'counters': ['a'], - 'resources': [], - 'sinks': ['test_sink'] - }) - self.assertRaises(pipeline.PipelineException, - pipeline.PipelineManager, - self.pipeline_cfg, - self.transformer_manager) diff --git a/ceilometer/tests/unit/test_event_pipeline.py b/ceilometer/tests/unit/test_event_pipeline.py deleted file mode 100644 index 5c88b4f3..00000000 --- a/ceilometer/tests/unit/test_event_pipeline.py +++ /dev/null @@ -1,410 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import traceback -import uuid - -import mock -from oslo_config import fixture as fixture_config -import oslo_messaging -from oslotest import base -from oslotest import mockpatch - -from ceilometer.event.storage import models -from ceilometer import pipeline -from ceilometer import publisher -from ceilometer.publisher import test as test_publisher -from ceilometer.publisher import utils - - -class EventPipelineTestCase(base.BaseTestCase): - - def get_publisher(self, url, namespace=''): - fake_drivers = {'test://': test_publisher.TestPublisher, - 'new://': test_publisher.TestPublisher, - 'except://': self.PublisherClassException} - return fake_drivers[url](url) - - class PublisherClassException(publisher.PublisherBase): - def publish_samples(self, samples): - pass - - def publish_events(self, events): - raise Exception() - - def setUp(self): - super(EventPipelineTestCase, self).setUp() - self.p_type = pipeline.EVENT_TYPE - self.transformer_manager = None - - self.test_event = models.Event( - message_id=uuid.uuid4(), - event_type='a', - generated=datetime.datetime.utcnow(), - traits=[ - models.Trait('t_text', 1, 'text_trait'), - models.Trait('t_int', 2, 'int_trait'), - models.Trait('t_float', 3, 'float_trait'), - models.Trait('t_datetime', 4, 'datetime_trait') - ], - raw={'status': 'started'} - ) - - self.test_event2 = models.Event( - message_id=uuid.uuid4(), - event_type='b', - generated=datetime.datetime.utcnow(), - traits=[ - models.Trait('t_text', 1, 'text_trait'), - models.Trait('t_int', 2, 'int_trait'), - models.Trait('t_float', 3, 'float_trait'), - models.Trait('t_datetime', 4, 'datetime_trait') - ], - raw={'status': 'stopped'} - ) - - self.useFixture(mockpatch.PatchObject( - publisher, 'get_publisher', side_effect=self.get_publisher)) - - self._setup_pipeline_cfg() - - self._reraise_exception = True - self.useFixture(mockpatch.Patch( - 'ceilometer.pipeline.LOG.exception', - side_effect=self._handle_reraise_exception)) - - def _handle_reraise_exception(self, msg): - if self._reraise_exception: - raise Exception(traceback.format_exc()) - - def _setup_pipeline_cfg(self): - """Setup the appropriate form of pipeline config.""" - source = {'name': 'test_source', - 'events': ['a'], - 'sinks': ['test_sink']} - sink = {'name': 'test_sink', - 'publishers': ['test://']} - self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} - - def _augment_pipeline_cfg(self): - """Augment the pipeline config with an additional element.""" - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'events': ['b'], - 'sinks': ['second_sink'] - }) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'publishers': ['new://'], - }) - - def _break_pipeline_cfg(self): - """Break the pipeline config with a malformed element.""" - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'events': ['b'], - 'sinks': ['second_sink'] - }) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'publishers': ['except'], - }) - - def _dup_pipeline_name_cfg(self): - """Break the pipeline config with duplicate pipeline name.""" - 
self.pipeline_cfg['sources'].append({ - 'name': 'test_source', - 'events': ['a'], - 'sinks': ['test_sink'] - }) - - def _set_pipeline_cfg(self, field, value): - if field in self.pipeline_cfg['sources'][0]: - self.pipeline_cfg['sources'][0][field] = value - else: - self.pipeline_cfg['sinks'][0][field] = value - - def _extend_pipeline_cfg(self, field, value): - if field in self.pipeline_cfg['sources'][0]: - self.pipeline_cfg['sources'][0][field].extend(value) - else: - self.pipeline_cfg['sinks'][0][field].extend(value) - - def _unset_pipeline_cfg(self, field): - if field in self.pipeline_cfg['sources'][0]: - del self.pipeline_cfg['sources'][0][field] - else: - del self.pipeline_cfg['sinks'][0][field] - - def _exception_create_pipelinemanager(self): - self.assertRaises(pipeline.PipelineException, - pipeline.PipelineManager, - self.pipeline_cfg, - self.transformer_manager, - self.p_type) - - def test_no_events(self): - self._unset_pipeline_cfg('events') - self._exception_create_pipelinemanager() - - def test_no_name(self): - self._unset_pipeline_cfg('name') - self._exception_create_pipelinemanager() - - def test_name(self): - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - for pipe in pipeline_manager.pipelines: - self.assertTrue(pipe.name.startswith('event:')) - - def test_no_publishers(self): - self._unset_pipeline_cfg('publishers') - self._exception_create_pipelinemanager() - - def test_check_events_include_exclude_same(self): - event_cfg = ['a', '!a'] - self._set_pipeline_cfg('events', event_cfg) - self._exception_create_pipelinemanager() - - def test_check_events_include_exclude(self): - event_cfg = ['a', '!b'] - self._set_pipeline_cfg('events', event_cfg) - self._exception_create_pipelinemanager() - - def test_check_events_wildcard_included(self): - event_cfg = ['a', '*'] - self._set_pipeline_cfg('events', event_cfg) - self._exception_create_pipelinemanager() - - def test_check_publishers_invalid_publisher(self): - publisher_cfg = ['test_invalid'] - self._set_pipeline_cfg('publishers', publisher_cfg) - - def test_multiple_included_events(self): - event_cfg = ['a', 'b'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - - with pipeline_manager.publisher() as p: - p([self.test_event2]) - - self.assertEqual(2, len(publisher.events)) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - self.assertEqual('b', getattr(publisher.events[1], 'event_type')) - - def test_event_non_match(self): - event_cfg = ['nomatch'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.events)) - self.assertEqual(0, publisher.calls) - - def test_wildcard_event(self): - event_cfg = ['*'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - 
self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_wildcard_excluded_events(self): - event_cfg = ['*', '!a'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) - - def test_wildcard_excluded_events_not_excluded(self): - event_cfg = ['*', '!b'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_all_excluded_events_not_excluded(self): - event_cfg = ['!b', '!c'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_all_excluded_events_excluded(self): - event_cfg = ['!a', '!c'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) - self.assertTrue(pipeline_manager.pipelines[0].support_event('b')) - self.assertFalse(pipeline_manager.pipelines[0].support_event('c')) - - def test_wildcard_and_excluded_wildcard_events(self): - event_cfg = ['*', '!compute.*'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertFalse(pipeline_manager.pipelines[0]. - support_event('compute.instance.create.start')) - self.assertTrue(pipeline_manager.pipelines[0]. - support_event('identity.user.create')) - - def test_included_event_and_wildcard_events(self): - event_cfg = ['compute.instance.create.start', 'identity.*'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertTrue(pipeline_manager.pipelines[0]. - support_event('identity.user.create')) - self.assertTrue(pipeline_manager.pipelines[0]. - support_event('compute.instance.create.start')) - self.assertFalse(pipeline_manager.pipelines[0]. - support_event('compute.instance.create.stop')) - - def test_excluded_event_and_excluded_wildcard_events(self): - event_cfg = ['!compute.instance.create.start', '!identity.*'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertFalse(pipeline_manager.pipelines[0]. - support_event('identity.user.create')) - self.assertFalse(pipeline_manager.pipelines[0]. - support_event('compute.instance.create.start')) - self.assertTrue(pipeline_manager.pipelines[0]. 
- support_event('compute.instance.create.stop')) - - def test_multiple_pipeline(self): - self._augment_pipeline_cfg() - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event, self.test_event2]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - self.assertEqual(1, publisher.calls) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - new_publisher = pipeline_manager.pipelines[1].publishers[0] - self.assertEqual(1, len(new_publisher.events)) - self.assertEqual(1, new_publisher.calls) - self.assertEqual('b', getattr(new_publisher.events[0], 'event_type')) - - def test_multiple_publisher(self): - self._set_pipeline_cfg('publishers', ['test://', 'new://']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - new_publisher = pipeline_manager.pipelines[0].publishers[1] - self.assertEqual(1, len(publisher.events)) - self.assertEqual(1, len(new_publisher.events)) - self.assertEqual('a', getattr(new_publisher.events[0], 'event_type')) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_multiple_publisher_isolation(self): - self._reraise_exception = False - self._set_pipeline_cfg('publishers', ['except://', 'new://']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[1] - self.assertEqual(1, len(publisher.events)) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_unique_pipeline_names(self): - self._dup_pipeline_name_cfg() - self._exception_create_pipelinemanager() - - def test_event_pipeline_endpoint_requeue_on_failure(self): - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF([]) - - self.CONF.set_override("ack_on_event_error", False, - group="notification") - self.CONF.set_override("telemetry_secret", "not-so-secret", - group="publisher") - test_data = { - 'message_id': uuid.uuid4(), - 'event_type': 'a', - 'generated': '2013-08-08 21:06:37.803826', - 'traits': [ - {'name': 't_text', - 'value': 1, - 'dtype': 'text_trait' - } - ], - 'raw': {'status': 'started'} - } - message_sign = utils.compute_signature(test_data, 'not-so-secret') - test_data['message_signature'] = message_sign - - fake_publisher = mock.Mock() - self.useFixture(mockpatch.Patch( - 'ceilometer.publisher.test.TestPublisher', - return_value=fake_publisher)) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - event_pipeline_endpoint = pipeline.EventPipelineEndpoint( - pipeline_manager.pipelines[0]) - - fake_publisher.publish_events.side_effect = Exception - ret = event_pipeline_endpoint.sample([ - {'ctxt': {}, 'publisher_id': 'compute.vagrant-precise', - 'event_type': 'a', 'payload': [test_data], 'metadata': {}}]) - self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) diff --git a/ceilometer/tests/unit/test_messaging.py b/ceilometer/tests/unit/test_messaging.py deleted file mode 100644 index 78595762..00000000 --- a/ceilometer/tests/unit/test_messaging.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under 
the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import fixture as fixture_config -import oslo_messaging.conffixture -from oslotest import base - -from ceilometer import messaging - - -class MessagingTests(base.BaseTestCase): - def setUp(self): - super(MessagingTests, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) - - def test_get_transport_invalid_url(self): - self.assertRaises(oslo_messaging.InvalidTransportURL, - messaging.get_transport, "notvalid!") - - def test_get_transport_url_caching(self): - t1 = messaging.get_transport('fake://') - t2 = messaging.get_transport('fake://') - self.assertEqual(t1, t2) - - def test_get_transport_default_url_caching(self): - t1 = messaging.get_transport() - t2 = messaging.get_transport() - self.assertEqual(t1, t2) - - def test_get_transport_default_url_no_caching(self): - t1 = messaging.get_transport(cache=False) - t2 = messaging.get_transport(cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_url_no_caching(self): - t1 = messaging.get_transport('fake://', cache=False) - t2 = messaging.get_transport('fake://', cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_default_url_caching_mix(self): - t1 = messaging.get_transport() - t2 = messaging.get_transport(cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_url_caching_mix(self): - t1 = messaging.get_transport('fake://') - t2 = messaging.get_transport('fake://', cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_optional(self): - self.CONF.set_override('rpc_backend', '') - self.assertIsNone(messaging.get_transport(optional=True, - cache=False)) diff --git a/ceilometer/tests/unit/test_middleware.py b/ceilometer/tests/unit/test_middleware.py deleted file mode 100644 index 85aba8ec..00000000 --- a/ceilometer/tests/unit/test_middleware.py +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import mock -from oslo_config import fixture as fixture_config - -from ceilometer import middleware -from ceilometer.tests import base - - -HTTP_REQUEST = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:23:41.425105', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'http.request', - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', - 'HTTP_X_USER_ID': 'jd-x32', - 'HTTP_X_PROJECT_ID': 'project-id', - 'HTTP_X_SERVICE_NAME': 'nova'}}, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -HTTP_RESPONSE = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:23:41.425105', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'http.response', - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', - 'HTTP_X_USER_ID': 'jd-x32', - 'HTTP_X_PROJECT_ID': 'project-id', - 'HTTP_X_SERVICE_NAME': 'nova'}, - u'response': {'status': '200 OK'}}, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - - -class TestNotifications(base.BaseTestCase): - - def setUp(self): - super(TestNotifications, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - - def test_process_request_notification(self): - sample = list(middleware.HTTPRequest(mock.Mock()).process_notification( - HTTP_REQUEST - ))[0] - self.assertEqual(HTTP_REQUEST['payload']['request']['HTTP_X_USER_ID'], - sample.user_id) - self.assertEqual(HTTP_REQUEST['payload']['request'] - ['HTTP_X_PROJECT_ID'], sample.project_id) - self.assertEqual(HTTP_REQUEST['payload']['request'] - ['HTTP_X_SERVICE_NAME'], sample.resource_id) - self.assertEqual(1, sample.volume) - - def test_process_response_notification(self): - sample = list(middleware.HTTPResponse( - mock.Mock()).process_notification(HTTP_RESPONSE))[0] - self.assertEqual(HTTP_RESPONSE['payload']['request']['HTTP_X_USER_ID'], - sample.user_id) - self.assertEqual(HTTP_RESPONSE['payload']['request'] - ['HTTP_X_PROJECT_ID'], sample.project_id) - self.assertEqual(HTTP_RESPONSE['payload']['request'] - ['HTTP_X_SERVICE_NAME'], sample.resource_id) - self.assertEqual(1, sample.volume) - - def test_targets(self): - targets = middleware.HTTPRequest(mock.Mock()).get_targets(self.CONF) - self.assertEqual(4, len(targets)) diff --git a/ceilometer/tests/unit/test_neutronclient.py b/ceilometer/tests/unit/test_neutronclient.py deleted file mode 100644 index 4bf61fc3..00000000 --- a/ceilometer/tests/unit/test_neutronclient.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslotest import base - -from ceilometer import neutron_client - - -class TestNeutronClient(base.BaseTestCase): - - def setUp(self): - super(TestNeutronClient, self).setUp() - self.nc = neutron_client.Client() - self.nc.lb_version = 'v1' - - @staticmethod - def fake_ports_list(): - return {'ports': - [{'admin_state_up': True, - 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', - 'device_owner': 'network:router_gateway', - 'extra_dhcp_opts': [], - 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', - 'mac_address': 'fa:16:3e:c5:35:93', - 'name': '', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'status': 'ACTIVE', - 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}, - ]} - - def test_port_get_all(self): - with mock.patch.object(self.nc.client, 'list_ports', - side_effect=self.fake_ports_list): - ports = self.nc.port_get_all() - - self.assertEqual(1, len(ports)) - self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442', - ports[0]['id']) - - @staticmethod - def fake_networks_list(): - return {'networks': - [{'admin_state_up': True, - 'id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'name': 'public', - 'provider:network_type': 'gre', - 'provider:physical_network': None, - 'provider:segmentation_id': 2, - 'router:external': True, - 'shared': False, - 'status': 'ACTIVE', - 'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'], - 'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'}, - ]} - - @staticmethod - def fake_pool_list(): - return {'pools': [{'status': 'ACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - ]} - - def test_pool_list(self): - with mock.patch.object(self.nc.client, 'list_pools', - side_effect=self.fake_pool_list): - pools = self.nc.pool_get_all() - - self.assertEqual(1, len(pools)) - self.assertEqual('ce73ad36-437d-4c84-aee1-186027d3da9a', - pools[0]['id']) - - @staticmethod - def fake_vip_list(): - return {'vips': [{'status': 'ACTIVE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.2', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip'}, - ]} - - def test_vip_list(self): - with mock.patch.object(self.nc.client, 'list_vips', - side_effect=self.fake_vip_list): - vips = self.nc.vip_get_all() - - self.assertEqual(1, len(vips)) - self.assertEqual('cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - vips[0]['id']) - - 
@staticmethod - def fake_member_list(): - return {'members': [{'status': 'ACTIVE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.3', - 'status_description': None, - 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, - ]} - - def test_member_list(self): - with mock.patch.object(self.nc.client, 'list_members', - side_effect=self.fake_member_list): - members = self.nc.member_get_all() - - self.assertEqual(1, len(members)) - self.assertEqual('290b61eb-07bc-4372-9fbf-36459dd0f96b', - members[0]['id']) - - @staticmethod - def fake_monitors_list(): - return {'health_monitors': - [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', - 'admin_state_up': True, - 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", - 'delay': 2, - 'max_retries': 5, - 'timeout': 5, - 'pools': [], - 'type': 'PING', - }]} - - def test_monitor_list(self): - with mock.patch.object(self.nc.client, 'list_health_monitors', - side_effect=self.fake_monitors_list): - monitors = self.nc.health_monitor_get_all() - - self.assertEqual(1, len(monitors)) - self.assertEqual('34ae33e1-0035-49e2-a2ca-77d5d3fab365', - monitors[0]['id']) - - @staticmethod - def fake_pool_stats(fake_pool): - return {'stats': - [{'active_connections': 1, - 'total_connections': 2, - 'bytes_in': 3, - 'bytes_out': 4 - }]} - - def test_pool_stats(self): - with mock.patch.object(self.nc.client, 'retrieve_pool_stats', - side_effect=self.fake_pool_stats): - stats = self.nc.pool_stats('fake_pool')['stats'] - - self.assertEqual(1, len(stats)) - self.assertEqual(1, stats[0]['active_connections']) - self.assertEqual(2, stats[0]['total_connections']) - self.assertEqual(3, stats[0]['bytes_in']) - self.assertEqual(4, stats[0]['bytes_out']) diff --git a/ceilometer/tests/unit/test_neutronclient_lbaas_v2.py b/ceilometer/tests/unit/test_neutronclient_lbaas_v2.py deleted file mode 100644 index 6b88aa5a..00000000 --- a/ceilometer/tests/unit/test_neutronclient_lbaas_v2.py +++ /dev/null @@ -1,336 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from neutronclient.v2_0 import client -from oslotest import base - -from ceilometer import neutron_client - - -class TestNeutronClientLBaaSV2(base.BaseTestCase): - - def setUp(self): - super(TestNeutronClientLBaaSV2, self).setUp() - self.nc = neutron_client.Client() - - @staticmethod - def fake_list_lbaas_pools(): - return { - 'pools': [{ - 'lb_algorithm': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': 'simple pool', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'healthmonitor_id': None, - 'listeners': [{ - 'id': "35cb8516-1173-4035-8dae-0dae3453f37f" - } - ], - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858'} - ], - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - 'name': 'pool1' - }] - } - - @staticmethod - def fake_list_lbaas_members(): - return { - 'members': [{ - 'weight': 1, - 'admin_state_up': True, - 'subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'address': '10.0.0.8', - 'protocol_port': 80, - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858' - }] - } - - @staticmethod - def fake_list_lbaas_healthmonitors(): - return { - 'healthmonitors': [{ - 'admin_state_up': True, - 'tenant_id': '6f3584d5754048a18e30685362b88411', - 'delay': 1, - 'expected_codes': '200,201,202', - 'max_retries': 5, - 'http_method': 'GET', - 'timeout': 1, - 'pools': [{ - 'id': '74aa2010-a59f-4d35-a436-60a6da882819' - }], - 'url_path': '/index.html', - 'type': 'HTTP', - 'id': '0a9ac99d-0a09-4b18-8499-a0796850279a' - }] - } - - @staticmethod - def fake_show_listener(): - return { - 'listener': { - 'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'loadbalancers': [{ - 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' - }], - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'connection_limit': 100, - 'protocol_port': 80, - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'name': '' - } - } - - @staticmethod - def fake_retrieve_loadbalancer_status(): - return { - 'statuses': { - 'loadbalancer': { - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'listeners': [{ - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'pools': [{ - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE' - }], - 'healthmonitor': { - 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', - 'provisioning_status': 'ACTIVE' - } - }] - }] - } - } - } - - @staticmethod - def fake_retrieve_loadbalancer_status_complex(): - return { - 'statuses': { - 'loadbalancer': { - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'listeners': [{ - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'pools': [{ - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE' - }, - { - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf969', - 'operating_status': 'OFFLINE', - 'provisioning_status': 'ACTIVE' - }], - 'healthmonitor': { - 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', - 'provisioning_status': 'ACTIVE' - } - }, - { - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6', - 'operating_status': 
'OFFLINE', - 'provisioning_status': 'ACTIVE', - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfa7a', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE' - }], - 'healthmonitor': { - 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4629', - 'provisioning_status': 'ACTIVE' - } - }] - }, - { - 'id': '35cb8516-1173-4035-8dae-0dae3453f48e', - 'operating_status': 'OFFLINE', - 'provisioning_status': 'ACTIVE', - 'pools': [{ - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce7g7', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfb8b', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE' - }], - 'healthmonitor': { - 'id': '785131d2-8f7b-4fee-a7e7-3196e11b473a', - 'provisioning_status': 'ACTIVE' - } - }] - }] - } - } - } - - @staticmethod - def fake_list_lbaas_listeners(): - return { - 'listeners': [{ - 'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'loadbalancers': [{ - 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' - }], - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'connection_limit': 100, - 'protocol_port': 80, - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'name': 'listener_one' - }]} - - @mock.patch.object(client.Client, - 'list_lbaas_pools') - @mock.patch.object(client.Client, - 'show_listener') - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_list_pools_v2(self, mock_status, mock_show, mock_list): - mock_status.return_value = self.fake_retrieve_loadbalancer_status() - mock_show.return_value = self.fake_show_listener() - mock_list.return_value = self.fake_list_lbaas_pools() - pools = self.nc.list_pools_v2() - self.assertEqual(1, len(pools)) - for pool in pools: - self.assertEqual('ONLINE', pool['status']) - self.assertEqual('ROUND_ROBIN', pool['lb_method']) - - @mock.patch.object(client.Client, - 'list_lbaas_pools') - @mock.patch.object(client.Client, - 'list_lbaas_members') - @mock.patch.object(client.Client, - 'show_listener') - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_list_members_v2(self, mock_status, mock_show, mock_list_members, - mock_list_pools): - mock_status.return_value = self.fake_retrieve_loadbalancer_status() - mock_show.return_value = self.fake_show_listener() - mock_list_pools.return_value = self.fake_list_lbaas_pools() - mock_list_members.return_value = self.fake_list_lbaas_members() - members = self.nc.list_members_v2() - self.assertEqual(1, len(members)) - for member in members: - self.assertEqual('ONLINE', member['status']) - self.assertEqual('4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - member['pool_id']) - - @mock.patch.object(client.Client, - 'list_lbaas_healthmonitors') - def test_list_health_monitors_v2(self, mock_list_healthmonitors): - mock_list_healthmonitors.return_value = ( - self.fake_list_lbaas_healthmonitors()) - healthmonitors = self.nc.list_health_monitors_v2() - self.assertEqual(1, len(healthmonitors)) - for healthmonitor in healthmonitors: - self.assertEqual(5, healthmonitor['max_retries']) - - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_get_member_status(self, mock_status): - mock_status.return_value = ( - self.fake_retrieve_loadbalancer_status_complex()) - loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' - listener_id = '35cb8516-1173-4035-8dae-0dae3453f37f' - pool_id = '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5' - parent_id = [listener_id, pool_id] - result_status 
= self.nc._get_member_status(loadbalancer_id, - parent_id) - expected_keys = ['fcf23bde-8cf9-4616-883f-208cebcbf858', - 'fcf23bde-8cf9-4616-883f-208cebcbf969'] - excepted_status = { - 'fcf23bde-8cf9-4616-883f-208cebcbf858': 'ONLINE', - 'fcf23bde-8cf9-4616-883f-208cebcbf969': 'OFFLINE'} - - for key in result_status.keys(): - self.assertIn(key, expected_keys) - self.assertEqual(excepted_status[key], result_status[key]) - - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_get_pool_status(self, mock_status): - mock_status.return_value = ( - self.fake_retrieve_loadbalancer_status_complex()) - loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' - parent_id = '35cb8516-1173-4035-8dae-0dae3453f37f' - result_status = self.nc._get_pool_status(loadbalancer_id, - parent_id) - expected_keys = ['4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6'] - excepted_status = { - '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5': 'ONLINE', - '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6': 'OFFLINE'} - - for key in result_status.keys(): - self.assertIn(key, expected_keys) - self.assertEqual(excepted_status[key], result_status[key]) - - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_get_listener_status(self, mock_status): - mock_status.return_value = ( - self.fake_retrieve_loadbalancer_status_complex()) - loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' - result_status = self.nc._get_listener_status(loadbalancer_id) - expected_keys = ['35cb8516-1173-4035-8dae-0dae3453f37f', - '35cb8516-1173-4035-8dae-0dae3453f48e'] - excepted_status = { - '35cb8516-1173-4035-8dae-0dae3453f37f': 'ONLINE', - '35cb8516-1173-4035-8dae-0dae3453f48e': 'OFFLINE'} - - for key in result_status.keys(): - self.assertIn(key, expected_keys) - self.assertEqual(excepted_status[key], result_status[key]) - - @mock.patch.object(client.Client, - 'list_listeners') - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_list_listener(self, mock_status, mock_list_listeners): - mock_list_listeners.return_value = ( - self.fake_list_lbaas_listeners()) - mock_status.return_value = ( - self.fake_retrieve_loadbalancer_status()) - listeners = self.nc.list_listener() - expected_key = '35cb8516-1173-4035-8dae-0dae3453f37f' - expected_status = 'ONLINE' - self.assertEqual(1, len(listeners)) - self.assertEqual(expected_key, listeners[0]['id']) - self.assertEqual(expected_status, listeners[0]['operating_status']) diff --git a/ceilometer/tests/unit/test_novaclient.py b/ceilometer/tests/unit/test_novaclient.py deleted file mode 100644 index cb3fc847..00000000 --- a/ceilometer/tests/unit/test_novaclient.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
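[editor note] The neutron client tests above all drive lookups against a single nested LBaaS v2 status tree (loadbalancer -> listeners -> pools -> members, each object carrying an operating_status). As a rough, standalone illustration of that traversal -- the function names and the bare `tree` argument are assumptions made for this sketch, not the deleted client's actual methods:

# Sketch of walking an LBaaS v2 status tree the way the tests above expect.
# `tree` is the dict found under statuses['loadbalancer'] in the fixtures.

def listener_statuses(tree):
    """Map listener id -> operating_status for one load balancer."""
    return dict((l['id'], l['operating_status'])
                for l in tree.get('listeners', []))


def pool_statuses(tree, listener_id):
    """Map pool id -> operating_status under the given listener."""
    result = {}
    for listener in tree.get('listeners', []):
        if listener['id'] != listener_id:
            continue
        for pool in listener.get('pools', []):
            result[pool['id']] = pool['operating_status']
    return result


def member_statuses(tree, listener_id, pool_id):
    """Map member id -> operating_status under the given listener/pool."""
    result = {}
    for listener in tree.get('listeners', []):
        if listener['id'] != listener_id:
            continue
        for pool in listener.get('pools', []):
            if pool['id'] != pool_id:
                continue
            for member in pool.get('members', []):
                result[member['id']] = member['operating_status']
    return result

Keying the results by object id is what lets the tests assert one status per member, pool or listener without depending on ordering.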
- -import mock -import novaclient -from oslo_config import fixture as fixture_config -from oslotest import base -from oslotest import mockpatch - -from ceilometer import nova_client - - -class TestNovaClient(base.BaseTestCase): - - def setUp(self): - super(TestNovaClient, self).setUp() - self._flavors_count = 0 - self._images_count = 0 - self.nv = nova_client.Client() - self.useFixture(mockpatch.PatchObject( - self.nv.nova_client.flavors, 'get', - side_effect=self.fake_flavors_get)) - self.useFixture(mockpatch.PatchObject( - self.nv.nova_client.images, 'get', - side_effect=self.fake_images_get)) - self.CONF = self.useFixture(fixture_config.Config()).conf - - def fake_flavors_get(self, *args, **kwargs): - self._flavors_count += 1 - a = mock.MagicMock() - a.id = args[0] - if a.id == 1: - a.name = 'm1.tiny' - elif a.id == 2: - a.name = 'm1.large' - else: - raise novaclient.exceptions.NotFound('foobar') - return a - - def fake_images_get(self, *args, **kwargs): - self._images_count += 1 - a = mock.MagicMock() - a.id = args[0] - image_details = { - 1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)), - 2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)), - 3: ('rhel-6-x64', None), - 4: ('rhel-6-x64', dict()), - 5: ('rhel-6-x64', dict(kernel_id=11)), - 6: ('rhel-6-x64', dict(ramdisk_id=21)) - } - - if a.id in image_details: - a.name = image_details[a.id][0] - a.metadata = image_details[a.id][1] - else: - raise novaclient.exceptions.NotFound('foobar') - - return a - - @staticmethod - def fake_servers_list(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 1} - a.image = {'id': 1} - b = mock.MagicMock() - b.id = 43 - b.flavor = {'id': 2} - b.image = {'id': 2} - return [a, b] - - def test_instance_get_all_by_host(self): - with mock.patch.object(self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list): - instances = self.nv.instance_get_all_by_host('foobar') - - self.assertEqual(2, len(instances)) - self.assertEqual('m1.tiny', instances[0].flavor['name']) - self.assertEqual('ubuntu-12.04-x86', instances[0].image['name']) - self.assertEqual(11, instances[0].kernel_id) - self.assertEqual(21, instances[0].ramdisk_id) - - def test_instance_get_all(self): - with mock.patch.object(self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list): - instances = self.nv.instance_get_all() - - self.assertEqual(2, len(instances)) - self.assertEqual(42, instances[0].id) - self.assertEqual(1, instances[0].flavor['id']) - self.assertEqual(1, instances[0].image['id']) - - @staticmethod - def fake_servers_list_unknown_flavor(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 666} - a.image = {'id': 1} - return [a] - - def test_instance_get_all_by_host_unknown_flavor(self): - with mock.patch.object( - self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list_unknown_flavor): - instances = self.nv.instance_get_all_by_host('foobar') - - self.assertEqual(1, len(instances)) - self.assertEqual('unknown-id-666', instances[0].flavor['name']) - - @staticmethod - def fake_servers_list_unknown_image(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 1} - a.image = {'id': 666} - return [a] - - @staticmethod - def fake_servers_list_image_missing_metadata(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 1} - a.image = {'id': args[0]} - return [a] - - @staticmethod - def fake_instance_image_missing(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 666} - a.image = 
None - return [a] - - def test_instance_get_all_by_host_unknown_image(self): - with mock.patch.object( - self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list_unknown_image): - instances = self.nv.instance_get_all_by_host('foobar') - - self.assertEqual(1, len(instances)) - self.assertEqual('unknown-id-666', instances[0].image['name']) - - def test_with_flavor_and_image(self): - results = self.nv._with_flavor_and_image(self.fake_servers_list()) - instance = results[0] - self.assertEqual(2, len(results)) - self.assertEqual('ubuntu-12.04-x86', instance.image['name']) - self.assertEqual('m1.tiny', instance.flavor['name']) - self.assertEqual(11, instance.kernel_id) - self.assertEqual(21, instance.ramdisk_id) - - def test_with_flavor_and_image_unknown_image(self): - instances = self.fake_servers_list_unknown_image() - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertEqual('unknown-id-666', instance.image['name']) - self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_unknown_flavor(self): - instances = self.fake_servers_list_unknown_flavor() - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertEqual('unknown-id-666', instance.flavor['name']) - self.assertEqual(0, instance.flavor['vcpus']) - self.assertEqual(0, instance.flavor['ram']) - self.assertEqual(0, instance.flavor['disk']) - self.assertNotEqual(instance.image['name'], 'unknown-id-666') - self.assertEqual(11, instance.kernel_id) - self.assertEqual(21, instance.ramdisk_id) - - def test_with_flavor_and_image_none_metadata(self): - instances = self.fake_servers_list_image_missing_metadata(3) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_missing_metadata(self): - instances = self.fake_servers_list_image_missing_metadata(4) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_missing_ramdisk(self): - instances = self.fake_servers_list_image_missing_metadata(5) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertEqual(11, instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_missing_kernel(self): - instances = self.fake_servers_list_image_missing_metadata(6) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertEqual(21, instance.ramdisk_id) - - def test_with_flavor_and_image_no_cache(self): - results = self.nv._with_flavor_and_image(self.fake_servers_list()) - self.assertEqual(2, len(results)) - self.assertEqual(2, self._flavors_count) - self.assertEqual(2, self._images_count) - - def test_with_flavor_and_image_cache(self): - results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2) - self.assertEqual(4, len(results)) - self.assertEqual(2, self._flavors_count) - self.assertEqual(2, self._images_count) - - def test_with_flavor_and_image_unknown_image_cache(self): - instances = self.fake_servers_list_unknown_image() - results = self.nv._with_flavor_and_image(instances * 2) - self.assertEqual(2, len(results)) - self.assertEqual(1, self._flavors_count) - self.assertEqual(1, 
self._images_count) - for instance in results: - self.assertEqual('unknown-id-666', instance.image['name']) - self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_missing_image_instance(self): - instances = self.fake_instance_image_missing() - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.image) - self.assertIsNone(instance.ramdisk_id) - - def test_with_nova_http_log_debug(self): - self.CONF.set_override("nova_http_log_debug", True) - self.nv = nova_client.Client() - self.assertIsNotNone(self.nv.nova_client.client.logger) diff --git a/ceilometer/tests/unit/test_sample.py b/ceilometer/tests/unit/test_sample.py deleted file mode 100644 index b64e6709..00000000 --- a/ceilometer/tests/unit/test_sample.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for ceilometer/sample.py""" - -import datetime - -from ceilometer import sample -from ceilometer.tests import base - - -class TestSample(base.BaseTestCase): - SAMPLE = sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - unit='ns', - volume='1234567', - user_id='56c5692032f34041900342503fecab30', - project_id='ac9494df2d9d4e709bac378cceabaf23', - resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', - timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877), - resource_metadata={} - ) - - def test_sample_string_format(self): - expected = ('') - self.assertEqual(expected, str(self.SAMPLE)) - - def test_sample_from_notifications_list(self): - msg = { - 'event_type': u'sample.create', - 'timestamp': u'2015-06-1909: 19: 35.786893', - 'payload': [{u'counter_name': u'instance100'}], - 'priority': 'info', - 'publisher_id': u'ceilometer.api', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' - } - s = sample.Sample.from_notification( - 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) - expected = {'event_type': msg['event_type'], - 'host': msg['publisher_id']} - self.assertEqual(expected, s.resource_metadata) - - def test_sample_from_notifications_dict(self): - msg = { - 'event_type': u'sample.create', - 'timestamp': u'2015-06-1909: 19: 35.786893', - 'payload': {u'counter_name': u'instance100'}, - 'priority': 'info', - 'publisher_id': u'ceilometer.api', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' - } - s = sample.Sample.from_notification( - 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) - msg['payload']['event_type'] = msg['event_type'] - msg['payload']['host'] = msg['publisher_id'] - self.assertEqual(msg['payload'], s.resource_metadata) diff --git a/ceilometer/tests/unit/test_utils.py b/ceilometer/tests/unit/test_utils.py index 429313e8..769b8bac 100644 --- a/ceilometer/tests/unit/test_utils.py +++ b/ceilometer/tests/unit/test_utils.py @@ -84,41 +84,6 @@ class TestUtils(base.BaseTestCase): else: self.assertIn((k, v), expected) - def 
test_restore_nesting_unested(self): - metadata = {'a': 'A', 'b': 'B'} - unwound = utils.restore_nesting(metadata) - self.assertIs(metadata, unwound) - - def test_restore_nesting(self): - metadata = {'a': 'A', 'b': 'B', - 'nested:a': 'A', - 'nested:b': 'B', - 'nested:twice:c': 'C', - 'nested:twice:d': 'D', - 'embedded:e': 'E'} - unwound = utils.restore_nesting(metadata) - expected = {'a': 'A', 'b': 'B', - 'nested': {'a': 'A', 'b': 'B', - 'twice': {'c': 'C', 'd': 'D'}}, - 'embedded': {'e': 'E'}} - self.assertEqual(expected, unwound) - self.assertIsNot(metadata, unwound) - - def test_restore_nesting_with_separator(self): - metadata = {'a': 'A', 'b': 'B', - 'nested.a': 'A', - 'nested.b': 'B', - 'nested.twice.c': 'C', - 'nested.twice.d': 'D', - 'embedded.e': 'E'} - unwound = utils.restore_nesting(metadata, separator='.') - expected = {'a': 'A', 'b': 'B', - 'nested': {'a': 'A', 'b': 'B', - 'twice': {'c': 'C', 'd': 'D'}}, - 'embedded': {'e': 'E'}} - self.assertEqual(expected, unwound) - self.assertIsNot(metadata, unwound) - def test_decimal_to_dt_with_none_parameter(self): self.assertIsNone(utils.decimal_to_dt(None)) @@ -138,43 +103,3 @@ class TestUtils(base.BaseTestCase): ('nested2[0].c', 'A'), ('nested2[1].c', 'B')], sorted(pairs, key=lambda x: x[0])) - - def test_hash_of_set(self): - x = ['a', 'b'] - y = ['a', 'b', 'a'] - z = ['a', 'c'] - self.assertEqual(utils.hash_of_set(x), utils.hash_of_set(y)) - self.assertNotEqual(utils.hash_of_set(x), utils.hash_of_set(z)) - self.assertNotEqual(utils.hash_of_set(y), utils.hash_of_set(z)) - - def test_hash_ring(self): - num_nodes = 10 - num_keys = 1000 - - nodes = [str(x) for x in range(num_nodes)] - hr = utils.HashRing(nodes) - - buckets = [0] * num_nodes - assignments = [-1] * num_keys - for k in range(num_keys): - n = int(hr.get_node(str(k))) - self.assertTrue(0 <= n <= num_nodes) - buckets[n] += 1 - assignments[k] = n - - # at least something in each bucket - self.assertTrue(all((c > 0 for c in buckets))) - - # approximately even distribution - diff = max(buckets) - min(buckets) - self.assertTrue(diff < 0.3 * (num_keys / num_nodes)) - - # consistency - num_nodes += 1 - nodes.append(str(num_nodes + 1)) - hr = utils.HashRing(nodes) - for k in range(num_keys): - n = int(hr.get_node(str(k))) - assignments[k] -= n - reassigned = len([c for c in assignments if c != 0]) - self.assertTrue(reassigned < num_keys / num_nodes) diff --git a/ceilometer/tests/unit/transformer/__init__.py b/ceilometer/tests/unit/transformer/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/transformer/test_conversions.py b/ceilometer/tests/unit/transformer/test_conversions.py deleted file mode 100644 index 702f0f67..00000000 --- a/ceilometer/tests/unit/transformer/test_conversions.py +++ /dev/null @@ -1,114 +0,0 @@ -# -# Copyright 2016 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
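[editor note] The removed test_hash_ring above asserts two properties of the old utils.HashRing: keys spread roughly evenly across nodes, and adding a node only moves a small fraction of them. A condensed sketch of that consistent-hashing scheme, following the same md5-plus-bisect approach as the helper deleted from ceilometer/utils.py further down:

# Each node is hashed onto the ring at `replicas` points; a key is served by
# the first node clockwise from the key's own hash.
import bisect
import hashlib
import struct


class HashRing(object):
    def __init__(self, nodes, replicas=100):
        self._ring = {}
        self._sorted_keys = []
        for node in nodes:
            for r in range(replicas):
                hashed = self._hash('%s-%s' % (node, r))
                self._ring[hashed] = node
                self._sorted_keys.append(hashed)
        self._sorted_keys.sort()

    @staticmethod
    def _hash(key):
        # First 4 bytes of the md5 digest as an unsigned big-endian integer.
        return struct.unpack_from(
            '>I', hashlib.md5(str(key).encode()).digest())[0]

    def get_node(self, key):
        if not self._ring:
            return None
        pos = bisect.bisect(self._sorted_keys, self._hash(key))
        if pos == len(self._sorted_keys):
            pos = 0  # wrap around the ring
        return self._ring[self._sorted_keys[pos]]

With 100 replica points per node, adding a node reassigns on the order of 1/len(nodes) of the keys, which is exactly what the deleted test checked.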
-import copy - -from oslo_utils import timeutils -from oslotest import base - -from ceilometer import sample -from ceilometer.transformer import conversions - - -class AggregatorTransformerTestCase(base.BaseTestCase): - SAMPLE = sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - unit='ns', - volume='1234567', - user_id='56c5692032f34041900342503fecab30', - project_id='ac9494df2d9d4e709bac378cceabaf23', - resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', - timestamp="2015-10-29 14:12:15.485877+00:00", - resource_metadata={} - ) - - def setUp(self): - super(AggregatorTransformerTestCase, self).setUp() - self._sample_offset = 0 - - def test_init_input_validation(self): - aggregator = conversions.AggregatorTransformer("2", "15", None, - None, None) - self.assertEqual(2, aggregator.size) - self.assertEqual(15, aggregator.retention_time) - - def test_init_no_size_or_rention_time(self): - aggregator = conversions.AggregatorTransformer() - self.assertEqual(1, aggregator.size) - self.assertIsNone(aggregator.retention_time) - - def test_init_size_zero(self): - aggregator = conversions.AggregatorTransformer(size="0") - self.assertEqual(1, aggregator.size) - self.assertIsNone(aggregator.retention_time) - - def test_init_input_validation_size_invalid(self): - self.assertRaises(ValueError, conversions.AggregatorTransformer, - "abc", "15", None, None, None) - - def test_init_input_validation_retention_time_invalid(self): - self.assertRaises(ValueError, conversions.AggregatorTransformer, - "2", "abc", None, None, None) - - def test_init_no_timestamp(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None) - self.assertEqual("first", aggregator.timestamp) - - def test_init_timestamp_none(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None, None) - self.assertEqual("first", aggregator.timestamp) - - def test_init_timestamp_first(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None, "first") - self.assertEqual("first", aggregator.timestamp) - - def test_init_timestamp_last(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None, "last") - self.assertEqual("last", aggregator.timestamp) - - def test_init_timestamp_invalid(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None, - "invalid_option") - self.assertEqual("first", aggregator.timestamp) - - def test_size_unbounded(self): - aggregator = conversions.AggregatorTransformer(size="0", - retention_time="300") - self._insert_sample_data(aggregator) - - samples = aggregator.flush() - - self.assertEqual([], samples) - - def test_size_bounded(self): - aggregator = conversions.AggregatorTransformer(size="100") - self._insert_sample_data(aggregator) - - samples = aggregator.flush() - - self.assertEqual(100, len(samples)) - - def _insert_sample_data(self, aggregator): - for _ in range(100): - sample = copy.copy(self.SAMPLE) - sample.resource_id = sample.resource_id + str(self._sample_offset) - sample.timestamp = timeutils.isotime() - aggregator.handle_sample(sample) - self._sample_offset += 1 diff --git a/ceilometer/transformer/__init__.py b/ceilometer/transformer/__init__.py deleted file mode 100644 index 48d78b4d..00000000 --- a/ceilometer/transformer/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
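[editor note] The aggregator test case above mostly pins down when buffered samples are flushed: after `size` samples, or once `retention_time` seconds have passed since the first one, with size "0" meaning no size bound. A minimal sketch of just that flush rule; the class and method names are illustrative, wall-clock time stands in for the sample timestamps the real code parses, and the per-key merging the real transformer does is omitted:

import time


class SampleBuffer(object):
    def __init__(self, size=1, retention_time=None):
        self.size = int(size) if size else None       # "0" disables the size bound
        self.retention_time = (float(retention_time)
                               if retention_time else None)
        if not (self.size or self.retention_time):
            self.size = 1                             # same fallback the tests expect
        self._samples = []
        self._first_seen = None

    def handle(self, sample):
        if self._first_seen is None:
            self._first_seen = time.time()
        self._samples.append(sample)

    def flush(self):
        full = self.size and len(self._samples) >= self.size
        expired = (self.retention_time and self._first_seen is not None
                   and time.time() - self._first_seen >= self.retention_time)
        if full or expired:
            out, self._samples, self._first_seen = self._samples, [], None
            return out
        return []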
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import collections - -import six - - -@six.add_metaclass(abc.ABCMeta) -class TransformerBase(object): - """Base class for plugins that transform the sample.""" - - def __init__(self, **kwargs): - """Setup transformer. - - Each time a transformed is involved in a pipeline, a new transformer - instance is created and chained into the pipeline. i.e. transformer - instance is per pipeline. This helps if transformer need keep some - cache and per-pipeline information. - - :param kwargs: The parameters that are defined in pipeline config file. - """ - super(TransformerBase, self).__init__() - - @abc.abstractmethod - def handle_sample(self, sample): - """Transform a sample. - - :param sample: A sample. - """ - - @abc.abstractproperty - def grouping_keys(self): - """Keys used to group transformer.""" - - @staticmethod - def flush(): - """Flush samples cached previously.""" - return [] - - -class Namespace(object): - """Encapsulates the namespace. - - Encapsulation is done by wrapping the evaluation of the configured rule. - This allows nested dicts to be accessed in the attribute style, - and missing attributes to yield false when used in a boolean expression. - """ - def __init__(self, seed): - self.__dict__ = collections.defaultdict(lambda: Namespace({})) - self.__dict__.update(seed) - for k, v in six.iteritems(self.__dict__): - if isinstance(v, dict): - self.__dict__[k] = Namespace(v) - - def __getattr__(self, attr): - return self.__dict__[attr] - - def __getitem__(self, key): - return self.__dict__[key] - - def __nonzero__(self): - return len(self.__dict__) > 0 - __bool__ = __nonzero__ diff --git a/ceilometer/transformer/accumulator.py b/ceilometer/transformer/accumulator.py deleted file mode 100644 index 1e14497c..00000000 --- a/ceilometer/transformer/accumulator.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright 2013 Julien Danjou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer import transformer - - -class TransformerAccumulator(transformer.TransformerBase): - """Transformer that accumulates samples until a threshold. - - And then flushes them out into the wild. 
- """ - - grouping_keys = ['resource_id'] - - def __init__(self, size=1, **kwargs): - if size >= 1: - self.samples = [] - self.size = size - super(TransformerAccumulator, self).__init__(**kwargs) - - def handle_sample(self, sample): - if self.size >= 1: - self.samples.append(sample) - else: - return sample - - def flush(self): - if len(self.samples) >= self.size: - x = self.samples - self.samples = [] - return x - return [] diff --git a/ceilometer/transformer/arithmetic.py b/ceilometer/transformer/arithmetic.py deleted file mode 100644 index d0bbccb7..00000000 --- a/ceilometer/transformer/arithmetic.py +++ /dev/null @@ -1,156 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import keyword -import math -import re - -from oslo_log import log -import six - -from ceilometer.i18n import _ -from ceilometer import sample -from ceilometer import transformer - -LOG = log.getLogger(__name__) - - -class ArithmeticTransformer(transformer.TransformerBase): - """Multi meter arithmetic transformer. - - Transformer that performs arithmetic operations - over one or more meters and/or their metadata. - """ - - grouping_keys = ['resource_id'] - - meter_name_re = re.compile(r'\$\(([\w\.\-]+)\)') - - def __init__(self, target=None, **kwargs): - super(ArithmeticTransformer, self).__init__(**kwargs) - target = target or {} - self.target = target - self.expr = target.get('expr', '') - self.expr_escaped, self.escaped_names = self.parse_expr(self.expr) - self.required_meters = list(self.escaped_names.values()) - self.misconfigured = len(self.required_meters) == 0 - if not self.misconfigured: - self.reference_meter = self.required_meters[0] - # convert to set for more efficient contains operation - self.required_meters = set(self.required_meters) - self.cache = collections.defaultdict(dict) - self.latest_timestamp = None - else: - LOG.warning(_('Arithmetic transformer must use at least one' - ' meter in expression \'%s\''), self.expr) - - def _update_cache(self, _sample): - """Update the cache with the latest sample.""" - escaped_name = self.escaped_names.get(_sample.name, '') - if escaped_name not in self.required_meters: - return - self.cache[_sample.resource_id][escaped_name] = _sample - - def _check_requirements(self, resource_id): - """Check if all the required meters are available in the cache.""" - return len(self.cache[resource_id]) == len(self.required_meters) - - def _calculate(self, resource_id): - """Evaluate the expression and return a new sample if successful.""" - ns_dict = dict((m, s.as_dict()) for m, s - in six.iteritems(self.cache[resource_id])) - ns = transformer.Namespace(ns_dict) - try: - new_volume = eval(self.expr_escaped, {}, ns) - if math.isnan(new_volume): - raise ArithmeticError(_('Expression evaluated to ' - 'a NaN value!')) - - reference_sample = self.cache[resource_id][self.reference_meter] - return sample.Sample( - name=self.target.get('name', reference_sample.name), - unit=self.target.get('unit', reference_sample.unit), - 
type=self.target.get('type', reference_sample.type), - volume=float(new_volume), - user_id=reference_sample.user_id, - project_id=reference_sample.project_id, - resource_id=reference_sample.resource_id, - timestamp=self.latest_timestamp, - resource_metadata=reference_sample.resource_metadata - ) - except Exception as e: - LOG.warning(_('Unable to evaluate expression %(expr)s: %(exc)s'), - {'expr': self.expr, 'exc': e}) - - def handle_sample(self, _sample): - self._update_cache(_sample) - self.latest_timestamp = _sample.timestamp - - def flush(self): - new_samples = [] - cache_clean_list = [] - if not self.misconfigured: - for resource_id in self.cache: - if self._check_requirements(resource_id): - new_samples.append(self._calculate(resource_id)) - cache_clean_list.append(resource_id) - for res_id in cache_clean_list: - self.cache.pop(res_id) - return new_samples - - @classmethod - def parse_expr(cls, expr): - """Transforms meter names in the expression into valid identifiers. - - :param expr: unescaped expression - :return: A tuple of the escaped expression and a dict representing - the translation of meter names into Python identifiers - """ - - class Replacer(object): - """Replaces matched meter names with escaped names. - - If the meter name is not followed by parameter access in the - expression, it defaults to accessing the 'volume' parameter. - """ - - def __init__(self, original_expr): - self.original_expr = original_expr - self.escaped_map = {} - - def __call__(self, match): - meter_name = match.group(1) - escaped_name = self.escape(meter_name) - self.escaped_map[meter_name] = escaped_name - - if (match.end(0) == len(self.original_expr) or - self.original_expr[match.end(0)] != '.'): - escaped_name += '.volume' - return escaped_name - - @staticmethod - def escape(name): - has_dot = '.' in name - if has_dot: - name = name.replace('.', '_') - - if has_dot or name.endswith('ESC') or name in keyword.kwlist: - name = "_" + name + '_ESC' - return name - - replacer = Replacer(expr) - expr = re.sub(cls.meter_name_re, replacer, expr) - return expr, replacer.escaped_map diff --git a/ceilometer/transformer/conversions.py b/ceilometer/transformer/conversions.py deleted file mode 100644 index f4ea252e..00000000 --- a/ceilometer/transformer/conversions.py +++ /dev/null @@ -1,340 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import re - -from oslo_log import log -from oslo_utils import timeutils -import six - -from ceilometer.i18n import _, _LW -from ceilometer import sample -from ceilometer import transformer - -LOG = log.getLogger(__name__) - - -class BaseConversionTransformer(transformer.TransformerBase): - """Transformer to derive conversion.""" - - grouping_keys = ['resource_id'] - - def __init__(self, source=None, target=None, **kwargs): - """Initialize transformer with configured parameters. 
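[editor note] The parse_expr/Replacer machinery above turns `$(meter.name)` references into valid Python identifiers and defaults a bare reference to the sample's volume before the expression is eval'd. A simplified, standalone sketch of that escaping -- it evaluates against plain dicts instead of the transformer's Namespace wrapper, so subscripting stands in for attribute access, and the keyword/ESC corner cases are left out:

import re

METER_RE = re.compile(r'\$\(([\w\.\-]+)\)')


def escape(name):
    # Dots are not valid in identifiers; mark escaped names with a suffix.
    if '.' in name:
        return '_%s_ESC' % name.replace('.', '_')
    return name


def parse_expr(expr):
    mapping = {}

    def repl(match):
        meter = match.group(1)
        escaped = escape(meter)
        mapping[meter] = escaped
        # A bare $(meter) reference means the sample's volume.
        if match.end(0) == len(expr) or expr[match.end(0)] != '.':
            return escaped + "['volume']"
        return escaped

    return METER_RE.sub(repl, expr), mapping


escaped, names = parse_expr('100 * $(memory.usage) / $(memory)')
# escaped -> "100 * _memory_usage_ESC['volume'] / memory['volume']"
samples = {'_memory_usage_ESC': {'volume': 512.0}, 'memory': {'volume': 1024.0}}
print(eval(escaped, {}, samples))   # 50.0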
- - :param source: dict containing source sample unit - :param target: dict containing target sample name, type, - unit and scaling factor (a missing value - connotes no change) - """ - source = source or {} - target = target or {} - self.source = source - self.target = target - super(BaseConversionTransformer, self).__init__(**kwargs) - - def _map(self, s, attr): - """Apply the name or unit mapping if configured.""" - mapped = None - from_ = self.source.get('map_from') - to_ = self.target.get('map_to') - if from_ and to_: - if from_.get(attr) and to_.get(attr): - try: - mapped = re.sub(from_[attr], to_[attr], getattr(s, attr)) - except Exception: - pass - return mapped or self.target.get(attr, getattr(s, attr)) - - -class DeltaTransformer(BaseConversionTransformer): - """Transformer based on the delta of a sample volume.""" - - def __init__(self, target=None, growth_only=False, **kwargs): - """Initialize transformer with configured parameters. - - :param growth_only: capture only positive deltas - """ - super(DeltaTransformer, self).__init__(target=target, **kwargs) - self.growth_only = growth_only - self.cache = {} - - def handle_sample(self, s): - """Handle a sample, converting if necessary.""" - key = s.name + s.resource_id - prev = self.cache.get(key) - timestamp = timeutils.parse_isotime(s.timestamp) - self.cache[key] = (s.volume, timestamp) - - if prev: - prev_volume = prev[0] - prev_timestamp = prev[1] - time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) - # disallow violations of the arrow of time - if time_delta < 0: - LOG.warning(_LW('Dropping out of time order sample: %s'), (s,)) - # Reset the cache to the newer sample. - self.cache[key] = prev - return None - volume_delta = s.volume - prev_volume - if self.growth_only and volume_delta < 0: - LOG.warning(_LW('Negative delta detected, dropping value')) - s = None - else: - s = self._convert(s, volume_delta) - LOG.debug('Converted to: %s', s) - else: - LOG.warning(_LW('Dropping sample with no predecessor: %s'), (s,)) - s = None - return s - - def _convert(self, s, delta): - """Transform the appropriate sample fields.""" - return sample.Sample( - name=self._map(s, 'name'), - unit=s.unit, - type=sample.TYPE_DELTA, - volume=delta, - user_id=s.user_id, - project_id=s.project_id, - resource_id=s.resource_id, - timestamp=s.timestamp, - resource_metadata=s.resource_metadata - ) - - -class ScalingTransformer(BaseConversionTransformer): - """Transformer to apply a scaling conversion.""" - - def __init__(self, source=None, target=None, **kwargs): - """Initialize transformer with configured parameters. - - :param source: dict containing source sample unit - :param target: dict containing target sample name, type, - unit and scaling factor (a missing value - connotes no change) - """ - super(ScalingTransformer, self).__init__(source=source, target=target, - **kwargs) - self.scale = self.target.get('scale') - LOG.debug('scaling conversion transformer with source:' - ' %(source)s target: %(target)s:', {'source': self.source, - 'target': self.target}) - - def _scale(self, s): - """Apply the scaling factor. - - Either a straight multiplicative factor or else a string to be eval'd. 
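[editor note] The delta transformer above keeps the last (volume, timestamp) seen per meter/resource, refuses samples that arrive out of time order, and can drop negative deltas when growth_only is set. A small standalone sketch of that bookkeeping, using plain arguments rather than Sample objects:

import datetime

_last = {}


def delta(name, resource_id, volume, timestamp, growth_only=False):
    key = name + resource_id
    prev = _last.get(key)
    _last[key] = (volume, timestamp)
    if prev is None:
        return None                     # no predecessor yet
    prev_volume, prev_timestamp = prev
    if timestamp < prev_timestamp:
        _last[key] = prev               # keep the newer reading, drop this one
        return None
    change = volume - prev_volume
    if growth_only and change < 0:
        return None
    return change


t0 = datetime.datetime(2016, 1, 1, 12, 0, 0)
t1 = t0 + datetime.timedelta(minutes=10)
delta('disk.usage', 'res-1', 100.0, t0)          # None: first sample
print(delta('disk.usage', 'res-1', 160.0, t1))   # 60.0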
- """ - ns = transformer.Namespace(s.as_dict()) - - scale = self.scale - return ((eval(scale, {}, ns) if isinstance(scale, six.string_types) - else s.volume * scale) if scale else s.volume) - - def _convert(self, s, growth=1): - """Transform the appropriate sample fields.""" - return sample.Sample( - name=self._map(s, 'name'), - unit=self._map(s, 'unit'), - type=self.target.get('type', s.type), - volume=self._scale(s) * growth, - user_id=s.user_id, - project_id=s.project_id, - resource_id=s.resource_id, - timestamp=s.timestamp, - resource_metadata=s.resource_metadata - ) - - def handle_sample(self, s): - """Handle a sample, converting if necessary.""" - LOG.debug('handling sample %s', s) - if self.source.get('unit', s.unit) == s.unit: - s = self._convert(s) - LOG.debug('converted to: %s', s) - return s - - -class RateOfChangeTransformer(ScalingTransformer): - """Transformer based on the rate of change of a sample volume. - - For example, taking the current and previous volumes of a cumulative sample - and producing a gauge value based on the proportion of some maximum used. - """ - - def __init__(self, **kwargs): - """Initialize transformer with configured parameters.""" - super(RateOfChangeTransformer, self).__init__(**kwargs) - self.cache = {} - self.scale = self.scale or '1' - - def handle_sample(self, s): - """Handle a sample, converting if necessary.""" - LOG.debug('handling sample %s', s) - key = s.name + s.resource_id - prev = self.cache.get(key) - timestamp = timeutils.parse_isotime(s.timestamp) - self.cache[key] = (s.volume, timestamp) - - if prev: - prev_volume = prev[0] - prev_timestamp = prev[1] - time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) - # disallow violations of the arrow of time - if time_delta < 0: - LOG.warning(_('dropping out of time order sample: %s'), (s,)) - # Reset the cache to the newer sample. - self.cache[key] = prev - return None - # we only allow negative volume deltas for noncumulative - # samples, whereas for cumulative we assume that a reset has - # occurred in the interim so that the current volume gives a - # lower bound on growth - volume_delta = (s.volume - prev_volume - if (prev_volume <= s.volume or - s.type != sample.TYPE_CUMULATIVE) - else s.volume) - rate_of_change = ((1.0 * volume_delta / time_delta) - if time_delta else 0.0) - - s = self._convert(s, rate_of_change) - LOG.debug('converted to: %s', s) - else: - LOG.warning(_('dropping sample with no predecessor: %s'), - (s,)) - s = None - return s - - -class AggregatorTransformer(ScalingTransformer): - """Transformer that aggregates samples. - - Aggregation goes until a threshold or/and a retention_time, and then - flushes them out into the wild. - - Example: - To aggregate sample by resource_metadata and keep the - resource_metadata of the latest received sample; - - AggregatorTransformer(retention_time=60, resource_metadata='last') - - To aggregate sample by user_id and resource_metadata and keep the - user_id of the first received sample and drop the resource_metadata. 
- - AggregatorTransformer(size=15, user_id='first', - resource_metadata='drop') - - To keep the timestamp of the last received sample rather - than the first: - - AggregatorTransformer(timestamp="last") - - """ - - def __init__(self, size=1, retention_time=None, - project_id=None, user_id=None, resource_metadata="last", - timestamp="first", **kwargs): - super(AggregatorTransformer, self).__init__(**kwargs) - self.samples = {} - self.counts = collections.defaultdict(int) - self.size = int(size) if size else None - self.retention_time = float(retention_time) if retention_time else None - if not (self.size or self.retention_time): - self.size = 1 - - if timestamp in ["first", "last"]: - self.timestamp = timestamp - else: - self.timestamp = "first" - - self.initial_timestamp = None - self.aggregated_samples = 0 - - self.key_attributes = [] - self.merged_attribute_policy = {} - - self._init_attribute('project_id', project_id) - self._init_attribute('user_id', user_id) - self._init_attribute('resource_metadata', resource_metadata, - is_droppable=True, mandatory=True) - - def _init_attribute(self, name, value, is_droppable=False, - mandatory=False): - drop = ['drop'] if is_droppable else [] - if value or mandatory: - if value not in ['last', 'first'] + drop: - LOG.warning('%s is unknown (%s), using last' % (name, value)) - value = 'last' - self.merged_attribute_policy[name] = value - else: - self.key_attributes.append(name) - - def _get_unique_key(self, s): - # NOTE(arezmerita): in samples generated by ceilometer middleware, - # when accessing without authentication publicly readable/writable - # swift containers, the project_id and the user_id are missing. - # They will be replaced by for unique key construction. - keys = ['' if getattr(s, f) is None else getattr(s, f) - for f in self.key_attributes] - non_aggregated_keys = "-".join(keys) - # NOTE(sileht): it assumes, a meter always have the same unit/type - return "%s-%s-%s" % (s.name, s.resource_id, non_aggregated_keys) - - def handle_sample(self, sample_): - if not self.initial_timestamp: - self.initial_timestamp = timeutils.parse_isotime(sample_.timestamp) - - self.aggregated_samples += 1 - key = self._get_unique_key(sample_) - self.counts[key] += 1 - if key not in self.samples: - self.samples[key] = self._convert(sample_) - if self.merged_attribute_policy[ - 'resource_metadata'] == 'drop': - self.samples[key].resource_metadata = {} - else: - if self.timestamp == "last": - self.samples[key].timestamp = sample_.timestamp - if sample_.type == sample.TYPE_CUMULATIVE: - self.samples[key].volume = self._scale(sample_) - else: - self.samples[key].volume += self._scale(sample_) - for field in self.merged_attribute_policy: - if self.merged_attribute_policy[field] == 'last': - setattr(self.samples[key], field, - getattr(sample_, field)) - - def flush(self): - if not self.initial_timestamp: - return [] - - expired = (self.retention_time and - timeutils.is_older_than(self.initial_timestamp, - self.retention_time)) - full = self.size and self.aggregated_samples >= self.size - if full or expired: - x = list(self.samples.values()) - # gauge aggregates need to be averages - for s in x: - if s.type == sample.TYPE_GAUGE: - key = self._get_unique_key(s) - s.volume /= self.counts[key] - self.samples.clear() - self.counts.clear() - self.aggregated_samples = 0 - self.initial_timestamp = None - return x - return [] diff --git a/ceilometer/utils.py b/ceilometer/utils.py index a4495da7..244490fc 100644 --- a/ceilometer/utils.py +++ b/ceilometer/utils.py @@ -18,45 
+18,16 @@ """Utilities and helper functions.""" -import bisect import calendar import copy import datetime import decimal -import hashlib -import struct -import threading -from oslo_concurrency import processutils -from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import units import six -OPTS = [ - cfg.StrOpt('rootwrap_config', - default="/etc/ceilometer/rootwrap.conf", - help='Path to the rootwrap configuration file to' - 'use for running commands as root'), -] -CONF = cfg.CONF -CONF.register_opts(OPTS) - -EPOCH_TIME = datetime.datetime(1970, 1, 1) - - -def _get_root_helper(): - return 'sudo ceilometer-rootwrap %s' % CONF.rootwrap_config - - -def execute(*cmd, **kwargs): - """Convenience wrapper around oslo's execute() method.""" - if 'run_as_root' in kwargs and 'root_helper' not in kwargs: - kwargs['root_helper'] = _get_root_helper() - return processutils.execute(*cmd, **kwargs) - - def decode_unicode(input): """Decode the unicode of the message, and encode it into utf-8.""" if isinstance(input, dict): @@ -93,19 +64,6 @@ def recursive_keypairs(d, separator=':'): yield name, value -def restore_nesting(d, separator=':'): - """Unwinds a flattened dict to restore nesting.""" - d = copy.copy(d) if any([separator in k for k in d.keys()]) else d - for k, v in d.copy().items(): - if separator in k: - top, rem = k.split(separator, 1) - nest = d[top] if isinstance(d.get(top), dict) else {} - nest[rem] = v - d[top] = restore_nesting(nest, separator) - del d[k] - return d - - def dt_to_decimal(utc): """Datetime to Decimal. @@ -141,13 +99,6 @@ def sanitize_timestamp(timestamp): return timeutils.normalize_time(timestamp) -def stringify_timestamps(data): - """Stringify any datetime in given dict.""" - isa_timestamp = lambda v: isinstance(v, datetime.datetime) - return dict((k, v.isoformat() if isa_timestamp(v) else v) - for (k, v) in six.iteritems(data)) - - def dict_to_keyval(value, key_base=None): """Expand a given dict to its corresponding key-value pairs. @@ -172,21 +123,6 @@ def dict_to_keyval(value, key_base=None): yield key_gen, v -def lowercase_keys(mapping): - """Converts the values of the keys in mapping to lowercase.""" - items = mapping.items() - for key, value in items: - del mapping[key] - mapping[key.lower()] = value - - -def lowercase_values(mapping): - """Converts the values in the mapping dict to lowercase.""" - items = mapping.items() - for key, value in items: - mapping[key] = value.lower() - - def update_nested(original_dict, updates): """Updates the leaf nodes in a nest dict. 
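[editor note] For reference, the behaviour that restore_nesting (removed above) provided: flattened keys containing the separator are split and rebuilt into nested dicts, and a dict with nothing to unwind is handed back unchanged. A condensed version of the removed helper:

import copy


def restore_nesting(d, separator=':'):
    if not any(separator in k for k in d):
        return d
    d = copy.copy(d)
    for k, v in list(d.items()):
        if separator in k:
            top, rest = k.split(separator, 1)
            nest = d[top] if isinstance(d.get(top), dict) else {}
            nest[rest] = v
            d[top] = restore_nesting(nest, separator)
            del d[k]
    return d


print(restore_nesting({'a': 'A', 'nested:twice:c': 'C'}))
# {'a': 'A', 'nested': {'twice': {'c': 'C'}}}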
@@ -200,65 +136,3 @@ def update_nested(original_dict, updates): else: dict_to_update[key] = updates[key] return dict_to_update - - -def uniq(dupes, attrs): - """Exclude elements of dupes with a duplicated set of attribute values.""" - key = lambda d: '/'.join([getattr(d, a) or '' for a in attrs]) - keys = [] - deduped = [] - for d in dupes: - if key(d) not in keys: - deduped.append(d) - keys.append(key(d)) - return deduped - - -def hash_of_set(s): - return str(hash(frozenset(s))) - - -class HashRing(object): - - def __init__(self, nodes, replicas=100): - self._ring = dict() - self._sorted_keys = [] - - for node in nodes: - for r in six.moves.range(replicas): - hashed_key = self._hash('%s-%s' % (node, r)) - self._ring[hashed_key] = node - self._sorted_keys.append(hashed_key) - self._sorted_keys.sort() - - @staticmethod - def _hash(key): - return struct.unpack_from('>I', - hashlib.md5(str(key).encode()).digest())[0] - - def _get_position_on_ring(self, key): - hashed_key = self._hash(key) - position = bisect.bisect(self._sorted_keys, hashed_key) - return position if position < len(self._sorted_keys) else 0 - - def get_node(self, key): - if not self._ring: - return None - pos = self._get_position_on_ring(key) - return self._ring[self._sorted_keys[pos]] - - -def kill_listeners(listeners): - # NOTE(gordc): correct usage of oslo.messaging listener is to stop(), - # which stops new messages, and wait(), which processes remaining - # messages and closes connection - for listener in listeners: - listener.stop() - listener.wait() - - -def spawn_thread(target, *args, **kwargs): - t = threading.Thread(target=target, args=args, kwargs=kwargs) - t.daemon = True - t.start() - return t diff --git a/devstack/README.rst b/devstack/README.rst index 0c99a7e9..2499aa67 100644 --- a/devstack/README.rst +++ b/devstack/README.rst @@ -12,11 +12,6 @@ Enabling Ceilometer in DevStack [[local|localrc]] enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer - To use stable branches, make sure devstack is on that branch, and specify - the branch name to enable_plugin, for example:: - - enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer stable/mitaka - There are some options, such as CEILOMETER_BACKEND, defined in ``ceilometer/devstack/settings``, they can be used to configure the installation of Ceilometer. If you don't want to use their default value, diff --git a/devstack/files/rpms/ceilometer b/devstack/files/rpms/ceilometer deleted file mode 100644 index 9c87c401..00000000 --- a/devstack/files/rpms/ceilometer +++ /dev/null @@ -1 +0,0 @@ -selinux-policy-targeted diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 7e508b24..d5f4ff1e 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -6,38 +6,10 @@ # [[local|localrc]] # enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer # -# By default all ceilometer services are started (see devstack/settings) -# except for the ceilometer-aipmi service. To disable a specific service -# use the disable_service function. -# -# NOTE: Currently, there are two ways to get the IPMI based meters in -# OpenStack. One way is to configure Ironic conductor to report those meters -# for the nodes managed by Ironic and to have Ceilometer notification -# agent to collect them. Ironic by default does NOT enable that reporting -# functionality. 
So in order to do so, users need to set the option of -# conductor.send_sensor_data to true in the ironic.conf configuration file -# for the Ironic conductor service, and also enable the -# ceilometer-anotification service. -# -# The other way is to use Ceilometer ipmi agent only to get the IPMI based -# meters. To make use of the Ceilometer ipmi agent, it must be explicitly -# enabled with the following setting: -# -# enable_service ceilometer-aipmi -# -# To avoid duplicated meters, users need to make sure to set the -# option of conductor.send_sensor_data to false in the ironic.conf -# configuration file if the node on which Ceilometer ipmi agent is running -# is also managed by Ironic. -# # Several variables set in the localrc section adjust common behaviors # of Ceilometer (see within for additional settings): # -# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600. # CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es') -# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz. -# CEILOMETER_EVENTS: Set to True to enable event collection -# CEILOMETER_EVENT_ALARM: Set to True to enable publisher for event alarming # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -51,13 +23,6 @@ else CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi -# Test if any Ceilometer services are enabled -# is_ceilometer_enabled -function is_ceilometer_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 - return 1 -} - function ceilometer_service_url { echo "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" } @@ -85,20 +50,6 @@ function _ceilometer_install_mongodb { sleep 5 } -# _ceilometer_install_redis() - Install the redis server and python lib. 
-function _ceilometer_install_redis { - if is_ubuntu; then - install_package redis-server - restart_service redis-server - else - # This will fail (correctly) where a redis package is unavailable - install_package redis - restart_service redis - fi - - pip_install_gr redis -} - # Configure mod_wsgi function _ceilometer_config_apache_wsgi { sudo mkdir -p $CEILOMETER_WSGI_DIR @@ -124,15 +75,6 @@ function _ceilometer_config_apache_wsgi { " -i $ceilometer_apache_conf } -# Install required services for coordination -function _ceilometer_prepare_coordination { - if echo $CEILOMETER_COORDINATION_URL | grep -q '^memcached:'; then - install_package memcached - elif [[ "${CEILOMETER_COORDINATOR_URL%%:*}" == "redis" || "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then - _ceilometer_install_redis - fi -} - # Install required services for storage backends function _ceilometer_prepare_storage_backend { if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then @@ -147,21 +89,6 @@ function _ceilometer_prepare_storage_backend { } -# Install the python modules for inspecting nova virt instances -function _ceilometer_prepare_virt_drivers { - # Only install virt drivers if we're running nova compute - if is_service_enabled n-cpu ; then - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - pip_install_gr libvirt-python - fi - - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - pip_install_gr oslo.vmware - fi - fi -} - - # Create ceilometer related accounts in Keystone function _ceilometer_create_accounts { if is_service_enabled ceilometer-api; then @@ -174,11 +101,6 @@ function _ceilometer_create_accounts { "$(ceilometer_service_url)" \ "$(ceilometer_service_url)" \ "$(ceilometer_service_url)" - - if is_service_enabled swift; then - # Ceilometer needs ResellerAdmin role to access Swift account stats. - get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_PROJECT_NAME - fi fi } @@ -197,7 +119,7 @@ function _ceilometer_cleanup_apache_wsgi { } function _drop_database { - if is_service_enabled ceilometer-collector ceilometer-api ; then + if is_service_enabled ceilometer-api ; then if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then mongo ceilometer --eval "db.dropDatabase();" elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then @@ -215,52 +137,16 @@ function cleanup_ceilometer { sudo rmdir "$CEILOMETER_CONF_DIR" } -# Set configuraiton for cache backend. -# NOTE(cdent): This currently only works for redis. Still working -# out how to express the other backends. -function _ceilometer_configure_cache_backend { - iniset $CEILOMETER_CONF cache backend $CEILOMETER_CACHE_BACKEND - iniset $CEILOMETER_CONF cache backend_argument url:$CEILOMETER_CACHE_URL - iniadd_literal $CEILOMETER_CONF cache backend_argument distributed_lock:True - if [[ "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then - iniadd_literal $CEILOMETER_CONF cache backend_argument db:0 - iniadd_literal $CEILOMETER_CONF cache backend_argument redis_expiration_time:600 - fi -} - - # Set configuration for storage backend. function _ceilometer_configure_storage_backend { if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer) - iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then # es is only supported for events. we will use sql for metering. 
iniset $CEILOMETER_CONF database event_connection es://localhost:9200 - iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) ${TOP_DIR}/pkg/elasticsearch.sh start elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer - iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer - elif [ "$CEILOMETER_BACKEND" = 'gnocchi' ] ; then - gnocchi_url=$(gnocchi_service_url) - iniset $CEILOMETER_CONF DEFAULT meter_dispatchers gnocchi - # FIXME(sileht): We shouldn't load event_dispatchers if store_event is False - iniset $CEILOMETER_CONF DEFAULT event_dispatchers "" - iniset $CEILOMETER_CONF notification store_events False - # NOTE(gordc): set higher retry in case gnocchi is started after ceilometer on a slow machine - iniset $CEILOMETER_CONF storage max_retries 20 - # NOTE(gordc): set batching to better handle recording on a slow machine - iniset $CEILOMETER_CONF collector batch_size 50 - iniset $CEILOMETER_CONF collector batch_timeout 5 - iniset $CEILOMETER_CONF dispatcher_gnocchi url $gnocchi_url - iniset $CEILOMETER_CONF dispatcher_gnocchi archive_policy ${GNOCCHI_ARCHIVE_POLICY} - if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "True" - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_project "gnocchi_swift" - else - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "False" - fi else die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND" fi @@ -272,80 +158,29 @@ function configure_ceilometer { local conffile - iniset_rpc_backend ceilometer $CEILOMETER_CONF - - iniset $CEILOMETER_CONF oslo_messaging_notifications topics "$CEILOMETER_NOTIFICATION_TOPICS" iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - if [[ -n "$CEILOMETER_COORDINATION_URL" ]]; then - iniset $CEILOMETER_CONF coordination backend_url $CEILOMETER_COORDINATION_URL - iniset $CEILOMETER_CONF compute workload_partitioning True - iniset $CEILOMETER_CONF notification workload_partitioning True - iniset $CEILOMETER_CONF notification workers $API_WORKERS - fi - - if [[ -n "$CEILOMETER_CACHE_BACKEND" ]]; then - _ceilometer_configure_cache_backend - fi - # Install the policy file and declarative configuration files to # the conf dir. # NOTE(cdent): Do not make this a glob as it will conflict # with rootwrap installation done elsewhere and also clobber # ceilometer.conf settings that have already been made. # Anyway, explicit is better than implicit. - for conffile in policy.json api_paste.ini pipeline.yaml \ - event_definitions.yaml event_pipeline.yaml \ - gnocchi_resources.yaml; do + for conffile in policy.json api_paste.ini; do cp $CEILOMETER_DIR/etc/ceilometer/$conffile $CEILOMETER_CONF_DIR done - if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then - sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml - fi - if [ "$CEILOMETER_EVENT_ALARM" == "True" ]; then - if ! grep -q '^ *- notifier://?topic=alarm.all$' $CEILOMETER_CONF_DIR/event_pipeline.yaml; then - sed -i '/^ *publishers:$/,+1s|^\( *\)-.*$|\1- notifier://?topic=alarm.all\n&|' $CEILOMETER_CONF_DIR/event_pipeline.yaml - fi - fi - - # The compute and central agents need these credentials in order to - # call out to other services' public APIs. 
- iniset $CEILOMETER_CONF service_credentials auth_type password - iniset $CEILOMETER_CONF service_credentials user_domain_id default - iniset $CEILOMETER_CONF service_credentials project_domain_id default - iniset $CEILOMETER_CONF service_credentials project_name $SERVICE_PROJECT_NAME - iniset $CEILOMETER_CONF service_credentials username ceilometer - iniset $CEILOMETER_CONF service_credentials password $SERVICE_PASSWORD - iniset $CEILOMETER_CONF service_credentials region_name $REGION_NAME - iniset $CEILOMETER_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI - configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR - iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS - # Configure storage - if is_service_enabled ceilometer-collector ceilometer-api; then + if is_service_enabled ceilometer-api; then _ceilometer_configure_storage_backend - iniset $CEILOMETER_CONF collector workers $API_WORKERS - fi - - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere - iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" - iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" - iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" fi if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then iniset $CEILOMETER_CONF api pecan_debug "False" _ceilometer_config_apache_wsgi fi - - if is_service_enabled ceilometer-aipmi; then - # Configure rootwrap for the ipmi agent - configure_rootwrap ceilometer - fi } # init_ceilometer() - Initialize etc. @@ -356,7 +191,7 @@ function init_ceilometer { sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* - if is_service_enabled ceilometer-collector ceilometer-api && is_service_enabled mysql postgresql ; then + if is_service_enabled ceilometer-api && is_service_enabled mysql postgresql ; then if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then recreate_database ceilometer $CEILOMETER_BIN_DIR/ceilometer-dbsync @@ -365,45 +200,17 @@ function init_ceilometer { } # Install Ceilometer. -# The storage and coordination backends are installed here because the -# virtualenv context is active at this point and python drivers need to be -# installed. The context is not active during preinstall (when it would -# otherwise makes sense to do the backend services). 
function install_ceilometer { - if is_service_enabled ceilometer-acentral ceilometer-acompute ceilometer-anotification ; then - _ceilometer_prepare_coordination - fi - - if is_service_enabled ceilometer-collector ceilometer-api; then + if is_service_enabled ceilometer-api; then _ceilometer_prepare_storage_backend fi - if is_service_enabled ceilometer-acompute ; then - _ceilometer_prepare_virt_drivers - fi - - install_ceilometerclient setup_develop $CEILOMETER_DIR sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR } -# install_ceilometerclient() - Collect source and prepare -function install_ceilometerclient { - if use_library_from_git "python-ceilometerclient"; then - git_clone_by_name "python-ceilometerclient" - setup_dev_lib "python-ceilometerclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ceilometerclient"]}/tools/,/etc/bash_completion.d/}ceilometer.bash_completion - else - pip_install_gr python-ceilometerclient - fi -} - # start_ceilometer() - Start running processes, including screen function start_ceilometer { - run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF" - run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" - run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF" - if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --config-file $CEILOMETER_CONF" elif is_service_enabled ceilometer-api; then @@ -413,19 +220,6 @@ function start_ceilometer { tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log fi - # run the collector after restarting apache as it needs - # operational keystone if using gnocchi - run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" - - # Start the compute agent late to allow time for the collector to - # fully wake up and connect to the message bus. See bug #1355809 - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP - fi - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" - fi - # Only die on API if it was actually intended to be turned on if is_service_enabled ceilometer-api; then echo "Waiting for ceilometer-api to start..." 
@@ -445,11 +239,6 @@ function stop_ceilometer { stop_process ceilometer-api fi fi - - # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector; do - stop_process $serv - done } # This is the main for plugin.sh diff --git a/devstack/settings b/devstack/settings index 06c7c0ce..6cb91a58 100644 --- a/devstack/settings +++ b/devstack/settings @@ -1,11 +1,3 @@ -# turn on all the ceilometer services by default (except for ipmi pollster) -# Pollsters -enable_service ceilometer-acompute ceilometer-acentral -# Notification Agent -enable_service ceilometer-anotification -# Data Collector -enable_service ceilometer-collector -# API service enable_service ceilometer-api # Default directories @@ -18,38 +10,12 @@ CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer} # Set up database backend CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} -# Gnocchi default archive_policy for Ceilometer -GNOCCHI_ARCHIVE_POLICY=${GNOCCHI_ARCHIVE_POLICY:-low} - # Ceilometer connection info. CEILOMETER_SERVICE_PROTOCOL=http CEILOMETER_SERVICE_HOST=$SERVICE_HOST CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} CEILOMETER_USE_MOD_WSGI=${CEILOMETER_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} -# To enable OSprofiler change value of this variable to "notifications,profiler" -CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications} -CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True} - -CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-redis://localhost:6379} -CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-} - -# Cache Options -# NOTE(cdent): These are incomplete and specific for this testing. -CEILOMETER_CACHE_BACKEND=${CEILOMETER_CACHE_BACKEND:-dogpile.cache.redis} -CEILOMETER_CACHE_URL=${CEILOMETER_CACHE_URL:-redis://localhost:6379} - -CEILOMETER_EVENT_ALARM=${CEILOMETER_EVENT_ALARM:-False} - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,ceilometer - -# Set up default directories for client and middleware -GITREPO["python-ceilometerclient"]=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git} -GITBRANCH["python-ceilometerclient"]=${CEILOMETERCLIENT_BRANCH:-master} -GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient -GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware - # Get rid of this before done. 
# Tell emacs to use shell-script-mode ## Local variables: diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings index 6a2dad94..53c93ef6 100644 --- a/devstack/upgrade/settings +++ b/devstack/upgrade/settings @@ -1,7 +1,7 @@ register_project_for_upgrade ceilometer devstack_localrc base enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer -devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest +devstack_localrc base enable_service ceilometer-api tempest devstack_localrc target enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer -devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest +devstack_localrc target enable_service ceilometer-api tempest diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh index ec0e692b..74d7d99a 100755 --- a/devstack/upgrade/shutdown.sh +++ b/devstack/upgrade/shutdown.sh @@ -22,6 +22,6 @@ stop_ceilometer # ensure everything is stopped -SERVICES_DOWN="ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api" +SERVICES_DOWN="ceilometer-api" ensure_services_stopped $SERVICES_DOWN diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh index ee4240b0..61fd0d75 100755 --- a/devstack/upgrade/upgrade.sh +++ b/devstack/upgrade/upgrade.sh @@ -73,15 +73,7 @@ $CEILOMETER_BIN_DIR/ceilometer-dbsync || die $LINENO "DB sync error" # Start Ceilometer start_ceilometer -# Note these are process names, not service names -# Note(liamji): Disable the test for -# "ceilometer-polling --polling-namespaces ipmi". In the test environment, -# the impi is not ready. The ceilometer-polling should fail. 
-ensure_services_started "ceilometer-polling --polling-namespaces compute" \ - "ceilometer-polling --polling-namespaces central" \ - ceilometer-agent-notification \ - ceilometer-api \ - ceilometer-collector +ensure_services_started ceilometer-api # Save mongodb state (replace with snapshot) if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then diff --git a/doc/source/1-agents.png b/doc/source/1-agents.png deleted file mode 100644 index 3cfa8701..00000000 Binary files a/doc/source/1-agents.png and /dev/null differ diff --git a/doc/source/2-1-collection-notification.png b/doc/source/2-1-collection-notification.png deleted file mode 100644 index f4096af6..00000000 Binary files a/doc/source/2-1-collection-notification.png and /dev/null differ diff --git a/doc/source/2-2-collection-poll.png b/doc/source/2-2-collection-poll.png deleted file mode 100644 index 30383811..00000000 Binary files a/doc/source/2-2-collection-poll.png and /dev/null differ diff --git a/doc/source/2-accessmodel.png b/doc/source/2-accessmodel.png deleted file mode 100644 index 39ef2670..00000000 Binary files a/doc/source/2-accessmodel.png and /dev/null differ diff --git a/doc/source/3-Pipeline.png b/doc/source/3-Pipeline.png deleted file mode 100644 index 5948d1c4..00000000 Binary files a/doc/source/3-Pipeline.png and /dev/null differ diff --git a/doc/source/4-Transformer.png b/doc/source/4-Transformer.png deleted file mode 100644 index 4aa24059..00000000 Binary files a/doc/source/4-Transformer.png and /dev/null differ diff --git a/doc/source/5-multi-publish.png b/doc/source/5-multi-publish.png deleted file mode 100644 index 8a373466..00000000 Binary files a/doc/source/5-multi-publish.png and /dev/null differ diff --git a/doc/source/6-storagemodel.png b/doc/source/6-storagemodel.png deleted file mode 100644 index f72ece62..00000000 Binary files a/doc/source/6-storagemodel.png and /dev/null differ diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst deleted file mode 100644 index 2460efe8..00000000 --- a/doc/source/architecture.rst +++ /dev/null @@ -1,243 +0,0 @@ -.. _architecture: - -===================== - System Architecture -===================== - -.. index:: - single: agent; architecture - double: compute agent; architecture - double: collector; architecture - double: data store; architecture - double: database; architecture - double: API; architecture - -High-Level Architecture -======================= - -.. The source for the following diagram can be found at: https://docs.google.com/presentation/d/1XiOiaq9zI_DIpxY1tlkysg9VAEw2r8aYob0bjG71pNg/edit?usp=sharing - -.. figure:: ./ceilo-arch.png - :width: 100% - :align: center - :alt: Architecture summary - - An overall summary of Ceilometer's logical architecture. - -Each of Ceilometer's services are designed to scale horizontally. Additional -workers and nodes can be added depending on the expected load. Ceilometer -offers three core services, the data agents designed to work independently from -collection, but also designed to work together as a complete solution: - -1. polling agent - daemon designed to poll OpenStack services and build Meters. -2. notification agent - daemon designed to listen to notifications on message queue, - convert them to Events and Samples, and apply pipeline actions. -3. (optional) collector - daemon designed to gather and record event and metering data - created by notification and polling agents (if using Gnocchi or full-fidelity storage). -4. 
(optional) api - service to query and view data recorded by collector - in internal full-fidelity database (if enabled). - -As Ceilometer has grown to capture more data, it became apparent that data -storage would need to be optimised. To address this, Gnocchi_ (resource metering -as a service) was developed to capture the data in a time series database to -optimise storage and querying. Gnocchi is intended to replace the existing -metering database interface. - -.. _Gnocchi: http://docs.openstack.org/developer/gnocchi/ - -.. figure:: ./ceilo-gnocchi-arch.png - :width: 100% - :align: center - :alt: Ceilometer+Gnocchi Architecture summary - - An overall summary of Ceilometer+Gnocchi's logical architecture. - - -Gathering the data -================== - -How is data collected? ----------------------- - -.. figure:: ./1-agents.png - :width: 100% - :align: center - :alt: Collectors and agents - - This is a representation of how the collectors and agents gather data from - multiple sources. - -The Ceilometer project created 2 methods to collect data: - -1. :term:`Bus listener agent` which takes events generated on the - notification bus and transforms them into Ceilometer samples. This - is **the preferred method** of data collection. If you are working on some - OpenStack related project and are using the Oslo library, you are kindly - invited to come and talk to one of the project members to learn how you - could quickly add instrumentation for your project. -2. :term:`Polling agents`, which is the less preferred method, will poll - some API or other tool to collect information at a regular interval. - The polling approach is less preferred due to the load it can impose - on the API services. - -The first method is supported by the ceilometer-notification agent, which -monitors the message queues for notifications. Polling agents can be configured -either to poll the local hypervisor or remote APIs (public REST APIs exposed by -services and host-level SNMP/IPMI daemons). - -Notification Agents: Listening for data ---------------------------------------- - -.. index:: - double: notifications; architecture - -.. figure:: ./2-1-collection-notification.png - :width: 100% - :align: center - :alt: Notification agents - - Notification agents consuming messages from services. - -The heart of the system is the notification daemon (agent-notification) -which monitors the message bus for data being provided by other -OpenStack components such as Nova, Glance, Cinder, Neutron, Swift, Keystone, -and Heat, as well as Ceilometer internal communication. - -The notification daemon loads one or more *listener* plugins, using the -namespace ``ceilometer.notification``. Each plugin can listen to any topics, -but by default it will listen to ``notifications.info``. The listeners grab -messages off the defined topics and redistributes them to the appropriate -plugins(endpoints) to be processed into Events and Samples. - -Sample-oriented plugins provide a method to list the event types they're interested -in and a callback for processing messages accordingly. The registered name of the -callback is used to enable or disable it using the pipeline of the notification -daemon. The incoming messages are filtered based on their event type value before -being passed to the callback so the plugin only receives events it has -expressed an interest in seeing. 
For example, a callback asking for -``compute.instance.create.end`` events under -``ceilometer.compute.notifications`` would be invoked for those notification -events on the ``nova`` exchange using the ``notifications.info`` topic. Event -matching can also work using wildcards e.g. ``compute.instance.*``. - -.. _polling: - -Polling Agents: Asking for data -------------------------------- - -.. index:: - double: polling; architecture - -.. figure:: ./2-2-collection-poll.png - :width: 100% - :align: center - :alt: Polling agents - - Polling agents querying services for data. - -Polling for compute resources is handled by a polling agent running -on the compute node (where communication with the hypervisor is more -efficient), often referred to as the compute-agent. Polling via -service APIs for non-compute resources is handled by an agent running -on a cloud controller node, often referred to the central-agent. -A single agent can fulfill both roles in an all-in-one deployment. -Conversely, multiple instances of an agent may be deployed, in -which case the workload is shared. The polling agent -daemon is configured to run one or more *pollster* plugins using either the -``ceilometer.poll.compute`` and/or ``ceilometer.poll.central`` namespaces. - -The agents periodically ask each pollster for instances of -``Sample`` objects. The frequency of polling is controlled via the pipeline -configuration. See :ref:`Pipeline-Configuration` for details. -The agent framework then passes the samples to the notification agent for processing. - - -Processing the data -=================== - -.. _multi-publisher: - -Pipeline Manager ----------------- - -.. figure:: ./3-Pipeline.png - :width: 100% - :align: center - :alt: Ceilometer pipeline - - The assembly of components making the Ceilometer pipeline. - -Ceilometer offers the ability to take data gathered by the agents, manipulate -it, and publish it in various combinations via multiple pipelines. This -functionality is handled by the notification agents. - -Transforming the data ---------------------- - -.. figure:: ./4-Transformer.png - :width: 100% - :align: center - :alt: Transformer example - - Example of aggregation of multiple cpu time usage samples in a single - cpu percentage sample. - -The data gathered from the polling and notifications agents contains a wealth -of data and if combined with historical or temporal context, can be used to -derive even more data. Ceilometer offers various transformers which can be used -to manipulate data in the pipeline. - -Publishing the data -------------------- - -.. figure:: ./5-multi-publish.png - :width: 100% - :align: center - :alt: Multi-publish - - This figure shows how a sample can be published to multiple destinations. - -Currently, processed data can be published using 5 different transports: -notifier, a notification based publisher which pushes samples to a message -queue which can be consumed by the collector or an external system; udp, which -publishes samples using UDP packets; http, which targets a REST interface; -and kafka, which publishes data to a Kafka message queue to be consumed by any -system that supports Kafka. - - -Storing the data -================ - -Collector Service ------------------ - -The collector daemon gathers the processed event and metering data captured by -the notification and polling agents. It validates the incoming data and (if -the signature is valid) then writes the messages to a declared target: -database, file, gnocchi or http. 
- -More details on database and Gnocchi targets can be found in the install guide. - - -Accessing the data -================== - -API Service ------------ - -If the collected data from polling and notification agents are stored in Ceilometer's -database(s) (see the section :ref:`choosing_db_backend`), a REST API is available -to access the collected data rather than by accessing the underlying database directly. - -.. figure:: ./2-accessmodel.png - :width: 100% - :align: center - :alt: data access model - - This is a representation of how to access data stored by Ceilometer - -Moreover, end users can also -:ref:`send their own application specific data ` into the -database through the REST API for a various set of use cases. - -.. _send their own application centric data: ./webapi/v2.html#user-defined-data diff --git a/doc/source/ceilo-arch.png b/doc/source/ceilo-arch.png deleted file mode 100644 index 7a3b4250..00000000 Binary files a/doc/source/ceilo-arch.png and /dev/null differ diff --git a/doc/source/ceilo-gnocchi-arch.png b/doc/source/ceilo-gnocchi-arch.png deleted file mode 100644 index b513b623..00000000 Binary files a/doc/source/ceilo-gnocchi-arch.png and /dev/null differ diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst deleted file mode 100644 index 8e074779..00000000 --- a/doc/source/configuration.rst +++ /dev/null @@ -1,185 +0,0 @@ -.. - Copyright 2012 New Dream Network, LLC (DreamHost) - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -======================= - Configuration Options -======================= - -For the list and description of configuration options that can be set for Ceilometer in -order to set up the services please see the -`Telemetry section `_ -in the OpenStack Manuals Configuration Reference. - -Sample Configuration file -========================= - -The sample configuration file for Ceilometer, named -etc/ceilometer/ceilometer.conf.sample, was removed from version control after -the Icehouse release. For more details, please read the file -etc/ceilometer/README-ceilometer.conf.txt. You can generate this sample -configuration file by running ``tox -e genconfig``. - -.. _Pipeline-Configuration: - -Pipelines -========= - -Pipelines describe a coupling between sources of samples and the -corresponding sinks for transformation and publication of the samples. - -A source is a producer of samples, in effect a set of pollsters and/or -notification handlers emitting samples for a set of matching meters. -See :doc:`plugins` for details on how to write and plug in your plugins. - -Each source configuration encapsulates meter name matching, polling -interval determination, optional resource enumeration or discovery, -and mapping to one or more sinks for publication. - -A sink on the other hand is a consumer of samples, providing logic for -the transformation and publication of samples emitted from related sources. -Each sink configuration is concerned `only` with the transformation rules -and publication conduits for samples. 
- -In effect, a sink describes a chain of handlers. The chain starts with -zero or more transformers and ends with one or more publishers. The first -transformer in the chain is passed samples from the corresponding source, -takes some action such as deriving rate of change, performing unit conversion, -or aggregating, before passing the modified sample to next step. - -The chains end with one or more publishers. This component makes it possible -to persist the data into storage through the message bus or to send it to one -or more external consumers. One chain can contain multiple publishers, see the -:ref:`multi-publisher` section. - - -Pipeline configuration ----------------------- - -Pipeline configuration by default, is stored in a separate configuration file, -called pipeline.yaml, next to the ceilometer.conf file. The pipeline -configuration file can be set in the *pipeline_cfg_file* parameter in -ceilometer.conf. Multiple chains can be defined in one configuration file. - -The chain definition looks like the following:: - - --- - sources: - - name: 'source name' - interval: 'how often should the samples be injected into the pipeline' - meters: - - 'meter filter' - resources: - - 'list of resource URLs' - discovery: - - 'list of discoverers' - sinks - - 'sink name' - sinks: - - name: 'sink name' - transformers: 'definition of transformers' - publishers: - - 'list of publishers' - -The *name* parameter of a source is unrelated to anything else; -nothing references a source by name, and a source's name does not have -to match anything. - -The *interval* parameter in the sources section should be defined in seconds. It -determines the cadence of sample injection into the pipeline, where samples are -produced under the direct control of an agent, i.e. via a polling cycle as opposed -to incoming notifications. - -There are several ways to define the list of meters for a pipeline source. The -list of valid meters can be found in the :ref:`measurements` section. There is -a possibility to define all the meters, or just included or excluded meters, -with which a source should operate: - -* To include all meters, use the '*' wildcard symbol. -* To define the list of meters, use either of the following: - - * To define the list of included meters, use the 'meter_name' syntax - * To define the list of excluded meters, use the '!meter_name' syntax - * For meters, which identify a complex Sample field, use the wildcard - symbol to select all, e.g. for "disk.read.bytes", use "disk.\*" - -The above definition methods can be used in the following combinations: - -* Only the wildcard symbol -* The list of included meters -* The list of excluded meters -* Wildcard symbol with the list of excluded meters - -.. note:: - At least one of the above variations should be included in the meters - section. Included and excluded meters cannot co-exist in the same - pipeline. Wildcard and included meters cannot co-exist in the same - pipeline definition section. - -A given polling plugin is invoked according to each source section -whose *meters* parameter matches the plugin's meter name. That is, -the matching source sections are combined by union, not intersection, -of the prescribed time series. - -The optional *resources* section of a pipeline source allows a list of -static resource URLs to be configured. An amalgamated list of all -statically configured resources for a set of pipeline sources with a -common interval is passed to individual pollsters matching those pipelines. 
- -The optional *discovery* section of a pipeline source contains the list of -discoverers. These discoverers can be used to dynamically discover the -resources to be polled by the pollsters defined in this pipeline. The name -of the discoverers should be the same as the related names of plugins in -setup.cfg. - -If *resources* or *discovery* section is not set, the default value would -be an empty list. If both *resources* and *discovery* are set, the final -resources passed to the pollsters will be the combination of the dynamic -resources returned by the discoverers and the static resources defined -in the *resources* section. If there are some duplications between the -resources returned by the discoverers and those defined in the *resources* -section, the duplication will be removed before passing those resources -to the pollsters. - -There are three ways a pollster can get a list of resources to poll, as the -following in descending order of precedence: - - 1. From the per-pipeline configured discovery and/or static resources. - 2. From the per-pollster default discovery. - 3. From the per-agent default discovery. - -The *transformers* section of a pipeline sink provides the possibility to add a -list of transformer definitions. The names of the transformers should be the same -as the names of the related extensions in setup.cfg. For a more detailed -description, please see the `transformers`_ section of the Administrator Guide -of Ceilometer. - -.. _transformers: http://docs.openstack.org/admin-guide/telemetry-data-collection.html#transformers - -The *publishers* section contains the list of publishers, where the samples -data should be sent after the possible transformations. The names of the -publishers should be the same as the related names of the plugins in -setup.cfg. - -The default configuration can be found in `pipeline.yaml`_. - -.. _pipeline.yaml: https://git.openstack.org/cgit/openstack/ceilometer/tree/etc/ceilometer/pipeline.yaml - -Publishers -++++++++++ - -For more information about publishers see the `publishers`_ section of the -Administrator Guide of Ceilometer. - -.. _publishers: http://docs.openstack.org/admin-guide/telemetry-data-retrieval.html#publishers diff --git a/doc/source/events.rst b/doc/source/events.rst deleted file mode 100644 index 9091d5ed..00000000 --- a/doc/source/events.rst +++ /dev/null @@ -1,291 +0,0 @@ -.. - Copyright 2013 Rackspace Hosting. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _events: - -============================= - Events and Event Processing -============================= - -Events vs. Samples -================== - -In addition to Meters, and related Sample data, Ceilometer can also process -Events. While a Sample represents a single numeric datapoint, driving a Meter -that represents the changes in that value over time, an Event represents the -state of an object in an OpenStack service (such as an Instance in Nova, or -an Image in Glance) at a point in time when something of interest has occurred. 
-This can include non-numeric data, such as an instance's flavor, or network -address. - -In general, Events let you know when something has changed about an -object in an OpenStack system, such as the resize of an instance, or creation -of an image. - -While Samples can be relatively cheap (small), -disposable (losing an individual sample datapoint won't matter much), -and fast, Events are larger, more informative, and should be handled more -consistently (you do not want to lose one). - -Event Structure -=============== - -To facilitate downstream processing (billing and/or aggregation), a -:doc:`minimum required data set and format ` has been defined for -services, however events generally contain the following information: - - -event_type - A dotted string defining what event occurred, such as "compute.instance.resize.start" - -message_id - A UUID for this event. - -generated - A timestamp of when the event occurred on the source system. - -traits - A flat mapping of key-value pairs. - The event's Traits contain most of the details of the event. Traits are - typed, and can be strings, ints, floats, or datetimes. - -raw - (Optional) Mainly for auditing purpose, the full notification message - can be stored (unindexed) for future evaluation. - -Events from Notifications -========================= - -Events are primarily created via the notifications system in OpenStack. -OpenStack systems, such as Nova, Glance, Neutron, etc. will emit -notifications in a JSON format to the message queue when some notable action is -taken by that system. Ceilometer will consume such notifications from the -message queue, and process them. - -The general philosophy of notifications in OpenStack is to emit any and all -data someone might need, and let the consumer filter out what they are not -interested in. In order to make processing simpler and more efficient, -the notifications are stored and processed within Ceilometer as Events. -The notification payload, which can be an arbitrarily complex JSON data -structure, is converted to a flat set of key-value pairs known as Traits. -This conversion is specified by a config file, so that only the specific -fields within the notification that are actually needed for processing the -event will have to be stored as Traits. - -Note that the Event format is meant for efficient processing and querying, -there are other means available for archiving notifications (i.e. for audit -purposes, etc), possibly to different datastores. - -Converting Notifications to Events ----------------------------------- - -In order to make it easier to allow users to extract what they need, -the conversion from Notifications to Events is driven by a -configuration file (specified by the flag definitions_cfg_file_ in -ceilometer.conf). - -This includes descriptions of how to map fields in the notification body -to Traits, and optional plugins for doing any programmatic translations -(splitting a string, forcing case, etc.) - -The mapping of notifications to events is defined per event_type, which -can be wildcarded. Traits are added to events if the corresponding fields -in the notification exist and are non-null. (As a special case, an empty -string is considered null for non-text traits. This is due to some openstack -projects (mostly Nova) using empty string for null dates.) - -If the definitions file is not present, a warning will be logged, but an empty -set of definitions will be assumed. 
By default, any notifications that -do not have a corresponding event definition in the definitions file will be -converted to events with a set of minimal, default traits. This can be -changed by setting the flag drop_unmatched_notifications_ in the -ceilometer.conf file. If this is set to True, then any notifications -that don't have events defined for them in the file will be dropped. -This can be what you want, the notification system is quite chatty by design -(notifications philosophy is "tell us everything, we'll ignore what we don't -need"), so you may want to ignore the noisier ones if you don't use them. - -.. _definitions_cfg_file: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html -.. _drop_unmatched_notifications: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html - -There is a set of default traits (all are TEXT type) that will be added to -all events if the notification has the relevant data: - -* service: (All notifications should have this) notification's publisher -* tenant_id -* request_id -* project_id -* user_id - -These do not have to be specified in the event definition, they are -automatically added, but their definitions can be overridden for a given -event_type. - -Definitions file format ------------------------ - -The event definitions file is in YAML format. It consists of a list of event -definitions, which are mappings. Order is significant, the list of definitions -is scanned in *reverse* order (last definition in the file to the first), -to find a definition which matches the notification's event_type. That -definition will be used to generate the Event. The reverse ordering is done -because it is common to want to have a more general wildcarded definition -(such as "compute.instance.*" ) with a set of traits common to all of those -events, with a few more specific event definitions (like -"compute.instance.exists") afterward that have all of the above traits, plus -a few more. This lets you put the general definition first, followed by the -specific ones, and use YAML mapping include syntax to avoid copying all of the -trait definitions. - -Event Definitions ------------------ - -Each event definition is a mapping with two keys (both required): - -event_type - This is a list (or a string, which will be taken as a 1 element - list) of event_types this definition will handle. These can be - wildcarded with unix shell glob syntax. An exclusion listing - (starting with a '!') will exclude any types listed from matching. - If ONLY exclusions are listed, the definition will match anything - not matching the exclusions. -traits - This is a mapping, the keys are the trait names, and the values are - trait definitions. - -Trait Definitions ------------------ - -Each trait definition is a mapping with the following keys: - -type - (optional) The data type for this trait. (as a string). Valid - options are: *text*, *int*, *float*, and *datetime*. - defaults to *text* if not specified. -fields - A path specification for the field(s) in the notification you wish - to extract for this trait. Specifications can be written to match - multiple possible fields, the value for the trait will be derived - from the matching fields that exist and have a non-null values in - the notification. By default the value will be the first such field. - (plugins can alter that, if they wish). 
This is normally a string, - but, for convenience, it can be specified as a list of - specifications, which will match the fields for all of them. (See - `Field Path Specifications`_ for more info on this syntax.) -plugin - (optional) This is a mapping (For convenience, this value can also - be specified as a string, which is interpreted as the name of a - plugin to be loaded with no parameters) with the following keys - - name - (string) name of a plugin to load - - parameters - (optional) Mapping of keyword arguments to pass to the plugin on - initialization. (See documentation on each plugin to see what - arguments it accepts.) - -Field Path Specifications -------------------------- - -The path specifications define which fields in the JSON notification -body are extracted to provide the value for a given trait. The paths -can be specified with a dot syntax (e.g. "payload.host"). Square -bracket syntax (e.g. "payload[host]") is also supported. In either -case, if the key for the field you are looking for contains special -characters, like '.', it will need to be quoted (with double or single -quotes) like so: - - payload.image_meta.'org.openstack__1__architecture' - -The syntax used for the field specification is a variant of JSONPath, -and is fairly flexible. (see: https://github.com/kennknowles/python-jsonpath-rw for more info) - -Example Definitions file ------------------------- - -:: - - --- - - event_type: compute.instance.* - traits: &instance_traits - user_id: - fields: payload.user_id - instance_id: - fields: payload.instance_id - host: - fields: publisher_id - plugin: - name: split - parameters: - segment: 1 - max_split: 1 - service_name: - fields: publisher_id - plugin: split - instance_type_id: - type: int - fields: payload.instance_type_id - os_architecture: - fields: payload.image_meta.'org.openstack__1__architecture' - launched_at: - type: datetime - fields: payload.launched_at - deleted_at: - type: datetime - fields: payload.deleted_at - - event_type: - - compute.instance.exists - - compute.instance.update - traits: - <<: *instance_traits - audit_period_beginning: - type: datetime - fields: payload.audit_period_beginning - audit_period_ending: - type: datetime - fields: payload.audit_period_ending - -Trait plugins -------------- - -Trait plugins can be used to do simple programmatic conversions on the value in -a notification field, like splitting a string, lowercasing a value, converting -a screwball date into ISO format, or the like. They are initialized with the -parameters from the trait definition, if any, which can customize their -behavior for a given trait. They are called with a list of all matching fields -from the notification, so they can derive a value from multiple fields. The -plugin will be called even if there are no fields found matching the field -path(s), this lets a plugin set a default value, if needed. A plugin can also -reject a value by returning *None*, which will cause the trait not to be -added. If the plugin returns anything other than *None*, the trait's value -will be set to whatever the plugin returned (coerced to the appropriate type -for the trait). - -Building Notifications -====================== - -In general, the payload format OpenStack services emit could be described as -the Wild West. The payloads are often arbitrary data dumps at the time of -the event which is often susceptible to change. 
To make consumption easier, -the Ceilometer team offers two proposals: CADF_, an open, cloud standard -which helps model cloud events and the PaaS Event Format. - -.. toctree:: - :maxdepth: 1 - - format - -.. _CADF: http://docs.openstack.org/developer/pycadf/ diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst deleted file mode 100644 index 2787d5d4..00000000 --- a/doc/source/glossary.rst +++ /dev/null @@ -1,132 +0,0 @@ -.. - Copyright 2012 New Dream Network (DreamHost) - Copyright 2013 eNovance - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -========== - Glossary -========== - -.. glossary:: - - agent - Software service running on the OpenStack infrastructure - measuring usage and sending the results to the :term:`collector`. - - API server - HTTP REST API service for ceilometer. - - billing - Billing is the process to assemble bill line items into a single - per customer bill, emitting the bill to start the payment collection. - - bus listener agent - Bus listener agent which takes events generated on the Oslo - notification bus and transforms them into Ceilometer samples. This - is the preferred method of data collection. - - ceilometer - From Wikipedia [#]_: - - A ceilometer is a device that uses a laser or other light - source to determine the height of a cloud base. - - polling agent - Software service running either on a central management node within the - OpenStack infrastructure or compute node measuring usage and sending the - results to the :term:`collector`. - - collector - Software service running on the OpenStack infrastructure - monitoring notifications from other OpenStack components and - samples from the ceilometer agent and recording the results - in the database. - - notification agent - The different OpenStack services emit several notifications about the - various types of events. The notification agent consumes them from - respective queues and filters them by the event_type. - - data store - Storage system for recording data collected by ceilometer. - - meter - The measurements tracked for a resource. For example, an instance has - a number of meters, such as duration of instance, CPU time used, - number of disk io requests, etc. - Three types of meters are defined in ceilometer: - - * Cumulative: Increasing over time (e.g. disk I/O) - * Gauge: Discrete items (e.g. floating IPs, image uploads) and fluctuating - values (e.g. number of Swift objects) - * Delta: Incremental change to a counter over time (e.g. bandwidth delta) - - metering - Metering is the process of collecting information about what, - who, when and how much regarding anything that can be billed. The result of - this is a collection of "tickets" (a.k.a. samples) which are ready to be - processed in any way you want. - - notification - A message sent via an external OpenStack system (e.g Nova, Glance, - etc) using the Oslo notification mechanism [#]_. These notifications - are usually sent to and received by Ceilometer through the notifier - RPC driver. 
- - non-repudiable - From Wikipedia [#]_: - - Non-repudiation refers to a state of affairs where the purported - maker of a statement will not be able to successfully challenge - the validity of the statement or contract. The term is often - seen in a legal setting wherein the authenticity of a signature - is being challenged. In such an instance, the authenticity is - being "repudiated". - - project - The OpenStack tenant or project. - - polling agents - The polling agent is collecting measurements by polling some API or other - tool at a regular interval. - - push agents - The push agent is the only solution to fetch data within projects, - which do not expose the required data in a remotely usable way. This - is not the preferred method as it makes deployment a bit more - complex having to add a component to each of the nodes that need - to be monitored. - - rating - Rating is the process of analysing a series of tickets, - according to business rules defined by marketing, in order to transform - them into bill line items with a currency value. - - resource - The OpenStack entity being metered (e.g. instance, volume, image, etc). - - sample - Data sample for a particular meter. - - source - The origin of metering data. This field is set to "openstack" by default. - It can be configured to a different value using the sample_source field - in the ceilometer.conf file. - - user - An OpenStack user. - -.. [#] http://en.wikipedia.org/wiki/Ceilometer -.. [#] https://git.openstack.org/cgit/openstack/ceilometer/tree/ceilometer/openstack/common/notifier -.. [#] http://en.wikipedia.org/wiki/Non-repudiation diff --git a/doc/source/gmr.rst b/doc/source/gmr.rst index 2453b0c1..4e6bed92 100644 --- a/doc/source/gmr.rst +++ b/doc/source/gmr.rst @@ -27,10 +27,10 @@ A *GMR* can be generated by sending the *USR1* signal to any Ceilometer process with support (see below). The *GMR* will then be outputted standard error for that particular process. -For example, suppose that ``ceilometer-polling`` has process id ``8675``, and -was run with ``2>/var/log/ceilometer/ceilometer-polling.log``. Then, +For example, suppose that ``ceilometer-api`` has process id ``8675``, and +was run with ``2>/var/log/ceilometer/ceilometer-api.log``. Then, ``kill -USR1 8675`` will trigger the Guru Meditation report to be printed to -``/var/log/ceilometer/ceilometer-polling.log``. +``/var/log/ceilometer/ceilometer-api.log``. Structure of a GMR ------------------ diff --git a/doc/source/index.rst b/doc/source/index.rst index c3589ad0..6e48bed9 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -17,13 +17,7 @@ Welcome to the Ceilometer developer documentation! ================================================== -The :term:`Ceilometer` project is a data collection service that provides the -ability to normalise and transform data across all current OpenStack core -components with work underway to support future OpenStack components. - -Ceilometer is a component of the Telemetry project. Its data can be used to -provide customer billing, resource tracking, and alarming capabilities -across all OpenStack core components. +Ceilometer is a component of the Telemetry project. This documentation offers information on how Ceilometer works and how to contribute to the project. @@ -34,10 +28,6 @@ Overview .. 
toctree:: :maxdepth: 2 - overview - architecture - measurements - events webapi/index Developer Documentation @@ -47,9 +37,6 @@ Developer Documentation :maxdepth: 2 install/index - configuration - plugins - new_meters testing contributing gmr @@ -60,8 +47,6 @@ Appendix .. toctree:: :maxdepth: 1 - releasenotes/index - glossary api/index diff --git a/doc/source/install/custom.rst b/doc/source/install/custom.rst deleted file mode 100644 index edb954db..00000000 --- a/doc/source/install/custom.rst +++ /dev/null @@ -1,165 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _customizing_deployment: - -=================================== - Customizing Ceilometer Deployment -=================================== - -Notifications queues -==================== - -.. index:: - double: customizing deployment; notifications queues; multiple topics - -By default, Ceilometer consumes notifications on the messaging bus sent to -**topics** by using a queue/pool name that is identical to the -topic name. You shouldn't have different applications consuming messages from -this queue. If you want to also consume the topic notifications with a system -other than Ceilometer, you should configure a separate queue that listens for -the same messages. - -Ceilometer allows multiple topics to be configured so that the polling agent can -send the same messages of notifications to other queues. Notification agents -also use **topics** to configure which queue to listen for. If -you use multiple topics, you should configure notification agent and polling -agent separately, otherwise Ceilometer collects duplicate samples. - -By default, the ceilometer.conf file is as follows:: - - [oslo_messaging_notifications] - topics = notifications - -To use multiple topics, you should give ceilometer-agent-notification and -ceilometer-polling services different ceilometer.conf files. The Ceilometer -configuration file ceilometer.conf is normally locate in the /etc/ceilometer -directory. Make changes according to your requirements which may look like -the following:: - -For notification agent using ceilometer-notification.conf, settings like:: - - [oslo_messaging_notifications] - topics = notifications,xxx - -For polling agent using ceilometer-polling.conf, settings like:: - - [oslo_messaging_notifications] - topics = notifications,foo - -.. note:: - - notification_topics in ceilometer-notification.conf should only have one same - topic in ceilometer-polling.conf - -Doing this, it's easy to listen/receive data from multiple internal and external services. - - -Using multiple dispatchers -========================== - -.. index:: - double: customizing deployment; multiple dispatchers - -The Ceilometer collector allows multiple dispatchers to be configured so that -data can be easily sent to multiple internal and external systems. Dispatchers -are divided between ``event_dispatchers`` and ``meter_dispatchers`` which can -each be provided with their own set of receiving systems. - -.. 
note:: - - In Liberty and prior, the configuration option for all data was - ``dispatcher`` but this was changed for the Mitaka release to break out - separate destination systems by type of data. - -By default, Ceilometer only saves event and meter data in a database. If you -want Ceilometer to send data to other systems, instead of or in addition to -the Ceilometer database, multiple dispatchers can be enabled by modifying the -Ceilometer configuration file. - -Ceilometer ships multiple dispatchers currently. They are ``database``, -``file``, ``http`` and ``gnocchi`` dispatcher. As the names imply, database -dispatcher sends metering data to a database, file dispatcher logs meters into -a file, http dispatcher posts the meters onto a http target, gnocchi -dispatcher posts the meters onto Gnocchi_ backend. Each dispatcher can have -its own configuration parameters. Please see available configuration -parameters at the beginning of each dispatcher file. - -.. _Gnocchi: http://gnocchi.readthedocs.org/en/latest/basic.html - -To check if any of the dispatchers is available in your system, you can -inspect the Ceilometer egg entry_points.txt file, you should normally see text -like the following:: - - [ceilometer.dispatcher] - database = ceilometer.dispatcher.database:DatabaseDispatcher - file = ceilometer.dispatcher.file:FileDispatcher - http = ceilometer.dispatcher.http:HttpDispatcher - gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher - -To configure one or multiple dispatchers for Ceilometer, find the Ceilometer -configuration file ceilometer.conf which is normally located at /etc/ceilometer -directory and make changes accordingly. Your configuration file can be in a -different directory. - -To use multiple dispatchers on a Ceilometer collector service, add multiple -dispatcher lines in ceilometer.conf file like the following:: - - [DEFAULT] - meter_dispatchers=database - meter_dispatchers=file - -If there is no dispatcher present, database dispatcher is used as the -default. If in some cases such as traffic tests, no dispatcher is needed, -one can configure the line without a dispatcher, like the following:: - - event_dispatchers= - -With the above configuration, no event dispatcher is used by the Ceilometer -collector service, all event data received by Ceilometer collector will be -dropped. - -For Gnocchi dispatcher, the following configuration settings should be added:: - - [DEFAULT] - meter_dispatchers = gnocchi - - [dispatcher_gnocchi] - archive_policy = low - -The value specified for ``archive_policy`` should correspond to the name of an -``archive_policy`` configured within Gnocchi. - -For Gnocchi dispatcher backed by Swift storage, the following additional -configuration settings should be added:: - - [dispatcher_gnocchi] - filter_project = gnocchi_swift - filter_service_activity = True - -.. note:: - If gnocchi dispatcher is enabled, Ceilometer api calls will return a 410 with - an empty result. The Gnocchi Api should be used instead to access the data. - -Efficient polling -================= - -- There is an optional config called ``shuffle_time_before_polling_task`` - in ceilometer.conf. Enable this by setting an integer greater than zero to - shuffle polling time for agents. This will add some random jitter to the time - of sending requests to Nova or other components to avoid large number of - requests in a short time period. 
-- There is an option to stream samples to minimise latency (at the - expense of load) by setting ``batch_polled_samples`` to ``False`` in - ceilometer.conf. - diff --git a/doc/source/install/dbreco.rst b/doc/source/install/dbreco.rst deleted file mode 100644 index 55be61dc..00000000 --- a/doc/source/install/dbreco.rst +++ /dev/null @@ -1,89 +0,0 @@ -.. - Copyright 2013 Nicolas Barcet for eNovance - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _choosing_db_backend: - -============================ - Choosing a database backend -============================ - -.. note:: - - Ceilometer's native database capabilities is intended for post processing - and auditing purposes where responsiveness is not a requirement. It - captures the full fidelity of each datapoint and thus is not designed - for low latency use cases. For more responsive use cases, it's recommended - to store data in an alternative source such as Gnocchi_. Please see - `Moving from Ceilometer to Gnocchi`_ to find more information. - -.. note:: - - As of Liberty, alarming support, and subsequently its database, is handled - by Aodh_. - -.. _Aodh: http://docs.openstack.org/developer/aodh/ - -Selecting a database backend for Ceilometer should not be done lightly for -numerous reasons: - -1. Not all backend drivers are equally implemented and tested. To help you - make your choice, the table below will give you some idea of the - status of each of the drivers available in trunk. Note that we do welcome - patches to improve completeness and quality of drivers. - -2. It may not be a good idea to use the same host as another database as - Ceilometer can generate a LOT OF WRITES. For this reason it is generally - recommended, if the deployment is targeting going into production, to use - a dedicated host, or at least a VM which will be migratable to another - physical host if needed. The following spreadsheet can help you get an - idea of the volumes that ceilometer can generate: - `Google spreadsheet `_ - -3. If you are relying on this backend to bill customers, you will note that - your capacity to generate revenue is very much linked to its reliability, - which seems to be a factor dear to many managers. - -The following is a table indicating the status of each database drivers: - -================== ============================= =========================================== -Driver API querying API statistics -================== ============================= =========================================== -MongoDB Yes Yes -MySQL Yes Yes -PostgreSQL Yes Yes -HBase Yes Yes, except groupby & selectable aggregates -================== ============================= =========================================== - - -Moving from Ceilometer to Gnocchi -================================= - -Gnocchi represents a fundamental change in how data is represented and stored. -Installation and configuration can be found in :ref:`installing_manually`. -Differences between APIs can be found here_. - -There currently exists no migration tool between the services. 
To transition -to Gnocchi, multiple dispatchers can be enabled in the Collector to capture -data in both the native Ceilometer database and Gnocchi. This will allow you -to test Gnocchi and transition to it fully when comfortable. The following -should be included in addition to the required configurations for each -backend:: - - [DEFAULT] - meter_dispatchers=database - meter_dispatchers=gnocchi - -.. _Gnocchi: http://gnocchi.xyz -.. _here: https://docs.google.com/presentation/d/1PefouoeMVd27p2OGDfNQpx18mY-Wk5l0P1Ke2Vt5LwA/edit?usp=sharing diff --git a/doc/source/install/development.rst b/doc/source/install/development.rst index 08a052d8..3886261f 100644 --- a/doc/source/install/development.rst +++ b/doc/source/install/development.rst @@ -18,15 +18,6 @@ Installing development sandbox =============================== -Ceilometer has several daemons. The basic are: :term:`polling agent` running -either on the Nova compute node(s) or :term:`polling agent` running on the -central management node(s), :term:`collector` and :term:`notification agent` -running on the cloud's management node(s). - -In a development environment created by devstack_, these services are -typically running on the same server. - - Configuring devstack ==================== @@ -37,11 +28,7 @@ Configuring devstack 2. Create a ``local.conf`` file as input to devstack. -3. Ceilometer makes extensive use of the messaging bus, but has not - yet been tested with ZeroMQ. We recommend using Rabbit for - now. By default, RabbitMQ will be used by devstack. - -4. The ceilometer services are not enabled by default, so they must be +3. The ceilometer services are not enabled by default, so they must be enabled in ``local.conf`` before running ``stack.sh``. This example ``local.conf`` file shows all of the settings required for @@ -51,7 +38,4 @@ Configuring devstack # Enable the Ceilometer devstack plugin enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git - By default, all ceilometer services except for ceilometer-ipmi agent will - be enabled - .. _devstack: http://www.devstack.org/ diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst index 447b1996..35fb7075 100644 --- a/doc/source/install/index.rst +++ b/doc/source/install/index.rst @@ -22,9 +22,6 @@ .. toctree:: :maxdepth: 2 - dbreco development manual - custom - upgrade mod_wsgi diff --git a/doc/source/install/manual.rst b/doc/source/install/manual.rst index 8011a32a..821427ef 100644 --- a/doc/source/install/manual.rst +++ b/doc/source/install/manual.rst @@ -27,67 +27,6 @@ Storage Backend Installation This step is a prerequisite for the collector and API services. You may use one of the listed database backends below to store Ceilometer data. -Gnocchi -------- - -1. Follow `Gnocchi installation`_ instructions - -2. Initialize Gnocchi for Ceilometer:: - - gnocchi-upgrade --create-legacy-resource-types - - .. note:: - - Prior to Gnocchi 2.1, Ceilometer resource types were included, therefore - --create-legacy-resource-types flag is not needed. - -3. 
Edit `/etc/ceilometer/ceilometer.conf` for the collector service:: - - [DEFAULT] - meter_dispatchers = gnocchi - event_dispatchers = - - [notification] - store_events = False - - [dispatcher_gnocchi] - filter_service_activity = False # Enable if using swift backend - filter_project = # if using swift backend - - [service_credentials] - auth_url = :5000 - region_name = RegionOne - password = password - username = ceilometer - project_name = service - project_domain_id = default - user_domain_id = default - auth_type = password - -4. Copy gnocchi_resources.yaml to config directory (e.g./etc/ceilometer) - -5. To minimize data requests, caching and batch processing should be enabled: - - 1. Enable resource caching (oslo.cache_ should be installed):: - - [cache] - backend_argument = redis_expiration_time:600 - backend_argument = db:0 - backend_argument = distributed_lock:True - backend_argument = url:redis://localhost:6379 - backend = dogpile.cache.redis - - 2. Enable batch processing:: - - [collector] - batch_size = 100 - batch_timeout = 5 - -6. Start collector service - -.. _oslo.cache: http://docs.openstack.org/developer/oslo.cache/opts.html - - MongoDB ------- @@ -113,253 +52,11 @@ SQLalchemy-supported DBs [database] connection = mysql+pymysql://username:password@host/ceilometer?charset=utf8 -HBase ------ - HBase backend is implemented to use HBase Thrift interface, therefore it is - mandatory to have the HBase Thrift server installed and running. To start - the Thrift server, please run the following command:: - - ${HBASE_HOME}/bin/hbase thrift start - - The implementation uses `HappyBase`_, which is a wrapper library used to - interact with HBase via Thrift protocol. You can verify the Thrift - connection by running a quick test from a client:: - - import happybase - - conn = happybase.Connection(host=$hbase-thrift-server, - port=9090, - table_prefix=None, - table_prefix_separator='_') - print conn.tables() # this returns a list of HBase tables in your HBase server - - .. note:: - - HappyBase version 0.5 or greater is required. Additionally, version 0.7 - is not currently supported. - - In the case of HBase, the required database tables (`project`, `user`, `resource`, - `meter`) should be created manually with `f` column family for each one. - - To use HBase as the storage backend, change the 'database' section in - ceilometer.conf as follows:: - - [database] - connection = hbase://hbase-thrift-host:9090 - - It is possible to customize happybase's `table_prefix` and `table_prefix_separator` - via query string. By default `table_prefix` is not set and `table_prefix_separator` - is '_'. When `table_prefix` is not specified `table_prefix_separator` is not taken - into account. E.g. the resource table in the default case will be 'resource' while - with `table_prefix` set to 'ceilo' and `table_prefix_separator` to '.' the resulting - table will be 'ceilo.resource'. For this second case this is the database connection - configuration:: - - [database] - connection = hbase://hbase-thrift-host:9090?table_prefix=ceilo&table_prefix_separator=. - - To ensure proper configuration, please add the following lines to the - `hbase-site.xml` configuration file:: - - - hbase.thrift.minWorkerThreads - 200 - - -.. _`Gnocchi installation`: http://docs.openstack.org/developer/gnocchi/install.html -.. _HappyBase: http://happybase.readthedocs.org/en/latest/index.html# .. _MongoDB: http://www.mongodb.org/ .. 
_pymongo: https://pypi.python.org/pypi/pymongo/ -Installing the notification agent -================================= - -.. index:: - double: installing; agent-notification - -1. Clone the ceilometer git repository to the management server:: - - $ cd /opt/stack - $ git clone https://git.openstack.org/openstack/ceilometer.git - -2. As a user with ``root`` permissions or ``sudo`` privileges, run the - ceilometer installer:: - - $ cd ceilometer - $ sudo python setup.py install - -3. Copy the sample configuration files from the source tree - to their final location:: - - $ mkdir -p /etc/ceilometer - $ cp etc/ceilometer/*.json /etc/ceilometer - $ cp etc/ceilometer/*.yaml /etc/ceilometer - $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf - -4. Edit ``/etc/ceilometer/ceilometer.conf`` - - 1. Configure messaging:: - - [oslo_messaging_notifications] - topics = notifications - - [oslo_messaging_rabbit] - rabbit_userid = stackrabbit - rabbit_password = openstack1 - rabbit_hosts = 10.0.2.15 - - 2. Set the ``telemetry_secret`` value. - - Set the ``telemetry_secret`` value to a large, random, value. Use - the same value in all ceilometer configuration files, on all - nodes, so that messages passing between the nodes can be - validated. This value can be left empty to disable message signing. - - .. note:: - - Disabling signing will improve message handling performance - - Refer to :doc:`/configuration` for details about any other options - you might want to modify before starting the service. - -5. Start the notification daemon:: - - $ ceilometer-agent-notification - - .. note:: - - The default development configuration of the collector logs to - stderr, so you may want to run this step using a screen session - or other tool for maintaining a long-running program in the - background. - - -Installing the collector -======================== - -.. index:: - double: installing; collector - -.. _storage_backends: - -1. Clone the ceilometer git repository to the management server:: - - $ cd /opt/stack - $ git clone https://git.openstack.org/openstack/ceilometer.git - -2. As a user with ``root`` permissions or ``sudo`` privileges, run the - ceilometer installer:: - - $ cd ceilometer - $ sudo python setup.py install - -3. Copy the sample configuration files from the source tree - to their final location:: - - $ mkdir -p /etc/ceilometer - $ cp etc/ceilometer/*.json /etc/ceilometer - $ cp etc/ceilometer/*.yaml /etc/ceilometer - $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf - -4. Edit ``/etc/ceilometer/ceilometer.conf`` - - 1. Configure messaging:: - - [oslo_messaging_notifications] - topics = notifications - - [oslo_messaging_rabbit] - rabbit_userid = stackrabbit - rabbit_password = openstack1 - rabbit_hosts = 10.0.2.15 - - 2. Set the ``telemetry_secret`` value (if enabled for notification agent) - - Refer to :doc:`/configuration` for details about any other options - you might want to modify before starting the service. - -5. Start the collector:: - - $ ceilometer-collector - - .. note:: - - The default development configuration of the collector logs to - stderr, so you may want to run this step using a screen session - or other tool for maintaining a long-running program in the - background. - -Installing the Polling Agent -============================ - -.. index:: - double: installing; agent - -.. note:: - - The polling agent needs to be able to talk to Keystone and any of - the services being polled for updates. 
It also needs to run on your compute - nodes to poll instances. - -1. Clone the ceilometer git repository to the server:: - - $ cd /opt/stack - $ git clone https://git.openstack.org/openstack/ceilometer.git - -2. As a user with ``root`` permissions or ``sudo`` privileges, run the - ceilometer installer:: - - $ cd ceilometer - $ sudo python setup.py install - -3. Copy the sample configuration files from the source tree - to their final location:: - - $ mkdir -p /etc/ceilometer - $ cp etc/ceilometer/*.json /etc/ceilometer - $ cp etc/ceilometer/*.yaml /etc/ceilometer - $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf - -4. Configure messaging by editing ``/etc/ceilometer/ceilometer.conf``:: - - [oslo_messaging_notifications] - topics = notifications - - [oslo_messaging_rabbit] - rabbit_userid = stackrabbit - rabbit_password = openstack1 - rabbit_hosts = 10.0.2.15 - -5. In order to retrieve object store statistics, ceilometer needs - access to swift with ``ResellerAdmin`` role. You should give this - role to your ``os_username`` user for tenant ``os_tenant_name``:: - - $ openstack role create ResellerAdmin - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | None | - | id | f5153dae801244e8bb4948f0a6fb73b7 | - | name | ResellerAdmin | - +-----------+----------------------------------+ - - $ openstack role add f5153dae801244e8bb4948f0a6fb73b7 \ - --project $SERVICE_TENANT \ - --user $CEILOMETER_USER - -6. Start the agent:: - - $ ceilometer-polling - -7. By default, the polling agent polls the `compute` and `central` namespaces. - You can specify which namespace to poll in the `ceilometer.conf` - configuration file or on the command line:: - - $ ceilometer-polling --polling-namespaces central,ipmi - - Installing the API Server ========================= @@ -392,22 +89,12 @@ Installing the API Server $ cp etc/ceilometer/*.yaml /etc/ceilometer $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf -4. Configure messaging by editing ``/etc/ceilometer/ceilometer.conf``:: - - [oslo_messaging_notifications] - topics = notifications - - [oslo_messaging_rabbit] - rabbit_userid = stackrabbit - rabbit_password = openstack1 - rabbit_hosts = 10.0.2.15 - -5. Create a service for ceilometer in keystone:: +4. Create a service for ceilometer in keystone:: $ openstack service create metering --name=ceilometer \ --description="Ceilometer Service" -6. Create an endpoint in keystone for ceilometer:: +5. Create an endpoint in keystone for ceilometer:: $ openstack endpoint create $CEILOMETER_SERVICE \ --region RegionOne \ @@ -422,7 +109,7 @@ Installing the API Server default port value for ceilometer API is 8777. If the port value has been customized, adjust accordingly. -7. Choose and start the API server. +6. Choose and start the API server. Ceilometer includes the ``ceilometer-api`` command. This can be used to run the API server. For smaller or proof-of-concept @@ -442,81 +129,3 @@ Installing the API Server The development version of the API server logs to stderr, so you may want to run this step using a screen session or other tool for maintaining a long-running program in the background. 
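
Once the API server is up, a quick sanity check is to query the capabilities
endpoint with an authenticated request (a hypothetical verification step,
assuming the default port of 8777 and a valid Keystone token)::

    $ curl -H "X-Auth-Token: <token>" \
      "http://localhost:8777/v2/capabilities"

An error or empty response at this point usually indicates misconfigured
service credentials or a customized port.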
- - -Enabling Service Notifications -============================== - -Cinder ------- - -Edit ``cinder.conf`` to include:: - - [oslo_messaging_notifications] - driver = messagingv2 - -Glance ------- - -Edit ``glance.conf`` to include:: - - [oslo_messaging_notifications] - driver = messagingv2 - -Heat ----- - -Configure the driver in ``heat.conf``:: - - [oslo_messaging_notifications] - driver=messagingv2 - -Nova ----- - -Edit ``nova.conf`` to include:: - - [DEFAULT] - instance_usage_audit=True - instance_usage_audit_period=hour - notify_on_state_change=vm_and_task_state - - [oslo_messaging_notifications] - driver=messagingv2 - - -Sahara ------- - -Configure the driver in ``sahara.conf``:: - - [DEFAULT] - enable_notifications=true - - [oslo_messaging_notifications] - driver=messagingv2 - - -Swift ------ - -Edit ``proxy-server.conf`` to include:: - - [filter:ceilometer] - topic = notifications - driver = messaging - url = rabbit://stackrabbit:openstack1@10.0.2.15:5672/ - control_exchange = swift - paste.filter_factory = ceilometermiddleware.swift:filter_factory - set log_level = WARN - -and edit [pipeline:main] to include the ceilometer middleware before the application:: - - [pipeline:main] - pipeline = catch_errors ... ... ceilometer proxy-server - - -Also, you need to configure messaging related options correctly as written above -for other parts of installation guide. Refer to :doc:`/configuration` for -details about any other options you might want to modify before starting the -service. diff --git a/doc/source/install/upgrade.rst b/doc/source/install/upgrade.rst deleted file mode 100644 index 7994b9ee..00000000 --- a/doc/source/install/upgrade.rst +++ /dev/null @@ -1,114 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _upgrade: - -========== - Upgrading -========== - -Ceilometer's services support both full upgrades as well as partial -(rolling) upgrades. The required steps for each process are described below. - - -Full upgrades -============= - -The following describes how to upgrade your entire Ceilometer environment in -one pass. - -.. _full upgrade path: - -1. Upgrade the database (if applicable) - - Run ceilometer-dbsync to upgrade the database if using one of Ceilometer's - databases (see :ref:`choosing_db_backend`). The database does not need to be - taken offline as no data is modified or deleted. Ideally this should be done - during a period of low activity. Best practices should still be followed - (ie. back up your data). If not using a Ceilometer database, you should - consult the documentation of that storage beforehand. - -2. Upgrade the collector service(s) - - Shutdown all collector services. The new collector, that knows how to - interpret the new payload, can then be started. It will disregard any - historical attributes and can continue to process older data from the - agents. You may restart as many new collectors as required. - -3. 
Upgrade the notification agent(s) - - The notification agent can then be taken offline and upgraded with the - same conditions as the collector service. - -4. Upgrade the polling agent(s) - - In this path, you'll want to take down agents on all hosts before starting. - After starting the first agent, you should verify that data is again being - polled. Additional agents can be added to support coordination if enabled. - -.. note:: - - The API service can be taken offline and upgraded at any point in the - process (if applicable). - - -Partial upgrades -================ - -The following describes how to upgrade parts of your Ceilometer environment -gradually. The ultimate goal is to have all services upgraded to the new -version in time. - -1. Upgrade the database (if applicable) - - Upgrading the database here is the same as the `full upgrade path`_. - -2. Upgrade the collector service(s) - - The new collector services can be started alongside the old collectors. - Collectors old and new will disregard any new or historical attributes. - -3. Upgrade the notification agent(s) - - The new notification agent can be started alongside the old agent if no - workload_partioning is enabled OR if it has the same pipeline configuration. - If the pipeline configuration is changed, the old agents must be loaded with - the same pipeline configuration first to ensure the notification agents all - work against same pipeline sets. - -4. Upgrade the polling agent(s) - - The new polling agent can be started alongside the old agent only if no new - pollsters were added. If not, new polling agents must start only in its - own partitioning group and poll only the new pollsters. After all old agents - are upgraded, the polling agents can be changed to poll both new pollsters - AND the old ones. - -5. Upgrade the API service(s) - - API management is handled by WSGI so there is only ever one version of API - service running - -.. note:: - - Upgrade ordering does not matter in partial upgrade path. The only - requirement is that the database be upgraded first. It is advisable to - upgrade following the same ordering as currently described: database, - collector, notification agent, polling agent, api. - - -Developer notes -=============== - -When updating data models in the database or IPC, we need to adhere to a single -mantra: 'always add, never delete or modify.' diff --git a/doc/source/measurements.rst b/doc/source/measurements.rst deleted file mode 100644 index f61cf4c4..00000000 --- a/doc/source/measurements.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. - Copyright 2012 New Dream Network (DreamHost) - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _measurements: - -============== - Measurements -============== - -Existing meters -=============== - -For the list of existing meters see the tables under the -`Measurements page`_ of Ceilometer in the Administrator Guide. - -.. 
_Measurements page: http://docs.openstack.org/admin-guide/telemetry-measurements.html - -Adding new meters -================= - -If you would like to add new meters please check the -:ref:`add_new_meters` page under in the Contributing -section. diff --git a/doc/source/new_meters.rst b/doc/source/new_meters.rst deleted file mode 100644 index aed02a69..00000000 --- a/doc/source/new_meters.rst +++ /dev/null @@ -1,115 +0,0 @@ -.. - Copyright 2012 New Dream Network (DreamHost) - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _add_new_meters: - -================== - New measurements -================== - -Ceilometer is designed to collect measurements from OpenStack services and -from other external components. If you would like to add new meters to the -currently existing ones, you need to follow the guidelines given in this -section. - -.. _meter_types: - -Types -===== - -Three type of meters are defined in Ceilometer: - -.. index:: - double: meter; cumulative - double: meter; gauge - double: meter; delta - -========== ============================================================================== -Type Definition -========== ============================================================================== -Cumulative Increasing over time (instance hours) -Gauge Discrete items (floating IPs, image uploads) and fluctuating values (disk I/O) -Delta Changing over time (bandwidth) -========== ============================================================================== - -When you're about to add a new meter choose one type from the above list, which -is applicable. - - -Units -===== - -1. Whenever a volume is to be measured, SI approved units and their - approved symbols or abbreviations should be used. Information units - should be expressed in bits ('b') or bytes ('B'). -2. For a given meter, the units should NEVER, EVER be changed. -3. When the measurement does not represent a volume, the unit - description should always describe WHAT is measured (ie: apples, - disk, routers, floating IPs, etc.). -4. When creating a new meter, if another meter exists measuring - something similar, the same units and precision should be used. -5. Meters and samples should always document their units in Ceilometer (API - and Documentation) and new sampling code should not be merged without the - appropriate documentation. - -============ ======== ============== ======================= -Dimension Unit Abbreviations Note -============ ======== ============== ======================= -None N/A Dimension-less variable -Volume byte B -Time seconds s -============ ======== ============== ======================= - - -Meters -====== - -Naming convention ------------------ - -If you plan on adding meters, please follow the convention below: - -1. Always use '.' as separator and go from least to most discriminant word. - For example, do not use ephemeral_disk_size but disk.ephemeral.size - -2. When a part of the name is a variable, it should always be at the end and start with a ':'. 
- For example, do not use .image but image:, where type is your variable name. - -3. If you have any hesitation, come and ask in #openstack-ceilometer - -Meter definitions ------------------ -Meters definitions by default, are stored in separate configuration -file, called :file:`ceilometer/meter/data/meter.yaml`. This is essentially -a replacement for prior approach of writing notification handlers to consume -specific topics. - -A detailed description of how to use meter definition is illustrated in -the `admin_guide`_. - -.. _admin_guide: http://docs.openstack.org/admin-guide/telemetry-data-collection.html#meter-definitions - -Non-metric meters and events ----------------------------- - -Ceilometer supports collecting notifications as events. It is highly -recommended to use events for capturing if something happened in the system -or not as opposed to defining meters of which volume will be constantly '1'. -Events enable better representation and querying of metadata rather than -statistical aggregations required for Samples. When the event support is -turned on for Ceilometer, event type meters are collected into the event -database too, which can lead to the duplication of a huge amount of data. - -In order to learn more about events see the :ref:`events` section. diff --git a/doc/source/overview.rst b/doc/source/overview.rst deleted file mode 100644 index 602522ea..00000000 --- a/doc/source/overview.rst +++ /dev/null @@ -1,49 +0,0 @@ -======== -Overview -======== - -Objectives -========== - -The Ceilometer project was started in 2012 with one simple goal in mind: to -provide an infrastructure to collect any information needed regarding -OpenStack projects. It was designed so that rating engines could use this -single source to transform events into billable items which we -label as "metering". - -As the project started to come to life, collecting an -`increasing number of meters`_ across multiple projects, the OpenStack -community started to realize that a secondary goal could be added to -Ceilometer: become a standard way to collect meter, regardless of the -purpose of the collection. For example, Ceilometer can now publish information -for monitoring, debugging and graphing tools in addition or in parallel to the -metering backend. We labelled this effort as "multi-publisher". - -.. _increasing number of meters: http://docs.openstack.org/developer/ceilometer/measurements.html - -Metering -======== - -If you divide a billing process into a 3 step process, as is commonly done in -the telco industry, the steps are: - -1. :term:`Metering` -2. :term:`Rating` -3. :term:`Billing` - -Ceilometer's initial goal was, and still is, strictly limited to step -one. This is a choice made from the beginning not to go into rating or billing, -as the variety of possibilities seemed too large for the project to ever -deliver a solution that would fit everyone's needs, from private to public -clouds. This means that if you are looking at this project to solve your -billing needs, this is the right way to go, but certainly not the end of the -road for you. Once Ceilometer is in place on your OpenStack deployment, you -will still have several things to do before you can produce a bill for your -customers. One of you first task could be: finding the right queries within the -Ceilometer API to extract the information you need for your very own rating -engine. - -.. 
seealso:: - - * http://wiki.openstack.org/EfficientMetering/ArchitectureProposalV1 - * http://wiki.openstack.org/EfficientMetering#Architecture diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst deleted file mode 100644 index ea744f27..00000000 --- a/doc/source/plugins.rst +++ /dev/null @@ -1,177 +0,0 @@ -.. - Copyright 2012 Nicolas Barcet for Canonical - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _plugins-and-containers: - -======================= - Writing Agent Plugins -======================= - -This documentation gives you some clues on how to write a new agent or -plugin for Ceilometer if you wish to instrument a measurement which -has not yet been covered by an existing plugin. - -Agents -====== - -Polling agent might be run either on central cloud management nodes or on the -compute nodes (where direct hypervisor polling is quite logical). - -The agent running on each compute node polls for compute resources -usage. Each meter collected is tagged with the resource ID (such as -an instance) and the owner, including tenant and user IDs. The meters -are then reported to the collector via the message bus. More detailed -information follows. - -The agent running on the cloud central management node polls other types of -resources from a management server (usually using OpenStack services API to -collect this data). - -The polling agent is implemented in ``ceilometer/agent/manager.py``. As -you will see in the manager, the agent loads all plugins defined in -the namespace ``ceilometer.poll.agent``, then periodically calls their -:func:`get_samples` method. - -Plugins -======= - -A polling agent can support multiple plugins to retrieve different -information and send them to the collector. As stated above, an agent -will automatically activate all possible plugins if no additional information -about what to poll was passed. Previously we had separated compute and -central agents with different namespaces with plugins (pollsters) defined -within. Currently we keep separated namespaces - ``ceilometer.poll.compute`` -and ``ceilometer.poll.central`` for quick separation of what to poll depending -on where is polling agent running. This will load, among others, the -:class:`ceilometer.compute.pollsters.cpu.CPUPollster`, which is defined in -the folder ``ceilometer/compute/pollsters``. - -Notifications mechanism uses plugins as well, for instance -:class:`ceilometer.telemetry.notifications.TelemetryApiPost` plugin -which is defined in the ``ceilometer/telemetry/notifications`` folder, Though -in most cases, this is not needed. A meter definition can be directly added -to :file:`ceilometer/meter/data/meter.yaml` to match the event type. For -more information, see the :ref:`add_new_meters` page. 
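
As a rough illustration of that approach, a meter definition is a small YAML
entry that maps an event type onto a sample. The entry below is only a sketch:
the meter name, event type and payload paths are hypothetical, and the field
names follow the convention used in :file:`ceilometer/meter/data/meter.yaml`::

    metric:
      - name: 'foo.size'
        event_type: 'foo.upload.end'
        type: 'gauge'
        unit: 'B'
        volume: $.payload.size
        resource_id: $.payload.foo_id
        project_id: $.payload.project_id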
- -We are using these two existing plugins as examples as the first one provides -an example of how to interact when you need to retrieve information from an -external system (pollster) and the second one is an example of how to forward -an existing event notification on the standard OpenStack queue to ceilometer. - -Pollster --------- - -Compute plugins are defined as subclasses of the -:class:`ceilometer.compute.BaseComputePollster` class as defined in -the ``ceilometer/compute/__init__.py`` file. Pollsters must implement one -method: ``get_samples(self, manager, context)``, which returns a -sequence of ``Sample`` objects as defined in the -``ceilometer/sample.py`` file. - -In the ``CPUPollster`` plugin, the ``get_samples`` method is implemented as a -loop which, for each instances running on the local host, retrieves the -cpu_time from the hypervisor and sends back two ``Sample`` objects. The first -one, named "cpu", is of type "cumulative", meaning that between two polls, its -value is not reset while the instance remains active, or in other words that -the CPU value is always provided as a duration that continuously increases -since the creation of the instance. The second one, named "cpu_util", is of -type "gauge", meaning that its value is the percentage of cpu utilization. - -Note that the ``LOG`` method is only used as a debugging tool and does not -participate in the actual metering activity. - -There is the way to specify either namespace(s) with pollsters or just -list of concrete pollsters to use, or even both of these parameters on the -polling agent start via CLI parameter: - - ceilometer-polling --polling-namespaces central compute - -This command will basically make polling agent to load all plugins from the -central and compute namespaces and poll everything it can. If you need to load -only some of the pollsters, you can use ``pollster-list`` option: - - ceilometer-polling --pollster-list image image.size storage.* - -If both of these options are passed, the polling agent will load only those -pollsters specified in the pollster list, that can be loaded from the selected -namespaces. - -.. note:: - - Agents coordination cannot be used in case of pollster-list option usage. - This allows to avoid both samples duplication and their lost. - -Notifications -------------- - -.. note:: - This should only be needed for cases where a complex arithmetic or - non-primitive data types are used. In most cases, adding a meter - definition to the :file:`ceilometer/meter/data/meter.yaml` should - suffice. - -Notifications are defined as subclass of the -:class:`ceilometer.agent.plugin_base.NotificationBase` meta class. -Notifications must implement: - - ``event_types`` which should be a sequence of strings defining the event types to be given to the plugin and - - ``process_notification(self, message)`` which receives an event message from the list provided to event_types and returns a sequence of Sample objects as defined in the ``ceilometer/sample.py`` file. - -In the ``InstanceNotifications`` plugin, it listens to three events: - -* compute.instance.create.end - -* compute.instance.exists - -* compute.instance.delete.start - -using the ``get_event_type`` method and subsequently the method -``process_notification`` will be invoked each time such events are happening which -generates the appropriate sample objects to be sent to the collector. 
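
For orientation, a minimal notification plugin based only on the interface
described above might look like the sketch below. The plugin name, event type
and payload fields are hypothetical, and a real plugin may need to implement
additional hooks (for example, declaring which notification topics to listen
on)::

    from ceilometer.agent import plugin_base
    from ceilometer import sample


    class FooUsageNotifications(plugin_base.NotificationBase):
        # Event types this plugin wants to receive.
        event_types = ['foo.usage.exists']

        def process_notification(self, message):
            # Turn the notification payload into a single gauge sample.
            payload = message['payload']
            yield sample.Sample(
                name='foo.usage',
                type=sample.TYPE_GAUGE,
                unit='B',
                volume=payload['bytes_used'],
                user_id=payload.get('user_id'),
                project_id=payload.get('project_id'),
                resource_id=payload['resource_id'],
                timestamp=message['timestamp'],
                resource_metadata={},
            )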
- -Adding new plugins ------------------- - -Although we have described a list of the meters Ceilometer should -collect, we cannot predict all of the ways deployers will want to -measure the resources their customers use. This means that Ceilometer -needs to be easy to extend and configure so it can be tuned for each -installation. A plugin system based on `setuptools entry points`_ -makes it easy to add new monitors in the agents. In particular, -Ceilometer now uses Stevedore_, and you should put your entry point -definitions in the ``entry_points.txt`` file of your Ceilometer egg. - -.. _setuptools entry points: http://pythonhosted.org/setuptools/setuptools.html#dynamic-discovery-of-services-and-plugins - -.. _Stevedore: http://stevedore.readthedocs.org - -Installing a plugin automatically activates it the next time the -ceilometer daemon starts. Rather than running and reporting errors or -simply consuming cycles for no-ops, plugins may disable themselves at -runtime based on configuration settings defined by other components (for example, the -plugin for polling libvirt does not run if it sees that the system is -configured using some other virtualization tool). Additionally, if no -valid resources can be discovered the plugin will be disabled. - - -Tests -===== -Any new plugin or agent contribution will only be accepted into the project if -provided together with unit tests. Those are defined for the compute agent -plugins in the directory ``tests/compute`` and for the agent itself in ``test/agent``. -Unit tests are run in a continuous integration process for each commit made to -the project, thus ensuring as best as possible that a given patch has no side -effect to the rest of the project. diff --git a/doc/source/releasenotes/folsom.rst b/doc/source/releasenotes/folsom.rst deleted file mode 100644 index 5b08b872..00000000 --- a/doc/source/releasenotes/folsom.rst +++ /dev/null @@ -1,61 +0,0 @@ -.. - Copyright 2012 Nicolas Barcet for Canonical - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _folsom: - -==================== -Folsom -==================== - -This is the first release (Version 0.1) of Ceilometer. Please take all appropriate -caution in using it, as it is a technology preview at this time. - -Version of OpenStack - It is currently tested to work with OpenStack 2012.2 Folsom. Due to its use of - openstack-common, and the modification that were made in term of notification - to many other components (glance, cinder, quantum), it will not easily work - with any prior version of OpenStack. - -Components - Currently covered components are: Nova, Nova-network, Glance, Cinder and - Quantum. Notably, there is no support yet for Swift and it was decided not - to support nova-volume in favor of Cinder. A detailed list of meters covered - per component can be found at in :ref:`measurements`. - -Nova with libvirt only - Most of the Nova meters will only work with libvirt fronted hypervisors at the - moment, and our test coverage was mostly done on KVM. 
Contributors are welcome - to implement other virtualization backends' meters. - -Quantum delete events - Quantum delete notifications do not include the same metadata as the other - messages, so we ignore them for now. This isn't ideal, since it may mean we - miss charging for some amount of time, but it is better than throwing away the - existing metadata for a resource when it is deleted. - -Database backend - The only tested and complete database backend is currently MongoDB, the - SQLAlchemy one is still work in progress. - -Installation - The current best source of information on how to deploy this project is found - as the devstack implementation but feel free to come to #openstack-metering on - freenode for more info. - -Volume of data - Please note that metering can generate lots of data very quickly. Have a look - at the following spreadsheet to evaluate what you will end up with. - - http://wiki.openstack.org/EfficientMetering#Volume_of_data diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst deleted file mode 100644 index 22a47d88..00000000 --- a/doc/source/releasenotes/index.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. - Copyright 2012 New Dream Network, LLC (DreamHost) - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -============================ - Release Notes -============================ - -.. toctree:: - :hidden: - - folsom - -* :ref:`folsom` -* `Havana`_ -* `Icehouse`_ -* `Juno`_ -* `Kilo`_ -* `Liberty`_ - -Since Mitaka development cycle, we start to host release notes on -`Ceilometer Release Notes`_ - -.. _Havana: https://wiki.openstack.org/wiki/ReleaseNotes/Havana#OpenStack_Metering_.28Ceilometer.29 -.. _IceHouse: https://wiki.openstack.org/wiki/ReleaseNotes/Icehouse#OpenStack_Telemetry_.28Ceilometer.29 -.. _Juno: https://wiki.openstack.org/wiki/ReleaseNotes/Juno#OpenStack_Telemetry_.28Ceilometer.29 -.. _Kilo: https://wiki.openstack.org/wiki/ReleaseNotes/Kilo#OpenStack_Telemetry_.28Ceilometer.29 -.. _Liberty: https://wiki.openstack.org/wiki/ReleaseNotes/Liberty#OpenStack_Telemetry_.28Ceilometer.29 -.. _Ceilometer Release Notes: http://docs.openstack.org/releasenotes/ceilometer/ diff --git a/doc/source/webapi/v2.rst b/doc/source/webapi/v2.rst index aa78256d..ebfbe686 100644 --- a/doc/source/webapi/v2.rst +++ b/doc/source/webapi/v2.rst @@ -4,81 +4,6 @@ V2 Web API ============ -Resources -========= - -.. rest-controller:: ceilometer.api.controllers.v2.resources:ResourcesController - :webprefix: /v2/resources - -.. autotype:: ceilometer.api.controllers.v2.resources.Resource - :members: - -Meters -====== - -.. rest-controller:: ceilometer.api.controllers.v2.meters:MetersController - :webprefix: /v2/meters - -.. rest-controller:: ceilometer.api.controllers.v2.meters:MeterController - :webprefix: /v2/meters - -.. autotype:: ceilometer.api.controllers.v2.meters.Meter - :members: - -.. autotype:: ceilometer.api.controllers.v2.meters.OldSample - :members: - -Samples and Statistics -====================== - -.. 
rest-controller:: ceilometer.api.controllers.v2.samples:SamplesController - :webprefix: /v2/samples - -.. autotype:: ceilometer.api.controllers.v2.samples.Sample - :members: - -.. autotype:: ceilometer.api.controllers.v2.meters.Statistics - :members: - -When a simple statistics request is invoked (using GET /v2/meters//statistics), -it will return the standard set of *Statistics*: *avg*, *sum*, *min*, *max*, and *count*. - -.. note:: - - If using Ceilometer data for statistics, it's recommended to use a backend - such as Gnocchi_ rather than Ceilometer's interface. Gnocchi is designed - specifically for this use case by providing a light-weight, aggregated model. - As they manage data differently, the API models returned by Ceilometer and Gnocchi - are different. The Gnocchi API can be found here_. - -.. _Gnocchi: http://docs.openstack.org/developer/gnocchi/ -.. _here: http://docs.openstack.org/developer/gnocchi/rest.html - -Selectable Aggregates -+++++++++++++++++++++ - -The Statistics API has been extended to include the aggregate functions -*stddev* and *cardinality*. You can explicitly select these functions or any -from the standard set by specifying an aggregate function in the statistics -query:: - - GET /v2/meters//statistics?aggregate.func=&aggregate.param= - -(where aggregate.param is optional). - -Duplicate aggregate function and parameter pairs are silently discarded from the statistics query. Partial duplicates, in the sense of the same function but differing parameters, for example:: - - GET /v2/meters//statistics?aggregate.func=cardinality&aggregate.param=resource_id&aggregate.func=cardinality&aggregate.param=project_id - -are, on the other hand, both allowed by the API and supported by the storage drivers. See the :ref:`functional-examples` section for more detail. - -.. note:: - - Currently only *cardinality* needs aggregate.param to be specified. - -.. autotype:: ceilometer.api.controllers.v2.meters.Aggregate - :members: - Capabilities ============ @@ -163,518 +88,3 @@ applied on trait. See :ref:`api-queries` for how to query the API. .. autotype:: ceilometer.api.controllers.v2.events.EventQuery :members: - -Complex Query -+++++++++++++ - -The filter expressions of the Complex Query feature operate on the fields -of *Sample*. The following comparison operators are -supported: *=*, *!=*, *<*, *<=*, *>*, *>=* and *in*; and the following logical -operators can be used: *and* *or* and *not*. The field names are validated -against the database models. See :ref:`api-queries` for how to query the API. - -.. note:: - - The *not* operator has different meaning in MongoDB and in SQL DB engine. - If the *not* operator is applied on a non existent metadata field then - the result depends on the DB engine. For example, if - {"not": {"metadata.nonexistent_field" : "some value"}} filter is used in a query - the MongoDB will return every Sample object as *not* operator evaluated true - for every Sample where the given field does not exists. See more in the MongoDB doc. - On the other hand, SQL based DB engine will return empty result as the join operation - on the metadata table will return zero rows as the on clause of the join which - tries to match on the metadata field name is never fulfilled. - -Complex Query supports defining the list of orderby expressions in the form -of [{"field_name": "asc"}, {"field_name2": "desc"}, ...]. - -The number of the returned items can be bounded using the *limit* option. - -The *filter*, *orderby* and *limit* are all optional fields in a query. - -.. 
rest-controller:: ceilometer.api.controllers.v2.query:QuerySamplesController - :webprefix: /v2/query/samples - -.. autotype:: ceilometer.api.controllers.v2.query.ComplexQuery - :members: - -Links -===== - -.. autotype:: ceilometer.api.controllers.v2.base.Link - :members: - -API and CLI query examples -========================== - -CLI Queries -+++++++++++ - -Ceilometer CLI Commands:: - - $ ceilometer --debug --os-username --os-password --os-auth-url http://localhost:5000/v2.0/ --os-tenant-name admin meter-list - -.. note:: - - The *username*, *password*, and *tenant-name* options are required to be - present in these arguments or specified via environment variables. Note that - the in-line arguments will override the environment variables. - -.. _api-queries: - -API Queries -+++++++++++ - -Ceilometer API calls: - -.. note:: - - To successfully query Ceilometer you must first get a project-specific - token from the Keystone service and add it to any API calls that you - execute against that project. See the - `OpenStack credentials documentation `_ - for additional details. - -A simple query to return a list of available meters:: - - curl -H 'X-Auth-Token: ' \ - "http://localhost:8777/v2/meters" - -A query to return the list of resources:: - - curl -H 'X-Auth-Token: ' \ - "http://localhost:8777/v2/resources" - -A query to return the list of samples, limited to a specific meter type:: - - curl -H 'X-Auth-Token: ' \ - "http://localhost:8777/v2/meters/disk.root.size" - -A query using filters (see: `query filter section `_):: - - curl -H 'X-Auth-Token: ' \ - "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.delete.start" - -Additional examples:: - - curl -H 'X-Auth-Token: ' \ - "http://localhost:8777/v2/meters/disk.root.size?q.field=resource_id&q.op=eq&q.value=" - -or:: - - curl -H 'X-Auth-Token: ' \ - "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.exists" - -You can specify multiple filters by using an array of queries (order matters):: - - curl -H 'X-Auth-Token: ' \ - "http://localhost:8777/v2/meters/instance"\ - "?q.field=metadata.event_type&q.value=compute.instance.exists"\ - "&q.field=timestamp&q.op=gt&q.value=2013-07-03T13:34:17" - -A query to find the maximum value and standard deviation (*max*, *stddev*) of -the CPU utilization for a given instance (identified by *resource_id*):: - - curl -H 'X-Auth-Token: ' \ - "http://localhost:8777/v2/meters/cpu_util/statistics?aggregate.func=max&aggregate.func=stddev"\ - "&q.field=resource_id&q.op=eq&q.value=64da755c-9120-4236-bee1-54acafe24980" - -.. note:: - - If any of the requested aggregates are not supported by the storage driver, - a HTTP 400 error code will be returned along with an appropriate error - message. - -JSON based example:: - - curl -X GET -H "X-Auth-Token: " -H "Content-Type: application/json" - -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"}]}' - http://localhost:8777/v2/meters/instance - -JSON based example with multiple filters:: - - curl -X GET -H "X-Auth-Token: " -H "Content-Type: application/json" - -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"}, - {"field": "resource_id", "op": "eq", "value": "4da2b992-0dc3-4a7c-a19a-d54bf918de41"}]}' - http://localhost:8777/v2/meters/instance - -.. _functional-examples: - -Functional examples -+++++++++++++++++++ - -The examples below are meant to help you understand how to query the -Ceilometer API to build custom meters report. 
The query parameters should -be encoded using one of the above methods, e.g. as the URL parameters or -as JSON encoded data passed to the GET request. - -Get the list of samples about instances running for June 2013:: - - GET /v2/meters/instance - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}] - - -Get the list of samples about instances running for June 2013 for a particular -project:: - - GET /v2/meters/instance - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "project_id", - "op": "eq", - "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] - -Now you may want to have statistics on the meters you are targeting. -Consider the following example where you are getting the list of samples -about CPU utilization of a given instance (identified by its *resource_id*) -running for June 2013:: - - GET /v2/meters/cpu_util - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}] - -You can have statistics on the list of samples requested (*avg*, *sum*, *max*, -*min*, *count*) computed on the full duration:: - - GET /v2/meters/cpu_util/statistics - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}] - -You may want to aggregate samples over a given period (10 minutes for -example) in order to get an array of the statistics computed on smaller -durations:: - - GET /v2/meters/cpu_util/statistics - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}] - period: 600 - -The *period* parameter aggregates by time range. You can also aggregate by -field using the *groupby* parameter. Currently, the *user_id*, *resource_id*, -*project_id*, and *source* fields are supported. Below is an example that uses -a query filter and group by aggregation on *project_id* and *resource_id*:: - - GET /v2/meters/instance/statistics - q: [{"field": "user_id", - "op": "eq", - "value": "user-2"}, - {"field": "source", - "op": "eq", - "value": "source-1"}] - groupby: ["project_id", "resource_id"] - -The statistics will be returned in a list, and each entry of the list will be -labeled with the group name. For the previous example, the first entry might -have *project_id* be "project-1" and *resource_id* be "resource-1", the second -entry have *project_id* be "project-1" and *resource_id* be "resource-2", and -so on. - -You can request both period and group by aggregation in the same query:: - - GET /v2/meters/instance/statistics - q: [{"field": "source", - "op": "eq", - "value": "source-1"}] - groupby: ["project_id"] - period: 7200 - -Note that period aggregation is applied first, followed by group by -aggregation. Order matters because the period aggregation determines the time -ranges for the statistics. 
- -Below is a real-life query:: - - GET /v2/meters/image/statistics - groupby: ["project_id", "resource_id"] - -With the return values:: - - [{"count": 4, "duration_start": "2013-09-18T19:08:33", "min": 1.0, - "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0, - "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1137.0, - "period_start": "2013-09-18T19:08:33", "avg": 1.0, - "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78", - "resource_id": "551f495f-7f49-4624-a34c-c422f2c5f90b"}, - "unit": "image"}, - {"count": 4, "duration_start": "2013-09-18T19:08:36", "min": 1.0, - "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0, - "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1134.0, - "period_start": "2013-09-18T19:08:36", "avg": 1.0, - "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78", - "resource_id": "7c1157ed-cf30-48af-a868-6c7c3ad7b531"}, - "unit": "image"}, - {"count": 4, "duration_start": "2013-09-18T19:08:34", "min": 1.0, - "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0, - "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1136.0, - "period_start": "2013-09-18T19:08:34", "avg": 1.0, - "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78", - "resource_id": "eaed9cf4-fc99-4115-93ae-4a5c37a1a7d7"}, - "unit": "image"}] - -You can request specific aggregate functions as well. For example, if you only -want the average CPU utilization, the GET request would look like this:: - - GET /v2/meters/cpu_util/statistics?aggregate.func=avg - -Use the same syntax to access the aggregate functions not in the standard set, -e.g. *stddev* and *cardinality*. A request for the standard deviation of CPU utilization would take the form:: - - GET /v2/meters/cpu_util/statistics?aggregate.func=stddev - -And would give a response such as the example:: - - [{"aggregate": {"stddev":0.6858829535841072}, - "duration_start": "2014-01-30T11:13:23", - "duration_end": "2014-01-31T16:07:13", - "duration": 104030.0, - "period": 0, - "period_start": "2014-01-30T11:13:23", - "period_end": "2014-01-31T16:07:13", - "groupby": null, - "unit" : "%"}] - -The request syntax is similar for *cardinality* but with the aggregate.param -option provided. So, for example, if you want to know the number of distinct -tenants with images, you would do:: - - GET /v2/meters/image/statistics?aggregate.func=cardinality - &aggregate.param=project_id - -For a more involved example, consider a requirement for determining, for some -tenant, the number of distinct instances (*cardinality*) as well as the total -number of instance samples (*count*). You might also want to see this -information with 15 minute long intervals. 
Then, using the *period* and -*groupby* options, a query would look like the following:: - - GET /v2/meters/instance/statistics?aggregate.func=cardinality - &aggregate.param=resource_id - &aggregate.func=count - &groupby=project_id&period=900 - -This would give an example response of the form:: - - [{"count": 19, - "aggregate": {"count": 19.0, "cardinality/resource_id": 3.0}, - "duration": 328.478029, - "duration_start": "2014-01-31T10:00:41.823919", - "duration_end": "2014-01-31T10:06:10.301948", - "period": 900, - "period_start": "2014-01-31T10:00:00", - "period_end": "2014-01-31T10:15:00", - "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"}, - "unit": "instance"}, - {"count": 22, - "aggregate": {"count": 22.0, "cardinality/resource_id": 4.0}, - "duration": 808.00384, - "duration_start": "2014-01-31T10:15:15", - "duration_end": "2014-01-31T10:28:43.003840", - "period": 900, - "period_start": "2014-01-31T10:15:00", - "period_end": "2014-01-31T10:30:00", - "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"}, - "unit": "instance"}, - {"count": 2, - "aggregate": {"count": 2.0, "cardinality/resource_id": 2.0}, - "duration": 0.0, - "duration_start": "2014-01-31T10:35:15", - "duration_end": "2014-01-31T10:35:15", - "period": 900, - "period_start": "2014-01-31T10:30:00", - "period_end": "2014-01-31T10:45:00", - "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"}, - "unit": "instance"}] - -If you want to retrieve all the instances (not the list of samples, but the -resource itself) that have been run during this month for a given project, -you should ask the resource endpoint for the list of resources (all types: -including storage, images, networking, ...):: - - GET /v2/resources - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "project_id", - "op": "eq", - "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] - -Then look for resources that have an *instance* meter linked to them. That -will indicate resources that have been measured as being instance. You can -then request their samples to have more detailed information, like their -state or their flavor:: - - GET /v2/meter/instance - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}, - {"field": "project_id", - "op": "eq", - "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] - -This will return a list of samples that have been recorded on this -particular resource. You can inspect them to retrieve information, such as -the instance state (check the *metadata.vm_state* field) or the instance -flavor (check the *metadata.flavor* field). -You can request nested metadata fields by using a dot to delimit the fields -(e.g. *metadata.weighted_host.host* for *instance.scheduled* meter) - -To retrieve only the 3 last samples of a meters, you can pass the *limit* -parameter to the query:: - - GET /v2/meter/instance - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}, - {"field": "project_id", - "op": "eq", - "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] - limit: 3 - -This query would only return the last 3 samples. 
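
The same kind of query can also be issued from a short script. The sketch
below is not part of the original examples; it uses the Python ``requests``
library to send the filter list and limit as a JSON body, mirroring the
``curl -d`` examples above, with the endpoint and token left as placeholders::

    import json

    import requests

    query = {
        "q": [
            {"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"},
            {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"},
            {"field": "resource_id", "op": "eq",
             "value": "64da755c-9120-4236-bee1-54acafe24980"},
        ],
        "limit": 3,
    }
    # The filters are passed as a JSON-encoded body on the GET request,
    # as in the curl examples earlier in this document.
    resp = requests.get(
        "http://localhost:8777/v2/meters/instance",
        headers={"X-Auth-Token": "<token>",
                 "Content-Type": "application/json"},
        data=json.dumps(query),
    )
    print(resp.json())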
- -Functional example for Complex Query -++++++++++++++++++++++++++++++++++++ - -This example demonstrates how complex query filter expressions can be generated and sent -to the /v2/query/samples endpoint of Ceilometer API using POST request. - -To check for *cpu_util* samples reported between 18:00-18:15 or between 18:30 - 18:45 -on a particular date (2013-12-01), where the utilization is between 23 and 26 percent, -but not exactly 25.12 percent, the following filter expression can be created:: - - {"and": - [{"and": - [{"=": {"counter_name": "cpu_util"}}, - {">": {"counter_volume": 0.23}}, - {"<": {"counter_volume": 0.26}}, - {"not": {"=": {"counter_volume": 0.2512}}}]}, - {"or": - [{"and": - [{">": {"timestamp": "2013-12-01T18:00:00"}}, - {"<": {"timestamp": "2013-12-01T18:15:00"}}]}, - {"and": - [{">": {"timestamp": "2013-12-01T18:30:00"}}, - {"<": {"timestamp": "2013-12-01T18:45:00"}}]}]}]} - -Different sorting criteria can be defined for the query filter, for example the results -can be ordered in an ascending order by the *counter_volume* and descending order based on -the *timestamp*. The following order by expression has to be created for specifying this -criteria:: - - [{"counter_volume": "ASC"}, {"timestamp": "DESC"}] - -As the current implementation accepts only string values as query filter and order by -definitions, the above defined expressions have to be converted to string values. -By adding a limit criteria to the request, which maximizes the number of returned samples -to four, the query looks like the following:: - - { - "filter" : "{\"and\":[{\"and\": [{\"=\": {\"counter_name\": \"cpu_util\"}}, {\">\": {\"counter_volume\": 0.23}}, {\"<\": {\"counter_volume\": 0.26}}, {\"not\": {\"=\": {\"counter_volume\": 0.2512}}}]}, {\"or\": [{\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:00:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:15:00\"}}]}, {\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:30:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:45:00\"}}]}]}]}", - "orderby" : "[{\"counter_volume\": \"ASC\"}, {\"timestamp\": \"DESC\"}]", - "limit" : 4 - } - -A query request looks like the following with curl:: - - curl -X POST -H 'X-Auth-Token: ' -H 'Content-Type: application/json' \ - -d '' \ - http://localhost:8777/v2/query/samples - -.. _user-defined-data: - -User-defined data -+++++++++++++++++ - -It is possible to add your own samples (created from data retrieved in any -way like monitoring agents on your instances) in Ceilometer to store -them and query on them. You can even get *Statistics* on your own inserted data. -By adding a *Sample* to a *Resource*, you create automatically the corresponding -*Meter* if it does not exist already. To achieve this, you have to POST a list -of one to many samples in JSON format:: - - curl -X POST -H 'X-Auth-Token: ' -H 'Content-Type: application/json' \ - -d '' \ - http://localhost:8777/v2/meters/ - -Fields *source*, *timestamp*, *project_id* and *user_id* are automatically -added if not present in the samples. Field *message_id* is not taken into -account if present and an internal value will be set. - -By default, samples posted via API will be placed on the notification bus and -processed by the notification agent. - -To avoid re-queuing the data, samples posted via API can be stored directly to -the storage backend verbatim by specifying a boolean flag 'direct' in the -request URL, like this:: - - POST /v2/meters/ram_util?direct=True - -Samples posted this way will bypass pipeline processing. 
- -Here is an example showing how to add a sample for a *ram_util* meter (whether it already -exists or not):: - - POST /v2/meters/ram_util - body: [ - { - "counter_name": "ram_util", - "user_id": "4790fbafad2e44dab37b1d7bfc36299b", - "resource_id": "87acaca4-ae45-43ae-ac91-846d8d96a89b", - "resource_metadata": { - "display_name": "my_instance", - "my_custom_metadata_1": "value1", - "my_custom_metadata_2": "value2" - }, - "counter_unit": "%", - "counter_volume": 8.57762938230384, - "project_id": "97f9a6aaa9d842fcab73797d3abb2f53", - "counter_type": "gauge" - } - ] - -You get back the same list containing your samples, completed with the missing -fields: *source* and *timestamp* in this case. diff --git a/etc/apache2/ceilometer b/etc/apache2/ceilometer deleted file mode 100644 index 261acc3e..00000000 --- a/etc/apache2/ceilometer +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2013 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is an example Apache2 configuration file for using the -# ceilometer API through mod_wsgi. - -# Note: If you are using a Debian-based system then the paths -# "/var/log/httpd" and "/var/run/httpd" will use "apache2" instead -# of "httpd". -# -# The number of processes and threads is an example only and should -# be adjusted according to local requirements.
- -Listen 8777 - - - WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP} - WSGIProcessGroup ceilometer-api - WSGIScriptAlias / /var/www/ceilometer/app - WSGIApplicationGroup %{GLOBAL} - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/httpd/ceilometer_error.log - CustomLog /var/log/httpd/ceilometer_access.log combined - - -WSGISocketPrefix /var/run/httpd diff --git a/etc/ceilometer/README-ceilometer.conf.txt b/etc/ceilometer/README-ceilometer.conf.txt deleted file mode 100644 index db6d857e..00000000 --- a/etc/ceilometer/README-ceilometer.conf.txt +++ /dev/null @@ -1,4 +0,0 @@ -To generate the sample ceilometer.conf file, run the following -command from the top-level ceilometer directory: - -tox -egenconfig \ No newline at end of file diff --git a/etc/ceilometer/ceilometer-config-generator.conf b/etc/ceilometer/ceilometer-config-generator.conf index 4fc80454..eaf240c1 100644 --- a/etc/ceilometer/ceilometer-config-generator.conf +++ b/etc/ceilometer/ceilometer-config-generator.conf @@ -2,11 +2,8 @@ output_file = etc/ceilometer/ceilometer.conf wrap_width = 79 namespace = ceilometer -namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log -namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.policy -namespace = oslo.service.service namespace = keystonemiddleware.auth_token diff --git a/etc/ceilometer/event_definitions.yaml b/etc/ceilometer/event_definitions.yaml deleted file mode 100644 index 784b4060..00000000 --- a/etc/ceilometer/event_definitions.yaml +++ /dev/null @@ -1,545 +0,0 @@ ---- -- event_type: compute.instance.* - traits: &instance_traits - tenant_id: - fields: payload.tenant_id - user_id: - fields: payload.user_id - instance_id: - fields: payload.instance_id - host: - fields: publisher_id.`split(., 1, 1)` - service: - fields: publisher_id.`split(., 0, -1)` - memory_mb: - type: int - fields: payload.memory_mb - disk_gb: - type: int - fields: payload.disk_gb - root_gb: - type: int - fields: payload.root_gb - ephemeral_gb: - type: int - fields: payload.ephemeral_gb - vcpus: - type: int - fields: payload.vcpus - instance_type_id: - type: int - fields: payload.instance_type_id - instance_type: - fields: payload.instance_type - state: - fields: payload.state - os_architecture: - fields: payload.image_meta.'org.openstack__1__architecture' - os_version: - fields: payload.image_meta.'org.openstack__1__os_version' - os_distro: - fields: payload.image_meta.'org.openstack__1__os_distro' - launched_at: - type: datetime - fields: payload.launched_at - deleted_at: - type: datetime - fields: payload.deleted_at -- event_type: compute.instance.exists - traits: - <<: *instance_traits - audit_period_beginning: - type: datetime - fields: payload.audit_period_beginning - audit_period_ending: - type: datetime - fields: payload.audit_period_ending -- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] - traits: &cinder_traits - user_id: - fields: payload.user_id - project_id: - fields: payload.tenant_id - availability_zone: - fields: payload.availability_zone - display_name: - fields: payload.display_name - replication_status: - fields: payload.replication_status - status: - fields: payload.status - created_at: - fields: payload.created_at -- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 
'volume.detach.*', 'volume.update.*'] - traits: - <<: *cinder_traits - resource_id: - fields: payload.volume_id - host: - fields: payload.host - size: - fields: payload.size - type: - fields: payload.volume_type - replication_status: - fields: payload.replication_status -- event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] - traits: - <<: *cinder_traits - resource_id: - fields: payload.snapshot_id - volume_id: - fields: payload.volume_id -- event_type: ['image_volume_cache.*'] - traits: - image_id: - fields: payload.image_id - host: - fields: payload.host -- event_type: ['image.update', 'image.upload', 'image.delete'] - traits: &glance_crud - project_id: - fields: payload.owner - resource_id: - fields: payload.id - name: - fields: payload.name - status: - fields: payload.status - created_at: - fields: payload.created_at - user_id: - fields: payload.owner - deleted_at: - fields: payload.deleted_at - size: - fields: payload.size -- event_type: image.send - traits: &glance_send - receiver_project: - fields: payload.receiver_tenant_id - receiver_user: - fields: payload.receiver_user_id - user_id: - fields: payload.owner_id - image_id: - fields: payload.image_id - destination_ip: - fields: payload.destination_ip - bytes_sent: - fields: payload.bytes_sent -- event_type: orchestration.stack.* - traits: &orchestration_crud - project_id: - fields: payload.tenant_id - user_id: - fields: ['_context_trustor_user_id', '_context_user_id'] - resource_id: - fields: payload.stack_identity -- event_type: sahara.cluster.* - traits: &sahara_crud - project_id: - fields: payload.project_id - user_id: - fields: _context_user_id - resource_id: - fields: payload.cluster_id -- event_type: sahara.cluster.health - traits: &sahara_health - <<: *sahara_crud - verification_id: - fields: payload.verification_id - health_check_status: - fields: payload.health_check_status - health_check_name: - fields: payload.health_check_name - health_check_description: - fields: payload.health_check_description - created_at: - type: datetime - fields: payload.created_at - updated_at: - type: datetime - fields: payload.updated_at -- event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*', - 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*'] - traits: &identity_crud - resource_id: - fields: payload.resource_info - initiator_id: - fields: payload.initiator.id - project_id: - fields: payload.initiator.project_id - domain_id: - fields: payload.initiator.domain_id -- event_type: identity.role_assignment.* - traits: &identity_role_assignment - role: - fields: payload.role - group: - fields: payload.group - domain: - fields: payload.domain - user: - fields: payload.user - project: - fields: payload.project -- event_type: identity.authenticate - traits: &identity_authenticate - typeURI: - fields: payload.typeURI - id: - fields: payload.id - action: - fields: payload.action - eventType: - fields: payload.eventType - eventTime: - fields: payload.eventTime - outcome: - fields: payload.outcome - initiator_typeURI: - fields: payload.initiator.typeURI - initiator_id: - fields: payload.initiator.id - initiator_name: - fields: payload.initiator.name - initiator_host_agent: - fields: payload.initiator.host.agent - initiator_host_addr: - fields: payload.initiator.host.address - target_typeURI: - fields: payload.target.typeURI - target_id: - fields: payload.target.id - observer_typeURI: - fields: 
payload.observer.typeURI - observer_id: - fields: payload.observer.id -- event_type: objectstore.http.request - traits: &objectstore_request - typeURI: - fields: payload.typeURI - id: - fields: payload.id - action: - fields: payload.action - eventType: - fields: payload.eventType - eventTime: - fields: payload.eventTime - outcome: - fields: payload.outcome - initiator_typeURI: - fields: payload.initiator.typeURI - initiator_id: - fields: payload.initiator.id - initiator_project_id: - fields: payload.initiator.project_id - target_typeURI: - fields: payload.target.typeURI - target_id: - fields: payload.target.id - target_action: - fields: payload.target.action - target_metadata_path: - fields: payload.target.metadata.path - target_metadata_version: - fields: payload.target.metadata.version - target_metadata_container: - fields: payload.target.metadata.container - target_metadata_object: - fields: payload.target.metadata.object - observer_id: - fields: payload.observer.id -- event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*'] - traits: &network_traits - user_id: - fields: _context_user_id - project_id: - fields: _context_tenant_id -- event_type: network.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.network.id', 'payload.id'] -- event_type: subnet.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.subnet.id', 'payload.id'] -- event_type: port.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.port.id', 'payload.id'] -- event_type: router.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.router.id', 'payload.id'] -- event_type: floatingip.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.floatingip.id', 'payload.id'] -- event_type: pool.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.pool.id', 'payload.id'] -- event_type: vip.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.vip.id', 'payload.id'] -- event_type: member.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.member.id', 'payload.id'] -- event_type: health_monitor.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.health_monitor.id', 'payload.id'] -- event_type: healthmonitor.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.healthmonitor.id', 'payload.id'] -- event_type: listener.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.listener.id', 'payload.id'] -- event_type: loadbalancer.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.loadbalancer.id', 'payload.id'] -- event_type: firewall.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.firewall.id', 'payload.id'] -- event_type: firewall_policy.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.firewall_policy.id', 'payload.id'] -- event_type: firewall_rule.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.firewall_rule.id', 'payload.id'] -- event_type: vpnservice.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.vpnservice.id', 'payload.id'] -- event_type: ipsecpolicy.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.ipsecpolicy.id', 'payload.id'] -- event_type: ikepolicy.* - traits: - <<: *network_traits - 
resource_id: - fields: ['payload.ikepolicy.id', 'payload.id'] -- event_type: ipsec_site_connection.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.ipsec_site_connection.id', 'payload.id'] -- event_type: '*http.*' - traits: &http_audit - project_id: - fields: payload.initiator.project_id - user_id: - fields: payload.initiator.id - typeURI: - fields: payload.typeURI - eventType: - fields: payload.eventType - action: - fields: payload.action - outcome: - fields: payload.outcome - id: - fields: payload.id - eventTime: - fields: payload.eventTime - requestPath: - fields: payload.requestPath - observer_id: - fields: payload.observer.id - target_id: - fields: payload.target.id - target_typeURI: - fields: payload.target.typeURI - target_name: - fields: payload.target.name - initiator_typeURI: - fields: payload.initiator.typeURI - initiator_id: - fields: payload.initiator.id - initiator_name: - fields: payload.initiator.name - initiator_host_address: - fields: payload.initiator.host.address -- event_type: '*http.response' - traits: - <<: *http_audit - reason_code: - fields: payload.reason.reasonCode -- event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete'] - traits: &dns_domain_traits - status: - fields: payload.status - retry: - fields: payload.retry - description: - fields: payload.description - expire: - fields: payload.expire - email: - fields: payload.email - ttl: - fields: payload.ttl - action: - fields: payload.action - name: - fields: payload.name - resource_id: - fields: payload.id - created_at: - fields: payload.created_at - updated_at: - fields: payload.updated_at - version: - fields: payload.version - parent_domain_id: - fields: parent_domain_id - serial: - fields: payload.serial -- event_type: dns.domain.exists - traits: - <<: *dns_domain_traits - audit_period_beginning: - type: datetime - fields: payload.audit_period_beginning - audit_period_ending: - type: datetime - fields: payload.audit_period_ending -- event_type: trove.* - traits: &trove_base_traits - state: - fields: payload.state_description - instance_type: - fields: payload.instance_type - user_id: - fields: payload.user_id - resource_id: - fields: payload.instance_id - instance_type_id: - fields: payload.instance_type_id - launched_at: - type: datetime - fields: payload.launched_at - instance_name: - fields: payload.instance_name - state: - fields: payload.state - nova_instance_id: - fields: payload.nova_instance_id - service_id: - fields: payload.service_id - created_at: - type: datetime - fields: payload.created_at - region: - fields: payload.region -- event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete'] - traits: &trove_common_traits - name: - fields: payload.name - availability_zone: - fields: payload.availability_zone - instance_size: - type: int - fields: payload.instance_size - volume_size: - type: int - fields: payload.volume_size - nova_volume_id: - fields: payload.nova_volume_id -- event_type: trove.instance.create - traits: - <<: [*trove_base_traits, *trove_common_traits] -- event_type: trove.instance.modify_volume - traits: - <<: [*trove_base_traits, *trove_common_traits] - old_volume_size: - type: int - fields: payload.old_volume_size - modify_at: - type: datetime - fields: payload.modify_at -- event_type: trove.instance.modify_flavor - traits: - <<: [*trove_base_traits, *trove_common_traits] - old_instance_size: - type: int - fields: payload.old_instance_size - modify_at: - type: datetime - 
fields: payload.modify_at -- event_type: trove.instance.delete - traits: - <<: [*trove_base_traits, *trove_common_traits] - deleted_at: - type: datetime - fields: payload.deleted_at -- event_type: trove.instance.exists - traits: - <<: *trove_base_traits - display_name: - fields: payload.display_name - audit_period_beginning: - type: datetime - fields: payload.audit_period_beginning - audit_period_ending: - type: datetime - fields: payload.audit_period_ending -- event_type: profiler.* - traits: - project: - fields: payload.project - service: - fields: payload.service - name: - fields: payload.name - base_id: - fields: payload.base_id - trace_id: - fields: payload.trace_id - parent_id: - fields: payload.parent_id - timestamp: - fields: payload.timestamp - host: - fields: payload.info.host - path: - fields: payload.info.request.path - query: - fields: payload.info.request.query - method: - fields: payload.info.request.method - scheme: - fields: payload.info.request.scheme - db.statement: - fields: payload.info.db.statement - db.params: - fields: payload.info.db.params diff --git a/etc/ceilometer/event_pipeline.yaml b/etc/ceilometer/event_pipeline.yaml deleted file mode 100644 index a91c46a1..00000000 --- a/etc/ceilometer/event_pipeline.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -sources: - - name: event_source - events: - - "*" - sinks: - - event_sink -sinks: - - name: event_sink - transformers: - publishers: - - notifier:// diff --git a/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml b/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml deleted file mode 100644 index 8d009fcd..00000000 --- a/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml +++ /dev/null @@ -1,265 +0,0 @@ -metric: - # LBaaS V2 - - name: "loadbalancer.create" - event_type: - - "loadbalancer.create.end" - type: "delta" - unit: "loadbalancer" - volume: 1 - resource_id: $.payload.loadbalancer.id - project_id: $.payload.loadbalancer.tenant_id - metadata: - name: $.payload.loadbalancer.name - description: $.payload.loadbalancer.description - listeners: $.payload.loadbalancer.listeners - operating_status: $.payload.loadbalancer.operating_status - vip_address: $.payload.loadbalancer.vip_address - vip_subnet_id: $.payload.loadbalancer.vip_subnet_id - admin_state_up: $.payload.loadbalancer.admin_state_up - - - name: "loadbalancer.update" - event_type: - - "loadbalancer.update.end" - type: "delta" - unit: "loadbalancer" - volume: 1 - resource_id: $.payload.loadbalancer.id - project_id: $.payload.loadbalancer.tenant_id - metadata: - name: $.payload.loadbalancer.name - description: $.payload.loadbalancer.description - listeners: $.payload.loadbalancer.listeners - operating_status: $.payload.loadbalancer.operating_status - vip_address: $.payload.loadbalancer.vip_address - vip_subnet_id: $.payload.loadbalancer.vip_subnet_id - admin_state_up: $.payload.loadbalancer.admin_state_up - - - name: "loadbalancer.delete" - event_type: - - "loadbalancer.delete.end" - type: "delta" - unit: "loadbalancer" - volume: 1 - resource_id: $.payload.loadbalancer.id - project_id: $.payload.loadbalancer.tenant_id - metadata: - name: $.payload.loadbalancer.name - description: $.payload.loadbalancer.description - listeners: $.payload.loadbalancer.listeners - operating_status: $.payload.loadbalancer.operating_status - vip_address: $.payload.loadbalancer.vip_address - vip_subnet_id: $.payload.loadbalancer.vip_subnet_id - admin_state_up: $.payload.loadbalancer.admin_state_up - - - name: "listener.create" - event_type: - - 
"listener.create.end" - type: "delta" - unit: "listener" - volume: 1 - resource_id: $.payload.listener.id - project_id: $.payload.listener.tenant_id - metadata: - name: $.payload.listener.name - description: $.payload.listener.description - admin_state_up: $.payload.listener.admin_state_up - loadbalancers: $.payload.listener.loadbalancers - default_pool_id: $.payload.listener.default_pool_id - protocol: $.payload.listener.protocol - connection_limit: $.payload.listener.connection_limit - - - name: "listener.update" - event_type: - - "listener.update.end" - type: "delta" - unit: "listener" - volume: 1 - resource_id: $.payload.listener.id - project_id: $.payload.listener.tenant_id - metadata: - name: $.payload.listener.name - description: $.payload.listener.description - admin_state_up: $.payload.listener.admin_state_up - loadbalancers: $.payload.listener.loadbalancers - default_pool_id: $.payload.listener.default_pool_id - protocol: $.payload.listener.protocol - connection_limit: $.payload.listener.connection_limit - - - name: "listener.delete" - event_type: - - "listener.delete.end" - type: "delta" - unit: "listener" - volume: 1 - resource_id: $.payload.listener.id - project_id: $.payload.listener.tenant_id - metadata: - name: $.payload.listener.name - description: $.payload.listener.description - admin_state_up: $.payload.listener.admin_state_up - loadbalancers: $.payload.listener.loadbalancers - default_pool_id: $.payload.listener.default_pool_id - protocol: $.payload.listener.protocol - connection_limit: $.payload.listener.connection_limit - - - name: "healthmonitor.create" - event_type: - - "healthmonitor.create.end" - type: "delta" - unit: "healthmonitor" - volume: 1 - resource_id: $.payload.healthmonitor.id - project_id: $.payload.healthmonitor.tenant_id - metadata: - name: $.payload.healthmonitor.name - description: $.payload.healthmonitor.description - admin_state_up: $.payload.healthmonitor.admin_state_up - max_retries: $.payload.healthmonitor.max_retries - delay: $.payload.healthmonitor.delay - timeout: $.payload.healthmonitor.timeout - pools: $.payload.healthmonitor.pools - type: $.payload.healthmonitor.type - - - name: "healthmonitor.update" - event_type: - - "healthmonitor.update.end" - type: "delta" - unit: "healthmonitor" - volume: 1 - resource_id: $.payload.healthmonitor.id - project_id: $.payload.healthmonitor.tenant_id - metadata: - name: $.payload.healthmonitor.name - description: $.payload.healthmonitor.description - admin_state_up: $.payload.healthmonitor.admin_state_up - max_retries: $.payload.healthmonitor.max_retries - delay: $.payload.healthmonitor.delay - timeout: $.payload.healthmonitor.timeout - pools: $.payload.healthmonitor.pools - type: $.payload.healthmonitor.type - - - name: "healthmonitor.delete" - event_type: - - "healthmonitor.delete.end" - type: "delta" - unit: "healthmonitor" - volume: 1 - resource_id: $.payload.healthmonitor.id - project_id: $.payload.healthmonitor.tenant_id - metadata: - name: $.payload.healthmonitor.name - description: $.payload.healthmonitor.description - admin_state_up: $.payload.healthmonitor.admin_state_up - max_retries: $.payload.healthmonitor.max_retries - delay: $.payload.healthmonitor.delay - timeout: $.payload.healthmonitor.timeout - pools: $.payload.healthmonitor.pools - type: $.payload.healthmonitor.type - - - name: "pool.create" - event_type: - - "pool.create.end" - type: "delta" - unit: "pool" - volume: 1 - resource_id: $.payload.pool.id - project_id: $.payload.pool.tenant_id - metadata: - name: $.payload.pool.name - 
description: $.payload.pool.description - admin_state_up: $.payload.pool.admin_state_up - lb_method: $.payload.pool.lb_method - protocol: $.payload.pool.protocol - subnet_id: $.payload.pool.subnet_id - vip_id: $.payload.pool.vip_id - status: $.payload.pool.status - status_description: $.payload.pool.status_description - - - name: "pool.update" - event_type: - - "pool.update.end" - type: "delta" - unit: "pool" - volume: 1 - resource_id: $.payload.pool.id - project_id: $.payload.pool.tenant_id - metadata: - name: $.payload.pool.name - description: $.payload.pool.description - admin_state_up: $.payload.pool.admin_state_up - lb_method: $.payload.pool.lb_method - protocol: $.payload.pool.protocol - subnet_id: $.payload.pool.subnet_id - vip_id: $.payload.pool.vip_id - status: $.payload.pool.status - status_description: $.payload.pool.status_description - - - name: "pool.delete" - event_type: - - "pool.delete.end" - type: "delta" - unit: "pool" - volume: 1 - resource_id: $.payload.pool.id - project_id: $.payload.pool.tenant_id - metadata: - name: $.payload.pool.name - description: $.payload.pool.description - admin_state_up: $.payload.pool.admin_state_up - lb_method: $.payload.pool.lb_method - protocol: $.payload.pool.protocol - subnet_id: $.payload.pool.subnet_id - vip_id: $.payload.pool.vip_id - status: $.payload.pool.status - status_description: $.payload.pool.status_description - - - name: "member.create" - event_type: - - "member.create.end" - type: "delta" - unit: "member" - volume: 1 - resource_id: $.payload.member.id - project_id: $.payload.member.tenant_id - metadata: - address: $.payload.member.address - status: $.payload.member.status - status_description: $.payload.member.status_description - weight: $.payload.member.weight - admin_state_up: $.payload.member.admin_state_up - protocol_port: $.payload.member.protocol_port - pool_id: $.payload.member.pool_id - - - name: "member.update" - event_type: - - "member.update.end" - type: "delta" - unit: "member" - volume: 1 - resource_id: $.payload.member.id - project_id: $.payload.member.tenant_id - metadata: - address: $.payload.member.address - status: $.payload.member.status - status_description: $.payload.member.status_description - weight: $.payload.member.weight - admin_state_up: $.payload.member.admin_state_up - protocol_port: $.payload.member.protocol_port - pool_id: $.payload.member.pool_id - - - name: "member.delete" - event_type: - - "member.delete.end" - type: "delta" - unit: "member" - volume: 1 - resource_id: $.payload.member.id - project_id: $.payload.member.tenant_id - metadata: - address: $.payload.member.address - status: $.payload.member.status - status_description: $.payload.member.status_description - weight: $.payload.member.weight - admin_state_up: $.payload.member.admin_state_up - protocol_port: $.payload.member.protocol_port - pool_id: $.payload.member.pool_id diff --git a/etc/ceilometer/examples/osprofiler_event_definitions.yaml b/etc/ceilometer/examples/osprofiler_event_definitions.yaml deleted file mode 100644 index d2a87539..00000000 --- a/etc/ceilometer/examples/osprofiler_event_definitions.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- event_type: profiler.* - traits: - project: - fields: payload.project - service: - fields: payload.service - name: - fields: payload.name - base_id: - fields: payload.base_id - trace_id: - fields: payload.trace_id - parent_id: - fields: payload.parent_id - timestamp: - fields: payload.timestamp - host: - fields: payload.info.host - path: - fields: payload.info.request.path - query: - 
fields: payload.info.request.query - method: - fields: payload.info.request.method - scheme: - fields: payload.info.request.scheme - db.statement: - fields: payload.info.db.statement - db.params: - fields: payload.info.db.params diff --git a/etc/ceilometer/gnocchi_resources.yaml b/etc/ceilometer/gnocchi_resources.yaml deleted file mode 100644 index 88b81541..00000000 --- a/etc/ceilometer/gnocchi_resources.yaml +++ /dev/null @@ -1,213 +0,0 @@ ---- - -resources: - - resource_type: identity - archive_policy: low - metrics: - - 'identity.authenticate.success' - - 'identity.authenticate.pending' - - 'identity.authenticate.failure' - - 'identity.user.created' - - 'identity.user.deleted' - - 'identity.user.updated' - - 'identity.group.created' - - 'identity.group.deleted' - - 'identity.group.updated' - - 'identity.role.created' - - 'identity.role.deleted' - - 'identity.role.updated' - - 'identity.project.created' - - 'identity.project.deleted' - - 'identity.project.updated' - - 'identity.trust.created' - - 'identity.trust.deleted' - - 'identity.role_assignment.created' - - 'identity.role_assignment.deleted' - - - resource_type: ceph_account - metrics: - - 'radosgw.objects' - - 'radosgw.objects.size' - - 'radosgw.objects.containers' - - 'radosgw.api.request' - - 'radosgw.containers.objects' - - 'radosgw.containers.objects.size' - - - resource_type: instance - metrics: - - 'instance' - - 'memory' - - 'memory.usage' - - 'memory.resident' - - 'vcpus' - - 'cpu' - - 'cpu.delta' - - 'cpu_util' - - 'disk.root.size' - - 'disk.ephemeral.size' - - 'disk.read.requests' - - 'disk.read.requests.rate' - - 'disk.write.requests' - - 'disk.write.requests.rate' - - 'disk.read.bytes' - - 'disk.read.bytes.rate' - - 'disk.write.bytes' - - 'disk.write.bytes.rate' - - 'disk.latency' - - 'disk.iops' - - 'disk.capacity' - - 'disk.allocation' - - 'disk.usage' - attributes: - host: resource_metadata.host - image_ref: resource_metadata.image_ref - display_name: resource_metadata.display_name - flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)) - server_group: resource_metadata.user_metadata.server_group - - - resource_type: instance_network_interface - metrics: - - 'network.outgoing.packets.rate' - - 'network.incoming.packets.rate' - - 'network.outgoing.packets' - - 'network.incoming.packets' - - 'network.outgoing.bytes.rate' - - 'network.incoming.bytes.rate' - - 'network.outgoing.bytes' - - 'network.incoming.bytes' - attributes: - name: resource_metadata.vnic_name - instance_id: resource_metadata.instance_id - - - resource_type: instance_disk - metrics: - - 'disk.device.read.requests' - - 'disk.device.read.requests.rate' - - 'disk.device.write.requests' - - 'disk.device.write.requests.rate' - - 'disk.device.read.bytes' - - 'disk.device.read.bytes.rate' - - 'disk.device.write.bytes' - - 'disk.device.write.bytes.rate' - - 'disk.device.latency' - - 'disk.device.iops' - - 'disk.device.capacity' - - 'disk.device.allocation' - - 'disk.device.usage' - attributes: - name: resource_metadata.disk_name - instance_id: resource_metadata.instance_id - - - resource_type: image - metrics: - - 'image' - - 'image.size' - - 'image.download' - - 'image.serve' - attributes: - name: resource_metadata.name - container_format: resource_metadata.container_format - disk_format: resource_metadata.disk_format - - - resource_type: ipmi - metrics: - - 'hardware.ipmi.node.power' - - 'hardware.ipmi.node.temperature' - - 'hardware.ipmi.node.inlet_temperature' - - 'hardware.ipmi.node.outlet_temperature' - - 'hardware.ipmi.node.fan' - - 
'hardware.ipmi.node.current' - - 'hardware.ipmi.node.voltage' - - 'hardware.ipmi.node.airflow' - - 'hardware.ipmi.node.cups' - - 'hardware.ipmi.node.cpu_util' - - 'hardware.ipmi.node.mem_util' - - 'hardware.ipmi.node.io_util' - - - resource_type: network - metrics: - - 'bandwidth' - - 'network' - - 'network.create' - - 'network.update' - - 'subnet' - - 'subnet.create' - - 'subnet.update' - - 'port' - - 'port.create' - - 'port.update' - - 'router' - - 'router.create' - - 'router.update' - - 'ip.floating' - - 'ip.floating.create' - - 'ip.floating.update' - - - resource_type: stack - metrics: - - 'stack.create' - - 'stack.update' - - 'stack.delete' - - 'stack.resume' - - 'stack.suspend' - - - resource_type: swift_account - metrics: - - 'storage.objects.incoming.bytes' - - 'storage.objects.outgoing.bytes' - - 'storage.api.request' - - 'storage.objects.size' - - 'storage.objects' - - 'storage.objects.containers' - - 'storage.containers.objects' - - 'storage.containers.objects.size' - - - resource_type: volume - metrics: - - 'volume' - - 'volume.size' - - 'volume.create' - - 'volume.delete' - - 'volume.update' - - 'volume.resize' - - 'volume.attach' - - 'volume.detach' - attributes: - display_name: resource_metadata.display_name - - - resource_type: host - metrics: - - 'hardware.cpu.load.1min' - - 'hardware.cpu.load.5min' - - 'hardware.cpu.load.15min' - - 'hardware.cpu.util' - - 'hardware.memory.total' - - 'hardware.memory.used' - - 'hardware.memory.swap.total' - - 'hardware.memory.swap.avail' - - 'hardware.memory.buffer' - - 'hardware.memory.cached' - - 'hardware.network.ip.outgoing.datagrams' - - 'hardware.network.ip.incoming.datagrams' - - 'hardware.system_stats.cpu.idle' - - 'hardware.system_stats.io.outgoing.blocks' - - 'hardware.system_stats.io.incoming.blocks' - attributes: - host_name: resource_metadata.resource_url - - - resource_type: host_disk - metrics: - - 'hardware.disk.size.total' - - 'hardware.disk.size.used' - attributes: - host_name: resource_metadata.resource_url - device_name: resource_metadata.device - - - resource_type: host_network_interface - metrics: - - 'hardware.network.incoming.bytes' - - 'hardware.network.outgoing.bytes' - - 'hardware.network.outgoing.errors' - attributes: - host_name: resource_metadata.resource_url - device_name: resource_metadata.name diff --git a/etc/ceilometer/pipeline.yaml b/etc/ceilometer/pipeline.yaml deleted file mode 100644 index a5bd5148..00000000 --- a/etc/ceilometer/pipeline.yaml +++ /dev/null @@ -1,92 +0,0 @@ ---- -sources: - - name: meter_source - interval: 600 - meters: - - "*" - sinks: - - meter_sink - - name: cpu_source - interval: 600 - meters: - - "cpu" - sinks: - - cpu_sink - - cpu_delta_sink - - name: disk_source - interval: 600 - meters: - - "disk.read.bytes" - - "disk.read.requests" - - "disk.write.bytes" - - "disk.write.requests" - - "disk.device.read.bytes" - - "disk.device.read.requests" - - "disk.device.write.bytes" - - "disk.device.write.requests" - sinks: - - disk_sink - - name: network_source - interval: 600 - meters: - - "network.incoming.bytes" - - "network.incoming.packets" - - "network.outgoing.bytes" - - "network.outgoing.packets" - sinks: - - network_sink -sinks: - - name: meter_sink - transformers: - publishers: - - notifier:// - - name: cpu_sink - transformers: - - name: "rate_of_change" - parameters: - target: - name: "cpu_util" - unit: "%" - type: "gauge" - scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" - publishers: - - notifier:// - - name: cpu_delta_sink - transformers: - - name: "delta" - 
parameters: - target: - name: "cpu.delta" - growth_only: True - publishers: - - notifier:// - - name: disk_sink - transformers: - - name: "rate_of_change" - parameters: - source: - map_from: - name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)" - unit: "(B|request)" - target: - map_to: - name: "\\1.\\2.\\3.rate" - unit: "\\1/s" - type: "gauge" - publishers: - - notifier:// - - name: network_sink - transformers: - - name: "rate_of_change" - parameters: - source: - map_from: - name: "network\\.(incoming|outgoing)\\.(bytes|packets)" - unit: "(B|packet)" - target: - map_to: - name: "network.\\1.\\2.rate" - unit: "\\1/s" - type: "gauge" - publishers: - - notifier:// diff --git a/etc/ceilometer/rootwrap.conf b/etc/ceilometer/rootwrap.conf deleted file mode 100644 index f5d90d20..00000000 --- a/etc/ceilometer/rootwrap.conf +++ /dev/null @@ -1,27 +0,0 @@ -# Configuration for ceilometer-rootwrap -# This file should be owned by (and only-writeable by) the root user - -[DEFAULT] -# List of directories to load filter definitions from (separated by ','). -# These directories MUST all be only writeable by root ! -filters_path=/etc/ceilometer/rootwrap.d,/usr/share/ceilometer/rootwrap - -# List of directories to search executables in, in case filters do not -# explicitely specify a full path (separated by ',') -# If not specified, defaults to system PATH environment variable. -# These directories MUST all be only writeable by root ! -exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin - -# Enable logging to syslog -# Default value is False -use_syslog=False - -# Which syslog facility to use. -# Valid values include auth, authpriv, syslog, user0, user1... -# Default value is 'syslog' -syslog_log_facility=syslog - -# Which messages to log. -# INFO means log all usage -# ERROR means only log unsuccessful attempts -syslog_log_level=ERROR diff --git a/etc/ceilometer/rootwrap.d/ipmi.filters b/etc/ceilometer/rootwrap.d/ipmi.filters deleted file mode 100644 index 2ef74b04..00000000 --- a/etc/ceilometer/rootwrap.d/ipmi.filters +++ /dev/null @@ -1,7 +0,0 @@ -# ceilometer-rootwrap command filters for IPMI capable nodes -# This file should be owned by (and only-writeable by) the root user - -[Filters] -# ceilometer/ipmi/nodemanager/node_manager.py: 'ipmitool' -ipmitool: CommandFilter, ipmitool, root - diff --git a/etc/panko/policy.json b/etc/panko/policy.json new file mode 100644 index 00000000..332d080d --- /dev/null +++ b/etc/panko/policy.json @@ -0,0 +1,7 @@ +{ + "context_is_admin": "role:admin", + "segregation": "rule:context_is_admin", + + "telemetry:events:index": "", + "telemetry:events:show": "" +} diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst deleted file mode 100644 index c9996a36..00000000 --- a/rally-jobs/README.rst +++ /dev/null @@ -1,29 +0,0 @@ -Rally job related files -======================= - -This directory contains rally tasks and plugins that are run by OpenStack CI. - -Structure ---------- - -* plugins - directory where you can add rally plugins. Almost everything in - Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic - cleanup resources, .... - -* extra - all files from this directory will be copy pasted to gates, so you - are able to use absolute paths in rally tasks. 
- Files will be located in ~/.rally/extra/* - -* ceilometer is a task that is run in gates against Ceilometer - - -Useful links ------------- - -* More about Rally: https://rally.readthedocs.org/en/latest/ - -* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html - -* About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins diff --git a/rally-jobs/ceilometer.yaml b/rally-jobs/ceilometer.yaml deleted file mode 100644 index 32c1022f..00000000 --- a/rally-jobs/ceilometer.yaml +++ /dev/null @@ -1,69 +0,0 @@ ---- - - CeilometerMeters.list_meters: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerResource.list_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerStats.create_meter_and_get_stats: - - - args: - user_id: "user-id" - resource_id: "resource-id" - counter_volume: 1.0 - counter_unit: "" - counter_type: "cumulative" - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerQueries.create_and_query_samples: - - - args: - filter: {"=": {"counter_unit": "instance"}} - orderby: !!null - limit: 10 - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: "1.0" - resource_id: "resource_id" - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst deleted file mode 100644 index aab343c5..00000000 --- a/rally-jobs/extra/README.rst +++ /dev/null @@ -1,6 +0,0 @@ -Extra files -=========== - -All files from this directory will be copy pasted to gates, so you are able to -use absolute path in rally tasks. Files will be in ~/.rally/extra/* - diff --git a/rally-jobs/extra/fake.img b/rally-jobs/extra/fake.img deleted file mode 100644 index e69de29b..00000000 diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst deleted file mode 100644 index 33bec0d2..00000000 --- a/rally-jobs/plugins/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -Rally plugins -============= - -All *.py modules from this directory will be auto-loaded by Rally and all -plugins will be discoverable. There is no need of any extra configuration -and there is no difference between writing them here and in rally code base. - -Note that it is better to push all interesting and useful benchmarks to Rally -code base, this simplifies administration for Operators. diff --git a/rally-jobs/plugins/plugin_sample.py b/rally-jobs/plugins/plugin_sample.py deleted file mode 100644 index 6541357a..00000000 --- a/rally-jobs/plugins/plugin_sample.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" Sample of plugin for Ceilometer. - -For more Ceilometer related benchmarks take a look here: -github.com/openstack/rally/blob/master/rally/benchmark/scenarios/ceilometer/ - -About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts -""" - -from rally.benchmark.scenarios import base - - -class CeilometerPlugin(base.Scenario): - pass diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml b/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml deleted file mode 100644 index 32b4b248..00000000 --- a/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - > - [`bug 1531626 `_] - Ensure aggregator transformer timeout is honoured if size is not provided. diff --git a/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml b/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml deleted file mode 100644 index 68fd2370..00000000 --- a/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -critical: - - > - The previous configuration options default for - `requeue_sample_on_dispatcher_error' and - `requeue_event_on_dispatcher_error' allowed to lose data very easily: if - the dispatcher failed to send data to the backend (e.g. Gnocchi is down), - then the dispatcher raised and the data were lost forever. This was - completely unacceptable, and nobody should be able to configure Ceilometer - in that way." - -upgrade: - - > - The options `requeue_event_on_dispatcher_error' and - `requeue_sample_on_dispatcher_error' have been enabled and removed. diff --git a/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml b/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml deleted file mode 100644 index e691bec1..00000000 --- a/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - > - Add support for batch processing of messages from queue. This will allow - the collector and notification agent to grab multiple messages per thread - to enable more efficient processing. -upgrade: - - > - batch_size and batch_timeout configuration options are added to both - [notification] and [collector] sections of configuration. The batch_size - controls the number of messages to grab before processing. Similarly, - the batch_timeout defines the wait time before processing. diff --git a/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml b/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml deleted file mode 100644 index 39491021..00000000 --- a/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - > - [`bug 1550436 `_] - Cache json parsers when building parsing logic to handle event and - meter definitions. This will improve agent startup and setup time. 
diff --git a/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml b/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml deleted file mode 100644 index ff9ae9f5..00000000 --- a/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - > - To minimise load on Nova API, an additional configuration option was added - to control discovery interval vs metric polling interval. If - resource_update_interval option is configured in compute section, the - compute agent will discover new instances based on defined interval. The - agent will continue to poll the discovered instances at the interval - defined by pipeline. diff --git a/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml b/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml deleted file mode 100644 index 6ab41f75..00000000 --- a/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - > - [`bug 1480333 `_] - Support ability to configure collector to capture events or meters mutually - exclusively, rather than capturing both always. -other: - - > - Configure individual dispatchers by specifying meter_dispatchers and - event_dispatchers in configuration file. diff --git a/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml b/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml deleted file mode 100644 index c9fbe533..00000000 --- a/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - > - Support for CORS is added. More information can be found - [`here `_] -upgrade: - - > - The api-paste.ini file can be modified to include or exclude the CORs - middleware. Additional configurations can be made to middleware as well. diff --git a/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml b/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml deleted file mode 100644 index a4b0c6ef..00000000 --- a/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - > - [`bug 1254800 `_] - Add better support to catch race conditions when creating event_types diff --git a/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml b/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml deleted file mode 100644 index 45794a74..00000000 --- a/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -critical: - - > - [`bug 1533787 `_] - Fix an issue where agents are not properly getting registered to group - when multiple notification agents are deployed. This can result in - bad transformation as the agents are not coordinated. It is still - recommended to set heartbeat_timeout_threshold = 0 in - [oslo_messaging_rabbit] section when deploying multiple agents. diff --git a/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml b/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml deleted file mode 100644 index 60c598b3..00000000 --- a/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - > - [`bug 1539163 `_] - Add ability to define whether to use first or last timestamps when - aggregating samples. This will allow more flexibility when chaining - transformers. 
diff --git a/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml b/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml deleted file mode 100644 index 1bd295ab..00000000 --- a/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -fixes: - - > - [`bug 1536338 `_] - Patch was added to fix the broken floatingip pollster - that polled data from nova api, but since the nova api - filtered the data by tenant, ceilometer was not getting - any data back. The fix changes the pollster to use the - neutron api instead to get the floating ip info. diff --git a/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml b/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml deleted file mode 100644 index d3eb8399..00000000 --- a/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - > - [`bug 1530793 `_] - network.services.lb.incoming.bytes meter was previous set to incorrect - type. It should be a gauge meter. diff --git a/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml b/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml deleted file mode 100644 index 653d3b32..00000000 --- a/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - > - Support resource caching in Gnocchi dispatcher to improve write - performance to avoid additional queries. -other: - - > - A dogpile.cache supported backend is required to enable cache. Additional - configuration `options `_ - are also required. diff --git a/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml b/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml deleted file mode 100644 index 29f4b04e..00000000 --- a/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - > - [`bug 255569 `_] - Fix caching support in Gnocchi dispatcher. Added better locking support - to enable smoother cache access. diff --git a/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml b/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml deleted file mode 100644 index 01774a90..00000000 --- a/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - > - Gnocchi dispatcher now uses client rather than direct http requests -upgrade: - - > - gnocchiclient library is now a requirement if using ceilometer+gnocchi. diff --git a/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml b/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml deleted file mode 100644 index baf5db49..00000000 --- a/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - > - [`bug 1518338 `_] - Add support for storing SNMP metrics in Gnocchi.This functionality requires - Gnocchi v2.1.0 to be installed. diff --git a/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml b/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml deleted file mode 100644 index 23f557e5..00000000 --- a/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - > - gnocchi_resources.yaml in Ceilometer should be updated. 
-fixes: - - > - Fix samples from Heat to map to correct Gnocchi resource type diff --git a/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml b/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml deleted file mode 100644 index 485204b2..00000000 --- a/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - > - [`bug 1523124 `_] - Fix gnocchi dispatcher to support UDP collector diff --git a/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml b/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml deleted file mode 100644 index 4149a0b0..00000000 --- a/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - > - [`bug 1542189 `_] - Handle malformed resource definitions in gnocchi_resources.yaml - gracefully. Currently we raise an exception once we hit a bad - resource and skip the rest. Instead the patch skips the bad - resource and proceeds with rest of the definitions. diff --git a/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml b/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml deleted file mode 100644 index c6eb6e77..00000000 --- a/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - > - To utilize the new policy support. The policy.json file - should be updated accordingly. The pre-existing policy.json - file will continue to function as it does if policy changes - are not required. -fixes: - - > - [`bug 1504495 `_] - Configure ceilometer to handle policy.json rules when possible. diff --git a/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml b/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml deleted file mode 100644 index c3fcf6c8..00000000 --- a/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - > - Run db-sync to add new indices. -fixes: - - > - [`bug 1526793 `_] - Additional indices were added to better support querying of event data. diff --git a/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml b/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml deleted file mode 100644 index 87225fad..00000000 --- a/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - > - Add support for Keystone v3 authentication diff --git a/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml b/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml deleted file mode 100644 index 9bb5c5b1..00000000 --- a/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - > - [`bug 1536699 `_] - Patch to fix volume field lookup in meter definition file. In case - the field is missing in the definition, it raises a keyerror and - aborts. Instead we should skip the missing field meter and continue - with the rest of the definitions. diff --git a/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml b/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml deleted file mode 100644 index c2a86272..00000000 --- a/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - > - [`bug 1532661 `_] - Fix statistics query failures due to large numbers stored in MongoDB. 
Data - from MongoDB is returned as Int64 for big numbers when int and float types - are expected. The data is cast to appropriate type to handle large data. diff --git a/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml b/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml deleted file mode 100644 index 42955cdd..00000000 --- a/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - > - Ceilometer alarms code is now fully removed from code base. - Equivalent functionality is handled by Aodh. diff --git a/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml b/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml deleted file mode 100644 index 0c2e1fc9..00000000 --- a/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - > - Support for CADF-only payload in HTTP dispatcher is dropped as - audit middleware in pyCADF was dropped in Kilo cycle. -upgrade: - - > - audit middleware in keystonemiddleware library should be used for - similar support. diff --git a/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml b/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml deleted file mode 100644 index 2b565152..00000000 --- a/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - > - Remove eventlet from Ceilometer in favour of threaded approach diff --git a/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml b/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml deleted file mode 100644 index 669a85fa..00000000 --- a/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - > - RPC collector support is dropped. The queue-based notifier publisher and - collector was added as the recommended alternative as of Icehouse cycle. -upgrade: - - > - Pipeline.yaml files for agents should be updated to notifier:// or udp:// - publishers. The rpc:// publisher is no longer supported. diff --git a/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml b/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml deleted file mode 100644 index 0a1ec7e5..00000000 --- a/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml +++ /dev/null @@ -1,10 +0,0 @@ - - ---- -fixes: - - > - [`bug 1536498 `_] - Patch to fix duplicate meter definitions causing duplicate samples. - If a duplicate is found, log a warning and skip the meter definition. - Note that the first occurrence of a meter will be used and any following - duplicates will be skipped from processing. diff --git a/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml b/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml deleted file mode 100644 index 39482a91..00000000 --- a/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - > - [`bug 1506738 `_] - [`bug 1509677 `_] - Optimise SQL backend queries to minimise query load diff --git a/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml b/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml deleted file mode 100644 index 248e3582..00000000 --- a/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - > - [`bug 1388680 `_] - Suppose ability to query for None value when using SQL backend. 
diff --git a/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml b/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml deleted file mode 100644 index 7c652ab9..00000000 --- a/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -features: - - > - Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron - is deprecated. The same metrics are available between v1 and v2. -issues: - - > - Neutron API is not designed to be polled against. When polling against - Neutron is enabled, Ceilometer's polling agents may generage a significant - load against the Neutron API. It is recommended that a dedicated API be - enabled for polling while Neutron's API is improved to handle polling. -upgrade: - - > - By default, Ceilometer will poll the v2 API. To poll legacy v1 API, - add neutron_lbaas_version=v1 option to configuration file. diff --git a/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml b/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml deleted file mode 100644 index fb970fd0..00000000 --- a/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - > - [`bug 1513731 `_] - Add support for hardware cpu_util in snmp.yaml diff --git a/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml b/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml deleted file mode 100644 index 46fdf04f..00000000 --- a/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - > - [`bug 1506959 `_] - Add support to query unique set of meter names rather than meters - associated with each resource. The list is available by adding unique=True - option to request. diff --git a/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml b/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml deleted file mode 100644 index fbb6414c..00000000 --- a/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -critical: - - > - [`bug 1519767 `_] - fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and - its potential race conditions are now patched. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index fa6da509..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,275 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Ceilometer Release Notes documentation build configuration file, created by -# sphinx-quickstart on Tue Nov 3 17:40:50 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. 
-# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'oslosphinx', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Ceilometer Release Notes' -copyright = u'2015, Ceilometer Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from ceilometer.version import version_info as ceilometer_version -# The full version, including alpha/beta/rc tags. -release = ceilometer_version.version_string_with_vcs() -# The short X.Y version. -version = ceilometer_version.canonical_version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. 
-# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'CeilometerReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'CeilometerReleaseNotes.tex', - u'Ceilometer Release Notes Documentation', - u'Ceilometer Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. 
-# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'ceilometerreleasenotes', - u'Ceilometer Release Notes Documentation', [u'Ceilometer Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'CeilometerReleaseNotes', - u'Ceilometer Release Notes Documentation', - u'Ceilometer Developers', 'CeilometerReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 49a83ead..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -========================= - Ceilometer Release Notes -========================= - -.. toctree:: - :maxdepth: 1 - - mitaka - liberty - unreleased diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst deleted file mode 100644 index 36217be8..00000000 --- a/releasenotes/source/liberty.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================== - Liberty Series Release Notes -============================== - -.. release-notes:: - :branch: origin/stable/liberty diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index e5456096..00000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Mitaka Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/mitaka diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index cd22aabc..00000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt index 2a8f7e4f..cce51a04 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,46 +2,25 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
-futures>=3.0;python_version=='2.7' or python_version=='2.6' # BSD -futurist>=0.11.0 # Apache-2.0 -debtcollector>=1.2.0 # Apache-2.0 +debtcollector>=1.2.0 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 -jsonpath-rw-ext>=0.1.9 # Apache-2.0 -jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -kafka-python<1.0.0,>=0.9.5 # Apache-2.0 keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 lxml>=2.3 # BSD -msgpack-python>=0.4.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 -oslo.concurrency>=3.5.0 # Apache-2.0 oslo.config>=3.9.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 -oslo.rootwrap>=2.0.0 # Apache-2.0 -oslo.service>=1.0.0 # Apache-2.0 PasteDeploy>=1.5.0 # MIT pbr>=1.6 # Apache-2.0 pecan>=1.0.0 # BSD -oslo.messaging>=5.2.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 -oslo.serialization>=1.10.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 -pysnmp<5.0.0,>=4.2.3 # BSD -python-glanceclient>=2.0.0 # Apache-2.0 -python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0 -keystoneauth1>=2.1.0 # Apache-2.0 -python-neutronclient>=4.2.0 # Apache-2.0 -python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 -python-swiftclient>=2.2.0 # Apache-2.0 PyYAML>=3.1.0 # MIT -requests!=2.9.0,>=2.8.1 # Apache-2.0 six>=1.9.0 # MIT SQLAlchemy<1.1.0,>=1.0.10 # MIT -sqlalchemy-migrate>=0.9.6 # Apache-2.0 stevedore>=1.9.0 # Apache-2.0 -tooz>=1.28.0 # Apache-2.0 Werkzeug>=0.7 # BSD License WebOb>=1.2.3 # MIT WSME>=0.8 # MIT diff --git a/setup.cfg b/setup.cfg index 81f4ceb9..29cfc6fc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -26,163 +26,6 @@ packages = ceilometer [entry_points] -ceilometer.notification = - instance = ceilometer.compute.notifications.instance:Instance - instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled - network = ceilometer.network.notifications:Network - subnet = ceilometer.network.notifications:Subnet - port = ceilometer.network.notifications:Port - router = ceilometer.network.notifications:Router - floatingip = ceilometer.network.notifications:FloatingIP - http.request = ceilometer.middleware:HTTPRequest - http.response = ceilometer.middleware:HTTPResponse - hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification - hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification - hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification - hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification - network.services.lb.pool = ceilometer.network.notifications:Pool - network.services.lb.vip = ceilometer.network.notifications:Vip - network.services.lb.member = ceilometer.network.notifications:Member - network.services.lb.health_monitor = ceilometer.network.notifications:HealthMonitor - network.services.firewall = ceilometer.network.notifications:Firewall - network.services.firewall.policy = ceilometer.network.notifications:FirewallPolicy - network.services.firewall.rule = ceilometer.network.notifications:FirewallRule - network.services.vpn = ceilometer.network.notifications:VPNService - network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy - network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy - network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection - _sample = ceilometer.telemetry.notifications:TelemetryIpc - meter = ceilometer.meter.notifications:ProcessMeterNotifications - -ceilometer.discover = - local_instances = 
ceilometer.compute.discovery:InstanceDiscovery - endpoint = ceilometer.agent.discovery.endpoint:EndpointDiscovery - tenant = ceilometer.agent.discovery.tenant:TenantDiscovery - local_node = ceilometer.agent.discovery.localnode:LocalNodeDiscovery - lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery - lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery - lb_members = ceilometer.network.services.discovery:LBMembersDiscovery - lb_listeners = ceilometer.network.services.discovery:LBListenersDiscovery - lb_loadbalancers = ceilometer.network.services.discovery:LBLoadBalancersDiscovery - lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery - vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery - ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery - fw_services = ceilometer.network.services.discovery:FirewallDiscovery - fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery - tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO - fip_services = ceilometer.network.services.discovery:FloatingIPDiscovery - -ceilometer.poll.compute = - disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster - disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster - disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster - disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster - disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster - disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster - disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster - disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster - disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster - disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster - disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster - disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster - disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster - disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster - disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster - disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster - disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster - disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster - disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster - disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster - cpu = ceilometer.compute.pollsters.cpu:CPUPollster - cpu_util = ceilometer.compute.pollsters.cpu:CPUUtilPollster - network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster - network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster - network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster - network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster - network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster - network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster - instance 
= ceilometer.compute.pollsters.instance:InstancePollster - memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster - memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster - disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster - disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster - disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster - disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster - disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster - disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster - -ceilometer.poll.ipmi = - hardware.ipmi.node.power = ceilometer.ipmi.pollsters.node:PowerPollster - hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster - hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster - hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster - hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster - hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster - hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster - hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster - hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster - hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster - hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster - hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster - -ceilometer.poll.central = - ip.floating = ceilometer.network.floatingip:FloatingIPPollster - image = ceilometer.image.glance:ImagePollster - image.size = ceilometer.image.glance:ImageSizePollster - rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster - rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster - rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster - rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster - rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster - rgw.usage = ceilometer.objectstore.rgw:UsagePollster - storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster - storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster - storage.objects = ceilometer.objectstore.swift:ObjectsPollster - storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster - storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster - energy = ceilometer.energy.kwapi:EnergyPollster - power = ceilometer.energy.kwapi:PowerPollster - switch.port = ceilometer.network.statistics.port:PortPollster - switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets - switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets - switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes - switch.port.transmit.bytes = ceilometer.network.statistics.port:PortPollsterTransmitBytes - switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops - switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops - switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors - switch.port.transmit.errors = 
ceilometer.network.statistics.port:PortPollsterTransmitErrors - switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors - switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors - switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors - switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount - switch.table = ceilometer.network.statistics.table:TablePollster - switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries - switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets - switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets - switch = ceilometer.network.statistics.switch:SWPollster - switch.flow = ceilometer.network.statistics.flow:FlowPollster - switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes - switch.flow.duration.nanoseconds = ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds - switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds - switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets - network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster - network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster - network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster - network.services.lb.listener = ceilometer.network.services.lbaas:LBListenerPollster - network.services.lb.loadbalancer = ceilometer.network.services.lbaas:LBLoadBalancerPollster - network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster - network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster - network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster - network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster - network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster - network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster - network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster - network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster - network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster - -ceilometer.builder.poll.central = - hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster - ceilometer.event.storage = es = ceilometer.event.storage.impl_elasticsearch:Connection log = ceilometer.event.storage.impl_log:Connection @@ -192,77 +35,13 @@ ceilometer.event.storage = sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection hbase = ceilometer.event.storage.impl_hbase:Connection -ceilometer.metering.storage = - log = ceilometer.storage.impl_log:Connection - mongodb = ceilometer.storage.impl_mongodb:Connection - mysql = ceilometer.storage.impl_sqlalchemy:Connection - postgresql = ceilometer.storage.impl_sqlalchemy:Connection - sqlite = ceilometer.storage.impl_sqlalchemy:Connection - hbase = ceilometer.storage.impl_hbase:Connection - -ceilometer.compute.virt = - libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector - hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector - vsphere = 
ceilometer.compute.virt.vmware.inspector:VsphereInspector - xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector - -ceilometer.hardware.inspectors = - snmp = ceilometer.hardware.inspector.snmp:SNMPInspector - -ceilometer.transformer = - accumulator = ceilometer.transformer.accumulator:TransformerAccumulator - delta = ceilometer.transformer.conversions:DeltaTransformer - unit_conversion = ceilometer.transformer.conversions:ScalingTransformer - rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer - aggregator = ceilometer.transformer.conversions:AggregatorTransformer - arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer - -ceilometer.publisher = - test = ceilometer.publisher.test:TestPublisher - notifier = ceilometer.publisher.messaging:SampleNotifierPublisher - udp = ceilometer.publisher.udp:UDPPublisher - file = ceilometer.publisher.file:FilePublisher - direct = ceilometer.publisher.direct:DirectPublisher - kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher - http = ceilometer.publisher.http:HttpPublisher - -ceilometer.event.publisher = - test = ceilometer.publisher.test:TestPublisher - direct = ceilometer.publisher.direct:DirectPublisher - notifier = ceilometer.publisher.messaging:EventNotifierPublisher - kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher - http = ceilometer.publisher.http:HttpPublisher - -ceilometer.event.trait_plugin = - split = ceilometer.event.trait_plugins:SplitterTraitPlugin - bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin - timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin - - console_scripts = ceilometer-api = ceilometer.cmd.api:main - ceilometer-polling = ceilometer.cmd.polling:main - ceilometer-agent-notification = ceilometer.cmd.agent_notification:main - ceilometer-send-sample = ceilometer.cmd.sample:send_sample ceilometer-dbsync = ceilometer.cmd.storage:dbsync ceilometer-expirer = ceilometer.cmd.storage:expirer - ceilometer-rootwrap = oslo_rootwrap.cmd:main - ceilometer-collector = ceilometer.cmd.collector:main - -ceilometer.dispatcher.meter = - database = ceilometer.dispatcher.database:DatabaseDispatcher - file = ceilometer.dispatcher.file:FileDispatcher - http = ceilometer.dispatcher.http:HttpDispatcher - gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher ceilometer.dispatcher.event = database = ceilometer.dispatcher.database:DatabaseDispatcher - file = ceilometer.dispatcher.file:FileDispatcher - http = ceilometer.dispatcher.http:HttpDispatcher - -network.statistics.drivers = - opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver - opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver oslo.config.opts = ceilometer = ceilometer.opts:list_opts @@ -270,9 +49,6 @@ oslo.config.opts = oslo.config.opts.defaults = ceilometer = ceilometer.conf.defaults:set_cors_middleware_defaults -tempest.test_plugins = - ceilometer_tests = ceilometer.tests.tempest.plugin:CeilometerTempestPlugin - [build_sphinx] all_files = 1 build-dir = doc/build diff --git a/test-requirements.txt b/test-requirements.txt index 9aa5d54c..96d060db 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,32 +2,24 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
-contextlib2>=0.4.0 # PSF License coverage>=3.6 # Apache-2.0 elasticsearch<2.0,>=1.3.0 # Apache-2.0 fixtures<2.0,>=1.3.1 # Apache-2.0/BSD happybase!=0.7,>=0.5;python_version=='2.7' # MIT mock>=1.2 # BSD PyMySQL>=0.6.2 # MIT License -os-win>=0.2.3 # Apache-2.0 -oslo.cache>=1.5.0 # Apache-2.0 # Docs Requirements oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 -reno>=1.6.2 # Apache2 oslotest>=1.10.0 # Apache-2.0 -oslo.vmware>=1.16.0 # Apache-2.0 psycopg2>=2.5 # LGPL/ZPL pymongo!=3.1,>=3.0.2 # Apache-2.0 -gnocchiclient>=2.2.0 # Apache-2.0 python-subunit>=0.0.18 # Apache-2.0/BSD sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD sphinxcontrib-httpdomain # BSD sphinxcontrib-pecanwsme>=0.8 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT gabbi>=1.11.0 # Apache-2.0 -requests-aws>=0.1.4 # BSD License (3 clause) os-testr>=0.4.1 # Apache-2.0 WebTest>=2.0 # MIT pifpaf>=0.0.11 diff --git a/tools/ceilometer-test-event.py b/tools/ceilometer-test-event.py deleted file mode 100755 index dedc7ac0..00000000 --- a/tools/ceilometer-test-event.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Command line tool help you debug your event definitions. - -Feed it a list of test notifications in json format, and it will show -you what events will be generated. -""" - -import json -import sys - -from oslo_config import cfg -from stevedore import extension - -from ceilometer.event import converter -from ceilometer import service - - -cfg.CONF.register_cli_opts([ - cfg.StrOpt('input-file', - short='i', - help='File to read test notifications from.' - ' (Containing a json list of notifications.)' - ' defaults to stdin.'), - cfg.StrOpt('output-file', - short='o', - help='File to write results to. 
Defaults to stdout.'), -]) - -TYPES = {1: 'text', - 2: 'int', - 3: 'float', - 4: 'datetime'} - - -service.prepare_service() - -output_file = cfg.CONF.output_file -input_file = cfg.CONF.input_file - -if output_file is None: - out = sys.stdout -else: - out = open(output_file, 'w') - -if input_file is None: - notifications = json.load(sys.stdin) -else: - with open(input_file, 'r') as f: - notifications = json.load(f) - -out.write("Definitions file: %s\n" % cfg.CONF.event.definitions_cfg_file) -out.write("Notifications tested: %s\n" % len(notifications)) - -event_converter = converter.setup_events( - extension.ExtensionManager( - namespace='ceilometer.event.trait_plugin')) - -for notification in notifications: - event = event_converter.to_event(notification) - if event is None: - out.write("Dropped notification: %s\n" % - notification['message_id']) - continue - out.write("Event: %s at %s\n" % (event.event_type, event.generated)) - for trait in event.traits: - dtype = TYPES[trait.dtype] - out.write(" Trait: name: %s, type: %s, value: %s\n" % ( - trait.name, dtype, trait.value)) diff --git a/tools/make_test_data.py b/tools/make_test_data.py deleted file mode 100755 index b58a60ee..00000000 --- a/tools/make_test_data.py +++ /dev/null @@ -1,229 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Command line tool for creating test data for Ceilometer. - -Usage: - -Generate testing data for e.g. for default time span - -source .tox/py27/bin/activate -./tools/make_test_data.py --user 1 --project 1 --resource 1 --counter cpu_util ---volume 20 -""" -import argparse -import datetime -import random -import uuid - -from oslo_config import cfg -from oslo_utils import timeutils - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer import storage - - -def make_test_data(name, meter_type, unit, volume, random_min, - random_max, user_id, project_id, resource_id, start, - end, interval, resource_metadata=None, source='artificial'): - resource_metadata = resource_metadata or {'display_name': 'toto', - 'host': 'tata', - 'image_ref': 'test', - 'instance_flavor_id': 'toto', - 'server_group': 'toto', - } - # Compute start and end timestamps for the new data. - if isinstance(start, datetime.datetime): - timestamp = start - else: - timestamp = timeutils.parse_strtime(start) - - if not isinstance(end, datetime.datetime): - end = timeutils.parse_strtime(end) - - increment = datetime.timedelta(minutes=interval) - - print('Adding new samples for meter %s.' % (name)) - # Generate samples - n = 0 - total_volume = volume - while timestamp <= end: - if (random_min >= 0 and random_max >= 0): - # If there is a random element defined, we will add it to - # user given volume. 
- if isinstance(random_min, int) and isinstance(random_max, int): - total_volume += random.randint(random_min, random_max) - else: - total_volume += random.uniform(random_min, random_max) - - c = sample.Sample(name=name, - type=meter_type, - unit=unit, - volume=total_volume, - user_id=user_id, - project_id=project_id, - resource_id=resource_id, - timestamp=timestamp.isoformat(), - resource_metadata=resource_metadata, - source=source, - ) - data = utils.meter_message_from_counter( - c, cfg.CONF.publisher.telemetry_secret) - # timestamp should be string when calculating signature, but should be - # datetime object when calling record_metering_data. - data['timestamp'] = timestamp - yield data - n += 1 - timestamp = timestamp + increment - - if (meter_type == 'gauge' or meter_type == 'delta'): - # For delta and gauge, we don't want to increase the value - # in time by random element. So we always set it back to - # volume. - total_volume = volume - - print('Added %d new samples for meter %s.' % (n, name)) - - -def record_test_data(conn, *args, **kwargs): - for data in make_test_data(*args, **kwargs): - conn.record_metering_data(data) - - -def get_parser(): - parser = argparse.ArgumentParser( - description='generate metering data', - ) - parser.add_argument( - '--interval', - default=10, - type=int, - help='The period between samples, in minutes.', - ) - parser.add_argument( - '--start', - default=31, - help='Number of days to be stepped back from now or date in the past (' - '"YYYY-MM-DDTHH:MM:SS" format) to define timestamps start range.', - ) - parser.add_argument( - '--end', - default=2, - help='Number of days to be stepped forward from now or date in the ' - 'future ("YYYY-MM-DDTHH:MM:SS" format) to define timestamps end ' - 'range.', - ) - parser.add_argument( - '--type', - choices=('gauge', 'cumulative'), - default='gauge', - dest='meter_type', - help='Counter type.', - ) - parser.add_argument( - '--unit', - default=None, - help='Counter unit.', - ) - parser.add_argument( - '--project', - dest='project_id', - help='Project id of owner.', - ) - parser.add_argument( - '--user', - dest='user_id', - help='User id of owner.', - ) - parser.add_argument( - '--random_min', - help='The random min border of amount for added to given volume.', - type=int, - default=0, - ) - parser.add_argument( - '--random_max', - help='The random max border of amount for added to given volume.', - type=int, - default=0, - ) - parser.add_argument( - '--resource', - dest='resource_id', - default=str(uuid.uuid4()), - help='The resource id for the meter data.', - ) - parser.add_argument( - '--counter', - default='instance', - dest='name', - help='The counter name for the meter data.', - ) - parser.add_argument( - '--volume', - help='The amount to attach to the meter.', - type=int, - default=1, - ) - return parser - - -def main(): - cfg.CONF([], project='ceilometer') - - args = get_parser().parse_args() - - # Connect to the metering database - conn = storage.get_connection_from_config(cfg.CONF) - - # Find the user and/or project for a real resource - if not (args.user_id or args.project_id): - for r in conn.get_resources(): - if r.resource_id == args.resource_id: - args.user_id = r.user_id - args.project_id = r.project_id - break - - # Compute the correct time span - format = '%Y-%m-%dT%H:%M:%S' - - try: - start = datetime.datetime.utcnow() - datetime.timedelta( - days=int(args.start)) - except ValueError: - try: - start = datetime.datetime.strptime(args.start, format) - except ValueError: - raise - - try: - end = 
datetime.datetime.utcnow() + datetime.timedelta( - days=int(args.end)) - except ValueError: - try: - end = datetime.datetime.strptime(args.end, format) - except ValueError: - raise - args.start = start - args.end = end - record_test_data(conn=conn, **args.__dict__) - - return 0 - - -if __name__ == '__main__': - main() diff --git a/tools/make_test_data.sh b/tools/make_test_data.sh deleted file mode 100755 index 23a93e88..00000000 --- a/tools/make_test_data.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash - -bindir=$(dirname $0) - -project_name="$1" -if [ -z "$project_name" ] -then - project_name=demo -fi - -if [ -z "$OS_USERNAME" ] -then - user=demo -else - user=$OS_USERNAME -fi - -# Convert a possible project name to an id, if we have -# openstack cli installed. -if which openstack >/dev/null -then - project=$(openstack project show "$project_name" -c id -f value) -else - # Assume they gave us the project id as argument. - project="$project_name" -fi - -if [ -z "$project" ] -then - echo "Could not determine project id for \"$project_name\"" 1>&2 - exit 1 -fi - -early1="2012-08-27T07:00:00" -early2="2012-08-27T17:00:00" - -start="2012-08-28T00:00:00" - -middle1="2012-08-28T08:00:00" -middle2="2012-08-28T18:00:00" -middle3="2012-08-29T09:00:00" -middle4="2012-08-29T19:00:00" - -end="2012-08-31T23:59:00" - -late1="2012-08-31T10:00:00" -late2="2012-08-31T20:00:00" - -mkdata() { - ${bindir}/make_test_data.py --project "$project" \ - --user "$user" --start "$2" --end "$3" \ - --resource "$1" --counter instance --volume 1 -} - -dates=(early1 early2 start middle1 middle2 middle3 middle4 end late1 late2) - -echo $project - -for i in $(seq 0 $((${#dates[@]} - 2)) ) -do - - iname=${dates[$i]} - eval "ivalue=\$$iname" - - for j in $(seq $((i + 1)) $((${#dates[@]} - 1)) ) - do - jname=${dates[$j]} - eval "jvalue=\$$jname" - - resource_id="${project_name}-$iname-$jname" - echo "$resource_id" - - mkdata "$resource_id" "$ivalue" "$jvalue" - [ $? -eq 0 ] || exit $? - done - echo -done diff --git a/tools/make_test_event_data.py b/tools/make_test_event_data.py index 1df6db4f..d665bc87 100755 --- a/tools/make_test_event_data.py +++ b/tools/make_test_event_data.py @@ -99,7 +99,7 @@ def main(): args = parser.parse_args() # Connect to the event database - conn = storage.get_connection_from_config(cfg.CONF, 'event') + conn = storage.get_connection_from_config(cfg.CONF) # Compute the correct time span start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start) diff --git a/tools/send_test_data.py b/tools/send_test_data.py deleted file mode 100755 index 867c34a4..00000000 --- a/tools/send_test_data.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Command line tool for sending test data for Ceilometer via oslo.messaging. 
- -Usage: - -Send messages with samples generated by make_test_data - -source .tox/py27/bin/activate -./tools/send_test_data.py --count 1000 --resources_count 10 --topic metering -""" -import argparse -import datetime -import functools -import json -import random -import uuid - -import make_test_data -from oslo_config import cfg -import oslo_messaging -from six import moves - -from ceilometer import messaging -from ceilometer.publisher import utils -from ceilometer import service - - -def send_batch_notifier(notifier, topic, batch): - notifier.sample({}, event_type=topic, payload=batch) - - -def get_notifier(config_file): - service.prepare_service(argv=['/', '--config-file', config_file]) - return oslo_messaging.Notifier( - messaging.get_transport(), - driver='messagingv2', - publisher_id='telemetry.publisher.test', - topics=['metering'], - ) - - -def generate_data(send_batch, make_data_args, samples_count, - batch_size, resources_count, topic): - make_data_args.interval = 1 - make_data_args.start = (datetime.datetime.utcnow() - - datetime.timedelta(minutes=samples_count)) - make_data_args.end = datetime.datetime.utcnow() - - make_data_args.resource_id = None - resources_list = [str(uuid.uuid4()) - for _ in moves.xrange(resources_count)] - resource_samples = {resource: 0 for resource in resources_list} - batch = [] - count = 0 - for sample in make_test_data.make_test_data(**make_data_args.__dict__): - count += 1 - resource = resources_list[random.randint(0, len(resources_list) - 1)] - resource_samples[resource] += 1 - sample['resource_id'] = resource - # need to change the timestamp from datetime.datetime type to iso - # format (unicode type), because collector will change iso format - # timestamp to datetime.datetime type before recording to db. - sample['timestamp'] = sample['timestamp'].isoformat() - # need to recalculate signature because of the resource_id change - sig = utils.compute_signature(sample, - cfg.CONF.publisher.telemetry_secret) - sample['message_signature'] = sig - batch.append(sample) - if len(batch) == batch_size: - send_batch(topic, batch) - batch = [] - if count == samples_count: - send_batch(topic, batch) - return resource_samples - send_batch(topic, batch) - return resource_samples - - -def get_parser(): - parser = argparse.ArgumentParser() - - parser.add_argument( - '--batch-size', - dest='batch_size', - type=int, - default=100 - ) - parser.add_argument( - '--config-file', - default='/etc/ceilometer/ceilometer.conf' - ) - parser.add_argument( - '--topic', - default='perfmetering' - ) - parser.add_argument( - '--samples-count', - dest='samples_count', - type=int, - default=1000 - ) - parser.add_argument( - '--resources-count', - dest='resources_count', - type=int, - default=100 - ) - parser.add_argument( - '--result-directory', - dest='result_dir', - default='/tmp' - ) - return parser - - -def main(): - args = get_parser().parse_known_args()[0] - make_data_args = make_test_data.get_parser().parse_known_args()[0] - notifier = get_notifier(args.config_file) - send_batch = functools.partial(send_batch_notifier, notifier) - result_dir = args.result_dir - del args.notify - del args.config_file - del args.result_dir - - resource_writes = generate_data(send_batch, make_data_args, - **args.__dict__) - result_file = "%s/sample-by-resource-%s" % (result_dir, - random.getrandbits(32)) - with open(result_file, 'w') as f: - f.write(json.dumps(resource_writes)) - return result_file - - -if __name__ == '__main__': - main() diff --git a/tools/show_data.py b/tools/show_data.py 
deleted file mode 100755 index 754dddcc..00000000 --- a/tools/show_data.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 New Dream Network (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import sys - -from oslo_config import cfg -import six - -from ceilometer import storage - - -def show_users(db, args): - for u in sorted(db.get_users()): - print(u) - - -def show_resources(db, args): - if args: - users = args - else: - users = sorted(db.get_users()) - for u in users: - print(u) - for resource in db.get_resources(user=u): - print(' %(resource_id)s %(timestamp)s' % resource) - for k, v in sorted(six.iteritems(resource['metadata'])): - print(' %-10s : %s' % (k, v)) - for meter in resource['meter']: - totals = db.get_statistics(storage.SampleFilter( - user=u, - meter=meter['counter_name'], - resource=resource['resource_id'], - )) - # FIXME(dhellmann): Need a way to tell whether to use - # max() or sum() by meter name without hard-coding. - if meter['counter_name'] in ['cpu', 'disk']: - value = totals[0]['max'] - else: - value = totals[0]['sum'] - print(' %s (%s): %s' % - (meter['counter_name'], meter['counter_type'], value)) - - -def show_total_resources(db, args): - if args: - users = args - else: - users = sorted(db.get_users()) - for u in users: - print(u) - for meter in ['disk', 'cpu', 'instance']: - stats = db.get_statistics(storage.SampleFilter( - user=u, - meter=meter, - )) - if meter in ['cpu', 'disk']: - total = stats['max'] - else: - total = stats['sum'] - print(' ', meter, total) - - -def show_raw(db, args): - fmt = ' %(timestamp)s %(counter_name)10s %(counter_volume)s' - for u in sorted(db.get_users()): - print(u) - for resource in db.get_resources(user=u): - print(' ', resource['resource_id']) - for sample in db.get_samples(storage.SampleFilter( - user=u, - resource=resource['resource_id'], - )): - print(fmt % sample) - - -def show_help(db, args): - print('COMMANDS:') - for name in sorted(COMMANDS.keys()): - print(name) - - -def show_projects(db, args): - for u in sorted(db.get_projects()): - print(u) - - -COMMANDS = { - 'users': show_users, - 'projects': show_projects, - 'help': show_help, - 'resources': show_resources, - 'total_resources': show_total_resources, - 'raw': show_raw, -} - - -def main(argv): - extra_args = cfg.CONF( - sys.argv[1:], - # NOTE(dhellmann): Read the configuration file(s) for the - # ceilometer collector by default. 
- default_config_files=['/etc/ceilometer/ceilometer.conf'], - ) - db = storage.get_connection_from_config(cfg.CONF) - command = extra_args[0] if extra_args else 'help' - COMMANDS[command](db, extra_args[1:]) - - -if __name__ == '__main__': - main(sys.argv) diff --git a/tools/test_hbase_table_utils.py b/tools/test_hbase_table_utils.py index 10294e31..0bf370d9 100755 --- a/tools/test_hbase_table_utils.py +++ b/tools/test_hbase_table_utils.py @@ -26,14 +26,11 @@ def main(argv): url = ("%s?table_prefix=%s" % (os.getenv("CEILOMETER_TEST_STORAGE_URL"), os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test"))) - conn = storage.get_connection(url, 'ceilometer.metering.storage') - event_conn = storage.get_connection(url, 'ceilometer.event.storage') + event_conn = storage.get_connection(url) for arg in argv: if arg == "--upgrade": - conn.upgrade() event_conn.upgrade() if arg == "--clear": - conn.clear() event_conn.clear() diff --git a/tox.ini b/tox.ini index 51f011d7..e93156ae 100644 --- a/tox.ini +++ b/tox.ini @@ -48,18 +48,6 @@ passenv = CEILOMETER_* commands = bash -x {toxinidir}/run-functional-tests.sh "{posargs}" -[testenv:integration] -setenv = VIRTUAL_ENV={envdir} - OS_TEST_PATH=./ceilometer/tests/integration - OS_TEST_TIMEOUT=2400 - GABBI_LIVE_FAIL_IF_NO_TEST=1 -passenv = {[testenv]passenv} HEAT_* CEILOMETER_* GNOCCHI_* AODH_* GLANCE_* NOVA_* ADMIN_* -# FIXME(sileht): run gabbi-run to failfast in case of error because testr -# doesn't support --failfast, but we loose the testr report. -commands = - bash -c 'cd ceilometer/tests/integration/gabbi/gabbits-live && gabbi-run -x < autoscaling.yaml' -# bash -x {toxinidir}/tools/pretty_tox.sh "{posargs}" - # NOTE(chdent): The gabbi tests are also run under the other functional # tox targets. This target simply provides a target to directly run just # gabbi tests without needing to do discovery across the entire body of