Allow Cloudkitty to collect non-OpenStack metrics
* Add a SourceFetcher, which makes it possible to retrieve new types of
  data sources and therefore to add new collectors (examples:
  Prometheus, InfluxDB, Kubernetes...).
* Base Cloudkitty on metrics instead of services and resources when
  collecting. This new architecture makes Cloudkitty more agnostic, so
  container metrics can be rated the same way as virtual machine
  metrics.
* Centralize metrology information in metrics.yml, keyed by metric
  name.

Task: 6291
Story: 2001501
Change-Id: I00ca080cf05dfc03a3363720f85b79e003eda9be
commit dff3e97b12
parent 4b9e916d3d
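For reference, the per-metric configuration shape this change introduces (mirrored by the new DEFAULT_METRICS_CONF module further down in this diff) is sketched below; metrics.yml carries the same keys in YAML form, and the snippet is illustrative rather than an exhaustive schema:

    # Sketch of the metrics-based configuration consumed by the new code
    # paths; keys taken from DEFAULT_METRICS_CONF in this commit.
    EXAMPLE_METRICS_CONF = {
        'name': 'OpenStack',
        'fetcher': 'keystone',       # or the new 'source' fetcher
        'collector': 'gnocchi',
        'period': 3600,
        'wait_periods': 2,
        'window': 1800,
        'metrics': {
            'image.size': {
                'resource': 'image',          # backend resource type
                'unit': 'MiB',                # rating unit
                'factor': 1.0 / 1048576,      # bytes -> MiB scaling
                'aggregation_method': 'max',
            },
        },
    }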
@@ -16,6 +16,7 @@
# @author: Maxime Cottret
#
from oslo_config import cfg
from oslo_log import log as logging
import pecan
from pecan import rest
import six
@@ -28,51 +29,99 @@ from cloudkitty import collector
from cloudkitty.common import policy
from cloudkitty import utils as ck_utils


LOG = logging.getLogger(__name__)

CONF = cfg.CONF

METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)

METADATA = collector.get_collector_metadata()
def get_all_metrics():
    METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
    METADATA = collector.get_metrics_based_collector_metadata()
    if 'metrics' not in METRICS_CONF:
        msg = 'Invalid endpoint: no metrics in current configuration.'
        pecan.abort(405, msg)

    policy.authorize(pecan.request.context, 'info:list_metrics_info', {})
    metrics_info_list = []
    for metric, metadata in METADATA.items():
        info = metadata.copy()
        info['metric_id'] = metric
        metrics_info_list.append(
            info_models.CloudkittyMetricInfo(**info))
    return info_models.CloudkittyMetricInfoCollection(
        metrics=metrics_info_list)


def get_one_metric(metric_name):
    METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
    METADATA = collector.get_metrics_based_collector_metadata()
    if 'metrics' not in METRICS_CONF:
        msg = 'Invalid endpoint: no metrics in current configuration.'
        pecan.abort(405, msg)

    policy.authorize(pecan.request.context, 'info:get_metric_info', {})
    try:
        info = METADATA[metric_name].copy()
        info['metric_id'] = metric_name
        return info_models.CloudkittyMetricInfo(**info)
    except KeyError:
        pecan.abort(404, six.text_type(metric_name))


class MetricInfoController(rest.RestController):
    """REST Controller managing collected metrics information

    independently of their services.
    If no metrics are defined in conf, return 405 for each endpoint.
    """

    @wsme_pecan.wsexpose(info_models.CloudkittyMetricInfoCollection)
    def get_all(self):
        """Get the metric list.

        :return: List of every metric.
        """
        return get_all_metrics()

    @wsme_pecan.wsexpose(info_models.CloudkittyMetricInfo, wtypes.text)
    def get_one(self, metric_name):
        """Return a metric.

        :param metric_name: name of the metric.
        """
        return get_one_metric(metric_name)


class ServiceInfoController(rest.RestController):
    """REST Controller mananging collected services information."""
    """REST Controller managing collected services information."""

    @wsme_pecan.wsexpose(info_models.CloudkittyServiceInfoCollection)
    @wsme_pecan.wsexpose(info_models.CloudkittyMetricInfoCollection)
    def get_all(self):
        """Get the service list.
        """Get the service list (deprecated).

        :return: List of every service.
        """
        policy.authorize(pecan.request.context, 'info:list_services_info', {})
        services_info_list = []
        for service, metadata in METADATA.items():
            info = metadata.copy()
            info['service_id'] = service
            services_info_list.append(
                info_models.CloudkittyServiceInfo(**info))
        return info_models.CloudkittyServiceInfoCollection(
            services=services_info_list)
        LOG.warning("Services based endpoints are deprecated. "
                    "Please use metrics based endpoints instead.")
        return get_all_metrics()

    @wsme_pecan.wsexpose(info_models.CloudkittyServiceInfo, wtypes.text)
    @wsme_pecan.wsexpose(info_models.CloudkittyMetricInfo, wtypes.text)
    def get_one(self, service_name):
        """Return a service.
        """Return a service (deprecated).

        :param service_name: name of the service.
        """
        policy.authorize(pecan.request.context, 'info:get_service_info', {})
        try:
            info = METADATA[service_name].copy()
            info['service_id'] = service_name
            return info_models.CloudkittyServiceInfo(**info)
        except KeyError:
            pecan.abort(404, six.text_type(service_name))
        LOG.warning("Services based endpoints are deprecated. "
                    "Please use metrics based endpoints instead.")
        return get_one_metric(service_name)


class InfoController(rest.RestController):
    """REST Controller managing Cloudkitty general information."""

    services = ServiceInfoController()
    metrics = MetricInfoController()

    _custom_actions = {'config': ['GET']}

@@ -82,6 +131,4 @@ class InfoController(rest.RestController):
    def config(self):
        """Return current configuration."""
        policy.authorize(pecan.request.context, 'info:get_config', {})
        info = {}
        info["collect"] = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
        return info
        return ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
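The rewritten controller reduces to a plain GET contract on /v1/info/metrics (the path comes from the policy definitions later in this diff). A hedged client-side sketch follows, with a made-up host and token; CloudKitty itself does not use python-requests, so this only illustrates the response shape built by get_all_metrics() above:

    import requests

    resp = requests.get(
        'http://cloudkitty.example.com:8889/v1/info/metrics',  # placeholder host
        headers={'X-Auth-Token': 'ADMIN_TOKEN'},               # placeholder token
    )
    resp.raise_for_status()
    for info in resp.json()['metrics']:
        # Each entry carries metric_id, metadata and unit.
        print(info['metric_id'], info['unit'])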
@@ -18,33 +18,40 @@
from oslo_config import cfg
from wsme import types as wtypes

from cloudkitty.default_metrics_conf import DEFAULT_METRICS_CONF
from cloudkitty import utils as ck_utils


CONF = cfg.CONF

METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)

CLOUDKITTY_SERVICES = wtypes.Enum(wtypes.text,
                                  *METRICS_CONF['services'])
def get_metrics_list():
    metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
    try:
        metrics = list(metrics_conf['metrics'].keys())
        cloudkitty_metrics = wtypes.Enum(wtypes.text, *metrics)
    except KeyError:
        metrics = list(DEFAULT_METRICS_CONF['metrics'].keys())
        cloudkitty_metrics = wtypes.Enum(wtypes.text, *metrics)

    return cloudkitty_metrics


class CloudkittyServiceInfo(wtypes.Base):
    """Type describing a service info in CloudKitty.
class CloudkittyMetricInfo(wtypes.Base):
    """Type describing a metric info in CloudKitty."""

    """

    service_id = CLOUDKITTY_SERVICES
    """Name of the service."""
    metric_id = get_metrics_list()
    """Name of the metric."""

    metadata = [wtypes.text]
    """List of service metadata"""
    """List of metric metadata"""

    unit = wtypes.text
    """service unit"""
    """Metric unit"""

    def to_json(self):
        res_dict = {}
        res_dict[self.service_id] = [{
        res_dict[self.metric_id] = [{
            'metadata': self.metadata,
            'unit': self.unit
        }]
@@ -52,18 +59,19 @@ class CloudkittyServiceInfo(wtypes.Base):

    @classmethod
    def sample(cls):
        sample = cls(service_id='compute',
                     metadata=['resource_id', 'flavor', 'availability_zone'],
                     unit='instance')
        metadata = ['resource_id', 'project_id', 'qty', 'unit']
        sample = cls(metric_id='image.size',
                     metadata=metadata,
                     unit='MiB')
        return sample


class CloudkittyServiceInfoCollection(wtypes.Base):
    """A list of CloudKittyServiceInfo."""
class CloudkittyMetricInfoCollection(wtypes.Base):
    """A list of CloudKittyMetricInfo."""

    services = [CloudkittyServiceInfo]
    metrics = [CloudkittyMetricInfo]

    @classmethod
    def sample(cls):
        sample = CloudkittyServiceInfo.sample()
        return cls(services=[sample])
        sample = CloudkittyMetricInfo.sample()
        return cls(metrics=[sample])
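Given the sample() values above, the new datamodel serializes per metric name. Assuming to_json() still ends by returning res_dict (the return line sits outside this hunk), the output would look like:

    from cloudkitty.api.v1.datamodels import info as info_models

    sample = info_models.CloudkittyMetricInfo.sample()
    print(sample.to_json())
    # {'image.size': [{'metadata': ['resource_id', 'project_id', 'qty', 'unit'],
    #                  'unit': 'MiB'}]}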
@@ -27,8 +27,21 @@ CONF = cfg.CONF

METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)

CLOUDKITTY_SERVICES = wtypes.Enum(wtypes.text,
                                  *METRICS_CONF['services'])
try:
    services_names = list(METRICS_CONF['services_objects'].keys())
except Exception:
    # TODO(mc): remove this hack once rated dataframes are based on metrics.
    services_names = [
        'compute',
        'volume',
        'image',
        'network.bw.in',
        'network.bw.out',
        'network.floating',
        'radosgw.usage',
    ]

CLOUDKITTY_SERVICES = wtypes.Enum(wtypes.text, *services_names)


class CloudkittyResource(wtypes.Base):
@@ -27,37 +27,51 @@ from cloudkitty import utils as ck_utils

CONF = cfg.CONF

METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)

COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'


def get_collector(transformers=None):
    metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
    if not transformers:
        transformers = transformer.get_transformers()
    collector_args = {
        'period': METRICS_CONF['period'],
        'transformers': transformers}
    collector = driver.DriverManager(
        'period': metrics_conf.get('period', 3600),
        'transformers': transformers,
    }
    collector_args.update({'conf': metrics_conf})
    return driver.DriverManager(
        COLLECTORS_NAMESPACE,
        METRICS_CONF['collector'],
        metrics_conf.get('collector', 'gnocchi'),
        invoke_on_load=True,
        invoke_kwds=collector_args).driver
    return collector


def get_collector_metadata():
def get_collector_without_invoke():
    """Return the collector without invoking it."""
    metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
    return driver.DriverManager(
        COLLECTORS_NAMESPACE,
        metrics_conf.get('collector', 'gnocchi'),
        invoke_on_load=False
    ).driver


def get_metrics_based_collector_metadata():
    """Return dict of metadata.

    Results are based on enabled collector and services in CONF.
    Results are based on enabled collector and metrics in CONF.
    """
    metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
    transformers = transformer.get_transformers()
    collector = driver.DriverManager(
        COLLECTORS_NAMESPACE, METRICS_CONF['collector'],
        invoke_on_load=False).driver
    collector = get_collector_without_invoke()
    metadata = {}
    for service in METRICS_CONF['services']:
        metadata[service] = collector.get_metadata(service, transformers)
    if 'metrics' in metrics_conf:
        for metric in metrics_conf.get('metrics', {}):
            metadata[metric] = collector.get_metadata(
                metric,
                transformers,
                metrics_conf,
            )
    return metadata


@@ -94,6 +108,7 @@ class BaseCollector(object):
        try:
            self.transformers = transformers
            self.period = kwargs['period']
            self.conf = kwargs['conf']
        except IndexError as e:
            raise ValueError("Missing argument (%s)" % e)

@@ -149,4 +164,4 @@ class BaseCollector(object):
                "No method found in collector '%s' for resource '%s'."
                % (self.collector_name, resource))
        func = getattr(self, trans_resource)
        return func(start, end, project_id, q_filter)
        return func(resource, start, end, project_id, q_filter)
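Both helpers above lean on stevedore entry points. A self-contained sketch of the lookup get_collector_without_invoke() performs (it assumes a CloudKitty install so the 'cloudkitty.collector.backends' entry points actually exist):

    from stevedore import driver

    # Resolve the plugin class registered under the collector namespace
    # without instantiating it, as get_collector_without_invoke() does.
    mgr = driver.DriverManager(
        'cloudkitty.collector.backends',
        'gnocchi',       # value of metrics_conf.get('collector', 'gnocchi')
        invoke_on_load=False,
    )
    collector_cls = mgr.driver  # e.g. GnocchiCollector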
@@ -19,7 +19,6 @@ from gnocchiclient import client as gclient
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units

from cloudkitty import collector
from cloudkitty import utils as ck_utils
@@ -43,8 +42,6 @@ ks_loading.register_auth_conf_options(
    GNOCCHI_COLLECTOR_OPTS)
CONF = cfg.CONF

METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)


class GnocchiCollector(collector.BaseCollector):
    collector_name = 'gnocchi'
@@ -112,22 +109,13 @@ class GnocchiCollector(collector.BaseCollector):
                 'interface': CONF.gnocchi_collector.interface})

    @classmethod
    def get_metadata(cls, resource_name, transformers):
    def get_metadata(cls, resource_name, transformers, conf):
        info = super(GnocchiCollector, cls).get_metadata(resource_name,
                                                         transformers)
        try:
            info["metadata"].extend(transformers['GnocchiTransformer']
                                    .get_metadata(resource_name))

            try:
                tmp = METRICS_CONF['metrics_units'][resource_name]
                info['unit'] = list(tmp.values())[0]['unit']
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated oslo config method.')
                info['unit'] = cls.units_mappings[resource_name][1]

            info['unit'] = conf['metrics'][resource_name]['unit']
        except KeyError:
            pass
        return info
@@ -192,38 +180,20 @@ class GnocchiCollector(collector.BaseCollector):
            # [point_date, granularity, value]
            # ["2015-11-24T00:00:00+00:00", 86400.0, 64.0]
            resource[name] = values[0][2]
        except IndexError:
        except (IndexError, KeyError):
            resource[name] = 0
        except KeyError:
            # Skip metrics not found
            pass

    def _expand_metrics(self, resources, mappings, start, end):
    def _expand_metrics(self, resources, mappings, start, end, resource_name):
        for resource in resources:
            metrics = resource.get('metrics', {})
            try:
                for mapping in mappings:
                    self._expand(
                        metrics,
                        resource,
                        mapping.keys()[0],
                        mapping.values()[0],
                        start,
                        end,
                    )
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except AttributeError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated oslo config method.')
                for name, aggregate in mappings:
                    self._expand(
                        metrics,
                        resource,
                        name,
                        aggregate,
                        start,
                        end,
                    )
            self._expand(
                metrics,
                resource,
                resource_name,
                mappings,
                start,
                end,
            )

    def get_resources(self, resource_name, start, end,
                      project_id, q_filter=None):
@@ -246,13 +216,7 @@ class GnocchiCollector(collector.BaseCollector):
        # Translating the resource name if needed
        query_parameters = self._generate_time_filter(start, end)

        try:
            resource_type = METRICS_CONF['services_objects'].get(resource_name)
        # NOTE(mc): deprecated except part kept for backward compatibility.
        except KeyError:
            LOG.warning('Error when trying to use yaml metrology conf.')
            LOG.warning('Fallback on the deprecated oslo config method.')
            resource_type = self.retrieve_mappings.get(resource_name)
        resource_type = self.conf['metrics'][resource_name]['resource']

        query_parameters.append(
            self.gen_filter(cop="=", type=resource_type))
@@ -265,72 +229,50 @@ class GnocchiCollector(collector.BaseCollector):
            query=self.extend_filter(*query_parameters))
        return resources

    def resource_info(self, resource_name, start, end, project_id,
                      q_filter=None):
        try:
            tmp = METRICS_CONF['metrics_units'][resource_name]
            qty = list(tmp.keys())[0]
            unit = list(tmp.values())[0]['unit']
        # NOTE(mc): deprecated except part kept for backward compatibility.
        except KeyError:
            LOG.warning('Error when trying to use yaml metrology conf.')
            LOG.warning('Fallback on the deprecated oslo config method.')
            qty, unit = self.units_mappings.get(
                resource_name,
                self.default_unit,
            )
    def resource_info(self, resource_name, start, end,
                      project_id, q_filter=None):
        met = self.conf['metrics'][resource_name]
        unit = met['unit']
        qty = 1 if met.get('countable_unit') else met['resource']

        resources = self.get_resources(
            resource_name,
            start,
            end,
            project_id=project_id,
            q_filter=q_filter,
        )

        resources = self.get_resources(resource_name, start, end,
                                       project_id=project_id,
                                       q_filter=q_filter)
        formated_resources = list()
        for resource in resources:
            resource_data = self.t_gnocchi.strip_resource_data(
                resource_name, resource)

            try:
                mappings = METRICS_CONF['services_metrics'][resource_name]
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated oslo config method.')
                mappings = self.metrics_mappings[resource_name]
            mapp = self.conf['metrics'][resource_name]['aggregation_method']

            self._expand_metrics(
                [resource_data],
                mapp,
                start,
                end,
                resource_name,
            )

            self._expand_metrics([resource_data], mappings, start, end)
            resource_data.pop('metrics', None)

            # Unit conversion
            try:
                conv_data = METRICS_CONF['metrics_units'][resource_name][qty]
                if isinstance(qty, str):
                    resource_data[qty] = ck_utils.convert_unit(
                        resource_data[qty],
                        conv_data.get('factor', '1'),
                        conv_data.get('offset', '0'),
                    )
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated hardcoded method.')

                if resource.get('type') == 'instance_network_interface':
                    resource_data[qty] = (
                        decimal.Decimal(resource_data[qty]) / units.M
                    )
                elif resource.get('type') == 'image':
                    resource_data[qty] = (
                        decimal.Decimal(resource_data[qty]) / units.Mi
                    )
                elif resource.get('type') == 'ceph_account':
                    resource_data[qty] = (
                        decimal.Decimal(resource_data[qty]) / units.Gi)
            if isinstance(qty, str):
                resource_data[resource_name] = ck_utils.convert_unit(
                    resource_data[resource_name],
                    self.conf['metrics'][resource_name].get('factor', 1),
                    self.conf['metrics'][resource_name].get('offset', 0),
                )

            val = qty if isinstance(qty, int) else resource_data[resource_name]
            data = self.t_cloudkitty.format_item(
                resource_data,
                unit,
                decimal.Decimal(
                    qty if isinstance(qty, int) else resource_data[qty]
                )
                decimal.Decimal(val)
            )

            # NOTE(sheeprine): Reference to gnocchi resource used by storage
@@ -338,10 +280,17 @@ class GnocchiCollector(collector.BaseCollector):
            formated_resources.append(data)
        return formated_resources

    def retrieve(self, resource_name, start, end, project_id, q_filter=None):
        resources = self.resource_info(resource_name, start, end,
                                       project_id=project_id,
                                       q_filter=q_filter)
    def retrieve(self, resource_name, start, end,
                 project_id, q_filter=None):

        resources = self.resource_info(
            resource_name,
            start,
            end,
            project_id,
            q_filter=q_filter,
        )

        if not resources:
            raise collector.NoDataCollected(self.collector_name, resource_name)
        return self.t_cloudkitty.format_service(resource_name, resources)
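All unit scaling now funnels through ck_utils.convert_unit(qty, factor, offset) with per-metric factor/offset values. The real helper lives in cloudkitty/utils.py and is not part of this diff; a plausible reimplementation, consistent with both the numeric factors used here (1 / 1048576) and the legacy string fractions ('1/1048576'), would be:

    import decimal
    from fractions import Fraction

    def convert_unit(qty, factor, offset):
        """Sketch of qty * factor + offset; the real helper may differ."""
        def _to_decimal(value):
            if isinstance(value, str) and '/' in value:
                value = Fraction(value)          # legacy 'a/b' strings
            if isinstance(value, Fraction):
                return decimal.Decimal(value.numerator) / value.denominator
            return decimal.Decimal(str(value))

        return _to_decimal(qty) * _to_decimal(factor) + _to_decimal(offset)

    print(convert_unit(536870912, '1/1048576', 0))  # 512 (bytes -> MiB)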
@@ -22,7 +22,6 @@ from keystoneclient.v3 import client as ks_client
from monascaclient import client as mclient
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units

from cloudkitty import collector
from cloudkitty import transformer
@@ -130,55 +129,59 @@ class MonascaCollector(collector.BaseCollector):
            return endpoint.url
        return None

    def _get_metadata(self, resource_type, transformers):
    def _get_metadata(self, resource_type, transformers, conf):
        info = {}
        try:
            met = list(METRICS_CONF['metrics_units'][resource_type].values())
            info['unit'] = met[0]['unit']
        # NOTE(mc): deprecated second try kept for backward compatibility.
        except KeyError:
            LOG.warning('Error when trying to use yaml metrology conf.')
            LOG.warning('Fallback on the deprecated oslo config method.')
            try:
                info['unit'] = self.units_mappings[resource_type][1]
            except (KeyError, IndexError):
                info['unit'] = self.default_unit[1]
        info['unit'] = conf['metrics'][resource_type]['unit']

        start = ck_utils.dt2ts(ck_utils.get_month_start())
        end = ck_utils.dt2ts(ck_utils.get_month_end())

        try:
            resource = self.active_resources(resource_type, start,
                                             end, None)[0]
            resource = self.active_resources(
                resource_type,
                start,
                end,
                None,
            )[0]
        except IndexError:
            resource = {}
        info['metadata'] = resource.get('dimensions', {}).keys()

        try:
            service_metrics = METRICS_CONF['services_metrics'][resource_type]
            for service_metric in service_metrics:
                metric, statistics = list(service_metric.items())[0]
                info['metadata'].append(metric)
        # NOTE(mc): deprecated second try kept for backward compatibility.
        except KeyError:
            LOG.warning('Error when trying to use yaml metrology conf.')
            LOG.warning('Fallback on the deprecated oslo config method.')
            try:
                for metric, statistics in self.metrics_mappings[resource_type]:
                    info['metadata'].append(metric)
            except (KeyError, IndexError):
                pass
        service_metrics = METRICS_CONF['services_metrics'][resource_type]
        for service_metric in service_metrics:
            metric, statistics = list(service_metric.items())[0]
            info['metadata'].append(metric)
        return info

    # NOTE(lukapeschke) if anyone sees a better way to do this,
    # please make a patch
    @classmethod
    def get_metadata(cls, resource_type, transformers):
    def get_metadata(cls, resource_type, transformers, conf):
        args = {
            'transformers': transformer.get_transformers(),
            'period': CONF.collect.period}
            'period': conf['period']}
        tmp = cls(**args)
        return tmp._get_metadata(resource_type, transformers)
        return tmp._get_metadata(resource_type, transformers, conf)

    def _get_resource_metadata(self, resource_type, start,
                               end, resource_id, conf):
        meter = conf['metrics'][resource_type]['resource']

        if not meter:
            return {}
        measurements = self._conn.metrics.list_measurements(
            name=meter,
            start_time=ck_utils.ts2dt(start),
            end_time=ck_utils.ts2dt(end),
            merge_metrics=True,
            dimensions={'resource_id': resource_id},
        )
        try:
            # Getting the last measurement of given period
            metadata = measurements[-1]['measurements'][-1][2]
        except (KeyError, IndexError):
            metadata = {}
        return metadata

    def _get_resource_qty(self, meter, start, end, resource_id, statistics):
        # NOTE(lukapeschke) the period trick is used to aggregate
@@ -213,14 +216,8 @@ class MonascaCollector(collector.BaseCollector):
        return len(measurements) > 0

    def active_resources(self, resource_type, start,
                         end, project_id, **kwargs):
        try:
            meter = METRICS_CONF['services_objects'].get(resource_type)
        # NOTE(mc): deprecated except part kept for backward compatibility.
        except KeyError:
            LOG.warning('Error when trying to use yaml metrology conf.')
            LOG.warning('Fallback on the deprecated oslo config method.')
            meter = self.retrieve_mappings.get(resource_type)
                         end, project_id, conf, **kwargs):
        meter = conf['metrics'][resource_type]['resource']

        if not meter:
            return {}
@@ -254,58 +251,32 @@ class MonascaCollector(collector.BaseCollector):
            statistics,
        )

        try:
            conv_data = METRICS_CONF['metrics_units'][resource_type]
            conv_data = conv_data.get(name)
            if conv_data:
                resource[name] = ck_utils.convert_unit(
                    qty,
                    conv_data.get('factor', 1),
                    conv_data.get('offset', 0),
                )
        # NOTE(mc): deprecated except part kept for backward compatibility.
        except KeyError:
            LOG.warning(
                'Error when trying to use yaml metrology conf.\n'
                'Fallback on the deprecated hardcoded dict method.')

            names = ['network.outgoing.bytes', 'network.incoming.bytes']
            if name in names:
                qty = qty / units.M
            elif 'image.' in name:
                qty = qty / units.Mi
            resource[name] = qty
        conv_data = METRICS_CONF['metrics'][resource_type].get(name)
        if conv_data:
            resource[name] = ck_utils.convert_unit(
                qty,
                conv_data.get('factor', 1),
                conv_data.get('offset', 0),
            )

    def resource_info(self, resource_type, start, end,
                      project_id, q_filter=None):

        try:
            tmp = METRICS_CONF['metrics_units'][resource_type]
            qty = list(tmp.keys())[0]
            unit = list(tmp.values())[0]['unit']
        # NOTE(mc): deprecated except part kept for backward compatibility.
        except KeyError:
            LOG.warning('Error when trying to use yaml metrology conf.')
            LOG.warning('Fallback on the deprecated oslo config method.')
            qty, unit = self.units_mappings.get(
                resource_type,
                self.default_unit
            )
        met = self.conf['metrics'][resource_type]
        unit = met['unit']
        qty = 1 if met.get('countable_unit') else met['resource']

        active_resources = self.active_resources(
            resource_type, start, end, project_id
        )

        resource_data = []
        for resource in active_resources:
            resource_id = resource['dimensions']['resource_id']
            data = resource['dimensions']
            try:
                mappings = METRICS_CONF['services_metrics'][resource_type]
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated oslo config method.')
                mappings = self.metrics_mappings[resource_type]
            mappings = (
                resource_type,
                METRICS_CONF['metrics'][resource_type]['aggregation_method'],
            )

            self._expand_metrics(
                data,
@@ -317,15 +288,7 @@ class MonascaCollector(collector.BaseCollector):
            )
            resource_qty = qty
            if not (isinstance(qty, int) or isinstance(qty, decimal.Decimal)):
                try:
                    resource_qty \
                        = METRICS_CONF['services_objects'][resource_type]
                # NOTE(mc): deprecated except part kept for backward compat.
                except KeyError:
                    LOG.warning('Error when trying to use yaml metrology conf')
                    msg = 'Fallback on the deprecated oslo config method'
                    LOG.warning(msg)
                    resource_qty = data[self.retrieve_mappings[resource_type]]
                resource_qty = METRICS_CONF['services_objects'][resource_type]
                resource_qty = data[resource_qty]

            resource = self.t_cloudkitty.format_item(data, unit, resource_qty)
@@ -20,12 +20,12 @@ import cloudkitty.api.app
import cloudkitty.collector.gnocchi
import cloudkitty.collector.monasca
import cloudkitty.config
import cloudkitty.fetcher
import cloudkitty.fetcher.keystone
import cloudkitty.orchestrator
import cloudkitty.service
import cloudkitty.storage
import cloudkitty.storage.hybrid.backends.gnocchi
import cloudkitty.tenant_fetcher
import cloudkitty.tenant_fetcher.keystone
import cloudkitty.utils

__all__ = ['list_opts']
@@ -41,8 +41,8 @@ _opts = [
        cloudkitty.collector.gnocchi.end_point_type_opts,
        cloudkitty.collector.gnocchi.gnocchi_collector_opts))),
    ('keystone_fetcher', list(itertools.chain(
        cloudkitty.tenant_fetcher.keystone.keystone_fetcher_opts,
        cloudkitty.tenant_fetcher.keystone.keystone_common_opts))),
        cloudkitty.fetcher.keystone.keystone_fetcher_opts,
        cloudkitty.fetcher.keystone.keystone_common_opts))),
    ('orchestrator', list(itertools.chain(
        cloudkitty.orchestrator.orchestrator_opts))),
    ('output', list(itertools.chain(
@@ -50,14 +50,14 @@ _opts = [
    ('state', list(itertools.chain(
        cloudkitty.config.state_opts))),
    ('storage', list(itertools.chain(
        cloudkitty.storage.storage_opts))),
        cloudkitty.utils.storage_opts))),
    ('storage_gnocchi', list(itertools.chain(
        cloudkitty.storage.hybrid.backends.gnocchi.gnocchi_storage_opts))),
    ('tenant_fetcher', list(itertools.chain(
        cloudkitty.tenant_fetcher.fetchers_opts))),
    ('fetcher', list(itertools.chain(
        cloudkitty.fetcher.fetchers_opts))),
    (None, list(itertools.chain(
        cloudkitty.api.app.auth_opts,
        cloudkitty.service.service_opts)))
        cloudkitty.service.service_opts))),
]

@@ -28,7 +28,19 @@ info_policies = [
        name='info:get_service_info',
        check_str=base.UNPROTECTED,
        description='Get specified service information.',
        operations=[{'path': '/v1/info/services/{service_id}',
        operations=[{'path': '/v1/info/services/{metric_id}',
                     'method': 'GET'}]),
    policy.DocumentedRuleDefault(
        name='info:list_metrics_info',
        check_str=base.UNPROTECTED,
        description='List available metrics information in Cloudkitty.',
        operations=[{'path': '/v1/info/metrics',
                     'method': 'LIST'}]),
    policy.DocumentedRuleDefault(
        name='info:get_metric_info',
        check_str=base.UNPROTECTED,
        description='Get specified metric information.',
        operations=[{'path': '/v1/info/metrics/{metric_id}',
                     'method': 'GET'}]),
    policy.DocumentedRuleDefault(
        name='info:get_config',
cloudkitty/default_metrics_conf.py (new file, 124 lines)
@@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Martin CAMEY
#
DEFAULT_METRICS_CONF = {
    'name': 'OpenStack',

    'fetcher': 'keystone',
    'collector': 'gnocchi',

    'period': 3600,
    'wait_periods': 2,
    'window': 1800,

    'services_objects': {
        'compute': 'instance',
        'volume': 'volume',
        'network.bw.out': 'instance_network_interface',
        'network.bw.in': 'instance_network_interface',
        'network.floating': 'network',
        'image': 'image',
        'radosgw.usage': 'ceph_account',
    },

    'metrics': {
        'vcpus': {
            'resource': 'instance',
            'unit': 'instance',
            'factor': 1,
            'aggregation_method': 'max',
            'countable_unit': True,
        },
        'memory': {
            'resource': 'instance',
            'unit': 'instance',
            'factor': 1,
            'aggregation_method': 'max',
            'countable_unit': True,
        },
        'cpu': {
            'resource': 'instance',
            'unit': 'instance',
            'factor': 1,
            'aggregation_method': 'max',
            'countable_unit': True,
        },
        'disk.root.size': {
            'resource': 'instance',
            'unit': 'instance',
            'factor': 1,
            'aggregation_method': 'max',
            'countable_unit': True,
        },
        'disk.ephemeral.size': {
            'resource': 'instance',
            'unit': 'instance',
            'factor': 1,
            'aggregation_method': 'max',
            'countable_unit': True,
        },
        'image.size': {
            'resource': 'image',
            'unit': 'MiB',
            'factor': 1 / 1048576,
            'aggregation_method': 'max',
        },
        'image.download': {
            'resource': 'image',
            'unit': 'MiB',
            'factor': 1 / 1048576,
            'aggregation_method': 'max',
        },
        'image.serve': {
            'resource': 'image',
            'unit': 'MiB',
            'factor': 1 / 1048576,
            'aggregation_method': 'max',
        },
        'volume.size': {
            'resource': 'volume',
            'unit': 'GiB',
            'factor': 1,
            'aggregation_method': 'max',
        },
        'network.outgoing.bytes': {
            'resource': 'instance_network_interface',
            'unit': 'MB',
            'factor': 1 / 1000000,
            'aggregation_method': 'max',
        },
        'network.incoming.bytes': {
            'resource': 'instance_network_interface',
            'unit': 'MB',
            'factor': 1 / 1000000,
            'aggregation_method': 'max',
        },
        'ip.floating': {
            'resource': 'network',
            'unit': 'ip',
            'factor': 1,
            'aggregation_method': 'max',
            'countable_unit': True,
        },
        'radosgw.objects.size': {
            'resource': 'ceph_account',
            'unit': 'GiB',
            'factor': 1 / 1073741824,
            'aggregation_method': 'max',
        },
    },
}
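A quick look at how a collector consumes one of these entries (plain dict access, no running services needed). One caveat worth flagging: 1 / 1048576 is float division on Python 3 but truncates to 0 on Python 2 unless the module gained from __future__ import division, which this hunk does not show:

    from cloudkitty.default_metrics_conf import DEFAULT_METRICS_CONF

    met = DEFAULT_METRICS_CONF['metrics']['image.size']
    # Backend resource type, rating unit, and byte -> MiB scaling factor.
    print(met['resource'], met['unit'], met['factor'])
    print(536870912 * met['factor'])  # 512.0 (MiB) on Python 3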
@@ -20,7 +20,7 @@ import csv

from oslo_config import cfg

from cloudkitty import tenant_fetcher
from cloudkitty import fetcher

fake_fetcher_opts = [
    cfg.StrOpt('file',
@@ -31,7 +31,7 @@ cfg.CONF.register_opts(fake_fetcher_opts, 'fake_fetcher')
CONF = cfg.CONF


class FakeFetcher(tenant_fetcher.BaseFetcher):
class FakeFetcher(fetcher.BaseFetcher):
    """Fake tenants fetcher."""

    def __init__(self):
@@ -22,7 +22,8 @@ from keystoneclient import discover
from keystoneclient import exceptions
from oslo_config import cfg

from cloudkitty import tenant_fetcher
from cloudkitty import fetcher


KEYSTONE_FETCHER_OPTS = 'keystone_fetcher'
keystone_common_opts = ks_loading.get_auth_common_conf_options()
@@ -42,9 +43,11 @@ ks_loading.register_auth_conf_options(
CONF = cfg.CONF


class KeystoneFetcher(tenant_fetcher.BaseFetcher):
class KeystoneFetcher(fetcher.BaseFetcher):
    """Keystone tenants fetcher."""

    name = 'keystone'

    def __init__(self):
        self.auth = ks_loading.load_auth_from_conf_options(
            CONF,
@@ -58,18 +61,18 @@ class KeystoneFetcher(tenant_fetcher.BaseFetcher):
            session=self.session,
            auth_url=self.auth.auth_url)

    def get_tenants(self):
    def get_tenants(self, conf=None):
        keystone_version = discover.normalize_version_number(
            CONF.keystone_fetcher.keystone_version)
        auth_dispatch = {(3,): ('project', 'projects', 'list'),
                         (2,): ('tenant', 'tenants', 'roles_for_user')}
        for auth_version, auth_version_mapping in auth_dispatch.items():
            if discover.version_match(auth_version, keystone_version):
                return self._do_get_tenants(auth_version_mapping)
                return self._do_get_tenants(auth_version_mapping, conf)
        msg = "Keystone version you've specified is not supported"
        raise exceptions.VersionNotAvailable(msg)

    def _do_get_tenants(self, auth_version_mapping):
    def _do_get_tenants(self, auth_version_mapping, conf):
        tenant_attr, tenants_attr, role_func = auth_version_mapping
        tenant_list = getattr(self.admin_ks, tenants_attr).list()
        my_user_id = self.session.get_user_id()
@@ -79,4 +82,8 @@ class KeystoneFetcher(tenant_fetcher.BaseFetcher):
                tenant_attr: tenant})
            if 'rating' not in [role.name for role in roles]:
                tenant_list.remove(tenant)
        return [tenant.id for tenant in tenant_list]
        if conf:
            res = [{'tenant_id': tenant.id} for tenant in tenant_list]
            for tenant in res:
                tenant.update(conf)
            return res
cloudkitty/fetcher/source.py (new file, 37 lines)
@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Martin CAMEY
#
import hashlib

from cloudkitty import fetcher


class SourceFetcher(fetcher.BaseFetcher):
    """Source projects fetcher."""

    name = 'source'

    def get_projects(self, conf=None):
        if conf:
            tmp = hashlib.md5()
            tmp.update(conf['name'])
            conf['tenant_id'] = tmp.hexdigest()
            return [conf]

    def get_tenants(self, conf=None):
        return self.get_projects(conf=conf)
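A usage sketch for the new fetcher; the source name is made up. Note that hashlib.md5().update() requires bytes on Python 3, so conf['name'] would need encoding there; the tree still supported Python 2 at this point:

    from cloudkitty.fetcher.source import SourceFetcher

    conf = {'name': 'prometheus-east'}  # illustrative source definition
    tenants = SourceFetcher().get_tenants(conf=conf)
    # The conf dict comes back tagged with a deterministic pseudo tenant
    # ID derived from the source name.
    print(tenants[0]['tenant_id'])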
@@ -41,7 +41,7 @@ eventlet.monkey_patch()
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.import_opt('backend', 'cloudkitty.tenant_fetcher', 'tenant_fetcher')
CONF.import_opt('backend', 'cloudkitty.fetcher', 'tenant_fetcher')

orchestrator_opts = [
    cfg.StrOpt('coordination_url',
@@ -55,6 +55,8 @@ METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)

FETCHERS_NAMESPACE = 'cloudkitty.tenant.fetchers'
PROCESSORS_NAMESPACE = 'cloudkitty.rating.processors'
COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'
STORAGES_NAMESPACE = 'cloudkitty.storage.backends'


class RatingEndpoint(object):
@@ -151,20 +153,26 @@ class APIWorker(BaseWorker):


class Worker(BaseWorker):
    def __init__(self, collector, storage, tenant_id=None):
    def __init__(self, collector, storage, tenant):
        self._collector = collector
        self._storage = storage
        self._period = METRICS_CONF['period']
        self._wait_time = METRICS_CONF['wait_periods'] * self._period
        self._period = tenant['period']
        self._wait_time = tenant['wait_periods'] * self._period
        self._tenant_id = tenant['tenant_id']
        self.conf = tenant

        super(Worker, self).__init__(tenant_id)
        super(Worker, self).__init__(self._tenant_id)

    def _collect(self, service, start_timestamp):
    def _collect(self, metric, start_timestamp):
        next_timestamp = start_timestamp + self._period
        raw_data = self._collector.retrieve(service,
                                            start_timestamp,
                                            next_timestamp,
                                            self._tenant_id)

        raw_data = self._collector.retrieve(
            metric,
            start_timestamp,
            next_timestamp,
            self._tenant_id,
        )

        if raw_data:
            return [{'period': {'begin': start_timestamp,
                                'end': next_timestamp},
@@ -182,18 +190,20 @@ class Worker(BaseWorker):
            if not timestamp:
                break

            for service in METRICS_CONF['services']:
            metrics = list(self.conf['metrics'].keys())

            for metric in metrics:
                try:
                try:
                    data = self._collect(service, timestamp)
                    data = self._collect(metric, timestamp)
                except collector.NoDataCollected:
                    raise
                except Exception as e:
                    LOG.warning(
                        'Error while collecting service '
                        '%(service)s: %(error)s',
                        {'service': service, 'error': e})
                    raise collector.NoDataCollected('', service)
                        'Error while collecting metric '
                        '%(metric)s: %(error)s',
                        {'metric': metric, 'error': e})
                    raise collector.NoDataCollected('', metric)
            except collector.NoDataCollected:
                begin = timestamp
                end = begin + self._period
@@ -213,15 +223,15 @@ class Worker(BaseWorker):

class Orchestrator(object):
    def __init__(self):
        # Tenant fetcher
        self.fetcher = driver.DriverManager(
            FETCHERS_NAMESPACE,
            CONF.tenant_fetcher.backend,
            invoke_on_load=True).driver
            METRICS_CONF['fetcher'],
            invoke_on_load=True
        ).driver

        self.transformers = transformer.get_transformers()
        self.collector = collector.get_collector(self.transformers)
        self.storage = storage.get_storage(self.collector)
        transformers = transformer.get_transformers()
        self.collector = collector.get_collector(transformers)
        self.storage = storage.get_storage(collector=self.collector)

        # RPC
        self.server = None
@@ -234,17 +244,10 @@ class Orchestrator(object):
            uuidutils.generate_uuid().encode('ascii'))
        self.coord.start()

        self._period = METRICS_CONF['period']
        self._wait_time = METRICS_CONF['wait_periods'] * self._period

    def _lock(self, tenant_id):
        lock_name = b"cloudkitty-" + str(tenant_id).encode('ascii')
        return self.coord.get_lock(lock_name)

    def _load_tenant_list(self):
        self._tenants = self.fetcher.get_tenants()
        random.shuffle(self._tenants)

    def _init_messaging(self):
        target = oslo_messaging.Target(topic='cloudkitty',
                                       server=CONF.host,
@@ -255,11 +258,11 @@ class Orchestrator(object):
        self.server = messaging.get_server(target, endpoints)
        self.server.start()

    def _check_state(self, tenant_id):
    def _check_state(self, tenant_id, period, wait_time):
        timestamp = self.storage.get_state(tenant_id)
        return ck_utils.check_time_state(timestamp,
                                         self._period,
                                         self._wait_time)
                                         period,
                                         wait_time)

    def process_messages(self):
        # TODO(sheeprine): Code kept to handle threading and asynchronous
@@ -270,26 +273,36 @@ class Orchestrator(object):

    def process(self):
        while True:
            self.process_messages()
            self._load_tenant_list()
            while len(self._tenants):
                for tenant in self._tenants[:]:
                    lock = self._lock(tenant)
                    if lock.acquire(blocking=False):
                        if not self._check_state(tenant):
                            self._tenants.remove(tenant)
                        else:
                            worker = Worker(self.collector,
                                            self.storage,
                                            tenant)
                            worker.run()
                        lock.release()
                    self.coord.heartbeat()
            self.tenants = self.fetcher.get_tenants(METRICS_CONF)
            random.shuffle(self.tenants)
            LOG.info('Tenants loaded for fetcher %s', self.fetcher.name)

            for tenant in self.tenants:
                lock = self._lock(tenant['tenant_id'])
                if lock.acquire(blocking=False):
                    state = self._check_state(
                        tenant['tenant_id'],
                        tenant['period'],
                        tenant['wait_periods'],
                    )
                    if not state:
                        self.tenants.remove(tenant)
                    else:
                        worker = Worker(
                            self.collector,
                            self.storage,
                            tenant,
                        )

                        worker.run()
                    lock.release()
                self.coord.heartbeat()

            # NOTE(sheeprine): Slow down looping if all tenants are
            # being processed
            eventlet.sleep(1)
            # FIXME(sheeprine): We may cause a drift here
            eventlet.sleep(self._period)
            eventlet.sleep(tenant['period'])

    def terminate(self):
        self.coord.stop()
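The loop above now feeds per-tenant period and wait_periods into ck_utils.check_time_state(), which is not part of this diff. A sketch of the semantics the Worker relies on — hand back the next period's start timestamp once it is old enough, else something falsy — is below; note that process() passes tenant['wait_periods'] straight through as the wait_time argument, while Worker multiplies wait_periods by the period first:

    import time

    def check_time_state(timestamp, period, wait_time):
        """Sketch only; cloudkitty.utils.check_time_state may differ."""
        if not timestamp:
            timestamp = 0  # the real helper derives an initial state
        next_timestamp = timestamp + period
        if next_timestamp + wait_time < time.time():
            return next_timestamp
        return 0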
@@ -25,31 +25,31 @@ from stevedore import driver
from cloudkitty import collector as ck_collector
from cloudkitty import utils as ck_utils


LOG = logging.getLogger(__name__)

storage_opts = [
    cfg.StrOpt('backend',
               default='sqlalchemy',
               help='Name of the storage backend driver.')
]

CONF = cfg.CONF
CONF.register_opts(storage_opts, group='storage')

# NOTE(mc): This hack is possible because only
# one OpenStack configuration is allowed.
METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)

STORAGES_NAMESPACE = 'cloudkitty.storage.backends'


def get_storage(collector=None):
    if not collector:
        collector = ck_collector.get_collector()
    storage_args = {
        'period': METRICS_CONF['period'],
        'collector': collector if collector else ck_collector.get_collector()}
        'period': METRICS_CONF.get('period', 3600),
        'collector': collector,
    }
    backend = driver.DriverManager(
        STORAGES_NAMESPACE,
        cfg.CONF.storage.backend,
        invoke_on_load=True,
        invoke_kwds=storage_args).driver
        invoke_kwds=storage_args
    ).driver
    return backend


@@ -68,7 +68,7 @@ class BaseStorage(object):
    Handle incoming data from the global orchestrator, and store them.
    """
    def __init__(self, **kwargs):
        self._period = kwargs.get('period', METRICS_CONF['period'])
        self._period = kwargs.get('period')
        self._collector = kwargs.get('collector')

        # State vars
@@ -36,6 +36,10 @@ LOG = logging.getLogger(__name__)

CONF = cfg.CONF

# NOTE(mc): This hack is possible because only
# one OpenStack configuration is allowed.
METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)

CONF.import_opt('period', 'cloudkitty.collector', 'collect')

GNOCCHI_STORAGE_OPTS = 'storage_gnocchi'
@@ -49,7 +53,7 @@ gnocchi_storage_opts = [
    # The archive policy definition MUST include the collect period granularity
    cfg.StrOpt('archive_policy_definition',
               default='[{"granularity": '
               + six.text_type(CONF.collect.period) +
               + six.text_type(METRICS_CONF.get('period', 3600)) +
               ', "timespan": "90 days"}, '
               '{"granularity": 86400, "timespan": "360 days"}, '
               '{"granularity": 2592000, "timespan": "1800 days"}]',
@@ -63,8 +67,6 @@ ks_loading.register_auth_conf_options(
    CONF,
    GNOCCHI_STORAGE_OPTS)

METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)

RESOURCE_TYPE_NAME_ROOT = 'rating_service_'


@@ -100,37 +102,24 @@ class GnocchiStorage(BaseHybridBackend):
        "creator",
    ]

    @staticmethod
    def _get_service_metrics(service_name):
        metrics = METRICS_CONF['services_metrics'][service_name]
        metric_list = ['price']
        for metric in metrics:
            metric_list.append(list(metric.keys())[0])
        return metric_list

    def _init_resource_types(self):
        transformer = gtransformer.GnocchiTransformer()
        services = METRICS_CONF['services']
        for service in services:
            service_dict = dict()
            service_dict['attributes'] = list()
            for attribute in transformer.get_metadata(service):
        for metric in list(self.conf['metrics'].keys()):
            metric_dict = dict()
            metric_dict['attributes'] = list()
            for attribute in transformer.get_metadata(metric):
                if attribute not in self.invalid_attribute_names:
                    service_dict['attributes'].append(attribute)
            service_dict['required_attributes'] = [
                    metric_dict['attributes'].append(attribute)
            metric_dict['required_attributes'] = [
                'resource_id',
                'unit',
            ]
            try:
                service_dict['metrics'] = self._get_service_metrics(service)
            except KeyError:
                LOG.warning(
                    'No metrics configured for service {}'.format(service))
                service_dict['metrics'] = list()
            service_dict['name'] = RESOURCE_TYPE_NAME_ROOT + service
            service_dict['qty_metric'] \
                = list(METRICS_CONF['metrics_units'][service].keys())[0]
            self._resource_type_data[service] = service_dict
            metric_dict['name'] = RESOURCE_TYPE_NAME_ROOT + metric
            metric_dict['qty_metric'] = 1
            if self.conf['metrics'][metric].get('countable_unit'):
                resource = self.conf['metrics'][metric]['resource']
                metric_dict['qty_metric'] = resource
            self._resource_type_data[metric] = metric_dict

    def _get_res_type_dict(self, res_type):
        res_type_data = self._resource_type_data.get(res_type, None)
@@ -175,7 +164,7 @@ class GnocchiStorage(BaseHybridBackend):
                'name': metric,
                'archive_policy_name':
                    CONF.storage_gnocchi.archive_policy_name,
            }) for metric in res_type_data['metrics']
            }) for metric in ['price', res_type]
        ]

        metrics_dict = dict()
@@ -235,6 +224,7 @@ class GnocchiStorage(BaseHybridBackend):

    def __init__(self, **kwargs):
        super(GnocchiStorage, self).__init__(**kwargs)
        self.conf = kwargs['conf'] if 'conf' in kwargs else METRICS_CONF
        self.auth = ks_loading.load_auth_from_conf_options(
            CONF,
            GNOCCHI_STORAGE_OPTS)
@@ -251,7 +241,7 @@ class GnocchiStorage(BaseHybridBackend):
            CONF.storage_gnocchi.archive_policy_name)
        self._archive_policy_definition = json.loads(
            CONF.storage_gnocchi.archive_policy_definition)
        self._period = CONF.collect.period
        self._period = self.conf['period']
        if "period" in kwargs:
            self._period = kwargs["period"]
        self._measurements = dict()
@@ -387,6 +387,20 @@ class CORSConfigFixture(fixture.GabbiFixture):
        cfg.ConfigOpts.GroupAttr.__getattr__ = self._original_call_method


class MetricsConfFixture(fixture.GabbiFixture):
    """Inject a metrics configuration mock into get_metrics_conf()."""

    def start_fixture(self):
        self._original_function = ck_utils.get_metrics_conf
        ck_utils.get_metrics_conf = mock.Mock(
            return_value=tests.samples.METRICS_CONF,
        )

    def stop_fixture(self):
        """Remove the get_metrics_conf() monkeypatch."""
        ck_utils.get_metrics_conf = self._original_function


def setup_app():
    messaging.setup()
    # FIXME(sheeprine): Extension fixtures are interacting with transformers
@@ -1,45 +1,68 @@
fixtures:
    - ConfigFixture
    - MetricsConfFixture

tests:
    - name: get config
      url: /v1/info/config
      status: 200
      response_json_paths:
          $.collect.services.`len`: 6
          $.collect.services[0]: compute
          $.collect.services[1]: volume
          $.collect.services[2]: network.bw.in
          $.collect.services[3]: network.bw.out
          $.collect.services[4]: network.floating
          $.collect.services[5]: image
          $.collect.collector: gnocchi
          $.collect.window: 1800
          $.collect.wait_periods: 2
          $.collect.period: 3600
          $.collector: gnocchi
          $.window: 1800
          $.wait_periods: 2
          $.period: 3600

    - name: get services info
      url: /v1/info/services
          $.metrics.`len`: 13
          $.metrics.vcpus.unit: instance
          $.metrics.memory.unit: instance
          $.metrics.cpu.unit: instance
          $.metrics['disk.root.size'].unit: instance
          $.metrics['disk.ephemeral.size'].unit: instance
          $.metrics['image.size'].unit: MiB
          $.metrics['image.download'].unit: MiB
          $.metrics['image.serve'].unit: MiB
          $.metrics['volume.size'].unit: GiB
          $.metrics['network.incoming.bytes'].unit: MB
          $.metrics['network.outgoing.bytes'].unit: MB
          $.metrics['ip.floating'].unit: ip
          $.metrics['radosgw.objects.size'].unit: GiB

    - name: get metrics info
      url: /v1/info/metrics
      status: 200
      response_json_paths:
          $.services.`len`: 6
          $.services[/service_id][0].service_id: compute
          $.services[/service_id][0].unit: instance
          $.services[/service_id][1].service_id: image
          $.services[/service_id][1].unit: MiB
          $.services[/service_id][2].service_id: network.bw.in
          $.services[/service_id][2].unit: MB
          $.services[/service_id][3].service_id: network.bw.out
          $.services[/service_id][3].unit: MB
          $.services[/service_id][4].service_id: network.floating
          $.services[/service_id][4].unit: ip
          $.services[/service_id][5].service_id: volume
          $.services[/service_id][5].unit: GiB
          $.metrics.`len`: 13
          $.metrics[/metric_id][0].metric_id: cpu
          $.metrics[/metric_id][0].unit: instance
          $.metrics[/metric_id][1].metric_id: disk.ephemeral.size
          $.metrics[/metric_id][1].unit: instance
          $.metrics[/metric_id][2].metric_id: disk.root.size
          $.metrics[/metric_id][2].unit: instance
          $.metrics[/metric_id][3].metric_id: image.download
          $.metrics[/metric_id][3].unit: MiB
          $.metrics[/metric_id][4].metric_id: image.serve
          $.metrics[/metric_id][4].unit: MiB
          $.metrics[/metric_id][5].metric_id: image.size
          $.metrics[/metric_id][5].unit: MiB
          $.metrics[/metric_id][6].metric_id: ip.floating
          $.metrics[/metric_id][6].unit: ip
          $.metrics[/metric_id][7].metric_id: memory
          $.metrics[/metric_id][7].unit: instance
          $.metrics[/metric_id][8].metric_id: network.incoming.bytes
          $.metrics[/metric_id][8].unit: MB
          $.metrics[/metric_id][9].metric_id: network.outgoing.bytes
          $.metrics[/metric_id][9].unit: MB
          $.metrics[/metric_id][10].metric_id: radosgw.objects.size
          $.metrics[/metric_id][10].unit: GiB
          $.metrics[/metric_id][11].metric_id: vcpus
          $.metrics[/metric_id][11].unit: instance
          $.metrics[/metric_id][12].metric_id: volume.size
          $.metrics[/metric_id][12].unit: GiB

    - name: get compute service info
      url: /v1/info/services/compute
    - name: get cpu metric info
      url: /v1/info/metrics/cpu
      status: 200
      response_json_paths:
          $.service_id: compute
          $.metric_id: cpu
          $.unit: instance
          $.metadata.`len`: 8
          $.metadata.`len`: 4
@ -18,6 +18,7 @@
|
||||
import copy
|
||||
import decimal
|
||||
|
||||
from cloudkitty.default_metrics_conf import DEFAULT_METRICS_CONF
|
||||
from cloudkitty import utils as ck_utils
|
||||
|
||||
TENANT = 'f266f30b11f246b589fd266f85eeec39'
|
||||
@ -74,12 +75,12 @@ SECOND_PERIOD = {
COLLECTED_DATA = [{
    'period': FIRST_PERIOD,
    'usage': {
        'compute': [{
        'cpu': [{
            'desc': COMPUTE_METADATA,
            'vol': {
                'qty': decimal.Decimal(1.0),
                'unit': 'instance'}}],
        'image': [{
        'image.size': [{
            'desc': IMAGE_METADATA,
            'vol': {
                'qty': decimal.Decimal(1.0),
@ -87,7 +88,7 @@ COLLECTED_DATA = [{
    }}, {
        'period': SECOND_PERIOD,
        'usage': {
            'compute': [{
            'cpu': [{
                'desc': COMPUTE_METADATA,
                'vol': {
                    'qty': decimal.Decimal(1.0),
@ -95,11 +96,11 @@ COLLECTED_DATA = [{
}}]

RATED_DATA = copy.deepcopy(COLLECTED_DATA)
RATED_DATA[0]['usage']['compute'][0]['rating'] = {
RATED_DATA[0]['usage']['cpu'][0]['rating'] = {
    'price': decimal.Decimal('0.42')}
RATED_DATA[0]['usage']['image'][0]['rating'] = {
RATED_DATA[0]['usage']['image.size'][0]['rating'] = {
    'price': decimal.Decimal('0.1337')}
RATED_DATA[1]['usage']['compute'][0]['rating'] = {
RATED_DATA[1]['usage']['cpu'][0]['rating'] = {
    'price': decimal.Decimal('0.42')}

@ -121,70 +122,13 @@ def split_storage_data(raw_data):
# FIXME(sheeprine): storage is not using decimal for rates, we need to
# transition to decimal.
STORED_DATA = copy.deepcopy(COLLECTED_DATA)
STORED_DATA[0]['usage']['compute'][0]['rating'] = {
STORED_DATA[0]['usage']['cpu'][0]['rating'] = {
    'price': 0.42}
STORED_DATA[0]['usage']['image'][0]['rating'] = {
STORED_DATA[0]['usage']['image.size'][0]['rating'] = {
    'price': 0.1337}
STORED_DATA[1]['usage']['compute'][0]['rating'] = {
STORED_DATA[1]['usage']['cpu'][0]['rating'] = {
    'price': 0.42}

STORED_DATA = split_storage_data(STORED_DATA)

METRICS_CONF = {
    'collector': 'gnocchi',
    'name': 'OpenStack',
    'period': 3600,
    'services': [
        'compute',
        'volume',
        'network.bw.in',
        'network.bw.out',
        'network.floating',
        'image'
    ],
    'services_metrics': {
        'compute': [
            {'vcpus': 'max'},
            {'memory': 'max'},
            {'cpu': 'max'},
            {'disk.root.size': 'max'},
            {'disk.ephemeral.size': 'max'}
        ],
        'image': [
            {'image.size': 'max'},
            {'image.download': 'max'},
            {'image.serve': 'max'}
        ],
        'network.bw.in': [{'network.incoming.bytes': 'max'}],
        'network.bw.out': [{'network.outgoing.bytes': 'max'}],
        'network.floating': [{'ip.floating': 'max'}],
        'volume': [{'volume.size': 'max'}],
        'radosgw.usage': [{'radosgw.objects.size': 'max'}]},
    'services_objects': {
        'compute': 'instance',
        'image': 'image',
        'network.bw.in': 'instance_network_interface',
        'network.bw.out': 'instance_network_interface',
        'network.floating': 'network',
        'volume': 'volume',
        'radosgw.usage': 'ceph_account',
    },
    'metrics_units': {
        'compute': {1: {'unit': 'instance'}},
        'default_unit': {1: {'unit': 'unknown'}},
        'image': {'image.size': {'unit': 'MiB', 'factor': '1/1048576'}},
        'network.bw.in': {'network.incoming.bytes': {
            'unit': 'MB',
            'factor': '1/1000000'}},
        'network.bw.out': {'network.outgoing.bytes': {
            'unit': 'MB',
            'factor': '1/1000000'}},
        'network.floating': {1: {'unit': 'ip'}},
        'volume': {'volume.size': {'unit': 'GiB'}},
        'radosgw.usage': {'radosgw.objects.size': {
            'unit': 'GiB',
            'factor': '1/1073741824'}},
    },
    'wait_periods': 2,
    'window': 1800
}
METRICS_CONF = DEFAULT_METRICS_CONF
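The hand-rolled METRICS_CONF literal above is dropped in favour of the
packaged defaults. A minimal sketch of inspecting them, assuming
DEFAULT_METRICS_CONF mirrors the reworked metrics.yml layout shown later in
this change:

from cloudkitty.default_metrics_conf import DEFAULT_METRICS_CONF

# Assumption: a top-level 'metrics' mapping keyed by metric name, each
# entry carrying at least a 'unit', as in the new metrics.yml.
for name, info in DEFAULT_METRICS_CONF.get('metrics', {}).items():
    print(name, info.get('unit'))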
@ -234,10 +234,10 @@ class StorageTotalTest(StorageTest):
        total = self.storage.get_total(
            begin=begin,
            end=end,
            service='compute')
            service='cpu')
        self.assertEqual(1, len(total))
        self.assertEqual(0.84, total[0]["rate"])
        self.assertEqual('compute', total[0]["res_type"])
        self.assertEqual('cpu', total[0]["res_type"])
        self.assertEqual(begin, total[0]["begin"])
        self.assertEqual(end, total[0]["end"])

@ -269,11 +269,11 @@ class StorageTotalTest(StorageTest):
            groupby="res_type")
        self.assertEqual(2, len(total))
        self.assertEqual(0.2674, total[0]["rate"])
        self.assertEqual('image', total[0]["res_type"])
        self.assertEqual('image.size', total[0]["res_type"])
        self.assertEqual(begin, total[0]["begin"])
        self.assertEqual(end, total[0]["end"])
        self.assertEqual(1.68, total[1]["rate"])
        self.assertEqual('compute', total[1]["res_type"])
        self.assertEqual('cpu', total[1]["res_type"])
        self.assertEqual(begin, total[1]["begin"])
        self.assertEqual(end, total[1]["end"])

@ -288,22 +288,22 @@ class StorageTotalTest(StorageTest):
        self.assertEqual(4, len(total))
        self.assertEqual(0.1337, total[0]["rate"])
        self.assertEqual(self._other_tenant_id, total[0]["tenant_id"])
        self.assertEqual('image', total[0]["res_type"])
        self.assertEqual('image.size', total[0]["res_type"])
        self.assertEqual(begin, total[0]["begin"])
        self.assertEqual(end, total[0]["end"])
        self.assertEqual(0.1337, total[1]["rate"])
        self.assertEqual(self._tenant_id, total[1]["tenant_id"])
        self.assertEqual('image', total[1]["res_type"])
        self.assertEqual('image.size', total[1]["res_type"])
        self.assertEqual(begin, total[1]["begin"])
        self.assertEqual(end, total[1]["end"])
        self.assertEqual(0.84, total[2]["rate"])
        self.assertEqual(self._other_tenant_id, total[2]["tenant_id"])
        self.assertEqual('compute', total[2]["res_type"])
        self.assertEqual('cpu', total[2]["res_type"])
        self.assertEqual(begin, total[2]["begin"])
        self.assertEqual(end, total[2]["end"])
        self.assertEqual(0.84, total[3]["rate"])
        self.assertEqual(self._tenant_id, total[3]["tenant_id"])
        self.assertEqual('compute', total[3]["res_type"])
        self.assertEqual('cpu', total[3]["res_type"])
        self.assertEqual(begin, total[3]["begin"])
        self.assertEqual(end, total[3]["end"])

@ -426,7 +426,7 @@ class StorageDataIntegrityTest(StorageTest):
        del expected_data[2]
        # NOTE(sheeprine): Quick and dirty sort (ensure result consistency,
        # order is not significant to the test result)
        if 'image' in stored_data[0]['usage']:
        if 'image.size' in stored_data[0]['usage']:
            stored_data[0]['usage'], stored_data[1]['usage'] = (
                stored_data[1]['usage'], stored_data[0]['usage'])
        self.assertEqual(
@ -20,7 +20,7 @@ import unittest
import mock
from oslo_utils import uuidutils

from cloudkitty.tenant_fetcher import keystone
from cloudkitty.fetcher import keystone
from cloudkitty import tests


@ -72,10 +72,10 @@ class KeystoneFetcherTest(tests.TestCase):
        super(KeystoneFetcherTest, self).setUp()
        self.conf.set_override('backend', 'keystone', 'tenant_fetcher')
        self.conf.import_group('keystone_fetcher',
                               'cloudkitty.tenant_fetcher.keystone')
                               'cloudkitty.fetcher.keystone')

    @unittest.SkipTest
    def test_keystone_tenant_fetcher_filter_list(self):
    def test_keystone_fetcher_filter_list(self):
        kclient = 'keystoneclient.client.Client'
        with mock.patch(kclient) as kclientmock:
            kclientmock.return_value = Client()
@ -40,7 +40,7 @@ class OrchestratorTest(tests.TestCase):
        messaging_conf.transport_driver = 'fake'
        self.conf.set_override('backend', 'keystone', 'tenant_fetcher')
        self.conf.import_group('keystone_fetcher',
                               'cloudkitty.tenant_fetcher.keystone')
                               'cloudkitty.fetcher.keystone')

    def setup_fake_modules(self):
        fake_module1 = tests.FakeRatingModule()
@ -40,12 +40,18 @@ from six import moves
from stevedore import extension


COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'

_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'

LOG = logging.getLogger(__name__)

collect_opts = [
    cfg.StrOpt('fetcher',
               default='keystone',
               deprecated_for_removal=True,
               help='Project fetcher.'),
    cfg.StrOpt('collector',
               default='gnocchi',
               deprecated_for_removal=True,
@ -77,8 +83,16 @@ collect_opts = [
               default='/etc/cloudkitty/metrics.yml',
               help='Metrology configuration file.'),
]

storage_opts = [
    cfg.StrOpt('backend',
               default='sqlalchemy',
               help='Name of the storage backend driver.')
]

CONF = cfg.CONF
CONF.register_opts(collect_opts, 'collect')
CONF.register_opts(storage_opts, 'storage')


def isotime(at=None, subsecond=False):
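Given the options registered above, the matching cloudkitty.conf sections
would look roughly like this (a sketch limited to the options visible in
this hunk; fetcher and collector are marked deprecated for removal):

[collect]
fetcher = keystone
collector = gnocchi
metrics_conf = /etc/cloudkitty/metrics.yml

[storage]
backend = sqlalchemy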
@ -263,28 +277,20 @@ def check_time_state(timestamp=None, period=0, wait_time=0):
def get_metrics_conf(conf_path):
    """Return the loaded yaml metrology configuration.

    If the /etc/cloudkitty folder is empty,
    fall back to the former deprecated oslo config method.
    If the metrics.yml file is not found,
    return an empty dict.
    """
    res = None
    # NOTE(mc): We cannot raise any exception in this function, as it is
    # called during some module imports. Default values should be used
    # instead. This is done for the docs and tests on gerrit, which do not
    # copy the yaml conf file.
    try:
        with open(conf_path) as conf:
            res = yaml.safe_load(conf)
        res = res[0]
    except Exception as exc:
        res.update({'storage': CONF.storage.backend})
        return res or {}
    except Exception:
        LOG.warning('Error when trying to retrieve yaml metrology conf file.')
        LOG.warning(exc)
        LOG.warning('Fallback on the deprecated oslo config method.')

    try:
        res = {key: val for key, val in CONF.collect.items()}
    except Exception as exc:
        err_msg = 'Error when trying to retrieve ' \
                  'deprecated oslo config method.'
        LOG.error(err_msg)
        LOG.error(exc)

    return res
    return {}


@contextlib.contextmanager
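For orientation, a minimal sketch of the reworked loader's behaviour (the
path and keys are taken from the hunk above; the 'storage' entry is
injected from CONF.storage.backend):

from cloudkitty import utils as ck_utils

# Returns the parsed metrics.yml plus an injected 'storage' key, or {}
# when the file is missing or unreadable.
conf = ck_utils.get_metrics_conf('/etc/cloudkitty/metrics.yml')
print(conf.get('storage'))          # e.g. 'sqlalchemy'
print(sorted(conf.get('metrics', {})))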
@ -30,14 +30,17 @@ Info
.. rest-controller:: cloudkitty.api.v1.controllers.info:InfoController
   :webprefix: /v1/info

.. rest-controller:: cloudkitty.api.v1.controllers.info:MetricInfoController
   :webprefix: /v1/info/metric

.. autotype:: cloudkitty.api.v1.datamodels.info.CloudkittyMetricInfo
   :members:

.. autotype:: cloudkitty.api.v1.datamodels.info.CloudkittyMetricInfoCollection
   :members:

.. rest-controller:: cloudkitty.api.v1.controllers.info:ServiceInfoController
   :webprefix: /v1/info/services

.. autotype:: cloudkitty.api.v1.datamodels.info.CloudkittyServiceInfo
   :members:

.. autotype:: cloudkitty.api.v1.datamodels.info.CloudkittyServiceInfoCollection
   :members:
   :webprefix: /v1/info/service


Rating
@ -1,75 +1,102 @@
- name: OpenStack
name: OpenStack

collector: gnocchi
period: 3600
wait_periods: 2
window: 1800
fetcher: keystone
collector: gnocchi

services:
  - compute
  - volume
  - network.bw.in
  - network.bw.out
  - network.floating
  - image
period: 3600
wait_periods: 2
window: 1800

services_objects:
  compute: instance
  volume: volume
  network.bw.out: instance_network_interface
  network.bw.in: instance_network_interface
  network.floating: network
  image: image
  radosgw.usage: ceph_account
services_objects:
  compute: instance
  volume: volume
  network.bw.out: instance_network_interface
  network.bw.in: instance_network_interface
  network.floating: network
  image: image
  radosgw.usage: ceph_account

services_metrics:
  compute:
    - vcpus: max
    - memory: max
    - cpu: max
    - disk.root.size: max
    - disk.ephemeral.size: max
  volume:
    - volume.size: max
  network.bw.in:
    - network.incoming.bytes: max
  network.bw.out:
    - network.outgoing.bytes: max
  network.floating:
    - ip.floating: max
  image:
    - image.size: max
    - image.download: max
    - image.serve: max
  radosgw.usage:
    - radosgw.objects.size: max
metrics:
  vcpus:
    resource: instance
    unit: instance
    factor: 1
    aggregation_method: max
    countable_unit: true

metrics_units:
  compute:
    1:
      unit: instance
  volume:
    volume.size:
      unit: GiB
  network.bw.in:
    network.incoming.bytes:
      unit: MB
      factor: 1/1000000
  network.bw.out:
    network.outgoing.bytes:
      unit: MB
      factor: 1/1000000
  network.floating:
    1:
      unit: ip
  image:
    image.size:
      unit: MiB
      factor: 1/1048576
  radosgw.usage:
    radosgw.objects.size:
      unit: GiB
      factor: 1/1073741824
  default_unit:
    1:
      unit: unknown
  memory:
    resource: instance
    unit: instance
    factor: 1
    aggregation_method: max
    countable_unit: true

  cpu:
    resource: instance
    unit: instance
    factor: 1
    aggregation_method: max
    countable_unit: true

  disk.root.size:
    resource: instance
    unit: instance
    factor: 1
    aggregation_method: max
    countable_unit: true

  disk.ephemeral.size:
    resource: instance
    unit: instance
    factor: 1
    aggregation_method: max
    countable_unit: true

  image.size:
    resource: image
    unit: MiB
    factor: 1/1048576
    aggregation_method: max

  image.download:
    resource: image
    unit: MiB
    factor: 1/1048576
    aggregation_method: max

  image.serve:
    resource: image
    unit: MiB
    factor: 1/1048576
    aggregation_method: max

  volume.size:
    resource: volume
    unit: GiB
    factor: 1
    aggregation_method: max

  network.outgoing.bytes:
    resource: instance_network_interface
    unit: MB
    factor: 1/1000000
    aggregation_method: max

  network.incoming.bytes:
    resource: instance_network_interface
    unit: MB
    factor: 1/1000000
    aggregation_method: max

  ip.floating:
    resource: network
    unit: ip
    factor: 1
    aggregation_method: max
    countable_unit: true

  radosgw.objects.size:
    resource: ceph_account
    unit: GiB
    factor: 1/1073741824
    aggregation_method: max
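Under this layout, rating an additional data source only requires appending
an entry under metrics:. A hypothetical example (the metric name and
resource are illustrative; the keys are the ones used above):

  # Hypothetical metric entry following the schema above.
  object.storage.size:
    resource: storage_account
    unit: GiB
    factor: 1/1073741824
    aggregation_method: max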
@ -0,0 +1,6 @@
---
other:
  - |
    CloudKitty now bases data valorization on metrics rather than on
    services. The information in the metrics.yml file has been reorganized
    accordingly.
@ -0,0 +1,8 @@
---
deprecations:
  - |
    The /v1/info/services and /v1/info/services/<service> endpoints have
    been deprecated. The /v1/info/metrics and /v1/info/metrics/<metric>
    endpoints should be used instead.
    The whole /v1/info API is currently being reworked; more endpoints may
    be deprecated and removed in the future.
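In practice the migration is a path change: a client that called
GET /v1/info/services/compute now calls GET /v1/info/metrics/cpu, following
the service-to-metric mapping exercised by the gabbi tests earlier in this
change.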
releasenotes/notes/source-fetcher-43c4352508f7f944.yaml (new file)
@ -0,0 +1,5 @@
---
features:
  - |
    A source fetcher has been added, making it possible to add new
    collectors that scrape metrics from non-OpenStack sources.
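A hedged sketch of what a deployment-specific fetcher could look like (the
get_tenants() contract is an assumption modelled on the keystone fetcher
touched by this change):

# Hypothetical custom fetcher; the method name is modelled on
# cloudkitty.fetcher.keystone:KeystoneFetcher.
class StaticSourceFetcher(object):
    """Return a fixed list of scopes to collect and rate."""

    def get_tenants(self):
        # Non-OpenStack sources can be exposed as plain named scopes.
        return ['prometheus', 'kubernetes']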
@ -51,8 +51,9 @@ cloudkitty.collector.backends =
    meta = cloudkitty.collector.meta:MetaCollector

cloudkitty.tenant.fetchers =
    fake = cloudkitty.tenant_fetcher.fake:FakeFetcher
    keystone = cloudkitty.tenant_fetcher.keystone:KeystoneFetcher
    fake = cloudkitty.fetcher.fake:FakeFetcher
    keystone = cloudkitty.fetcher.keystone:KeystoneFetcher
    source = cloudkitty.fetcher.source:SourceFetcher

cloudkitty.transformers =
    CloudKittyFormatTransformer = cloudkitty.transformer.format:CloudKittyFormatTransformer
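Out-of-tree fetchers plug into the same entry-point namespace; a
hypothetical third-party package would register its class in its own
setup.cfg like this (package and class names are illustrative):

cloudkitty.tenant.fetchers =
    static = mypackage.fetcher.static:StaticSourceFetcher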