Improve metrics configuration

This patch refactors the metric configuration
model (and file format) to improve genericity,
maintainability, and usability for non-OpenStack
deployments.

The new metric YAML format is defined in the
attached story task and is validated on load with
voluptuous.

A processor is now dedicated to a single collector
and a single storage backend, so the collector and
storage configuration move back to the CloudKitty
oslo configuration.

The collectors have been refactored to keep their code as similar
as possible, to ease comprehension for new contributors.
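
For illustration only (the authoritative format lives in the
story task), a new-style metrics.yml could look like this; the
field names follow the validation schemas introduced below, the
values are invented:

    metrics:
      cpu:
        unit: instance
        alt_name: instance
        groupby:
          - id
          - project_id
        metadata:
          - flavor_name
        mutate: NUMBOOL
        extra_args:
          resource_type: instance
          aggregation_method: max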

Story: 2001883
Task: 14354
Task: 14355
Task: 14431

Change-Id: I948dd9cd5c113bdaa4e49c532354938ffb45f0e7
Author: Luka Peschke, 2018-04-17 14:49:34 +02:00 (committed by Maxime Cottret)
parent 5035de30a8
commit 059a940392
34 changed files with 894 additions and 808 deletions


@@ -20,6 +20,7 @@ from oslo_log import log as logging
 import pecan
 from pecan import rest
 import six
+import voluptuous
 from wsme import types as wtypes
 import wsmeext.pecan as wsme_pecan
@@ -36,37 +37,45 @@ CONF = cfg.CONF


 def get_all_metrics():
-    METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
-    METADATA = collector.get_metrics_based_collector_metadata()
-    if 'metrics' not in METRICS_CONF:
+    try:
+        metrics_conf = collector.validate_conf(
+            ck_utils.load_conf(CONF.collect.metrics_conf))
+    except (voluptuous.Invalid, voluptuous.MultipleInvalid):
         msg = 'Invalid endpoint: no metrics in current configuration.'
         pecan.abort(405, msg)
     policy.authorize(pecan.request.context, 'info:list_metrics_info', {})
     metrics_info_list = []
-    for metric, metadata in METADATA.items():
-        info = metadata.copy()
-        info['metric_id'] = metric
+    for metric_name, metric in metrics_conf.items():
+        info = metric.copy()
+        info['metric_id'] = info['alt_name']
         metrics_info_list.append(
             info_models.CloudkittyMetricInfo(**info))
     return info_models.CloudkittyMetricInfoCollection(
         metrics=metrics_info_list)


+def _find_metric(name, conf):
+    for metric_name, metric in conf.items():
+        if metric['alt_name'] == name:
+            return metric
+
+
 def get_one_metric(metric_name):
-    METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
-    METADATA = collector.get_metrics_based_collector_metadata()
-    if 'metrics' not in METRICS_CONF:
+    try:
+        metrics_conf = collector.validate_conf(
+            ck_utils.load_conf(CONF.collect.metrics_conf))
+    except (voluptuous.Invalid, voluptuous.MultipleInvalid):
         msg = 'Invalid endpoint: no metrics in current configuration.'
         pecan.abort(405, msg)
     policy.authorize(pecan.request.context, 'info:get_metric_info', {})
-    try:
-        info = METADATA[metric_name].copy()
-        info['metric_id'] = metric_name
-        return info_models.CloudkittyMetricInfo(**info)
-    except KeyError:
+    metric = _find_metric(metric_name, metrics_conf)
+    if not metric:
         pecan.abort(404, six.text_type(metric_name))
+    info = metric.copy()
+    info['metric_id'] = info['alt_name']
+    return info_models.CloudkittyMetricInfo(**info)


 class MetricInfoController(rest.RestController):
@@ -131,4 +140,4 @@ class InfoController(rest.RestController):
     def config(self):
         """Return current configuration."""
         policy.authorize(pecan.request.context, 'info:get_config', {})
-        return ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
+        return ck_utils.load_conf(CONF.collect.metrics_conf)
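
Illustration, not part of the diff: the API now exposes metrics under
their alt_name, so lookups go through _find_metric() instead of a direct
dict access. A quick sketch of the behaviour:

    conf = {'cpu': {'alt_name': 'instance', 'unit': 'instance'}}
    assert _find_metric('instance', conf)['unit'] == 'instance'
    assert _find_metric('cpu', conf) is None  # lookups match alt_name only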


@@ -18,29 +18,14 @@
 from oslo_config import cfg
 from wsme import types as wtypes

-from cloudkitty.default_metrics_conf import DEFAULT_METRICS_CONF
-from cloudkitty import utils as ck_utils
-
 CONF = cfg.CONF

-
-def get_metrics_list():
-    metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
-    try:
-        metrics = list(metrics_conf['metrics'].keys())
-        cloudkitty_metrics = wtypes.Enum(wtypes.text, *metrics)
-    except KeyError:
-        metrics = list(DEFAULT_METRICS_CONF['metrics'].keys())
-        cloudkitty_metrics = wtypes.Enum(wtypes.text, *metrics)
-    return cloudkitty_metrics
-

 class CloudkittyMetricInfo(wtypes.Base):
     """Type describing a metric info in CloudKitty."""

-    metric_id = get_metrics_list()
+    metric_id = wtypes.text
     """Name of the metric."""

     metadata = [wtypes.text]


@@ -28,16 +28,7 @@ LOG = log.getLogger(__name__)

 CONF = cfg.CONF
-METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
-
-try:
-    SERVICE_NAMES = list(METRICS_CONF['metrics'].keys())
-except KeyError:
-    LOG.error("No metrics specified in YAML configuration, "
-              "CloudKitty won't work as expected")
-    SERVICE_NAMES = ['compute', 'image']
-
-CLOUDKITTY_SERVICES = wtypes.Enum(wtypes.text, *SERVICE_NAMES)
+METRICS_CONF = ck_utils.load_conf(CONF.collect.metrics_conf)


 class CloudkittyResource(wtypes.Base):
@@ -45,7 +36,7 @@ class CloudkittyResource(wtypes.Base):
     """

-    service = CLOUDKITTY_SERVICES
+    service = wtypes.text
     """Name of the service."""

     # FIXME(sheeprine): values should be dynamic


@@ -16,42 +16,105 @@
 # @author: Stéphane Albert
 #
 import abc
+import fractions

 from oslo_config import cfg
 import six
 from stevedore import driver
+from voluptuous import All
+from voluptuous import Any
+from voluptuous import Coerce
+from voluptuous import In
+from voluptuous import Invalid
+from voluptuous import Length
+from voluptuous import Optional
+from voluptuous import Required
+from voluptuous import Schema

 from cloudkitty import transformer
 from cloudkitty import utils as ck_utils

+collect_opts = [
+    cfg.StrOpt('collector',
+               default='gnocchi',
+               help='Data collector.'),
+    cfg.IntOpt('period',
+               default=3600,
+               help='Rating period in seconds.'),
+    cfg.IntOpt('wait_periods',
+               default=2,
+               help='Wait for N periods before collecting new data.'),
+    cfg.StrOpt('metrics_conf',
+               default='/etc/cloudkitty/metrics.yml',
+               help='Metrology configuration file.'),
+]
+
 CONF = cfg.CONF
+CONF.register_opts(collect_opts, 'collect')
+
 COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'


+def MetricDict(value):
+    if isinstance(value, dict) and len(value.keys()) > 0:
+        return value
+    raise Invalid("Not a dict with at least one key")
+
+
+CONF_BASE_SCHEMA = {Required('metrics'): MetricDict}
+
+METRIC_BASE_SCHEMA = {
+    # Display unit
+    Required('unit'): All(str, Length(min=1)),
+    # Factor for unit conversion
+    Required('factor', default=1):
+        Any(int, float, Coerce(fractions.Fraction)),
+    # Offset for unit conversion
+    Required('offset', default=0):
+        Any(int, float, Coerce(fractions.Fraction)),
+    # Name to be used in dataframes, and used for service creation in hashmap
+    # module. Defaults to the name of the metric
+    Optional('alt_name'): All(str, Length(min=1)),
+    # This is what metrics are grouped by on collection.
+    Required('groupby', default=list): [
+        All(str, Length(min=1))
+    ],
+    # Available in HashMap
+    Required('metadata', default=list): [
+        All(str, Length(min=1))
+    ],
+    # Mutate collected value. May be any of (NONE, NUMBOOL, FLOOR, CEIL).
+    # Defaults to NONE
+    Required('mutate', default='NONE'):
+        In(['NONE', 'NUMBOOL', 'FLOOR', 'CEIL']),
+    # Collector-specific args. Should be overridden by the schema provided
+    # for the given collector
+    Optional('extra_args'): dict,
+}
+

 def get_collector(transformers=None):
-    metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
+    metrics_conf = ck_utils.load_conf(CONF.collect.metrics_conf)
     if not transformers:
         transformers = transformer.get_transformers()
     collector_args = {
-        'period': metrics_conf.get('period', 3600),
+        'period': CONF.collect.period,
         'transformers': transformers,
     }
     collector_args.update({'conf': metrics_conf})
     return driver.DriverManager(
         COLLECTORS_NAMESPACE,
-        metrics_conf.get('collector', 'gnocchi'),
+        CONF.collect.collector,
         invoke_on_load=True,
         invoke_kwds=collector_args).driver


 def get_collector_without_invoke():
     """Return the collector without invoking it."""
-    metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
     return driver.DriverManager(
         COLLECTORS_NAMESPACE,
-        metrics_conf.get('collector', 'gnocchi'),
+        CONF.collect.collector,
         invoke_on_load=False
     ).driver
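
Not part of the patch, a small sketch of how the voluptuous schema above
behaves: Required(..., default=...) fills in missing keys, so a minimal
metric entry gains its defaults on validation.

    from voluptuous import Schema

    validated = Schema(METRIC_BASE_SCHEMA)({'unit': 'GiB'})
    # validated == {'unit': 'GiB', 'factor': 1, 'offset': 0,
    #               'groupby': [], 'metadata': [], 'mutate': 'NONE'}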
@@ -61,14 +124,15 @@ def get_metrics_based_collector_metadata():

     Results are based on enabled collector and metrics in CONF.
     """
-    metrics_conf = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
+    metrics_conf = ck_utils.load_conf(CONF.collect.metrics_conf)
     transformers = transformer.get_transformers()
     collector = get_collector_without_invoke()
     metadata = {}
     if 'metrics' in metrics_conf:
-        for metric in metrics_conf.get('metrics', {}):
-            metadata[metric] = collector.get_metadata(
-                metric,
+        for metric_name, metric in metrics_conf.get('metrics', {}).items():
+            alt_name = metric.get('alt_name', metric_name)
+            metadata[alt_name] = collector.get_metadata(
+                metric_name,
                 transformers,
                 metrics_conf,
             )
@@ -102,17 +166,18 @@ class NoDataCollected(Exception):

 @six.add_metaclass(abc.ABCMeta)
 class BaseCollector(object):
     collector_name = None
-    dependencies = []
+    dependencies = ['CloudKittyFormatTransformer']

     def __init__(self, transformers, **kwargs):
         try:
             self.transformers = transformers
             self.period = kwargs['period']
-            self.conf = kwargs['conf']
-        except IndexError as e:
+            self.conf = self.check_configuration(kwargs['conf'])
+        except KeyError as e:
             raise ValueError("Missing argument (%s)" % e)

         self._check_transformers()
+        self.t_cloudkitty = self.transformers['CloudKittyFormatTransformer']

     def _check_transformers(self):
         """Check for transformer prerequisites
@@ -123,6 +188,13 @@ class BaseCollector(object):
             raise TransformerDependencyError(self.collector_name,
                                              dependency)

+    @staticmethod
+    def check_configuration(conf):
+        """Check metrics configuration
+
+        """
+        return Schema(METRIC_BASE_SCHEMA)(conf)
+
     @staticmethod
     def last_month():
         month_start = ck_utils.get_month_start()
@@ -152,16 +224,35 @@ class BaseCollector(object):
         """
         return {"metadata": [], "unit": "undefined"}

-    def retrieve(self,
-                 resource,
-                 start,
-                 end=None,
-                 project_id=None,
-                 q_filter=None):
-        trans_resource = self._res_to_func(resource)
-        if not hasattr(self, trans_resource):
-            raise NotImplementedError(
-                "No method found in collector '%s' for resource '%s'."
-                % (self.collector_name, resource))
-        func = getattr(self, trans_resource)
-        return func(resource, start, end, project_id, q_filter)
+    @abc.abstractmethod
+    def fetch_all(self, metric_name, start, end,
+                  project_id=None, q_filter=None):
+        pass
+
+    def retrieve(self, metric_name, start, end,
+                 project_id=None, q_filter=None):
+        data = self.fetch_all(
+            metric_name,
+            start,
+            end,
+            project_id,
+            q_filter=q_filter,
+        )
+
+        name = self.conf[metric_name].get('alt_name', metric_name)
+        if data:
+            data = self.t_cloudkitty.format_service(name, data)
+        if not data:
+            raise NoDataCollected(self.collector_name, name)
+        return data
+
+
+def validate_conf(conf):
+    """Validates the provided configuration."""
+    collector = get_collector_without_invoke()
+    output = collector.check_configuration(conf)
+    for metric_name, metric in output.items():
+        if 'alt_name' not in metric.keys():
+            metric['alt_name'] = metric_name
+    return output
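
Illustration, not part of the patch: validate_conf() delegates to the
enabled collector's check_configuration() and then defaults alt_name to
the metric name. Assuming the gnocchi collector is configured:

    out = validate_conf({'metrics': {'cpu': {
        'unit': 'instance',
        'extra_args': {'resource_type': 'instance'},
    }}})
    # out['cpu']['alt_name'] == 'cpu'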


@@ -13,13 +13,16 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-import decimal
-
 from gnocchiclient import auth as gauth
 from gnocchiclient import client as gclient
 from keystoneauth1 import loading as ks_loading
 from oslo_config import cfg
 from oslo_log import log as logging
+from voluptuous import All
+from voluptuous import In
+from voluptuous import Length
+from voluptuous import Required
+from voluptuous import Schema

 from cloudkitty import collector
 from cloudkitty import utils as ck_utils
@@ -65,10 +68,25 @@ ks_loading.register_auth_conf_options(

 CONF = cfg.CONF

+GNOCCHI_EXTRA_SCHEMA = {
+    Required('extra_args'): {
+        Required('resource_type'): All(str, Length(min=1)),
+        # Due to the gnocchi model, metrics are grouped by resource.
+        # This parameter permits to adapt the key of the resource identifier
+        Required('resource_key', default='id'): All(str, Length(min=1)),
+        # This is needed to allow filtering on the project for the OpenStack
+        # use case.
+        # NOTE(MCO): may be removed in following releases
+        Required('scope_key', default='project_id'): All(str, Length(min=1)),
+        Required('aggregation_method', default='max'):
+            In(['max', 'mean', 'min']),
+    },
+}
+

 class GnocchiCollector(collector.BaseCollector):
     collector_name = 'gnocchi'
-    dependencies = ('GnocchiTransformer',
-                    'CloudKittyFormatTransformer')

     def __init__(self, transformers, **kwargs):
         super(GnocchiCollector, self).__init__(transformers, **kwargs)
@@ -94,14 +112,34 @@ class GnocchiCollector(collector.BaseCollector):
             adapter_options=adapter_options,
         )

+    @staticmethod
+    def check_configuration(conf):
+        """Check metrics configuration
+
+        """
+        conf = Schema(collector.CONF_BASE_SCHEMA)(conf)
+        metric_schema = Schema(collector.METRIC_BASE_SCHEMA).extend(
+            GNOCCHI_EXTRA_SCHEMA)
+
+        output = dict()
+        for metric_name, metric in conf['metrics'].items():
+            output[metric_name] = metric_schema(metric)
+            output[metric_name]['groupby'].append(
+                output[metric_name]['extra_args']['resource_key']
+            )
+        return output
+
     @classmethod
     def get_metadata(cls, resource_name, transformers, conf):
         info = super(GnocchiCollector, cls).get_metadata(resource_name,
                                                          transformers)
         try:
-            info["metadata"].extend(transformers['GnocchiTransformer']
-                                    .get_metadata(resource_name))
-            info['unit'] = conf['metrics'][resource_name]['unit']
+            info["metadata"].extend(
+                conf[resource_name]['groupby']
+            ).extend(
+                conf[resource_name]['metadata']
+            )
+            info['unit'] = conf[resource_name]['unit']
         except KeyError:
             pass
         return info
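
Illustration, not part of the patch: check_configuration() appends the
(defaulted) resource_key to each metric's groupby list, so aggregates are
always grouped per resource.

    conf = {'metrics': {'cpu': {
        'unit': 'instance',
        'groupby': ['project_id'],
        'extra_args': {'resource_type': 'instance'},
    }}}
    out = GnocchiCollector.check_configuration(conf)
    # out['cpu']['groupby'] == ['project_id', 'id']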
@@ -154,38 +192,43 @@ class GnocchiCollector(collector.BaseCollector):
                             self.gen_filter(cop="<=", started_at=end))
         return time_filter

-    def _expand(self, metrics, resource, name, aggregate, start, end):
-        try:
-            values = self._conn.metric.get_measures(
-                metric=metrics[name],
-                start=ck_utils.ts2dt(start),
-                stop=ck_utils.ts2dt(end),
-                aggregation=aggregate)
-            # NOTE(sheeprine): Get the list of values for the current
-            # metric and get the first result value.
-            # [point_date, granularity, value]
-            # ["2015-11-24T00:00:00+00:00", 86400.0, 64.0]
-            resource[name] = values[0][2]
-        except (IndexError, KeyError):
-            resource[name] = 0
-
-    def _expand_metrics(self, resources, mappings, start, end, resource_name):
-        for resource in resources:
-            metrics = resource.get('metrics', {})
-            self._expand(
-                metrics,
-                resource,
-                resource_name,
-                mappings,
-                start,
-                end,
-            )
-
-    def get_resources(self, resource_name, start, end,
-                      project_id, q_filter=None):
+    def _fetch_resources(self, metric_name, start, end,
+                         project_id=None, q_filter=None):
         """Get resources during the timeframe.

-        :param resource_name: Resource name to filter on.
+        :type metric_name: str
+        :param start: Start of the timeframe.
+        :param end: End of the timeframe if needed.
+        :param project_id: Filter on a specific tenant/project.
+        :type project_id: str
+        :param q_filter: Append a custom filter.
+        :type q_filter: list
+        """
+        # Get gnocchi specific conf
+        extra_args = self.conf[metric_name]['extra_args']
+
+        # Build query
+        query_parameters = self._generate_time_filter(start, end)
+        resource_type = extra_args['resource_type']
+        query_parameters.append(
+            self.gen_filter(cop="=", type=resource_type))
+        if project_id:
+            kwargs = {extra_args['scope_key']: project_id}
+            query_parameters.append(self.gen_filter(**kwargs))
+        if q_filter:
+            query_parameters.append(q_filter)
+        resources = self._conn.resource.search(
+            resource_type=resource_type,
+            query=self.extend_filter(*query_parameters))
+        return {res[extra_args['resource_key']]: res for res in resources}
+
+    def _fetch_metric(self, metric_name, start, end,
+                      project_id=None, q_filter=None):
+        """Get metric during the timeframe.
+
+        :param metric_name: metric name to filter on.
         :type resource_name: str
         :param start: Start of the timeframe.
         :param end: End of the timeframe if needed.
@@ -194,89 +237,94 @@ class GnocchiCollector(collector.BaseCollector):
         :param q_filter: Append a custom filter.
         :type q_filter: list
         """
-        # NOTE(sheeprine): We first get the list of every resource running
-        # without any details or history.
-        # Then we get information about the resource getting details and
-        # history.
-        # Translating the resource name if needed
-        query_parameters = self._generate_time_filter(start, end)
-        resource_type = self.conf['metrics'][resource_name]['resource']
+        # Get gnocchi specific conf
+        extra_args = self.conf[metric_name]['extra_args']
+        # Get resource type
+        resource_type = extra_args['resource_type']
+
+        # Build search query using resource type and project_id if provided
+        query_parameters = list()
         query_parameters.append(
             self.gen_filter(cop="=", type=resource_type))
-        query_parameters.append(
-            self.gen_filter(project_id=project_id))
+        if project_id:
+            kwargs = {extra_args['scope_key']: project_id}
+            query_parameters.append(self.gen_filter(**kwargs))
         if q_filter:
             query_parameters.append(q_filter)
-        resources = self._conn.resource.search(
+
+        # Build aggregation operation
+        op = ["aggregate", extra_args['aggregation_method'],
+              ["metric", metric_name, extra_args['aggregation_method']]]
+
+        # Get groupby
+        groupby = self.conf[metric_name]['groupby']
+
+        return self._conn.aggregates.fetch(
+            op,
             resource_type=resource_type,
-            query=self.extend_filter(*query_parameters))
-        return resources
+            start=ck_utils.ts2dt(start),
+            stop=ck_utils.ts2dt(end),
+            groupby=groupby,
+            search=self.extend_filter(*query_parameters))

-    def resource_info(self, resource_name, start, end,
-                      project_id, q_filter=None):
-        met = self.conf['metrics'][resource_name]
-        unit = met['unit']
-        qty = 1 if met.get('countable_unit') else met['resource']
+    def _format_data(self, metconf, data, resources_info=None):
+        """Formats gnocchi data to CK data.

-        resources = self.get_resources(
-            resource_name,
+        Returns metadata, groupby and qty
+
+        """
+        groupby = data['group']
+        # If resource info is provided, add additional
+        # metadata as defined in the conf
+        metadata = dict()
+        if resources_info:
+            resource = resources_info[
+                groupby[metconf['extra_args']['resource_key']]]
+            for i in metconf['metadata']:
+                metadata[i] = resource.get(i, '')
+        qty = data['measures']['measures']['aggregated'][0][2]
+        converted_qty = ck_utils.convert_unit(
+            qty, metconf['factor'], metconf['offset'])
+        mutated_qty = ck_utils.mutate(converted_qty, metconf['mutate'])
+        return metadata, groupby, mutated_qty
+
+    def fetch_all(self, metric_name, start, end,
+                  project_id=None, q_filter=None):
+        met = self.conf[metric_name]
+        data = self._fetch_metric(
+            metric_name,
             start,
             end,
             project_id=project_id,
             q_filter=q_filter,
         )
-        formated_resources = list()
-        for resource in resources:
-            resource_data = self.t_gnocchi.strip_resource_data(
-                resource_name, resource)
-            mapp = self.conf['metrics'][resource_name]['aggregation_method']
-            self._expand_metrics(
-                [resource_data],
-                mapp,
+
+        resources_info = None
+        if met['metadata']:
+            resources_info = self._fetch_resources(
+                metric_name,
                 start,
                 end,
-                resource_name,
+                project_id=project_id,
+                q_filter=q_filter
             )
-            resource_data.pop('metrics', None)
-
-            # Unit conversion
-            if isinstance(qty, str):
-                resource_data[resource_name] = ck_utils.convert_unit(
-                    resource_data[resource_name],
-                    self.conf['metrics'][resource_name].get('factor', 1),
-                    self.conf['metrics'][resource_name].get('offset', 0),
-                )
-
-            val = qty if isinstance(qty, int) else resource_data[resource_name]
-            data = self.t_cloudkitty.format_item(
-                resource_data,
-                unit,
-                decimal.Decimal(val)
-            )
-            # NOTE(sheeprine): Reference to gnocchi resource used by storage
-            data['resource_id'] = data['desc']['resource_id']
-            formated_resources.append(data)
+
+        formated_resources = list()
+        for d in data:
+            # Only if aggregates have been found
+            if d['measures']['measures']['aggregated']:
+                metadata, groupby, qty = self._format_data(
+                    met, d, resources_info)
+                data = self.t_cloudkitty.format_item(
+                    groupby,
+                    metadata,
+                    met['unit'],
+                    qty=qty,
+                )
+                formated_resources.append(data)
         return formated_resources
-
-    def retrieve(self, resource_name, start, end,
-                 project_id, q_filter=None):
-        resources = self.resource_info(
-            resource_name,
-            start,
-            end,
-            project_id,
-            q_filter=q_filter,
-        )
-        if not resources:
-            raise collector.NoDataCollected(self.collector_name, resource_name)
-        return self.t_cloudkitty.format_service(resource_name, resources)
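
Illustration, not part of the patch: the shape of one aggregates.fetch()
entry as consumed by _format_data() above (values invented):

    d = {
        'group': {'id': 'instance-uuid', 'project_id': 'project-uuid'},
        'measures': {'measures': {'aggregated': [
            # [timestamp, granularity, value]
            ['2018-04-01T00:00:00+00:00', 3600.0, 2.0],
        ]}},
    }
    # qty picked by _format_data:
    # d['measures']['measures']['aggregated'][0][2]  ->  2.0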


@@ -15,13 +15,16 @@
 #
 # @author: Luka Peschke
 #
-import decimal
-
 from keystoneauth1 import loading as ks_loading
 from keystoneclient.v3 import client as ks_client
 from monascaclient import client as mclient
 from oslo_config import cfg
 from oslo_log import log as logging
+from voluptuous import All
+from voluptuous import In
+from voluptuous import Length
+from voluptuous import Required
+from voluptuous import Schema

 from cloudkitty import collector
 from cloudkitty import transformer
@@ -43,7 +46,22 @@ ks_loading.register_auth_conf_options(
     COLLECTOR_MONASCA_OPTS)

 CONF = cfg.CONF
-METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
+METRICS_CONF = ck_utils.load_conf(CONF.collect.metrics_conf)
+
+MONASCA_EXTRA_SCHEMA = {
+    Required('extra_args'): {
+        # Key corresponding to the resource id in a metric's dimensions.
+        # Allows to adapt the resource identifier. Should not need to be
+        # modified in a standard OpenStack installation
+        Required('resource_key', default='resource_id'):
+            All(str, Length(min=1)),
+        # This is needed to allow filtering on the project for the OpenStack
+        # use case. May be removed in following releases
+        Required('scope_key', default='project_id'): All(str, Length(min=1)),
+        Required('aggregation_method', default='max'):
+            In(['max', 'mean', 'min']),
+    },
+}


 class EndpointNotFound(Exception):
@@ -53,13 +71,24 @@ class EndpointNotFound(Exception):

 class MonascaCollector(collector.BaseCollector):
     collector_name = 'monasca'
-    dependencies = ['CloudKittyFormatTransformer']
+
+    @staticmethod
+    def check_configuration(conf):
+        """Check metrics configuration
+
+        """
+        conf = Schema(collector.CONF_BASE_SCHEMA)(conf)
+        metric_schema = Schema(collector.METRIC_BASE_SCHEMA).extend(
+            MONASCA_EXTRA_SCHEMA)
+
+        output = dict()
+        for metric_name, metric in conf['metrics'].items():
+            output[metric_name] = metric_schema(metric)
+        return output

     def __init__(self, transformers, **kwargs):
         super(MonascaCollector, self).__init__(transformers, **kwargs)

-        self.t_cloudkitty = self.transformers['CloudKittyFormatTransformer']
-
         self.auth = ks_loading.load_auth_from_conf_options(
             CONF,
             COLLECTOR_MONASCA_OPTS)
@@ -90,28 +119,13 @@ class MonascaCollector(collector.BaseCollector):
             return endpoint.url
         return None

-    def _get_metadata(self, resource_type, transformers, conf):
+    def _get_metadata(self, metric_name, transformers, conf):
         info = {}
-        info['unit'] = conf['metrics'][resource_type]['unit']
+        info['unit'] = conf['metrics'][metric_name]['unit']

-        start = ck_utils.dt2ts(ck_utils.get_month_start())
-        end = ck_utils.dt2ts(ck_utils.get_month_end())
-        try:
-            resource = self.active_resources(
-                resource_type,
-                start,
-                end,
-                None,
-            )[0]
-        except IndexError:
-            resource = {}
-        info['metadata'] = resource.get('dimensions', {}).keys()
-
-        service_metrics = METRICS_CONF['services_metrics'][resource_type]
-        for service_metric in service_metrics:
-            metric, statistics = list(service_metric.items())[0]
-            info['metadata'].append(metric)
+        dimension_names = self._conn.metric.list_dimension_names(
+            metric_name=metric_name)
+        info['metadata'] = [d['dimension_name'] for d in dimension_names]
         return info

     # NOTE(lukapeschke) if anyone sees a better way to do this,
@@ -124,144 +138,124 @@ class MonascaCollector(collector.BaseCollector):
         tmp = cls(**args)
         return tmp._get_metadata(resource_type, transformers, conf)

-    def _get_resource_metadata(self, resource_type, start,
-                               end, resource_id, conf):
-        meter = conf['metrics'][resource_type]['resource']
-        if not meter:
-            return {}
-        measurements = self._conn.metrics.list_measurements(
-            name=meter,
-            start_time=ck_utils.ts2dt(start),
-            end_time=ck_utils.ts2dt(end),
-            merge_metrics=True,
-            dimensions={'resource_id': resource_id},
-        )
-        try:
-            # Getting the last measurement of given period
-            metadata = measurements[-1]['measurements'][-1][2]
-        except (KeyError, IndexError):
-            metadata = {}
-        return metadata
-
-    def _get_resource_qty(self, meter, start, end, resource_id, statistics):
-        # NOTE(lukapeschke) the period trick is used to aggregate
-        # the measurements
-        period = end - start
-        statistics = self._conn.metrics.list_statistics(
-            name=meter,
-            start_time=ck_utils.ts2dt(start),
-            end_time=ck_utils.ts2dt(end),
-            dimensions={'resource_id': resource_id},
-            statistics=statistics,
-            period=period,
-            merge_metrics=True,
-        )
-        try:
-            # If several statistics are returned (should not happen),
-            # use the latest
-            qty = decimal.Decimal(statistics[-1]['statistics'][-1][1])
-        except (KeyError, IndexError):
-            qty = decimal.Decimal(0)
-        return qty
-
-    def _is_resource_active(self, meter, resource_id, start, end):
-        measurements = self._conn.metrics.list_measurements(
-            name=meter,
-            start_time=ck_utils.ts2dt(start),
-            end_time=ck_utils.ts2dt(end),
-            group_by='resource_id',
-            merge_metrics=True,
-            dimensions={'resource_id': resource_id},
-        )
-        return len(measurements) > 0
-
-    def active_resources(self, resource_type, start,
-                         end, project_id, conf, **kwargs):
-        meter = conf['metrics'][resource_type]['resource']
-        if not meter:
-            return {}
+    def _get_dimensions(self, metric_name, project_id, q_filter):
+        extra_args = self.conf[metric_name]['extra_args']
         dimensions = {}
         if project_id:
-            dimensions['project_id'] = project_id
-        dimensions.update(kwargs)
-        resources = self._conn.metrics.list(name=meter, dimensions=dimensions)
-        output = []
-        for resource in resources:
-            try:
-                resource_id = resource['dimensions']['resource_id']
-                if (resource_id not in
-                    [item['dimensions']['resource_id'] for item in output]
-                        and self._is_resource_active(meter, resource_id,
-                                                     start, end)):
-                    output.append(resource)
-            except KeyError:
-                continue
-        return output
+            dimensions[extra_args['scope_key']] = project_id
+        if q_filter:
+            dimensions.update(q_filter)
+        return dimensions

-    def _expand_metrics(self, resource, resource_id,
-                        mappings, start, end, resource_type):
-        for mapping in mappings:
-            name, statistics = list(mapping.items())[0]
-            qty = self._get_resource_qty(
-                name,
-                start,
-                end,
-                resource_id,
-                statistics,
-            )
-
-            conv_data = METRICS_CONF['metrics'][resource_type].get(name)
-            if conv_data:
-                resource[name] = ck_utils.convert_unit(
-                    qty,
-                    conv_data.get('factor', 1),
-                    conv_data.get('offset', 0),
-                )
+    def _fetch_measures(self, metric_name, start, end,
+                        project_id=None, q_filter=None):
+        """Get measures for given metric during the timeframe.
+
+        :param metric_name: metric name to filter on.
+        :type metric_name: str
+        :param start: Start of the timeframe.
+        :param end: End of the timeframe if needed.
+        :param project_id: Filter on a specific tenant/project.
+        :type project_id: str
+        :param q_filter: Append a custom filter.
+        :type q_filter: list
+        """
+        dimensions = self._get_dimensions(metric_name, project_id, q_filter)
+        group_by = self.conf[metric_name]['groupby']
+        # NOTE(lpeschke): One aggregated measure per collect period
+        period = end - start
+
+        extra_args = self.conf[metric_name]['extra_args']
+        return self._conn.metrics.list_statistics(
+            name=metric_name,
+            merge_metrics=True,
+            dimensions=dimensions,
+            start_time=ck_utils.ts2dt(start),
+            end_time=ck_utils.ts2dt(end),
+            period=period,
+            statistics=extra_args['aggregation_method'],
+            group_by=group_by)

-    def resource_info(self, resource_type, start, end,
-                      project_id, q_filter=None):
-        met = self.conf['metrics'][resource_type]
-        unit = met['unit']
-        qty = 1 if met.get('countable_unit') else met['resource']
-
-        active_resources = self.active_resources(
-            resource_type, start, end, project_id
-        )
-        resource_data = []
-        for resource in active_resources:
-            resource_id = resource['dimensions']['resource_id']
-            data = resource['dimensions']
-            mappings = (
-                resource_type,
-                METRICS_CONF['metrics'][resource_type]['aggregation_method'],
-            )
-            self._expand_metrics(
-                data,
-                resource_id,
-                mappings,
-                start,
-                end,
-                resource_type,
-            )
-            resource_qty = qty
-            if not (isinstance(qty, int) or isinstance(qty, decimal.Decimal)):
-                resource_qty = METRICS_CONF['services_objects'][resource_type]
-                resource_qty = data[resource_qty]
-
-            resource = self.t_cloudkitty.format_item(data, unit, resource_qty)
-            resource['desc']['resource_id'] = resource_id
-            resource['resource_id'] = resource_id
-            resource_data.append(resource)
-        return resource_data
-
-    def retrieve(self, resource_type, start, end, project_id, q_filter=None):
-        resources = self.resource_info(resource_type, start, end,
-                                       project_id=project_id,
-                                       q_filter=q_filter)
-        if not resources:
-            raise collector.NoDataCollected(self.collector_name, resource_type)
-        return self.t_cloudkitty.format_service(resource_type, resources)
+    def _fetch_metrics(self, metric_name, start, end,
+                       project_id=None, q_filter=None):
+        """List active metrics during the timeframe.
+
+        :param metric_name: metric name to filter on.
+        :type metric_name: str
+        :param start: Start of the timeframe.
+        :param end: End of the timeframe if needed.
+        :param project_id: Filter on a specific tenant/project.
+        :type project_id: str
+        :param q_filter: Append a custom filter.
+        :type q_filter: list
+        """
+        dimensions = self._get_dimensions(metric_name, project_id, q_filter)
+        metrics = self._conn.metrics.list(
+            name=metric_name,
+            dimensions=dimensions,
+            start_time=ck_utils.ts2dt(start),
+            end_time=ck_utils.ts2dt(end),
+        )
+        resource_key = self.conf[metric_name]['extra_args']['resource_key']
+        return {metric['dimensions'][resource_key]:
+                metric['dimensions'] for metric in metrics}
+
+    def _format_data(self, metconf, data, resources_info=None):
+        """Formats Monasca data to CK data.
+
+        Returns metadata, groupby and qty
+
+        """
+        groupby = data['dimensions']
+        resource_key = metconf['extra_args']['resource_key']
+        metadata = dict()
+        if resources_info:
+            resource = resources_info[groupby[resource_key]]
+            for i in metconf['metadata']:
+                metadata[i] = resource.get(i, '')
+        qty = data['statistics'][0][1]
+        converted_qty = ck_utils.convert_unit(
+            qty, metconf['factor'], metconf['offset'])
+        mutated_qty = ck_utils.mutate(converted_qty, metconf['mutate'])
+        return metadata, groupby, mutated_qty
+
+    def fetch_all(self, metric_name, start, end,
+                  project_id=None, q_filter=None):
+        met = self.conf[metric_name]
+        data = self._fetch_measures(
+            metric_name,
+            start,
+            end,
+            project_id=project_id,
+            q_filter=q_filter,
+        )
+
+        resources_info = None
+        if met['metadata']:
+            resources_info = self._fetch_metrics(
+                metric_name,
+                start,
+                end,
+                project_id=project_id,
+                q_filter=q_filter,
+            )
+
+        formated_resources = list()
+        for d in data:
+            if len(d['statistics']):
+                metadata, groupby, qty = self._format_data(
+                    met, d, resources_info)
+                data = self.t_cloudkitty.format_item(
+                    groupby,
+                    metadata,
+                    met['unit'],
+                    qty=qty,
+                )
+                formated_resources.append(data)
+        return formated_resources
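
Illustration, not part of the patch: the shape of one list_statistics()
entry as consumed by the Monasca _format_data() above (values invented):

    d = {
        'dimensions': {'resource_id': 'instance-uuid',
                       'project_id': 'project-uuid'},
        'statistics': [['2018-04-01T00:00:00Z', 2.0]],  # [timestamp, value]
    }
    # qty picked by _format_data: d['statistics'][0][1]  ->  2.0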


@@ -22,6 +22,7 @@ import cloudkitty.collector.monasca
 import cloudkitty.config
 import cloudkitty.fetcher
 import cloudkitty.fetcher.keystone
+import cloudkitty.fetcher.source
 import cloudkitty.orchestrator
 import cloudkitty.service
 import cloudkitty.storage
@@ -34,7 +35,7 @@ _opts = [
     ('api', list(itertools.chain(
         cloudkitty.api.app.api_opts,))),
     ('collect', list(itertools.chain(
-        cloudkitty.utils.collect_opts))),
+        cloudkitty.collector.collect_opts))),
     ('collector_monasca', list(itertools.chain(
         cloudkitty.collector.monasca.collector_monasca_opts))),
     ('gnocchi_collector', list(itertools.chain(
@@ -43,6 +44,8 @@ _opts = [
     ('keystone_fetcher', list(itertools.chain(
         cloudkitty.fetcher.keystone.keystone_fetcher_opts,
         cloudkitty.fetcher.keystone.keystone_common_opts))),
+    ('source_fetcher', list(itertools.chain(
+        cloudkitty.fetcher.source.source_fetcher_opts))),
     ('orchestrator', list(itertools.chain(
         cloudkitty.orchestrator.orchestrator_opts))),
     ('output', list(itertools.chain(
@@ -50,7 +53,7 @@ _opts = [
     ('state', list(itertools.chain(
         cloudkitty.config.state_opts))),
     ('storage', list(itertools.chain(
-        cloudkitty.utils.storage_opts))),
+        cloudkitty.storage.storage_opts))),
     ('storage_gnocchi', list(itertools.chain(
         cloudkitty.storage.hybrid.backends.gnocchi.gnocchi_storage_opts))),
     ('fetcher', list(itertools.chain(


@@ -1,124 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Martin CAMEY
#
DEFAULT_METRICS_CONF = {
'name': 'OpenStack',
'fetcher': 'keystone',
'collector': 'gnocchi',
'period': 3600,
'wait_periods': 2,
'window': 1800,
'services_objects': {
'compute': 'instance',
'volume': 'volume',
'network.bw.out': 'instance_network_interface',
'network.bw.in': 'instance_network_interface',
'network.floating': 'network',
'image': 'image',
'radosgw.usage': 'ceph_account',
},
'metrics': {
'vcpus': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'memory': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'cpu': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'disk.root.size': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'disk.ephemeral.size': {
'resource': 'instance',
'unit': 'instance',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'image.size': {
'resource': 'image',
'unit': 'MiB',
'factor': 1 / 1048576,
'aggregation_method': 'max',
},
'image.download': {
'resource': 'image',
'unit': 'MiB',
'factor': 1 / 1048576,
'aggregation_method': 'max',
},
'image.serve': {
'resource': 'image',
'unit': 'MiB',
'factor': 1 / 1048576,
'aggregation_method': 'max',
},
'volume.size': {
'resource': 'volume',
'unit': 'GiB',
'factor': 1,
'aggregation_method': 'max',
},
'network.outgoing.bytes': {
'resource': 'instance_network_interface',
'unit': 'MB',
'factor': 1 / 1000000,
'aggregation_method': 'max',
},
'network.incoming.bytes': {
'resource': 'instance_network_interface',
'unit': 'MB',
'factor': 1 / 1000000,
'aggregation_method': 'max',
},
'ip.floating': {
'resource': 'network',
'unit': 'ip',
'factor': 1,
'aggregation_method': 'max',
'countable_unit': True,
},
'radosgw.objects.size': {
'resource': 'ceph_account',
'unit': 'GiB',
'factor': 1 / 1073741824,
'aggregation_method': 'max',
},
},
}


@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-# !/usr/bin/env python
 # Copyright 2015 Objectif Libre
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -21,13 +20,15 @@ import abc
 from oslo_config import cfg
 import six

+FETCHER_OPTS = 'fetcher'
+DEPRECATED_FETCHER_OPTS = 'tenant_fetcher'
+
 fetchers_opts = [
     cfg.StrOpt('backend',
                default='keystone',
-               help='Driver used to fetch tenant list.')
+               help='Driver used to fetch tenant list.',
+               deprecated_group=DEPRECATED_FETCHER_OPTS)
 ]

-cfg.CONF.register_opts(fetchers_opts, 'tenant_fetcher')
+cfg.CONF.register_opts(fetchers_opts, 'fetcher')


 @six.add_metaclass(abc.ABCMeta)
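
Illustration, not part of the patch: thanks to deprecated_group, an old
[tenant_fetcher] section keeps working while the new group name is
preferred. An equivalent cloudkitty.conf excerpt:

    [fetcher]
    backend = keystone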


@@ -61,18 +61,18 @@ class KeystoneFetcher(fetcher.BaseFetcher):
             session=self.session,
             auth_url=self.auth.auth_url)

-    def get_tenants(self, conf=None):
+    def get_tenants(self):
         keystone_version = discover.normalize_version_number(
             CONF.keystone_fetcher.keystone_version)
         auth_dispatch = {(3,): ('project', 'projects', 'list'),
                          (2,): ('tenant', 'tenants', 'roles_for_user')}
         for auth_version, auth_version_mapping in auth_dispatch.items():
             if discover.version_match(auth_version, keystone_version):
-                return self._do_get_tenants(auth_version_mapping, conf)
+                return self._do_get_tenants(auth_version_mapping)
         msg = "Keystone version you've specified is not supported"
         raise exceptions.VersionNotAvailable(msg)

-    def _do_get_tenants(self, auth_version_mapping, conf):
+    def _do_get_tenants(self, auth_version_mapping):
         tenant_attr, tenants_attr, role_func = auth_version_mapping
         tenant_list = getattr(self.admin_ks, tenants_attr).list()
         my_user_id = self.session.get_user_id()
@@ -82,8 +82,4 @@ class KeystoneFetcher(fetcher.BaseFetcher):
                 tenant_attr: tenant})
             if 'rating' not in [role.name for role in roles]:
                 tenant_list.remove(tenant)
-        if conf:
-            res = [{'tenant_id': tenant.id} for tenant in tenant_list]
-            for tenant in res:
-                tenant.update(conf)
-            return res
+        return [tenant.id for tenant in tenant_list]


@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-# !/usr/bin/env python
 # Copyright 2015 Objectif Libre
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,22 +15,25 @@
 #
 # @author: Martin CAMEY
 #
-import hashlib
+from oslo_config import cfg

 from cloudkitty import fetcher

+SOURCE_FETCHER_OPTS = 'source_fetcher'
+
+source_fetcher_opts = [
+    cfg.ListOpt('sources',
+                default=list(),
+                help='list of source identifiers'), ]
+
+cfg.CONF.register_opts(source_fetcher_opts, SOURCE_FETCHER_OPTS)
+
+CONF = cfg.CONF
+

 class SourceFetcher(fetcher.BaseFetcher):
     """Source projects fetcher."""

     name = 'source'

-    def get_projects(self, conf=None):
-        if conf:
-            tmp = hashlib.md5()
-            tmp.update(conf['name'])
-            conf['tenant_id'] = tmp.hexdigest()
-        return [conf]
-
-    def get_tenants(self, conf=None):
-        return self.get_projects(conf=conf)
+    def get_tenants(self):
+        return CONF.source_fetcher.sources
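
Illustration, not part of the patch: a possible cloudkitty.conf excerpt
enabling the reworked source fetcher (sources is an oslo ListOpt, i.e.
comma-separated):

    [fetcher]
    backend = source

    [source_fetcher]
    sources = tenant_a,tenant_b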


@@ -41,7 +41,6 @@ eventlet.monkey_patch()
 LOG = logging.getLogger(__name__)

 CONF = cfg.CONF
-CONF.import_opt('backend', 'cloudkitty.fetcher', 'tenant_fetcher')

 orchestrator_opts = [
     cfg.StrOpt('coordination_url',
@@ -51,9 +50,9 @@ orchestrator_opts = [
 ]

 CONF.register_opts(orchestrator_opts, group='orchestrator')
-METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
+CONF.import_opt('backend', 'cloudkitty.fetcher', 'fetcher')

-FETCHERS_NAMESPACE = 'cloudkitty.tenant.fetchers'
+FETCHERS_NAMESPACE = 'cloudkitty.fetchers'
 PROCESSORS_NAMESPACE = 'cloudkitty.rating.processors'
 COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'
 STORAGES_NAMESPACE = 'cloudkitty.storage.backends'
@@ -153,13 +152,13 @@ class APIWorker(BaseWorker):

 class Worker(BaseWorker):
-    def __init__(self, collector, storage, tenant):
+    def __init__(self, collector, storage, tenant_id):
         self._collector = collector
         self._storage = storage
-        self._period = tenant['period']
-        self._wait_time = tenant['wait_periods'] * self._period
-        self._tenant_id = tenant['tenant_id']
-        self.conf = tenant
+        self._period = CONF.collect.period
+        self._wait_time = CONF.collect.wait_periods * self._period
+        self._tenant_id = tenant_id
+        self._conf = ck_utils.load_conf(CONF.collect.metrics_conf)

         super(Worker, self).__init__(self._tenant_id)
@@ -182,7 +181,7 @@ class Worker(BaseWorker):
         timestamp = self._storage.get_state(self._tenant_id)
         return ck_utils.check_time_state(timestamp,
                                          self._period,
-                                         self._wait_time)
+                                         CONF.collect.wait_periods)

     def run(self):
         while True:
@@ -190,7 +189,7 @@ class Worker(BaseWorker):
             if not timestamp:
                 break

-            metrics = list(self.conf['metrics'].keys())
+            metrics = list(self._conf['metrics'].keys())

             for metric in metrics:
                 try:
@@ -225,8 +224,8 @@ class Orchestrator(object):
     def __init__(self):
         self.fetcher = driver.DriverManager(
             FETCHERS_NAMESPACE,
-            METRICS_CONF['fetcher'],
-            invoke_on_load=True
+            CONF.fetcher.backend,
+            invoke_on_load=True,
         ).driver

         transformers = transformer.get_transformers()
@@ -258,11 +257,11 @@ class Orchestrator(object):
         self.server = messaging.get_server(target, endpoints)
         self.server.start()

-    def _check_state(self, tenant_id, period, wait_time):
+    def _check_state(self, tenant_id):
         timestamp = self.storage.get_state(tenant_id)
         return ck_utils.check_time_state(timestamp,
-                                         period,
-                                         wait_time)
+                                         CONF.collect.period,
+                                         CONF.collect.wait_periods)

     def process_messages(self):
         # TODO(sheeprine): Code kept to handle threading and asynchronous
@@ -273,36 +272,31 @@ class Orchestrator(object):

     def process(self):
         while True:
-            self.tenants = self.fetcher.get_tenants(METRICS_CONF)
+            self.tenants = self.fetcher.get_tenants()
             random.shuffle(self.tenants)
             LOG.info('Tenants loaded for fetcher %s', self.fetcher.name)
-            for tenant in self.tenants:
-                lock = self._lock(tenant['tenant_id'])
+            for tenant_id in self.tenants:
+                lock = self._lock(tenant_id)
                 if lock.acquire(blocking=False):
-                    state = self._check_state(
-                        tenant['tenant_id'],
-                        tenant['period'],
-                        tenant['wait_periods'],
-                    )
-                    if not state:
-                        self.tenants.remove(tenant)
-                    else:
+                    state = self._check_state(tenant_id)
+                    if state:
                         worker = Worker(
                             self.collector,
                             self.storage,
-                            tenant,
+                            tenant_id,
                         )
                         worker.run()
                     lock.release()
                 self.coord.heartbeat()
                 # NOTE(sheeprine): Slow down looping if all tenants are
                 # being processed
                 eventlet.sleep(1)
             # FIXME(sheeprine): We may cause a drift here
-            eventlet.sleep(tenant['period'])
+            eventlet.sleep(CONF.collect.period)

     def terminate(self):
         self.coord.stop()


@@ -25,21 +25,28 @@ from stevedore import driver

 from cloudkitty import utils as ck_utils

+storage_opts = [
+    cfg.StrOpt('backend',
+               default='sqlalchemy',
+               help='Name of the storage backend driver.')
+]
+
 LOG = logging.getLogger(__name__)

 CONF = cfg.CONF
-# NOTE(mc): This hack is possible because only
-# one OpenStack configuration is allowed.
-METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
+CONF.import_opt('period', 'cloudkitty.collector', 'collect')
+CONF.register_opts(storage_opts, 'storage')

 STORAGES_NAMESPACE = 'cloudkitty.storage.backends'


-def get_storage():
+def get_storage(**kwargs):
     storage_args = {
-        'period': METRICS_CONF.get('period', 3600),
+        'period': CONF.collect.period,
     }
+    storage_args.update(kwargs)
     backend = driver.DriverManager(
         STORAGES_NAMESPACE,
         cfg.CONF.storage.backend,
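
Illustration, not part of the patch: get_storage() now forwards keyword
arguments to the backend, which the gabbi fixtures later in this commit
rely on, e.g.:

    storage = get_storage(conf=metrics_conf)  # metrics_conf: a loaded dict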


@@ -27,8 +27,8 @@ from oslo_log import log as logging
 from oslo_utils import uuidutils
 import six

+from cloudkitty.collector import validate_conf
 from cloudkitty.storage.hybrid.backends import BaseHybridBackend
-from cloudkitty.transformer import gnocchi as gtransformer
 import cloudkitty.utils as ck_utils
@@ -36,10 +36,6 @@ LOG = logging.getLogger(__name__)

 CONF = cfg.CONF
-# NOTE(mc): This hack is possible because only
-# one OpenStack configuration is allowed.
-METRICS_CONF = ck_utils.get_metrics_conf(CONF.collect.metrics_conf)
-
 CONF.import_opt('period', 'cloudkitty.collector', 'collect')

 GNOCCHI_STORAGE_OPTS = 'storage_gnocchi'
@@ -53,7 +49,7 @@ gnocchi_storage_opts = [
     # The archive policy definition MUST include the collect period granularity
     cfg.StrOpt('archive_policy_definition',
                default='[{"granularity": '
-                       + six.text_type(METRICS_CONF.get('period', 3600)) +
+                       + six.text_type(CONF.collect.period) +
                        ', "timespan": "90 days"}, '
                        '{"granularity": 86400, "timespan": "360 days"}, '
                        '{"granularity": 2592000, "timespan": "1800 days"}]',
@@ -68,6 +64,7 @@ ks_loading.register_auth_conf_options(
     GNOCCHI_STORAGE_OPTS)

 RESOURCE_TYPE_NAME_ROOT = 'rating_service_'
+METADATA_NAME_ROOT = 'ckmeta_'


 class DecimalJSONEncoder(json.JSONEncoder):
@@ -92,34 +89,27 @@ class GnocchiStorage(BaseHybridBackend):
     """

-    # NOTE(lukapeschke): List taken directly from gnocchi code
-    invalid_attribute_names = [
-        "id", "type", "metrics",
-        "revision", "revision_start", "revision_end",
-        "started_at", "ended_at",
-        "user_id", "project_id",
-        "created_by_user_id", "created_by_project_id", "get_metric",
-        "creator",
-    ]
+    groupby_keys = ['res_type', 'tenant_id']
+    groupby_values = ['type', 'project_id']

     def _init_resource_types(self):
-        transformer = gtransformer.GnocchiTransformer()
-        for metric in list(self.conf['metrics'].keys()):
+        for metric_name, metric in self.conf.items():
             metric_dict = dict()
             metric_dict['attributes'] = list()
-            for attribute in transformer.get_metadata(metric):
-                if attribute not in self.invalid_attribute_names:
-                    metric_dict['attributes'].append(attribute)
-            metric_dict['required_attributes'] = [
-                'resource_id',
-                'unit',
-            ]
-            metric_dict['name'] = RESOURCE_TYPE_NAME_ROOT + metric
-            metric_dict['qty_metric'] = 1
-            if self.conf['metrics'][metric].get('countable_unit'):
-                resource = self.conf['metrics'][metric]['resource']
-                metric_dict['qty_metric'] = resource
-            self._resource_type_data[metric] = metric_dict
+            for attribute in metric.get('metadata', {}):
+                metric_dict['attributes'].append(
+                    METADATA_NAME_ROOT + attribute)
+            metric_dict['required_attributes'] = ['unit', 'resource_id']
+            for attribute in metric['groupby']:
+                metric_dict['required_attributes'].append(
+                    METADATA_NAME_ROOT + attribute)
+            metric_dict['name'] = RESOURCE_TYPE_NAME_ROOT + metric['alt_name']
+            if metric['mutate'] == 'NUMBOOL':
+                metric_dict['qty_metric'] = 1
+            else:
+                metric_dict['qty_metric'] = metric_name
+            self._resource_type_data[metric['alt_name']] = metric_dict

     def _get_res_type_dict(self, res_type):
         res_type_data = self._resource_type_data.get(res_type, None)
@@ -148,16 +138,19 @@ class GnocchiStorage(BaseHybridBackend):
                 "Unknown resource type '{}'".format(res_type))

         res_dict = {
-            'id': data['resource_id'],
-            'resource_id': data['resource_id'],
+            'id': data['id'],
+            'resource_id': data['id'],
             'project_id': tenant_id,
-            'user_id': data['user_id'],
+            'user_id': 'cloudkitty',
             'unit': data['unit'],
         }
-        for attr in res_type_data['attributes']:
-            res_dict[attr] = data.get(attr, None) or 'None'
-            if isinstance(res_dict[attr], decimal.Decimal):
-                res_dict[attr] = float(res_dict[attr])
+        for key in ['attributes', 'required_attributes']:
+            for attr in res_type_data[key]:
+                if METADATA_NAME_ROOT in attr:
+                    res_dict[attr] = data.get(
+                        attr.replace(METADATA_NAME_ROOT, ''), None) or ''
+                    if isinstance(res_dict[attr], decimal.Decimal):
+                        res_dict[attr] = float(res_dict[attr])

         created_metrics = [
             self._conn.metric.create({
@@ -224,7 +217,9 @@ class GnocchiStorage(BaseHybridBackend):
     def __init__(self, **kwargs):
         super(GnocchiStorage, self).__init__(**kwargs)

-        self.conf = kwargs['conf'] if 'conf' in kwargs else METRICS_CONF
+        conf = kwargs.get('conf') or ck_utils.load_conf(
+            CONF.collect.metrics_conf)
+        self.conf = validate_conf(conf)
         self.auth = ks_loading.load_auth_from_conf_options(
             CONF,
             GNOCCHI_STORAGE_OPTS)
@@ -241,9 +236,7 @@ class GnocchiStorage(BaseHybridBackend):
             CONF.storage_gnocchi.archive_policy_name)
         self._archive_policy_definition = json.loads(
             CONF.storage_gnocchi.archive_policy_definition)
-        self._period = self.conf['period']
-        if "period" in kwargs:
-            self._period = kwargs["period"]
+        self._period = kwargs.get('period') or CONF.collect.period
         self._measurements = dict()
         self._resource_type_data = dict()
         self._init_resource_types()
@@ -288,21 +281,57 @@ class GnocchiStorage(BaseHybridBackend):
     def get_total(self, begin=None, end=None, tenant_id=None,
                   service=None, groupby=None):
         # Query can't be None if we don't specify a resource_id
-        query = {}
+        query = {'and': [{
+            'like': {'type': RESOURCE_TYPE_NAME_ROOT + '%'},
+        }]}
         if tenant_id:
-            query['='] = {"project_id": tenant_id}
-        measures = self._conn.metric.aggregation(
-            metrics='price', query=query,
-            start=begin, stop=end,
-            aggregation='sum',
-            granularity=self._period,
-            needed_overlap=0)
-        rate = sum(measure[2] for measure in measures) if len(measures) else 0
-        return [{
-            'begin': begin,
-            'end': end,
-            'rate': rate,
-        }]
+            query['and'].append({'=': {'project_id': tenant_id}})
+
+        gb = []
+        if groupby:
+            for elem in groupby.split(','):
+                if elem in self.groupby_keys:
+                    gb.append(self.groupby_values[
+                        self.groupby_keys.index(elem)])
+        # Setting gb to None instead of an empty list
+        gb = gb if len(gb) > 0 else None
+
+        # Build aggregation operation
+        op = ['aggregate', 'sum', ['metric', 'price', 'sum']]
+        try:
+            aggregates = self._conn.aggregates.fetch(
+                op,
+                start=begin,
+                stop=end,
+                groupby=gb,
+                search=query)
+        # No 'price' metric found
+        except gexceptions.BadRequest:
+            return [dict(begin=begin, end=end, rate=0)]
+
+        # In case no group_by was specified
+        if not isinstance(aggregates, list):
+            aggregates = [aggregates]
+
+        total_list = list()
+        for aggregate in aggregates:
+            if groupby:
+                measures = aggregate['measures']['measures']['aggregated']
+            else:
+                measures = aggregate['measures']['aggregated']
+            if len(measures) > 0:
+                rate = sum(measure[2] for measure in measures
+                           if (measure[1] == self._period))
+                total = dict(begin=begin, end=end, rate=rate)
+                if gb:
+                    for value in gb:
+                        key = self.groupby_keys[
+                            self.groupby_values.index(value)]
+                        total[key] = aggregate['group'][value].replace(
+                            RESOURCE_TYPE_NAME_ROOT, '')
+                total_list.append(total)
+        return total_list

     def _append_measurements(self, resource, data, tenant_id):
         if not self._measurements.get(tenant_id, None):
@ -322,7 +351,7 @@ class GnocchiStorage(BaseHybridBackend):
def append_time_frame(self, res_type, frame, tenant_id): def append_time_frame(self, res_type, frame, tenant_id):
flat_frame = ck_utils.flat_dict(frame) flat_frame = ck_utils.flat_dict(frame)
resource = self._find_resource(res_type, flat_frame['resource_id']) resource = self._find_resource(res_type, flat_frame['id'])
if not resource: if not resource:
resource = self._create_resource(res_type, tenant_id, flat_frame) resource = self._create_resource(res_type, tenant_id, flat_frame)
self._append_measurements(resource, flat_frame, tenant_id) self._append_measurements(resource, flat_frame, tenant_id)
@ -441,7 +470,8 @@ class GnocchiStorage(BaseHybridBackend):
resource_type, resource_measures['group']['id']) resource_type, resource_measures['group']['id'])
if not resource: if not resource:
continue continue
desc = {a: resource.get(a, None) for a in attributes} desc = {attr.replace(METADATA_NAME_ROOT, ''):
resource.get(attr, None) for attr in attributes}
formatted_frame = self._format_frame( formatted_frame = self._format_frame(
resource_type, resource, desc, measure, tenant_id) resource_type, resource, desc, measure, tenant_id)
output.append(formatted_frame) output.append(formatted_frame)
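The rewritten get_total() above replaces the deprecated metric.aggregation() call with Gnocchi's dynamic aggregates API. A minimal sketch of the structures it sends, assuming an authenticated gnocchiclient Client is supplied by the caller (the resource-type prefix value below is illustrative; the real RESOURCE_TYPE_NAME_ROOT constant is defined elsewhere in this module):

```python
# Sketch of the aggregates request built by the new get_total(); `client`
# is assumed to be an authenticated gnocchiclient Client, and the prefix
# value is illustrative, not CloudKitty's actual constant.
RESOURCE_TYPE_NAME_ROOT = 'cloudkitty_metric_'


def total_price(client, project_id=None, begin=None, end=None, groupby=None):
    # Only consider CloudKitty-owned resource types.
    search = {'and': [{'like': {'type': RESOURCE_TYPE_NAME_ROOT + '%'}}]}
    if project_id:
        search['and'].append({'=': {'project_id': project_id}})
    # 'sum of per-resource sums' of the 'price' metric.
    op = ['aggregate', 'sum', ['metric', 'price', 'sum']]
    return client.aggregates.fetch(
        op, search=search, groupby=groupby, start=begin, stop=end)
```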
@@ -43,6 +43,7 @@
 from cloudkitty import rating
 from cloudkitty import storage
 from cloudkitty.storage.sqlalchemy import models
 from cloudkitty import tests
+from cloudkitty.tests import test_utils
 from cloudkitty import utils as ck_utils
@@ -278,7 +279,7 @@ class BaseStorageDataFixture(fixture.GabbiFixture):
                      "begin": begin,
                      "end": end},
                  "usage": {
-                     "compute": [
+                     "cpu": [
                          {
                              "desc": {
                                  "dummy": True,
@@ -292,7 +293,7 @@ class BaseStorageDataFixture(fixture.GabbiFixture):
                      "begin": begin,
                      "end": end},
                  "usage": {
-                     "image": [
+                     "image.size": [
                          {
                              "desc": {
                                  "dummy": True,
@@ -313,7 +314,7 @@ class BaseStorageDataFixture(fixture.GabbiFixture):
                                return_value=dict())
         with auth:
             with session:
-                self.storage = storage.get_storage()
+                self.storage = storage.get_storage(conf=test_utils.load_conf())
                 self.storage.init()
         self.initialize_data()
@@ -391,14 +392,14 @@ class MetricsConfFixture(fixture.GabbiFixture):
     """Inject Metrics configuration mock to the get_metrics_conf() function"""

     def start_fixture(self):
-        self._original_function = ck_utils.get_metrics_conf
-        ck_utils.get_metrics_conf = mock.Mock(
+        self._original_function = ck_utils.load_conf
+        ck_utils.load_conf = mock.Mock(
            return_value=tests.samples.METRICS_CONF,
        )

     def stop_fixture(self):
         """Remove the get_metrics_conf() monkeypatch."""
-        ck_utils.get_metrics_conf = self._original_function
+        ck_utils.load_conf = self._original_function


 def setup_app():
@@ -123,7 +123,7 @@ tests:
           x-roles: admin
       data:
         resources:
-          - service: "compute"
+          - service: "cpu"
            volume: "1.0"
            desc:
              test: 1
@@ -7,20 +7,9 @@ tests:
       url: /v1/info/config
       status: 200
       response_json_paths:
-        $.collector: gnocchi
-        $.window: 1800
-        $.wait_periods: 2
-        $.period: 3600
-        $.metrics.`len`: 13
-        $.metrics.vcpus.unit: instance
-        $.metrics.memory.unit: instance
-        $.metrics.cpu.unit: instance
-        $.metrics['disk.root.size'].unit: instance
-        $.metrics['disk.ephemeral.size'].unit: instance
+        $.metrics.`len`: 7
+        $.metrics['cpu'].unit: instance
         $.metrics['image.size'].unit: MiB
-        $.metrics['image.download'].unit: MiB
-        $.metrics['image.serve'].unit: MiB
         $.metrics['volume.size'].unit: GiB
         $.metrics['network.incoming.bytes'].unit: MB
         $.metrics['network.outgoing.bytes'].unit: MB
@@ -31,38 +20,25 @@ tests:
       url: /v1/info/metrics
       status: 200
       response_json_paths:
-        $.metrics.`len`: 13
-        $.metrics[/metric_id][0].metric_id: cpu
-        $.metrics[/metric_id][0].unit: instance
-        $.metrics[/metric_id][1].metric_id: disk.ephemeral.size
-        $.metrics[/metric_id][1].unit: instance
-        $.metrics[/metric_id][2].metric_id: disk.root.size
-        $.metrics[/metric_id][2].unit: instance
-        $.metrics[/metric_id][3].metric_id: image.download
-        $.metrics[/metric_id][3].unit: MiB
-        $.metrics[/metric_id][4].metric_id: image.serve
-        $.metrics[/metric_id][4].unit: MiB
-        $.metrics[/metric_id][5].metric_id: image.size
-        $.metrics[/metric_id][5].unit: MiB
-        $.metrics[/metric_id][6].metric_id: ip.floating
-        $.metrics[/metric_id][6].unit: ip
-        $.metrics[/metric_id][7].metric_id: memory
-        $.metrics[/metric_id][7].unit: instance
-        $.metrics[/metric_id][8].metric_id: network.incoming.bytes
-        $.metrics[/metric_id][8].unit: MB
-        $.metrics[/metric_id][9].metric_id: network.outgoing.bytes
-        $.metrics[/metric_id][9].unit: MB
-        $.metrics[/metric_id][10].metric_id: radosgw.objects.size
-        $.metrics[/metric_id][10].unit: GiB
-        $.metrics[/metric_id][11].metric_id: vcpus
-        $.metrics[/metric_id][11].unit: instance
-        $.metrics[/metric_id][12].metric_id: volume.size
-        $.metrics[/metric_id][12].unit: GiB
+        $.metrics.`len`: 7
+        $.metrics[/metric_id][0].metric_id: image.size
+        $.metrics[/metric_id][0].unit: MiB
+        $.metrics[/metric_id][1].metric_id: instance
+        $.metrics[/metric_id][1].unit: instance
+        $.metrics[/metric_id][2].metric_id: ip.floating
+        $.metrics[/metric_id][2].unit: ip
+        $.metrics[/metric_id][3].metric_id: network.incoming.bytes
+        $.metrics[/metric_id][3].unit: MB
+        $.metrics[/metric_id][4].metric_id: network.outgoing.bytes
+        $.metrics[/metric_id][4].unit: MB
+        $.metrics[/metric_id][5].metric_id: radosgw.objects.size
+        $.metrics[/metric_id][5].unit: GiB
+        $.metrics[/metric_id][6].metric_id: volume.size
+        $.metrics[/metric_id][6].unit: GiB

     - name: get cpu metric info
-      url: /v1/info/metrics/cpu
+      url: /v1/info/metrics/instance
       status: 200
       response_json_paths:
-        $.metric_id: cpu
+        $.metric_id: instance
         $.unit: instance
-        $.metadata.`len`: 4
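The updated expectations reflect that /v1/info now exposes exactly the metrics declared in metrics.yml, and that individual metrics are addressed by their alt_name. A quick manual check against a running API could look like the sketch below; the endpoint URL and the absence of auth headers are assumptions to adapt to your deployment:

```python
# Hypothetical smoke test of the reworked info endpoints.
import requests

BASE = 'http://localhost:8889/v1/info'  # assumed endpoint

metrics = requests.get(BASE + '/metrics').json()['metrics']
assert len(metrics) == 7  # with the default metrics.yml shipped here

# Metrics are now looked up by alt_name ('instance'), not source name ('cpu').
info = requests.get(BASE + '/metrics/instance').json()
assert info['unit'] == 'instance'
```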
@@ -123,7 +123,7 @@ tests:
           x-roles: admin
       data:
         resources:
-          - service: "compute"
+          - service: "cpu"
            volume: "1.0"
            desc:
              test: 1
@@ -73,7 +73,7 @@ tests:
       query_parameters:
         begin: "2015-01-01T00:00:00"
         end: "2015-02-04T00:00:00"
-        service: "compute"
+        service: "cpu"
       status: 200
       response_strings:
          - "110.971"
@@ -83,7 +83,7 @@ tests:
       query_parameters:
         begin: "2015-01-01T00:00:00"
         end: "2015-02-04T00:00:00"
-        service: "image"
+        service: "image.size"
       status: 200
       response_strings:
          - "10.043"
@@ -94,7 +94,7 @@ tests:
         begin: "2015-01-01T00:00:00"
         end: "2015-02-04T00:00:00"
         tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
-        service: "compute"
+        service: "cpu"
       status: 200
       response_strings:
          - "37.436"
@@ -133,19 +133,19 @@ tests:
       query_parameters:
         begin: "2015-01-01T00:00:00"
         end: "2015-02-04T00:00:00"
-        service: "compute"
+        service: "cpu"
         groupby: "tenant_id"
       status: 200
       response_json_paths:
         $.summary.`len`: 2
         $.summary[0].rate: "37.436"
         $.summary[0].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
-        $.summary[0].res_type: "compute"
+        $.summary[0].res_type: "cpu"
         $.summary[0].begin: "2015-01-01T00:00:00"
         $.summary[0].end: "2015-02-04T00:00:00"
         $.summary[1].rate: "73.535"
         $.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
-        $.summary[1].res_type: "compute"
+        $.summary[1].res_type: "cpu"
         $.summary[1].begin: "2015-01-01T00:00:00"
         $.summary[1].end: "2015-02-04T00:00:00"
@@ -159,12 +159,12 @@ tests:
       response_json_paths:
         $.summary.`len`: 2
         $.summary[0].rate: "10.043"
-        $.summary[0].res_type: "image"
+        $.summary[0].res_type: "image.size"
         $.summary[0].tenant_id: "ALL"
         $.summary[0].begin: "2015-01-01T00:00:00"
         $.summary[0].end: "2015-02-04T00:00:00"
         $.summary[1].rate: "110.971"
-        $.summary[1].res_type: "compute"
+        $.summary[1].res_type: "cpu"
         $.summary[1].tenant_id: "ALL"
         $.summary[1].begin: "2015-01-01T00:00:00"
         $.summary[1].end: "2015-02-04T00:00:00"
@@ -181,12 +181,12 @@ tests:
         $.summary.`len`: 2
         $.summary[0].rate: "6.655"
         $.summary[0].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
-        $.summary[0].res_type: "image"
+        $.summary[0].res_type: "image.size"
         $.summary[0].begin: "2015-01-01T00:00:00"
         $.summary[0].end: "2015-02-04T00:00:00"
         $.summary[1].rate: "73.535"
         $.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
-        $.summary[1].res_type: "compute"
+        $.summary[1].res_type: "cpu"
         $.summary[1].begin: "2015-01-01T00:00:00"
         $.summary[1].end: "2015-02-04T00:00:00"
@@ -200,22 +200,22 @@ tests:
       response_json_paths:
         $.summary.`len`: 4
         $.summary[0].rate: "3.388"
-        $.summary[0].res_type: "image"
+        $.summary[0].res_type: "image.size"
         $.summary[0].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
         $.summary[0].begin: "2015-01-01T00:00:00"
         $.summary[0].end: "2015-02-04T00:00:00"
         $.summary[1].rate: "6.655"
-        $.summary[1].res_type: "image"
+        $.summary[1].res_type: "image.size"
         $.summary[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
         $.summary[1].begin: "2015-01-01T00:00:00"
         $.summary[1].end: "2015-02-04T00:00:00"
         $.summary[2].rate: "37.436"
-        $.summary[2].res_type: "compute"
+        $.summary[2].res_type: "cpu"
         $.summary[2].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
         $.summary[2].begin: "2015-01-01T00:00:00"
         $.summary[2].end: "2015-02-04T00:00:00"
         $.summary[3].rate: "73.535"
-        $.summary[3].res_type: "compute"
+        $.summary[3].res_type: "cpu"
         $.summary[3].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
         $.summary[3].begin: "2015-01-01T00:00:00"
         $.summary[3].end: "2015-02-04T00:00:00"
@@ -74,7 +74,7 @@ tests:
         $.dataframes[0].resources.`len`: 1
         $.dataframes[0].resources[0].volume: "1"
         $.dataframes[0].resources[0].rating: "1.337"
-        $.dataframes[0].resources[0].service: "compute"
+        $.dataframes[0].resources[0].service: "cpu"
         $.dataframes[0].resources[0].desc.dummy: true
         $.dataframes[0].resources[0].desc.fake_meta: 1.0
         $.dataframes[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
@@ -83,7 +83,7 @@ tests:
         $.dataframes[1].resources.`len`: 1
         $.dataframes[1].resources[0].volume: "1"
         $.dataframes[1].resources[0].rating: "0.121"
-        $.dataframes[1].resources[0].service: "image"
+        $.dataframes[1].resources[0].service: "image.size"
         $.dataframes[1].resources[0].desc.dummy: true
         $.dataframes[1].resources[0].desc.fake_meta: 1.0
@@ -102,7 +102,7 @@ tests:
         $.dataframes[0].resources.`len`: 1
         $.dataframes[0].resources[0].volume: "1"
         $.dataframes[0].resources[0].rating: "1.337"
-        $.dataframes[0].resources[0].service: "compute"
+        $.dataframes[0].resources[0].service: "cpu"
         $.dataframes[0].resources[0].desc.dummy: true
         $.dataframes[0].resources[0].desc.fake_meta: 1.0
         $.dataframes[1].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
@@ -111,7 +111,7 @@ tests:
         $.dataframes[1].resources.`len`: 1
         $.dataframes[1].resources[0].volume: "1"
         $.dataframes[1].resources[0].rating: "0.121"
-        $.dataframes[1].resources[0].service: "image"
+        $.dataframes[1].resources[0].service: "image.size"
         $.dataframes[1].resources[0].desc.dummy: true
         $.dataframes[1].resources[0].desc.fake_meta: 1.0
@@ -129,7 +129,7 @@ tests:
         $.dataframes[0].resources.`len`: 1
         $.dataframes[0].resources[0].volume: "1"
         $.dataframes[0].resources[0].rating: "1.337"
-        $.dataframes[0].resources[0].service: "compute"
+        $.dataframes[0].resources[0].service: "cpu"
         $.dataframes[0].resources[0].desc.dummy: true
         $.dataframes[0].resources[0].desc.fake_meta: 1.0
         $.dataframes[1].tenant_id: "8f82cc70-e50c-466e-8624-24bdea811375"
@@ -138,7 +138,7 @@ tests:
         $.dataframes[1].resources.`len`: 1
         $.dataframes[1].resources[0].volume: "1"
         $.dataframes[1].resources[0].rating: "0.121"
-        $.dataframes[1].resources[0].service: "image"
+        $.dataframes[1].resources[0].service: "image.size"
         $.dataframes[1].resources[0].desc.dummy: true
         $.dataframes[1].resources[0].desc.fake_meta: 1.0
         $.dataframes[2].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
@@ -147,7 +147,7 @@ tests:
         $.dataframes[2].resources.`len`: 1
         $.dataframes[2].resources[0].volume: "1"
         $.dataframes[2].resources[0].rating: "1.337"
-        $.dataframes[2].resources[0].service: "compute"
+        $.dataframes[2].resources[0].service: "cpu"
         $.dataframes[2].resources[0].desc.dummy: true
         $.dataframes[2].resources[0].desc.fake_meta: 1.0
         $.dataframes[3].tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
@@ -156,16 +156,16 @@ tests:
         $.dataframes[3].resources.`len`: 1
         $.dataframes[3].resources[0].volume: "1"
         $.dataframes[3].resources[0].rating: "0.121"
-        $.dataframes[3].resources[0].service: "image"
+        $.dataframes[3].resources[0].service: "image.size"
         $.dataframes[3].resources[0].desc.dummy: true
         $.dataframes[3].resources[0].desc.fake_meta: 1.0

-    - name: fetch data filtering on compute service and tenant
+    - name: fetch data filtering on cpu service and tenant
       url: /v1/storage/dataframes
       query_parameters:
         begin: "2015-01-04T13:00:00"
         end: "2015-01-04T14:00:00"
-        resource_type: "compute"
+        resource_type: "cpu"
         tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
       status: 200
       response_json_paths:
@@ -176,7 +176,7 @@ tests:
         $.dataframes[0].resources.`len`: 1
         $.dataframes[0].resources[0].volume: "1"
         $.dataframes[0].resources[0].rating: "1.337"
-        $.dataframes[0].resources[0].service: "compute"
+        $.dataframes[0].resources[0].service: "cpu"
         $.dataframes[0].resources[0].desc.dummy: true
         $.dataframes[0].resources[0].desc.fake_meta: 1.0
@@ -185,7 +185,7 @@ tests:
       query_parameters:
         begin: "2015-01-04T13:00:00"
         end: "2015-01-04T14:00:00"
-        resource_type: "image"
+        resource_type: "image.size"
         tenant_id: "7606a24a-b8ad-4ae0-be6c-3d7a41334a2e"
       status: 200
       response_json_paths:
@@ -196,7 +196,7 @@ tests:
         $.dataframes[0].resources.`len`: 1
         $.dataframes[0].resources[0].volume: "1"
         $.dataframes[0].resources[0].rating: "0.121"
-        $.dataframes[0].resources[0].service: "image"
+        $.dataframes[0].resources[0].service: "image.size"
         $.dataframes[0].resources[0].desc.dummy: true
         $.dataframes[0].resources[0].desc.fake_meta: 1.0
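Clients querying stored dataframes must now use the new service names ('cpu', 'image.size') where they previously used 'compute' and 'image'. For instance, hedging the same assumptions as above about endpoint and auth:

```python
# Illustrative dataframe query using the new service names.
import requests

resp = requests.get(
    'http://localhost:8889/v1/storage/dataframes',  # assumed endpoint
    params={
        'begin': '2015-01-04T13:00:00',
        'end': '2015-01-04T14:00:00',
        'resource_type': 'cpu',  # formerly 'compute'
        'tenant_id': '7606a24a-b8ad-4ae0-be6c-3d7a41334a2e',
    })
for frame in resp.json()['dataframes']:
    for res in frame['resources']:
        print(res['service'], res['volume'], res['rating'])
```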
@@ -40,10 +40,10 @@ tests:
           content-type: application/json
           x-roles: admin
       data:
-        name: "compute"
+        name: "cpu"
       status: 201
       response_json_paths:
-        $.name: "compute"
+        $.name: "cpu"
       response_store_environ:
           hash_error_service_id: $.service_id
@@ -54,10 +54,10 @@ tests:
           content-type: application/json
           x-roles: admin
       data:
-        name: "compute"
+        name: "cpu"
       status: 409
       response_strings:
-        - "Service compute already exists (UUID: $RESPONSE['$.service_id'])"
+        - "Service cpu already exists (UUID: $RESPONSE['$.service_id'])"

     - name: create a service mapping with an invalid type
       url: /v1/rating/module_config/hashmap/mappings
@@ -11,11 +11,11 @@ tests:
           content-type: application/json
           x-roles: admin
       data:
-        name: "compute"
+        name: "cpu"
       status: 201
       response_json_paths:
         $.service_id: "6c1b8a30-797f-4b7e-ad66-9879b79059fb"
-        $.name: "compute"
+        $.name: "cpu"
       response_headers:
           location: $SCHEME://$NETLOC/v1/rating/module_config/hashmap/services/6c1b8a30-797f-4b7e-ad66-9879b79059fb
@@ -20,10 +20,10 @@ tests:
           content-type: application/json
           x-roles: admin
       data:
-        name: "compute"
+        name: "cpu"
       status: 201
       response_json_paths:
-        $.name: "compute"
+        $.name: "cpu"
       response_store_environ:
           hash_service_id: $.service_id
@@ -32,7 +32,7 @@ tests:
       status: 200
       response_json_paths:
         $.service_id: $RESPONSE['$.service_id']
-        $.name: "compute"
+        $.name: "cpu"

     - name: create a flat service mapping
       url: /v1/rating/module_config/hashmap/mappings
@@ -60,7 +60,7 @@ tests:
       status: 200
       response_json_paths:
         $.services.`len`: 1
-        $.services[0].name: "compute"
+        $.services[0].name: "cpu"

     - name: create a rate service mapping
       url: /v1/rating/module_config/hashmap/mappings
@@ -18,7 +18,6 @@
 import copy
 import decimal

-from cloudkitty.default_metrics_conf import DEFAULT_METRICS_CONF
 from cloudkitty import utils as ck_utils

 TENANT = 'f266f30b11f246b589fd266f85eeec39'
@@ -37,6 +36,7 @@ COMPUTE_METADATA = {
     'flavor': 'm1.nano',
     'image_id': 'f5600101-8fa2-4864-899e-ebcb7ed6b568',
     'instance_id': '26c084e1-b8f1-4cbc-a7ec-e8b356788a17',
+    'id': '1558f911-b55a-4fd2-9173-c8f1f23e5639',
     'resource_id': '1558f911-b55a-4fd2-9173-c8f1f23e5639',
     'memory': '64',
     'metadata': {
@@ -50,6 +50,7 @@ COMPUTE_METADATA = {
 IMAGE_METADATA = {
     'checksum': '836c69cbcd1dc4f225daedbab6edc7c7',
     'resource_id': '7b5b73f2-9181-4307-a710-b1aa6472526d',
+    'id': '7b5b73f2-9181-4307-a710-b1aa6472526d',
     'container_format': 'aki',
     'created_at': '2014-06-04T16:26:01',
     'deleted': 'False',
@@ -75,7 +76,7 @@ SECOND_PERIOD = {
 COLLECTED_DATA = [{
     'period': FIRST_PERIOD,
     'usage': {
-        'cpu': [{
+        'instance': [{
             'desc': COMPUTE_METADATA,
             'vol': {
                 'qty': decimal.Decimal(1.0),
@@ -88,22 +89,134 @@ COLLECTED_DATA = [{
     }}, {
     'period': SECOND_PERIOD,
     'usage': {
-        'cpu': [{
+        'instance': [{
             'desc': COMPUTE_METADATA,
             'vol': {
                 'qty': decimal.Decimal(1.0),
                 'unit': 'instance'}}]
-    }}]
+    },
+}]

 RATED_DATA = copy.deepcopy(COLLECTED_DATA)
-RATED_DATA[0]['usage']['cpu'][0]['rating'] = {
+RATED_DATA[0]['usage']['instance'][0]['rating'] = {
     'price': decimal.Decimal('0.42')}
 RATED_DATA[0]['usage']['image.size'][0]['rating'] = {
     'price': decimal.Decimal('0.1337')}
-RATED_DATA[1]['usage']['cpu'][0]['rating'] = {
+RATED_DATA[1]['usage']['instance'][0]['rating'] = {
     'price': decimal.Decimal('0.42')}

+DEFAULT_METRICS_CONF = {
+    "metrics": {
+        "cpu": {
+            "unit": "instance",
+            "alt_name": "instance",
+            "groupby": [
+                "id",
+                "project_id"
+            ],
+            "metadata": [
+                "flavor",
+                "flavor_id",
+                "vcpus"
+            ],
+            "mutate": "NUMBOOL",
+            "extra_args": {
+                "aggregation_method": "max",
+                "resource_type": "instance"
+            }
+        },
+        "image.size": {
+            "unit": "MiB",
+            "factor": "1/1048576",
+            "groupby": [
+                "id",
+                "project_id"
+            ],
+            "metadata": [
+                "container_format",
+                "disk_format"
+            ],
+            "extra_args": {
+                "aggregation_method": "max",
+                "resource_type": "image"
+            }
+        },
+        "volume.size": {
+            "unit": "GiB",
+            "groupby": [
+                "id",
+                "project_id"
+            ],
+            "metadata": [
+                "volume_type"
+            ],
+            "extra_args": {
+                "aggregation_method": "max",
+                "resource_type": "volume"
+            }
+        },
+        "network.outgoing.bytes": {
+            "unit": "MB",
+            "groupby": [
+                "id",
+                "project_id"
+            ],
+            "factor": "1/1000000",
+            "metadata": [
+                "instance_id"
+            ],
+            "extra_args": {
+                "aggregation_method": "max",
+                "resource_type": "instance_network_interface"
+            }
+        },
+        "network.incoming.bytes": {
+            "unit": "MB",
+            "groupby": [
+                "id",
+                "project_id"
+            ],
+            "factor": "1/1000000",
+            "metadata": [
+                "instance_id"
+            ],
+            "extra_args": {
+                "aggregation_method": "max",
+                "resource_type": "instance_network_interface"
+            }
+        },
+        "ip.floating": {
+            "unit": "ip",
+            "groupby": [
+                "id",
+                "project_id"
+            ],
+            "metadata": [
+                "state"
+            ],
+            "mutate": "NUMBOOL",
+            "extra_args": {
+                "aggregation_method": "max",
+                "resource_type": "network"
+            }
+        },
+        "radosgw.objects.size": {
+            "unit": "GiB",
+            "groupby": [
+                "id",
+                "project_id"
+            ],
+            "factor": "1/1073741824",
+            "extra_args": {
+                "aggregation_method": "max",
+                "resource_type": "ceph_account"
+            }
+        }
+    }
+}
+

 def split_storage_data(raw_data):
     final_data = []
     for frame in raw_data:
@@ -122,11 +235,11 @@ def split_storage_data(raw_data):
 # FIXME(sheeprine): storage is not using decimal for rates, we need to
 # transition to decimal.
 STORED_DATA = copy.deepcopy(COLLECTED_DATA)
-STORED_DATA[0]['usage']['cpu'][0]['rating'] = {
+STORED_DATA[0]['usage']['instance'][0]['rating'] = {
     'price': 0.42}
 STORED_DATA[0]['usage']['image.size'][0]['rating'] = {
     'price': 0.1337}
-STORED_DATA[1]['usage']['cpu'][0]['rating'] = {
+STORED_DATA[1]['usage']['instance'][0]['rating'] = {
     'price': 0.42}

 STORED_DATA = split_storage_data(STORED_DATA)
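The DEFAULT_METRICS_CONF sample added above doubles as a reference for the new per-metric schema: collector-agnostic keys (unit, alt_name, factor, groupby, metadata, mutate) sit at the top level, while collector-specific settings live under extra_args. Reading one entry:

```python
# Inspecting one entry of the sample configuration defined above.
cpu = DEFAULT_METRICS_CONF['metrics']['cpu']

print(cpu['alt_name'])                     # 'instance' -- the name under which it is rated
print(cpu['groupby'])                      # ['id', 'project_id']
print(cpu['metadata'])                     # ['flavor', 'flavor_id', 'vcpus']
print(cpu['mutate'])                       # 'NUMBOOL'
print(cpu['extra_args']['resource_type'])  # 'instance' (gnocchi-specific)
```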
@@ -21,18 +21,17 @@ import mock

 from gnocchiclient import exceptions as gexc

 from cloudkitty import storage
-from cloudkitty.storage.hybrid.backends import gnocchi as hgnocchi
 from cloudkitty import tests
-from cloudkitty.tests import samples
+from cloudkitty.tests import test_utils


 class BaseHybridStorageTest(tests.TestCase):

+    @mock.patch('cloudkitty.utils.load_conf', new=test_utils.load_conf)
     def setUp(self):
         super(BaseHybridStorageTest, self).setUp()
         self.conf.set_override('backend', 'hybrid', 'storage')
-        hgnocchi.METRICS_CONF = samples.METRICS_CONF
-        self.storage = storage.get_storage()
+        self.storage = storage.get_storage(conf=test_utils.load_conf())
         with mock.patch.object(
                 self.storage._hybrid_backend, 'init'):
             self.storage.init()
@@ -22,9 +22,9 @@ import sqlalchemy
 import testscenarios

 from cloudkitty import storage
-from cloudkitty.storage.hybrid.backends import gnocchi as hgnocchi
 from cloudkitty import tests
 from cloudkitty.tests import samples
+from cloudkitty.tests import test_utils
 from cloudkitty import utils as ck_utils
@@ -40,13 +40,13 @@ class StorageTest(tests.TestCase):
             cls.storage_scenarios)

     @mock.patch('cloudkitty.storage.hybrid.backends.gnocchi.gclient')
+    @mock.patch('cloudkitty.utils.load_conf', new=test_utils.load_conf)
     def setUp(self, gclient_mock):
         super(StorageTest, self).setUp()
-        hgnocchi.METRICS_CONF = samples.METRICS_CONF
         self._tenant_id = samples.TENANT
         self._other_tenant_id = '8d3ae50089ea4142-9c6e1269db6a0b64'
         self.conf.set_override('backend', self.storage_backend, 'storage')
-        self.storage = storage.get_storage()
+        self.storage = storage.get_storage(conf=test_utils.load_conf())
         self.storage.init()

     def insert_data(self):
@@ -234,10 +234,10 @@ class StorageTotalTest(StorageTest):
         total = self.storage.get_total(
             begin=begin,
             end=end,
-            service='cpu')
+            service='instance')
         self.assertEqual(1, len(total))
         self.assertEqual(0.84, total[0]["rate"])
-        self.assertEqual('cpu', total[0]["res_type"])
+        self.assertEqual('instance', total[0]["res_type"])
         self.assertEqual(begin, total[0]["begin"])
         self.assertEqual(end, total[0]["end"])
@@ -273,7 +273,7 @@ class StorageTotalTest(StorageTest):
         self.assertEqual(begin, total[0]["begin"])
         self.assertEqual(end, total[0]["end"])
         self.assertEqual(1.68, total[1]["rate"])
-        self.assertEqual('cpu', total[1]["res_type"])
+        self.assertEqual('instance', total[1]["res_type"])
         self.assertEqual(begin, total[1]["begin"])
         self.assertEqual(end, total[1]["end"])
@@ -298,12 +298,12 @@ class StorageTotalTest(StorageTest):
         self.assertEqual(end, total[1]["end"])
         self.assertEqual(0.84, total[2]["rate"])
         self.assertEqual(self._other_tenant_id, total[2]["tenant_id"])
-        self.assertEqual('cpu', total[2]["res_type"])
+        self.assertEqual('instance', total[2]["res_type"])
         self.assertEqual(begin, total[2]["begin"])
         self.assertEqual(end, total[2]["end"])
         self.assertEqual(0.84, total[3]["rate"])
         self.assertEqual(self._tenant_id, total[3]["tenant_id"])
-        self.assertEqual('cpu', total[3]["res_type"])
+        self.assertEqual('instance', total[3]["res_type"])
         self.assertEqual(begin, total[3]["begin"])
         self.assertEqual(end, total[3]["end"])
@@ -429,6 +429,9 @@ class StorageDataIntegrityTest(StorageTest):
         if 'image.size' in stored_data[0]['usage']:
             stored_data[0]['usage'], stored_data[1]['usage'] = (
                 stored_data[1]['usage'], stored_data[0]['usage'])
+        if 'image.size' in expected_data[0]['usage']:
+            expected_data[0]['usage'], expected_data[1]['usage'] = (
+                expected_data[1]['usage'], expected_data[0]['usage'])
         self.assertEqual(
             expected_data,
             stored_data)
@@ -38,7 +38,6 @@ class OrchestratorTest(tests.TestCase):
         super(OrchestratorTest, self).setUp()
         messaging_conf = self.useFixture(conffixture.ConfFixture(self.conf))
         messaging_conf.transport_url = 'fake:/'
-        self.conf.set_override('backend', 'keystone', 'tenant_fetcher')
         self.conf.import_group('keystone_fetcher',
                                'cloudkitty.fetcher.keystone')
@@ -24,6 +24,7 @@ import unittest
 import mock
 from oslo_utils import timeutils

+from cloudkitty.tests.samples import DEFAULT_METRICS_CONF
 from cloudkitty import utils as ck_utils
@@ -195,3 +196,7 @@ class ConvertUnitTest(unittest.TestCase):
     def test_convert_decimal(self):
         result = ck_utils.num2decimal(decimal.Decimal(2))
         self.assertEqual(result, decimal.Decimal(2))
+
+
+def load_conf(*args):
+    return DEFAULT_METRICS_CONF
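This module-level load_conf() helper is what the storage and hybrid-storage tests above patch in place of ck_utils.load_conf(), so everything under test sees the same static sample instead of reading a file from disk. A sketch of the wiring, mirroring those setUp methods (the test name is illustrative):

```python
# Swapping the real YAML loader for the static sample, as the tests do.
import mock

from cloudkitty.tests import test_utils


@mock.patch('cloudkitty.utils.load_conf', new=test_utils.load_conf)
def test_something():
    from cloudkitty import utils as ck_utils
    # Any path is ignored; the sample configuration is returned instead.
    assert ck_utils.load_conf('ignored-path') is test_utils.load_conf()
```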
@@ -15,13 +15,23 @@
 #
 # @author: Stéphane Albert
 #
+from oslo_log import log
+
 from cloudkitty import transformer

+LOG = log.getLogger(__name__)
+

 class CloudKittyFormatTransformer(transformer.BaseTransformer):
-    def format_item(self, desc, unit, qty=1.0):
+    def format_item(self, groupby, metadata, unit, qty=1.0):
         data = {}
-        data['desc'] = desc
+        data['groupby'] = groupby
+        data['metadata'] = metadata
+        # For backward compatibility.
+        data['desc'] = data['groupby'].copy()
+        data['desc'].update(data['metadata'])
         data['vol'] = {'unit': unit, 'qty': qty}

         return data
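The transformer now receives groupby and metadata separately but still emits the merged desc for rating modules that predate the split. A quick illustration of the resulting structure (all sample values are made up):

```python
# What the reworked format_item() returns, given an instance of the
# transformer above; sample values are illustrative.
t = CloudKittyFormatTransformer()
item = t.format_item(
    {'id': 'instance-uuid', 'project_id': 'project-uuid'},  # groupby
    {'flavor': 'm1.nano', 'vcpus': '1'},                    # metadata
    'instance',
    qty=1.0)

assert item['groupby'] == {'id': 'instance-uuid', 'project_id': 'project-uuid'}
assert item['metadata'] == {'flavor': 'm1.nano', 'vcpus': '1'}
# Legacy consumers keep working: 'desc' is the union of both dicts.
assert item['desc'] == dict(item['groupby'], **item['metadata'])
assert item['vol'] == {'unit': 'instance', 'qty': 1.0}
```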
@@ -26,74 +26,24 @@ import contextlib
 import datetime
 import decimal
 import fractions
+import math
 import shutil
 import six
 import sys
 import tempfile
 import yaml

-from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import timeutils
 from six import moves
 from stevedore import extension

-COLLECTORS_NAMESPACE = 'cloudkitty.collector.backends'
-
 _ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
 _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'

 LOG = logging.getLogger(__name__)

-collect_opts = [
-    cfg.StrOpt('fetcher',
-               default='keystone',
-               deprecated_for_removal=True,
-               help='Project fetcher.'),
-    cfg.StrOpt('collector',
-               default='gnocchi',
-               deprecated_for_removal=True,
-               help='Data collector.'),
-    cfg.IntOpt('window',
-               default=1800,
-               deprecated_for_removal=True,
-               help='Number of samples to collect per call.'),
-    cfg.IntOpt('period',
-               default=3600,
-               deprecated_for_removal=True,
-               help='Rating period in seconds.'),
-    cfg.IntOpt('wait_periods',
-               default=2,
-               deprecated_for_removal=True,
-               help='Wait for N periods before collecting new data.'),
-    cfg.ListOpt('services',
-                default=[
-                    'compute',
-                    'volume',
-                    'network.bw.in',
-                    'network.bw.out',
-                    'network.floating',
-                    'image',
-                ],
-                deprecated_for_removal=True,
-                help='Services to monitor.'),
-    cfg.StrOpt('metrics_conf',
-               default='/etc/cloudkitty/metrics.yml',
-               help='Metrology configuration file.'),
-]
-
-storage_opts = [
-    cfg.StrOpt('backend',
-               default='sqlalchemy',
-               help='Name of the storage backend driver.')
-]
-
-CONF = cfg.CONF
-CONF.register_opts(collect_opts, 'collect')
-CONF.register_opts(storage_opts, 'storage')
-

 def isotime(at=None, subsecond=False):
     """Stringify time in ISO 8601 format."""
@@ -263,21 +213,22 @@ def refresh_stevedore(namespace=None):
             cache.clear()


-def check_time_state(timestamp=None, period=0, wait_time=0):
+def check_time_state(timestamp=None, period=0, wait_periods=0):
     if not timestamp:
         return get_month_start_timestamp()

     now = utcnow_ts()
     next_timestamp = timestamp + period
+    wait_time = wait_periods * period
     if next_timestamp + wait_time < now:
         return next_timestamp
     return 0


-def get_metrics_conf(conf_path):
-    """Return loaded yaml metrology configuration.
+def load_conf(conf_path):
+    """Return the loaded yaml configuration.

-    In case not found metrics.yml file,
+    In case the yaml file is not found,
     return an empty dict.
     """
     # NOTE(mc): We can not raise any exception in this function as it called
@@ -286,10 +237,9 @@ def get_metrics_conf(conf_path):
     try:
         with open(conf_path) as conf:
             res = yaml.safe_load(conf)
-        res.update({'storage': CONF.storage.backend})
         return res or {}
     except Exception:
-        LOG.warning('Error when trying to retrieve yaml metrology conf file.')
+        LOG.warning("Error when trying to retrieve {} file.".format(conf_path))
         return {}
@@ -306,6 +256,21 @@ def tempdir(**kwargs):
                 six.text_type(e))


+def mutate(value, mode='NONE'):
+    """Mutate value according to the provided mode."""
+    if mode == 'NUMBOOL':
+        return float(value != 0.0)
+    if mode == 'FLOOR':
+        return math.floor(value)
+    if mode == 'CEIL':
+        return math.ceil(value)
+    return value
+
+
 def num2decimal(num):
     """Converts a number into a decimal.Decimal.
@@ -322,7 +287,7 @@ def num2decimal(num):
     return decimal.Decimal(num)


-def convert_unit(value, factor=1, offset=0):
+def convert_unit(value, factor, offset):
     """Return converted value depending on the provided factor and offset."""
     return num2decimal(value) * num2decimal(factor) + num2decimal(offset)
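Two behavioural points are worth calling out in these helpers: mutate() normalises a collected quantity according to the mode declared in metrics.yml, and check_time_state() now derives the wait time from a number of periods instead of taking a precomputed duration in seconds. A sketch of the expected behaviour (mutate is copied from the diff above so the example stays self-contained):

```python
# Expected behaviour of the new mutate() helper.
import math


def mutate(value, mode='NONE'):
    if mode == 'NUMBOOL':
        return float(value != 0.0)
    if mode == 'FLOOR':
        return math.floor(value)
    if mode == 'CEIL':
        return math.ceil(value)
    return value


assert mutate(3.2, 'NUMBOOL') == 1.0  # any non-zero quantity becomes 1.0
assert mutate(0.0, 'NUMBOOL') == 0.0
assert mutate(3.2, 'FLOOR') == 3
assert mutate(3.2, 'CEIL') == 4
assert mutate(3.2) == 3.2             # default 'NONE' mode leaves it alone

# check_time_state(timestamp, period=3600, wait_periods=2) now waits
# 2 * 3600 seconds past the end of a period before releasing the next one.
```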
@@ -1,102 +1,85 @@
-name: OpenStack
-
-fetcher: keystone
-collector: gnocchi
-
-period: 3600
-wait_periods: 2
-window: 1800
-
-services_objects:
-  compute: instance
-  volume: volume
-  network.bw.out: instance_network_interface
-  network.bw.in: instance_network_interface
-  network.floating: network
-  image: image
-  radosgw.usage: ceph_account
-
 metrics:
-  vcpus:
-    resource: instance
-    unit: instance
-    factor: 1
-    aggregation_method: max
-    countable_unit: true
-
-  memory:
-    resource: instance
-    unit: instance
-    factor: 1
-    aggregation_method: max
-    countable_unit: true
-
   cpu:
-    resource: instance
     unit: instance
-    factor: 1
-    aggregation_method: max
-    countable_unit: true
-
-  disk.root.size:
-    resource: instance
-    unit: instance
-    factor: 1
-    aggregation_method: max
-    countable_unit: true
-
-  disk.ephemeral.size:
-    resource: instance
-    unit: instance
-    factor: 1
-    aggregation_method: max
-    countable_unit: true
+    alt_name: instance
+    groupby:
+      - id
+      - project_id
+    metadata:
+      - flavor
+      - flavor_id
+      - vcpus
+    mutate: NUMBOOL
+    extra_args:
+      aggregation_method: max
+      resource_type: instance

   image.size:
-    resource: image
     unit: MiB
     factor: 1/1048576
-    aggregation_method: max
-
-  image.download:
-    resource: image
-    unit: MiB
-    factor: 1/1048576
-    aggregation_method: max
-
-  image.serve:
-    resource: image
-    unit: MiB
-    factor: 1/1048576
-    aggregation_method: max
+    groupby:
+      - id
+      - project_id
+    metadata:
+      - container_format
+      - disk_format
+    extra_args:
+      aggregation_method: max
+      resource_type: image

   volume.size:
-    resource: volume
     unit: GiB
-    factor: 1
-    aggregation_method: max
+    groupby:
+      - id
+      - project_id
+    metadata:
+      - volume_type
+    extra_args:
+      aggregation_method: max
+      resource_type: volume

   network.outgoing.bytes:
-    resource: instance_network_interface
     unit: MB
+    groupby:
+      - id
+      - project_id
     factor: 1/1000000
-    aggregation_method: max
+    metadata:
+      - instance_id
+    extra_args:
+      aggregation_method: max
+      resource_type: instance_network_interface

   network.incoming.bytes:
-    resource: instance_network_interface
     unit: MB
+    groupby:
+      - id
+      - project_id
     factor: 1/1000000
-    aggregation_method: max
+    metadata:
+      - instance_id
+    extra_args:
+      aggregation_method: max
+      resource_type: instance_network_interface

   ip.floating:
-    resource: network
     unit: ip
-    factor: 1
-    aggregation_method: max
-    countable_unit: true
+    groupby:
+      - id
+      - project_id
+    metadata:
+      - state
+    mutate: NUMBOOL
+    extra_args:
+      aggregation_method: max
+      resource_type: network

   radosgw.objects.size:
-    resource: ceph_account
     unit: GiB
+    groupby:
+      - id
+      - project_id
     factor: 1/1073741824
-    aggregation_method: max
+    extra_args:
+      aggregation_method: max
+      resource_type: ceph_account
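With the new layout, a metrics.yml consumer needs no knowledge of fetcher or collector settings, which have moved back to the cloudkitty oslo configuration. A minimal sketch of loading and walking the file, following the load_conf() helper above (the path is the former default and may differ in your deployment):

```python
# Minimal sketch of consuming the new-format metrics.yml.
import yaml

with open('/etc/cloudkitty/metrics.yml') as f:  # assumed path
    conf = yaml.safe_load(f) or {}

for name, metric in conf.get('metrics', {}).items():
    print(name,
          metric['unit'],
          metric.get('alt_name', name),  # fall back to the metric name here
          metric.get('extra_args', {}))  # collector-specific settings
```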
@@ -0,0 +1,4 @@
+features:
+  - |
+    The format of the 'metrics.yml' configuration file has been improved,
+    and is now considered stable.
@@ -27,3 +27,4 @@ SQLAlchemy<1.1.0,>=1.0.10 # MIT
 six>=1.9.0 # MIT
 stevedore>=1.5.0 # Apache-2.0
 tooz>=1.28.0 # Apache-2.0
+voluptuous>=0.11.1,<1.0.0 # BSD-3
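voluptuous is the new hard dependency that backs the on-load validation of metrics.yml. As an illustration of the kind of schema it enables (this is an assumption for demonstration, not CloudKitty's actual schema, which lives in cloudkitty.collector):

```python
# Illustrative voluptuous schema for one metrics.yml entry.
import voluptuous

METRIC_SCHEMA = voluptuous.Schema({
    voluptuous.Required('unit'): str,
    voluptuous.Optional('alt_name'): str,
    voluptuous.Optional('factor'): str,
    voluptuous.Optional('groupby'): [str],
    voluptuous.Optional('metadata'): [str],
    voluptuous.Optional('mutate', default='NONE'):
        voluptuous.In(['NONE', 'NUMBOOL', 'FLOOR', 'CEIL']),
    voluptuous.Optional('extra_args'): dict,
})

METRIC_SCHEMA({'unit': 'instance', 'alt_name': 'instance'})  # passes
try:
    METRIC_SCHEMA({'alt_name': 'instance'})  # missing required 'unit'
except voluptuous.MultipleInvalid as exc:
    print(exc)
```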
@@ -50,7 +50,7 @@ cloudkitty.collector.backends =
     monasca = cloudkitty.collector.monasca:MonascaCollector
     meta = cloudkitty.collector.meta:MetaCollector

-cloudkitty.tenant.fetchers =
+cloudkitty.fetchers =
     fake = cloudkitty.fetcher.fake:FakeFetcher
     keystone = cloudkitty.fetcher.keystone:KeystoneFetcher
     source = cloudkitty.fetcher.source:SourceFetcher