Handle the scope id as a regular groupby attribute in storage

Until now, the scope_id was handled separately by the storage backend. Since state is no longer handled by the storage backend, the scope_id should not be treated differently from other groupby attributes. This simplifies cloudkitty's internal architecture. To make the scope available to the whole orchestrator/API context, a "scope_key" option has been added to cloudkitty.conf, and the option has been removed from the extra_args of the Gnocchi and Monasca collectors.

Change-Id: Idaeb783cf6fb566d795b1a502f45d9260544ce02
Story: 2001372
Task: 26765
parent 689177f27f
commit 0f79fbe944
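
As context for the diff below, here is a minimal sketch (not part of the patch; the helper name scope_filters and the tenant value are illustrative) of the pattern this change applies throughout the API and collectors: resolve the scope key once from the new [collect]/scope_key option, then treat it as an ordinary groupby attribute and filter key.

    # Sketch only: mirrors the patched modules, and assumes cloudkitty
    # is importable so the option can be registered via import_opt.
    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.import_opt('scope_key', 'cloudkitty.collector', 'collect')

    def scope_filters(tenant_id):
        scope_key = CONF.collect.scope_key  # 'project_id' by default
        groupby = [scope_key]
        group_filters = {scope_key: tenant_id} if tenant_id else None
        return groupby, group_filters
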
@@ -18,6 +18,7 @@
 import datetime
 import decimal
 
+from oslo_config import cfg
 from oslo_log import log as logging
 import pecan
 from pecan import rest

@@ -30,6 +31,10 @@ from cloudkitty import utils as ck_utils
 
 LOG = logging.getLogger(__name__)
 
+CONF = cfg.CONF
+
+CONF.import_opt('scope_key', 'cloudkitty.collector', 'collect')
+
 
 class InvalidFilter(Exception):
     """Exception raised when a storage filter is invalid"""

@@ -94,8 +99,9 @@ class ReportController(rest.RestController):
         # FIXME(sheeprine): We should filter on user id.
         # Use keystone token information by default but make it overridable and
         # enforce it by policy engine
-        groupby = ['project_id']
-        group_filters = {'project_id': tenant_id} if tenant_id else None
+        scope_key = CONF.collect.scope_key
+        groupby = [scope_key]
+        group_filters = {scope_key: tenant_id} if tenant_id else None
         total_resources = storage.total(
             groupby=groupby,
             begin=begin, end=end,

@@ -133,12 +139,13 @@ class ReportController(rest.RestController):
                   {"tenant_id": tenant_id})
         storage = pecan.request.storage_backend
 
+        scope_key = CONF.collect.scope_key
         storage_groupby = []
         if groupby is not None and 'tenant_id' in groupby:
-            storage_groupby.append('project_id')
+            storage_groupby.append(scope_key)
         if groupby is not None and 'res_type' in groupby:
             storage_groupby.append('type')
-        group_filters = {'project_id': tenant_id} if tenant_id else None
+        group_filters = {scope_key: tenant_id} if tenant_id else None
         results = storage.total(
             groupby=storage_groupby,
             begin=begin, end=end,

@@ -149,7 +156,7 @@ class ReportController(rest.RestController):
         for res in results:
             kwargs = {
                 'res_type': res.get('type') or res.get('res_type'),
-                'tenant_id': res.get('project_id') or res.get('tenant_id'),
+                'tenant_id': res.get(scope_key) or res.get('tenant_id'),
                 'begin': res['begin'],
                 'end': res['end'],
                 'rate': res['rate'],

@@ -18,6 +18,7 @@
 import datetime
 import decimal
 
+from oslo_config import cfg
 import pecan
 from pecan import rest
 from wsme import types as wtypes

@@ -29,6 +30,11 @@ from cloudkitty import storage
 from cloudkitty import utils as ck_utils
 
 
+CONF = cfg.CONF
+
+CONF.import_opt('scope_key', 'cloudkitty.collector', 'collect')
+
+
 class DataFramesController(rest.RestController):
     """REST Controller to access stored data frames."""
 

@@ -50,9 +56,10 @@ class DataFramesController(rest.RestController):
 
         policy.authorize(pecan.request.context, 'storage:list_data_frames', {})
 
+        scope_key = CONF.collect.scope_key
         backend = pecan.request.storage_backend
         dataframes = []
-        group_filters = {'project_id': tenant_id} if tenant_id else None
+        group_filters = {scope_key: tenant_id} if tenant_id else None
 
         if begin:
             begin = ck_utils.dt2ts(begin)

@@ -84,7 +91,7 @@
                     volume=data['vol']['qty'],
                     rating=price)
                 if frame_tenant is None:
-                    frame_tenant = data['scope_id']
+                    frame_tenant = desc[scope_key]
                 resources.append(resource)
             dataframe = storage_models.DataFrame(
                 begin=ck_utils.iso2dt(frame['period']['begin']),

@@ -47,6 +47,10 @@ collect_opts = [
     cfg.StrOpt('metrics_conf',
                default='/etc/cloudkitty/metrics.yml',
                help='Metrology configuration file.'),
+    cfg.StrOpt('scope_key',
+               default='project_id',
+               help='Key defining a scope. project_id or domain_id for '
+                    'OpenStack, but can be anything.'),
 ]
 
 CONF = cfg.CONF

@@ -78,10 +78,6 @@ GNOCCHI_EXTRA_SCHEMA = {
         # Due to Gnocchi model, metric are grouped by resource.
         # This parameter permits to adapt the key of the resource identifier
         Required('resource_key', default='id'): All(str, Length(min=1)),
-        # This is needed to allow filtering on the project for the Openstack
-        # usecase.
-        # NOTE(MCO): maybe be removed in following releases
-        Required('scope_key', default='project_id'): All(str, Length(min=1)),
         Required('aggregation_method', default='max'):
             In(['max', 'mean', 'min']),
     },

@@ -130,12 +126,16 @@ class GnocchiCollector(collector.BaseCollector):
         metric_schema = Schema(collector.METRIC_BASE_SCHEMA).extend(
             GNOCCHI_EXTRA_SCHEMA)
 
+        scope_key = CONF.collect.scope_key
+
         output = dict()
         for metric_name, metric in conf['metrics'].items():
             output[metric_name] = metric_schema(metric)
             output[metric_name]['groupby'].append(
                 output[metric_name]['extra_args']['resource_key']
             )
+            if scope_key not in output[metric_name]['groupby']:
+                output[metric_name]['groupby'].append(scope_key)
         return output
 
     @classmethod

@@ -220,9 +220,10 @@ class GnocchiCollector(collector.BaseCollector):
         query_parameters = self._generate_time_filter(start, end)
 
         resource_type = extra_args['resource_type']
+        scope_key = CONF.collect.scope_key
 
         if project_id:
-            kwargs = {extra_args['scope_key']: project_id}
+            kwargs = {scope_key: project_id}
             query_parameters.append(self.gen_filter(**kwargs))
         if q_filter:
             query_parameters.append(q_filter)

@@ -261,13 +262,15 @@
 
         # get ressource type
         resource_type = extra_args['resource_type']
+        scope_key = CONF.collect.scope_key
+
         # build search query using ressource type and project_id if provided
         query_parameters = list()
         query_parameters.append(
             self.gen_filter(cop="=", type=resource_type))
 
         if project_id:
-            kwargs = {extra_args['scope_key']: project_id}
+            kwargs = {scope_key: project_id}
             query_parameters.append(self.gen_filter(**kwargs))
         if q_filter:
             query_parameters.append(q_filter)

@@ -68,9 +68,6 @@ MONASCA_EXTRA_SCHEMA = {
         # modified in a standard OpenStack installation
         Required('resource_key', default='resource_id'):
             All(str, Length(min=1)),
-        # This is needed to allow filtering on the project for the Openstack
-        # usecase. May be removed in following releases
-        Required('scope_key', default='project_id'): All(str, Length(min=1)),
         Required('aggregation_method', default='max'):
             In(['max', 'mean', 'min']),
     },

@@ -94,9 +91,13 @@ class MonascaCollector(collector.BaseCollector):
         metric_schema = Schema(collector.METRIC_BASE_SCHEMA).extend(
             MONASCA_EXTRA_SCHEMA)
 
+        scope_key = CONF.collect.scope_key
+
         output = dict()
         for metric_name, metric in conf['metrics'].items():
             output[metric_name] = metric_schema(metric)
+            if scope_key not in output[metric_name]['groupby']:
+                output[metric_name]['groupby'].append(scope_key)
         return output
 
     def __init__(self, transformers, **kwargs):

@@ -154,10 +155,10 @@
         return tmp._get_metadata(resource_type, transformers, conf)
 
     def _get_dimensions(self, metric_name, project_id, q_filter):
-        extra_args = self.conf[metric_name]['extra_args']
         dimensions = {}
+        scope_key = CONF.collect.scope_key
         if project_id:
-            dimensions[extra_args['scope_key']] = project_id
+            dimensions[scope_key] = project_id
         if q_filter:
            dimensions.update(q_filter)
        return dimensions

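To illustrate the collector-side effect (the values below are assumptions, not from the patch): _get_dimensions now keys the Monasca dimensions filter on the configured scope key instead of on a per-metric extra_args entry.

    # Sketch only: stand-in values for CONF.collect.scope_key and the
    # project; the resulting dict is what gets passed to Monasca as a
    # dimensions filter when querying measurements.
    scope_key = 'project_id'  # default of [collect]/scope_key
    dimensions = {scope_key: '3d9a1b33-482f-42fd-aef9-b575a3da9369'}
    # Setting scope_key = domain_id in cloudkitty.conf would change the
    # filter key with no edit to metrics.yml.
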
@@ -89,12 +89,6 @@ class V1StorageAdapter(storage_v2.BaseStorage):
             res_type=metric_types,
             tenant_id=tenant_id)
 
-        for frame in frames:
-            for _, data_list in frame['usage'].items():
-                for data in data_list:
-                    data['scope_id'] = (data.get('project_id')
-                                        or data.get('tenant_id'))
-
         return {
             'total': len(frames),
             'dataframes': frames,

@@ -48,8 +48,11 @@ class BaseStorage(object):
     def init(self):
         """Called for storage backend initialization"""
 
+    # NOTE(peschk_l): scope_id must not be used by any v2 storage backend. It
+    # is only present for backward compatibility with the v1 storage. It will
+    # be removed together with the v1 storage
     @abc.abstractmethod
-    def push(self, dataframes, scope_id):
+    def push(self, dataframes, scope_id=None):
         """Pushes dataframes to the storage backend
 
         A dataframe has the following format::

@@ -85,8 +88,6 @@
 
         :param dataframes: List of dataframes
         :type dataframes: list
-        :param scope_id: ID of the scope (A project ID for example).
-        :type scope_id: str
         """
 
     @abc.abstractmethod

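A hedged sketch of what the amended contract means for a v2 backend author (the class below is illustrative, and the other abstract methods of BaseStorage are omitted for brevity): scope_id must still be accepted, but only for v1 compatibility; the scope now travels inside each dataframe's groupby attributes.

    # Sketch only, not a real backend.
    class EchoStorage(BaseStorage):
        def init(self):
            pass

        def push(self, dataframes, scope_id=None):
            # scope_id is deliberately ignored: v2 backends read the
            # scope (e.g. project_id) from the dataframes themselves.
            if not isinstance(dataframes, list):
                dataframes = [dataframes]
            for frame in dataframes:
                print(frame['period'], list(frame['usage'].keys()))
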
@@ -92,7 +92,7 @@ class GnocchiResource(object):
     It provides utils for resource_type/resource creation and identifying.
     """
 
-    def __init__(self, name, metric, conn, scope_id):
+    def __init__(self, name, metric, conn):
         """Resource_type name, metric, gnocchiclient"""
 
         self.name = name

@@ -100,7 +100,6 @@
         self.unit = metric['vol']['unit']
         self.groupby = {
             k: v if v else '' for k, v in metric['groupby'].items()}
-        self.groupby['ck_scope_id'] = scope_id
         self.metadata = {
             k: v if v else '' for k, v in metric['metadata'].items()}
         self._trans_groupby = {

@@ -369,8 +368,8 @@ class GnocchiStorage(BaseStorage):
     def init(self):
         self._check_archive_policy()
 
-    def _check_resource(self, metric_name, metric, scope_id):
-        resource = GnocchiResource(metric_name, metric, self._conn, scope_id)
+    def _check_resource(self, metric_name, metric):
+        resource = GnocchiResource(metric_name, metric, self._conn)
         if resource in self._cacher:
             return self._cacher.get(resource)
         resource.create()

@@ -389,7 +388,9 @@
             time.sleep(1)
         self._conn.metric.batch_metrics_measures(measures)
 
-    def push(self, dataframes, scope_id):
+    # Do not use scope_id, as it is deprecated and will be
+    # removed together with the v1 storage
+    def push(self, dataframes, scope_id=None):
         if not isinstance(dataframes, list):
             dataframes = [dataframes]
         measures = {}

@@ -398,8 +399,7 @@
             timestamp = dataframe['period']['begin']
             for metric_name, metrics in dataframe['usage'].items():
                 for metric in metrics:
-                    resource = self._check_resource(
-                        metric_name, metric, scope_id)
+                    resource = self._check_resource(metric_name, metric)
                     if resource.needs_update:
                         resource.update(metric)
                     if not resource.qty or not resource.cost:

@@ -465,8 +465,7 @@
     def _get_resource_frame(self,
                             cost_measure,
                             qty_measure,
-                            resource,
-                            scope_id):
+                            resource):
         # Getting price
         price = decimal.Decimal(cost_measure[2])
         price_dict = {'price': float(price)}

@@ -491,11 +490,9 @@
             'metadata': metadata,
             'vol': vol_dict,
             'rating': price_dict,
-            'scope_id': scope_id,
         }
 
     def _to_cloudkitty(self,
-                       scope_id,
                        res_type,
                        resource,
                        cost_measure,

@@ -512,8 +509,7 @@
 
         return {
             'usage': {res_type: [
-                self._get_resource_frame(
-                    cost_measure, qty_measure, resource, scope_id)]
+                self._get_resource_frame(cost_measure, qty_measure, resource)],
             },
             'period': period_dict,
         }

@@ -559,14 +555,12 @@
 
             # Raw metrics do not contain all required attributes
             resource = resource_info[resource_id]
-            scope_id = resource[GROUPBY_NAME_ROOT + 'ck_scope_id']
 
             dataframe = dataframes.get(measure['cost'][0])
             ck_resource_type_name = resource_type.replace(
                 RESOURCE_TYPE_NAME_ROOT, '')
             if dataframe is None:
                 dataframes[measure['cost'][0]] = self._to_cloudkitty(
-                    scope_id,
                     ck_resource_type_name,
                     resource,
                     measure['cost'],

@@ -574,11 +568,11 @@
             elif dataframe['usage'].get(ck_resource_type_name) is None:
                 dataframe['usage'][ck_resource_type_name] = [
                     self._get_resource_frame(
-                        measure['cost'], measure['qty'], resource, scope_id)]
+                        measure['cost'], measure['qty'], resource)]
             else:
                 dataframe['usage'][ck_resource_type_name].append(
                     self._get_resource_frame(
-                        measure['cost'], measure['qty'], resource, scope_id))
+                        measure['cost'], measure['qty'], resource))
         return self._dataframes_to_list(dataframes)
 
     @staticmethod

@@ -277,7 +277,7 @@ class QuoteFakeRPC(BaseFakeRPC):
 
 
 class BaseStorageDataFixture(fixture.GabbiFixture):
-    def create_fake_data(self, begin, end):
+    def create_fake_data(self, begin, end, project_id):
         data = [{
             "period": {
                 "begin": begin,

@@ -287,7 +287,8 @@ class BaseStorageDataFixture(fixture.GabbiFixture):
                 {
                     "desc": {
                         "dummy": True,
-                        "fake_meta": 1.0},
+                        "fake_meta": 1.0,
+                        "project_id": project_id},
                     "vol": {
                         "qty": 1,
                         "unit": "nothing"},

@@ -301,7 +302,8 @@ class BaseStorageDataFixture(fixture.GabbiFixture):
                 {
                     "desc": {
                         "dummy": True,
-                        "fake_meta": 1.0},
+                        "fake_meta": 1.0,
+                        "project_id": project_id},
                     "vol": {
                         "qty": 1,
                         "unit": "nothing"},

@@ -341,13 +343,13 @@ class StorageDataFixture(BaseStorageDataFixture):
         for i in range(data_ts,
                        data_ts + data_duration,
                        3600):
-            data = self.create_fake_data(i, i + 3600)
+            data = self.create_fake_data(i, i + 3600, tenant_list[0])
             self.storage.push(data, tenant_list[0])
         half_duration = int(data_duration / 2)
         for i in range(data_ts,
                        data_ts + half_duration,
                        3600):
-            data = self.create_fake_data(i, i + 3600)
+            data = self.create_fake_data(i, i + 3600, tenant_list[1])
             self.storage.push(data, tenant_list[1])
 
 
@@ -357,9 +359,9 @@ class NowStorageDataFixture(BaseStorageDataFixture):
         for i in range(begin,
                        begin + 3600 * 12,
                        3600):
-            data = self.create_fake_data(i, i + 3600)
-            self.storage.push(data,
-                              '3d9a1b33-482f-42fd-aef9-b575a3da9369')
+            project_id = '3d9a1b33-482f-42fd-aef9-b575a3da9369'
+            data = self.create_fake_data(i, i + 3600, project_id)
+            self.storage.push(data, project_id)
 
 
 class CORSConfigFixture(fixture.GabbiFixture):

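For reference, a frame generated by the amended fixture now carries the scope inside each data point's "desc"; a sketch of one such frame (the metric name "fake_metric" and the timestamps are assumptions, the rest mirrors the fixture):

    frame = {
        "period": {"begin": 1538000000, "end": 1538003600},
        "usage": {
            "fake_metric": [{
                "desc": {"dummy": True, "fake_meta": 1.0,
                         "project_id": "3d9a1b33-482f-42fd-aef9-b575a3da9369"},
                "vol": {"qty": 1, "unit": "nothing"},
            }],
        },
    }
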
releasenotes/notes/add-scope-key-58135c2a5c6dae68.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+other:
+  - |
+    The "scope_key" option is now defined in cloudkitty.conf and has been
+    removed from the Gnocchi and Monasca collectors' extra_args.