diff --git a/cloudkitty/api/v2/summary/summary.py b/cloudkitty/api/v2/summary/summary.py
index 197f474a..4bb17830 100644
--- a/cloudkitty/api/v2/summary/summary.py
+++ b/cloudkitty/api/v2/summary/summary.py
@@ -26,6 +26,7 @@ class Summary(base.BaseResource):
 
     @api_utils.paginated
     @api_utils.add_input_schema('query', {
+        voluptuous.Optional('custom_fields'): api_utils.SingleQueryParam(str),
         voluptuous.Optional('groupby'): api_utils.MultiQueryParam(str),
         voluptuous.Optional('filters'):
             api_utils.SingleDictQueryParam(str, str),
@@ -34,9 +35,8 @@ class Summary(base.BaseResource):
         voluptuous.Optional('end'): api_utils.SingleQueryParam(
             tzutils.dt_from_iso),
     })
-    def get(self, groupby=None, filters={},
-            begin=None, end=None,
-            offset=0, limit=100):
+    def get(self, custom_fields=None, groupby=None, filters={}, begin=None,
+            end=None, offset=0, limit=100):
         policy.authorize(
             flask.request.context,
             'summary:get_summary',
@@ -55,15 +55,20 @@ class Summary(base.BaseResource):
             filters['project_id'] = flask.request.context.project_id
 
         metric_types = [filters.pop('type')] if 'type' in filters else None
-        total = self._storage.total(
-            begin=begin, end=end,
-            groupby=groupby,
-            filters=filters,
-            metric_types=metric_types,
-            offset=offset,
-            limit=limit,
-            paginate=True,
-        )
+        arguments = {
+            'begin': begin,
+            'end': end,
+            'groupby': groupby,
+            'filters': filters,
+            'metric_types': metric_types,
+            'offset': offset,
+            'limit': limit,
+            'paginate': True
+        }
+        if custom_fields:
+            arguments['custom_fields'] = custom_fields
+
+        total = self._storage.total(**arguments)
         columns = []
         if len(total['results']) > 0:
             columns = list(total['results'][0].keys())
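The hunk above exposes 'custom_fields' as an optional query parameter of the v2 summary endpoint and only forwards it to the storage backend when it is provided. A minimal sketch of a client call, assuming the /v2/summary route and Keystone token authentication (the endpoint root and token below are placeholders, not values from this change):

    # Sketch only: endpoint root and token are hypothetical placeholders.
    import requests

    CLOUDKITTY_ENDPOINT = 'http://cloudkitty.example.com:8889'
    TOKEN = 'replace-with-a-keystone-token'

    params = {
        'groupby': 'type',
        'custom_fields': 'last(qty) AS qty, SUM(price) AS rate',
        'begin': '2019-06-01T00:00:00+00:00',
        'end': '2019-07-01T00:00:00+00:00',
        'limit': 100,
        'offset': 0,
    }
    response = requests.get(CLOUDKITTY_ENDPOINT + '/v2/summary',
                            headers={'X-Auth-Token': TOKEN},
                            params=params)
    # The resource builds its column list from the keys of the first result
    # row, so custom aggregations show up as extra columns in the response.
    print(response.json())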
diff --git a/cloudkitty/storage/__init__.py b/cloudkitty/storage/__init__.py
index 91d962ab..2b66dfe9 100644
--- a/cloudkitty/storage/__init__.py
+++ b/cloudkitty/storage/__init__.py
@@ -127,28 +127,31 @@ class V1StorageAdapter(storage_v2.BaseStorage):
         if end:
             elem['end'] = tzutils.utc_to_local(end)
 
-    def total(self, groupby=None,
-              begin=None, end=None,
-              metric_types=None,
-              filters=None,
-              offset=0, limit=100, paginate=True):
-        tenant_id = filters.get('project_id') if filters else None
+    def total(self, **arguments):
+        filters = arguments.pop('filters', None)
+        if filters:
+            tenant_id = filters.get('project_id')
 
-        storage_gby = []
-        if groupby:
-            for elem in set(groupby):
-                if elem == 'type':
-                    storage_gby.append('res_type')
-                elif elem == 'project_id':
-                    storage_gby.append('tenant_id')
-        storage_gby = ','.join(storage_gby) if storage_gby else None
-        metric_types = self._check_metric_types(metric_types)
-        total = self.storage.get_total(
-            tzutils.local_to_utc(begin, naive=True),
-            tzutils.local_to_utc(end, naive=True),
-            tenant_id=tenant_id,
-            service=metric_types,
-            groupby=storage_gby)
+            arguments['tenant_id'] = tenant_id
+        else:
+            tenant_id = None
+
+        groupby = arguments.get('groupby')
+        storage_gby = self.get_storage_groupby(groupby)
+
+        metric_types = arguments.pop('metric_types', None)
+        if metric_types:
+            metric_types = self._check_metric_types(metric_types)
+            arguments['service'] = metric_types
+
+        arguments['begin'] = tzutils.local_to_utc(
+            arguments['begin'], naive=True)
+        arguments['end'] = tzutils.local_to_utc(
+            arguments['end'], naive=True)
+
+        arguments['groupby'] = storage_gby
+
+        total = self.storage.get_total(**arguments)
 
         for t in total:
             if t.get('tenant_id') is None:
@@ -165,9 +168,19 @@ class V1StorageAdapter(storage_v2.BaseStorage):
             'results': total,
         }
 
+    @staticmethod
+    def get_storage_groupby(groupby):
+        storage_gby = []
+        if groupby:
+            for elem in set(groupby):
+                if elem == 'type':
+                    storage_gby.append('res_type')
+                elif elem == 'project_id':
+                    storage_gby.append('tenant_id')
+        return ','.join(storage_gby) if storage_gby else None
+
     def get_tenants(self, begin, end):
-        tenants = self.storage.get_tenants(begin, end)
-        return tenants
+        return self.storage.get_tenants(begin, end)
 
     def get_state(self, tenant_id=None):
         return self.storage.get_state(tenant_id)
diff --git a/cloudkitty/storage/v1/sqlalchemy/__init__.py b/cloudkitty/storage/v1/sqlalchemy/__init__.py
index 78625073..8920e023 100644
--- a/cloudkitty/storage/v1/sqlalchemy/__init__.py
+++ b/cloudkitty/storage/v1/sqlalchemy/__init__.py
@@ -82,8 +82,8 @@ class SQLAlchemyStorage(storage.BaseStorage):
         if r:
             return r.begin
 
-    def get_total(self, begin=None, end=None, tenant_id=None,
-                  service=None, groupby=None):
+    def get_total(self, begin=None, end=None, tenant_id=None, service=None,
+                  groupby=None):
         session = db.get_session()
         querymodels = [
             sqlalchemy.func.sum(self.frame_model.rate).label('rate')
diff --git a/cloudkitty/storage/v2/__init__.py b/cloudkitty/storage/v2/__init__.py
index 858d5881..fe21268f 100644
--- a/cloudkitty/storage/v2/__init__.py
+++ b/cloudkitty/storage/v2/__init__.py
@@ -100,11 +100,9 @@ class BaseStorage(object):
         """
 
     @abc.abstractmethod
-    def total(self, groupby=None,
-              begin=None, end=None,
-              metric_types=None,
-              filters=None,
-              offset=0, limit=1000, paginate=True):
+    def total(self, groupby=None, begin=None, end=None, metric_types=None,
+              filters=None, custom_fields=None, offset=0, limit=1000,
+              paginate=True):
         """Returns a grouped total for given groupby.
 
         :param groupby: Attributes on which to group by. These attributes must
@@ -116,10 +114,14 @@ class BaseStorage(object):
         :type begin: datetime
         :param end: End date
        :type end: datetime
-        :param filters: Attributes to filter on. ex: {'flavor_id': '42'}
-        :type filters: dict
         :param metric_types: Metric type to filter on.
         :type metric_types: str or list
+        :param custom_fields: the custom fields that one desires to add to
+                              the summary report. Each driver must handle
+                              these values by itself.
+        :type custom_fields: list of strings
+        :param filters: Attributes to filter on. ex: {'flavor_id': '42'}
+        :type filters: dict
         :param offset: Offset for pagination
         :type offset: int
         :param limit: Maximum amount of elements to return
@@ -127,6 +129,7 @@
         :param paginate: Defaults to True. If False, all found results
                          will be returned.
         :type paginate: bool
+        :rtype: dict
 
         Returns a dict with the following format::
 
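For a driver that follows the contract documented above, the returned dictionary looks roughly like the hand-written illustration below. It assumes the InfluxDB backend's default custom_fields and groupby=['type']; the values are placeholders, not output produced by this change:

    # Illustration of the ":rtype: dict" shape with placeholder values.
    example_total = {
        'total': 1,  # number of aggregated groups returned
        'results': [
            {
                'begin': '2019-06-01T00:00:00+00:00',  # period start
                'end': '2019-07-01T00:00:00+00:00',    # period end
                'qty': 42.0,         # SUM(qty), or whatever custom_fields requests
                'rate': 1.5,         # SUM(price) AS rate by default
                'type': 'instance',  # present because of groupby=['type']
            },
        ],
    }

    # The v2 API resource shown earlier derives its column list the same way:
    columns = list(example_total['results'][0].keys())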
diff --git a/cloudkitty/storage/v2/elasticsearch/__init__.py b/cloudkitty/storage/v2/elasticsearch/__init__.py
index 97bda166..058e9126 100644
--- a/cloudkitty/storage/v2/elasticsearch/__init__.py
+++ b/cloudkitty/storage/v2/elasticsearch/__init__.py
@@ -25,7 +25,6 @@ from cloudkitty.utils import tz as tzutils
 
 LOG = log.getLogger(__name__)
 
-
 CONF = cfg.CONF
 ELASTICSEARCH_STORAGE_GROUP = 'storage_elasticsearch'
 
@@ -34,7 +33,7 @@ elasticsearch_storage_opts = [
     cfg.StrOpt(
         'host',
         help='Elasticsearch host, along with port and protocol. '
-             'Defaults to http://localhost:9200',
+        'Defaults to http://localhost:9200',
         default='http://localhost:9200'),
     cfg.StrOpt(
         'index_name',
@@ -42,22 +41,21 @@ elasticsearch_storage_opts = [
         default='cloudkitty'),
     cfg.BoolOpt('insecure',
                 help='Set to true to allow insecure HTTPS '
-                     'connections to Elasticsearch',
+                'connections to Elasticsearch',
                 default=False),
     cfg.StrOpt('cafile',
                help='Path of the CA certificate to trust for '
-                    'HTTPS connections.',
+               'HTTPS connections.',
                default=None),
     cfg.IntOpt('scroll_duration',
                help="Duration (in seconds) for which the ES scroll contexts "
-                    "should be kept alive.",
+               "should be kept alive.",
                advanced=True,
                default=30, min=0, max=300),
 ]
 
 CONF.register_opts(elasticsearch_storage_opts, ELASTICSEARCH_STORAGE_GROUP)
 
-
 CLOUDKITTY_INDEX_MAPPING = {
     "dynamic_templates": [
         {
@@ -190,16 +188,16 @@ class ElasticsearchStorage(v2_storage.BaseStorage):
             output[key] = value
         return output
 
-    def total(self, groupby=None,
-              begin=None, end=None,
-              metric_types=None,
-              filters=None,
-              offset=0, limit=1000, paginate=True):
+    def total(self, groupby=None, begin=None, end=None, metric_types=None,
+              filters=None, custom_fields=None, offset=0, limit=1000,
+              paginate=True):
         begin, end = self._local_to_utc(begin or tzutils.get_month_start(),
                                         end or tzutils.get_next_month())
-        total, docs = self._conn.total(
-            begin, end, metric_types, filters, groupby,
-            offset=offset, limit=limit, paginate=paginate)
+
+        total, docs = self._conn.total(begin, end, metric_types, filters,
+                                       groupby, custom_fields=custom_fields,
+                                       offset=offset, limit=limit,
+                                       paginate=paginate)
         return {
             'total': total,
             'results': [self._doc_to_total_result(doc, begin, end)
diff --git a/cloudkitty/storage/v2/elasticsearch/client.py b/cloudkitty/storage/v2/elasticsearch/client.py
index e9735e90..79651b8c 100644
--- a/cloudkitty/storage/v2/elasticsearch/client.py
+++ b/cloudkitty/storage/v2/elasticsearch/client.py
@@ -349,7 +349,12 @@ class ElasticsearchClient(object):
         return self._req(self._sess.post, url, data, None)
 
     def total(self, begin, end, metric_types, filters, groupby,
-              offset=0, limit=1000, paginate=True):
+              custom_fields=None, offset=0, limit=1000, paginate=True):
+
+        if custom_fields:
+            LOG.warning("'custom_fields' are not implemented yet for "
+                        "Elasticsearch. Therefore, the custom fields [%s] "
+                        "provided by the user will be ignored.", custom_fields)
         if not paginate:
             offset = 0
 
diff --git a/cloudkitty/storage/v2/influx.py b/cloudkitty/storage/v2/influx.py
index a1d706ec..4d08f4c1 100644
--- a/cloudkitty/storage/v2/influx.py
+++ b/cloudkitty/storage/v2/influx.py
@@ -187,8 +187,12 @@ class InfluxClient(object):
                                for mtype in types)
         return ' AND (' + type_query + ')'
 
-    def get_total(self, types, begin, end, groupby=None, filters=None):
-        query = 'SELECT SUM(qty) AS qty, SUM(price) AS price FROM "dataframes"'
+    def get_total(self, types, begin, end, custom_fields,
+                  groupby=None, filters=None):
+
+        self.validate_custom_fields(custom_fields)
+
+        query = 'SELECT %s FROM "dataframes"' % custom_fields
         query += self._get_time_query(begin, end)
         query += self._get_filter_query(filters)
         query += self._get_type_query(types)
@@ -204,7 +208,21 @@ class InfluxClient(object):
             query += ' GROUP BY ' + groupby_query
 
         query += ';'
-        return self._conn.query(query)
+        total = self._conn.query(query)
+        LOG.debug(
+            "Data [%s] received when executing query [%s].", total, query)
+        return total
+
+    @staticmethod
+    def validate_custom_fields(custom_fields):
+        forbidden_clauses = ["select", "from", "drop", "delete", "create",
+                             "alter", "insert", "update"]
+        for field in custom_fields.split(","):
+            if field.lower() in forbidden_clauses:
+                raise RuntimeError(
+                    "Clause [%s] is not allowed in the custom fields of "
+                    "the summary GET report. The following clauses are "
+                    "not allowed: %s." % (field, forbidden_clauses))
 
     def retrieve(self,
                  types,
@@ -349,24 +367,25 @@ class InfluxStorage(v2_storage.BaseStorage):
         output = {
             'begin': begin,
             'end': end,
-            'qty': point['qty'],
-            'rate': point['price'],
         }
+
+        for key in point.keys():
+            if "time" != key:
+                output[key] = point[key]
+
         if groupby:
             for group in _sanitized_groupby(groupby):
                 output[group] = series_groupby.get(group, '')
         return output
 
-    def total(self, groupby=None,
-              begin=None, end=None,
-              metric_types=None,
-              filters=None,
-              offset=0, limit=1000, paginate=True):
+    def total(self, groupby=None, begin=None, end=None, metric_types=None,
+              filters=None, offset=0, limit=1000, paginate=True,
+              custom_fields="SUM(qty) AS qty, SUM(price) AS rate"):
         begin, end = self._check_begin_end(begin, end)
-        total = self._conn.get_total(
-            metric_types, begin, end, groupby, filters)
+        total = self._conn.get_total(metric_types, begin, end,
+                                     custom_fields, groupby, filters)
 
         output = []
         for (series_name, series_groupby), points in total.items():
@@ -374,7 +393,12 @@
             # NOTE(peschk_l): InfluxDB returns all timestamps for a given
             # period and interval, even those with no data. This filters
             # out periods with no data
-            if point['qty'] is not None and point['price'] is not None:
+
+            # NOTE(rafaelweingartner): the summary get API allows users to
+            # customize the report. Therefore, a data point is only ignored
+            # if all of its entries are None; otherwise, it is presented to
+            # the user.
+            if [k for k in point.keys() if point[k]]:
                 output.append(self._get_total_elem(
                     tzutils.utc_to_local(begin),
                     tzutils.utc_to_local(end),
@@ -385,6 +409,7 @@ class InfluxStorage(v2_storage.BaseStorage):
         groupby = _sanitized_groupby(groupby)
         if groupby:
             output.sort(key=lambda x: [x[group] for group in groupby])
+
         return {
             'total': len(output),
             'results': output[offset:offset + limit] if paginate else output,
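Because custom_fields is interpolated directly into the InfluxQL statement, the validation above is the only guard against unwanted clauses. A short sketch of how it behaves; the queries in the comments are approximations of what get_total() builds, and the snippet assumes CloudKitty and its InfluxDB client dependency are importable:

    from cloudkitty.storage.v2.influx import InfluxClient

    # Default report: SELECT SUM(qty) AS qty, SUM(price) AS rate FROM "dataframes" ...
    # Custom report:  SELECT last(qty) AS qty, SUM(price) AS rate FROM "dataframes" ...

    # Accepted: no field matches a forbidden clause.
    InfluxClient.validate_custom_fields('last(qty) AS qty, SUM(price) AS rate')

    try:
        InfluxClient.validate_custom_fields('drop')  # forbidden clause
    except RuntimeError as error:
        print(error)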
diff --git a/cloudkitty/tests/storage/v2/es_utils.py b/cloudkitty/tests/storage/v2/es_utils.py
index 86921569..40e27c38 100644
--- a/cloudkitty/tests/storage/v2/es_utils.py
+++ b/cloudkitty/tests/storage/v2/es_utils.py
@@ -65,7 +65,7 @@ class FakeElasticsearchClient(client.ElasticsearchClient):
         return len(output), output
 
     def total(self, begin, end, metric_types, filters, groupby,
-              offset=0, limit=1000, paginate=True):
+              custom_fields=None, offset=0, limit=1000, paginate=True):
         filter_func = functools.partial(
             self.__filter_func, begin, end, filters, metric_types)
         docs = list(filter(filter_func, self._docs))
diff --git a/cloudkitty/tests/storage/v2/influx_utils.py b/cloudkitty/tests/storage/v2/influx_utils.py
index de99c7ec..76fce87a 100644
--- a/cloudkitty/tests/storage/v2/influx_utils.py
+++ b/cloudkitty/tests/storage/v2/influx_utils.py
@@ -89,7 +89,8 @@ class FakeInfluxClient(InfluxClient):
             series.append(target_serie)
         return target_serie
 
-    def get_total(self, types, begin, end, groupby=None, filters=None):
+    def get_total(self, types, begin, end, custom_fields, groupby=None,
+                  filters=None):
         total = copy.deepcopy(self.total_sample)
         series = []
diff --git a/cloudkitty/tests/storage/v2/test_storage_unit.py b/cloudkitty/tests/storage/v2/test_storage_unit.py
index 3b91619d..8957582d 100644
--- a/cloudkitty/tests/storage/v2/test_storage_unit.py
+++ b/cloudkitty/tests/storage/v2/test_storage_unit.py
@@ -101,7 +101,8 @@ class StorageUnitTest(TestCase):
         self.assertEqual(len(total['results']), expected_total_len)
         self.assertEqual(total['total'], expected_total_len)
 
-        returned_total = round(sum(r['rate'] for r in total['results']), 5)
+        returned_total = round(
+            sum(r.get('rate', r.get('price')) for r in total['results']), 5)
         self.assertLessEqual(
             abs(expected_total - float(returned_total)), 0.00001)
 
@@ -189,14 +190,18 @@
         total['results'].sort(key=lambda x: x['project_id'], reverse=True)
 
+        first_element = total['results'][0]
         self.assertLessEqual(
-            abs(round(float(total['results'][0]['rate'])
-                      - expected_total_first, 5)),
+            abs(round(
+                float(first_element.get('rate', first_element.get('price')))
+                - expected_total_first, 5)),
             0.00001,
         )
+        second_element = total['results'][1]
         self.assertLessEqual(
-            abs(round(float(total['results'][1]['rate']) -
-                      expected_total_second, 5)),
+            abs(round(
+                float(second_element.get('rate', second_element.get('price')))
+                - expected_total_second, 5)),
             0.00001,
         )
         self.assertLessEqual(
@@ -228,14 +233,17 @@
         total['results'].sort(key=lambda x: x['project_id'], reverse=True)
 
+        first_entry = total['results'][0]
+        second_entry = total['results'][1]
         self.assertLessEqual(
-            abs(round(float(total['results'][0]['rate'])
+            abs(round(float(first_entry.get('rate', first_entry.get('price')))
                       - expected_total_first, 5)),
             0.00001,
         )
         self.assertLessEqual(
-            abs(round(float(total['results'][1]['rate']) -
-                      expected_total_second, 5)),
+            abs(round(
+                float(second_entry.get('rate', second_entry.get('price')))
+                - expected_total_second, 5)),
             0.00001,
         )
         self.assertLessEqual(
diff --git a/doc/source/api-reference/v2/summary/summary.inc b/doc/source/api-reference/v2/summary/summary.inc
index 99ed7fde..84e71246 100644
--- a/doc/source/api-reference/v2/summary/summary.inc
+++ b/doc/source/api-reference/v2/summary/summary.inc
@@ -17,6 +17,7 @@ Get a rating summary for one or several tenants.
    - end: end
    - groupby: groupby
    - filters: filters
+   - custom_fields: custom_fields
 
 Status codes
 ------------
diff --git a/doc/source/api-reference/v2/summary/summary_parameters.yml b/doc/source/api-reference/v2/summary/summary_parameters.yml
index f0ebb0a3..f4fa8d5a 100644
--- a/doc/source/api-reference/v2/summary/summary_parameters.yml
+++ b/doc/source/api-reference/v2/summary/summary_parameters.yml
@@ -5,6 +5,30 @@ begin: &begin
   type: iso8601 timestamp
   required: false
 
+custom_fields:
+  in: query
+  description: |
+    Optional attributes to customize the summary GET API response. This
+    parameter allows users to create custom reports. By default, the response
+    lists the sum of the quantity and the sum of the price, which is projected
+    as the ``rate`` field; that is, the default value of ``custom_fields`` is
+    ``SUM(qty) AS qty, SUM(price) AS rate``. The value can be customized with
+    InfluxDB query expressions, but the statements ``"select", "from",
+    "drop", "delete", "create", "alter", "insert", "update"`` are not
+    allowed. For instance, to report the last value of the quantity instead
+    of its sum (which is useful when generating reports for storage values),
+    one can send ``last(qty) AS qty, SUM(price) AS rate``. To discover all
+    fields that can be used, ``*`` can also be sent as the parameter value.
+
+    Currently, this feature is only supported by the InfluxDB storage
+    backend; it depends on the storage driver implementing it. If this
+    parameter is set while another storage backend is in use, it is ignored.
+  type: list of strings
+  required: false
+
 end: &end
   in: query
   description: |
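On the wire, the documented parameter is just a URL-encoded query-string value; only InfluxQL aggregations such as SUM(), LAST(), MAX() or MEAN() are meaningful in it, per the description above. A quick sketch of the encoding (the path is the summary route from this change; the host is left out):

    from urllib.parse import urlencode

    query = urlencode({
        'groupby': 'type',
        'custom_fields': 'last(qty) AS qty, SUM(price) AS rate',
    })
    print('/v2/summary?' + query)
    # /v2/summary?groupby=type&custom_fields=last%28qty%29+AS+qty%2C+SUM%28price%29+AS+rate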