
Meters GET request implemented.

Meters GET request implemented and its unit test added.

Added ceilometer API file.

Copied the meter POST request code from metrics; the Ceilometer-specific
features are not implemented yet.

Added meter validator to validate Ceilometer meter format for POST.

Change-Id: I4e095348729f32e63c478559a9bc5a90a29663a5
Author: xiaotan2 (3 years ago)
Commit: 7c0766d455
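
The commit wires GET and POST handlers for /v2.0/meters into the kiloeyes API.
A minimal sketch of exercising the new endpoints with the requests library,
assuming a kiloeyes API server listens on localhost:9090 (the host, port and
sample field values are illustrative, not part of this commit; the required
keys mirror what the new meter_validator middleware checks):

import json
import requests

BASE = 'http://localhost:9090/v2.0/meters'

# GET returns meters aggregated from Elasticsearch in a Ceilometer-like form.
resp = requests.get(BASE, params={'name': 'instance'})
print(resp.status_code, resp.text)

# POST expects Ceilometer OldSample-style meters (values here are examples).
sample = [{
    'counter_name': 'instance',
    'counter_type': 'gauge',
    'counter_unit': 'instance',
    'counter_volume': 1.0,
    'message_id': '5460acce-4fd6-480d-ab18-9735ec7b1996',
    'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
    'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
    'source': 'openstack',
    'timestamp': '2016-04-21T00:07:20.174114',
    'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
}]
resp = requests.post(BASE, data=json.dumps(sample),
                     headers={'Content-Type': 'application/json'})
print(resp.status_code)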

AUTHORS (+2 -0)

@@ -1,4 +1,6 @@
+Andreas Jaeger <aj@suse.com>
 Chang-Yi Lee <cy.l@inwinstack.com>
 Jiaming Lin <robin890650@gmail.com>
 Tong Li <litong01@us.ibm.com>
 spzala <spzala@us.ibm.com>
+xiaotan2 <xiaotan2@uw.edu>

ChangeLog (+28 -6)

@@ -1,7 +1,29 @@
-kiloeyes (1.0)
+CHANGES
+=======
 
-    * Initial project setup
-      Choose framework of wsgiref, pasteDeploy, falcon.
-      The server will be wsgiref server like any other OpenStack server
-      Use PasteDeploy to allow WSGI pipelines
-      Use Falcon framework to implement ReSTful API services
+* Meters GET request implemented
+* Added more instructions on how to configure keystone middleware
+* move up one more dependencies
+* Move to the same level of dependencies as other openstack project
+* Added more functions for the vagrant sub project
+* Move out the binary files folder out of the project
+* Restructure the vagrant scripts and fix a dependency error
+* Changed vagrant file so that installation can be easier
+* Remove sphinx requires from test-requirements
+* Reformat the readme.MD file and update the listOpt in kafka_conn.py
+* Make sure that the index field mapping is correct
+* ES now returns timestamp as milliseconds vs seconds
+* enable bulk message post on persister
+* Added more instructions
+* bulk insert can not be done implicitly
+* fix the partitions data type error
+* Updated the installation instructions
+* added more instruction on how to create an all-in-one kiloeyes
+* unit test passed for py27
+* Add Vagrant sample file to ease development environment bootstrap
+* Make the server more flexible with configuration files
+* remove old openstack incubator project reference
+* remove old oslo.config and use new oslo_config
+* Make minor modifications in the README
+* seeding the project
+* Added .gitreview

etc/kiloeyes.conf (+2 -1)

@@ -11,6 +11,7 @@ dispatcher = versions
 dispatcher = alarmdefinitions
 dispatcher = notificationmethods
 dispatcher = alarms
+dispatcher = meters
 
 [metrics]
 topic = metrics
@@ -78,6 +79,6 @@ compact = False
 partitions = 0
 
 [es_conn]
-uri = http://localhost:9200
+uri = http://128.84.105.102:9200
 time_id = timestamp
 drop_data = False

etc/kiloeyes.ini (+4 -1)

@@ -14,9 +14,12 @@ use = egg: kiloeyes#login
 [filter:inspector]
 use = egg: kiloeyes#inspector
 
-[filter:validator]
+[filter:metric_validator]
 use = egg: kiloeyes#metric_validator
 
+[filter:meter_validator]
+use = egg: kiloeyes#meter_validator
+
 [server:main]
 use = egg:gunicorn#main
 host = 0.0.0.0

kiloeyes/api/ceilometer_api_v2.py (+34 -0)

@@ -0,0 +1,34 @@
+# Copyright 2013 IBM Corp
+##
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from kiloeyes.common import resource_api
+from oslo_log import log
+
+LOG = log.getLogger(__name__)
+
+
+# Ceilometer V2 API
+class V2API(object):
+    def __init__(self, global_conf):
+        LOG.debug('initializing V2API!')
+        self.global_conf = global_conf
+
+    # Meter APIs
+    @resource_api.Restify('/v2.0/meters', method='get')
+    def get_meters(self, req, res):
+        res.status = '501 Not Implemented'
+
+    @resource_api.Restify('/v2.0/meters', method='post')
+    def post_meters(self, req, res):
+        res.status = '501 Not Implemented'

kiloeyes/middleware/meter_validator.py (+133 -0)

@@ -0,0 +1,133 @@
+# Copyright 2013 IBM Corp
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import datetime
+import StringIO
+try:
+    import ujson as json
+except ImportError:
+    import json
+
+
+class MeterValidator(object):
+    """middleware that validate the meter input stream.
+
+    This middleware checks if the input stream actually follows meter spec
+    and all the messages in the request has valid meter data. If the body
+    is valid json and compliant with the spec, then the request will forward
+    the request to the next in the pipeline, otherwise, it will reject the
+    request with response code of 400 or 406.
+    """
+    def __init__(self, app, conf):
+        self.app = app
+        self.conf = conf
+
+    def _is_valid_meter(self, meter):
+        """Validate a message
+
+        According to the Ceilometer OldSample, the external message format is
+        {
+            "counter_name": "instance",
+            "counter_type": "gauge",
+            "counter_unit": "instance",
+            "counter_volume": 1.0,
+            "message_id": "5460acce-4fd6-480d-ab18-9735ec7b1996",
+            "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
+            "recorded_at": "2016-04-21T00:07:20.174109",
+            "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
+            "resource_metadata": {
+                "name1": "value1",
+                "name2": "value2"
+            },
+            "source": "openstack",
+            "timestamp": "2016-04-21T00:07:20.174114",
+            "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff"
+        }
+
+        Once this is validated, the message needs to be transformed into
+        the following internal format:
+
+        The current valid message format is as follows (interna):
+        {
+            "meter": {"something": "The meter as a JSON object"},
+            "meta": {
+                "tenantId": "the tenant ID acquired",
+                "region": "the region that the metric was submitted under",
+            },
+            "creation_time": "the time when the API received the metric",
+        }
+        """
+        if (meter.get('counter_name') and meter.get('counter_volume') and
+            meter.get('message_id') and meter.get('project_id') and
+            meter.get('source') and meter.get('timestamp') and
+                meter.get('user_id')):
+            return True
+        else:
+            return False
+
+    def __call__(self, env, start_response):
+        # if request starts with /datapoints/, then let it go on.
+        # this login middle
+        if (env.get('PATH_INFO', '').startswith('/v2.0/meters') and
+                env.get('REQUEST_METHOD', '') == 'POST'):
+            # We only check the requests which are posting against meters
+            # endpoint
+            try:
+                body = env['wsgi.input'].read()
+                meters = json.loads(body)
+                # Do business logic validation here.
+                is_valid = True
+                if isinstance(meters, list):
+                    for meter in meters:
+                        if not self._is_valid_meter(meter):
+                            is_valid = False
+                            break
+                else:
+                    is_valid = self._is_valid_meter(meters)
+
+                if is_valid:
+                    # If the message is valid, then wrap it into this internal
+                    # format. The tenantId should be available from the
+                    # request since this should have been authenticated.
+                    # ideally this transformation should be done somewhere
+                    # else. For the sake of simplicity, do the simple one
+                    # here to make the life a bit easier.
+
+                    # TODO(HP) Add logic to get region id from request header
+                    # HTTP_X_SERVICE_CATALOG, then find endpoints, then region
+                    region_id = None
+                    msg = {'meter': meters,
+                           'meta': {'tenantId': env.get('HTTP_X_PROJECT_ID'),
+                                    'region': region_id},
+                           'creation_time': datetime.datetime.now()}
+                    env['wsgi.input'] = StringIO.StringIO(json.dumps(msg))
+                    return self.app(env, start_response)
+            except Exception:
+                pass
+            # It is either invalid or exceptioned out while parsing json
+            # we will send the request back with 400.
+            start_response("400 Bad Request", [], '')
+            return []
+        else:
+            # not a metric post request, move on.
+            return self.app(env, start_response)
+
+
+def filter_factory(global_conf, **local_conf):
+
+    def validator_filter(app):
+        return MeterValidator(app, local_conf)
+
+    return validator_filter
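
A minimal sketch (Python 2, matching the module's own use of StringIO) of how
the new validator behaves when wrapped around a trivial WSGI app; the
downstream app and the hand-built environ below are illustrative test
scaffolding, not part of kiloeyes:

import json
import StringIO

from kiloeyes.middleware import meter_validator


def downstream_app(env, start_response):
    # Stands in for the rest of the kiloeyes pipeline.
    start_response('204 No Content', [])
    return []

validator = meter_validator.filter_factory({})(downstream_app)

good_meter = {'counter_name': 'instance', 'counter_volume': 1.0,
              'message_id': 'abc', 'project_id': 'p1', 'source': 'openstack',
              'timestamp': '2016-04-21T00:07:20', 'user_id': 'u1'}


def call(body):
    # Build a bare-bones POST environ and capture the status passed to
    # start_response.
    statuses = []
    env = {'PATH_INFO': '/v2.0/meters', 'REQUEST_METHOD': 'POST',
           'wsgi.input': StringIO.StringIO(json.dumps(body))}
    validator(env, lambda status, headers, exc_info=None:
              statuses.append(status))
    return statuses[0]

print(call([good_meter]))           # 204 No Content (forwarded downstream)
print(call({'counter_name': 'x'}))  # 400 Bad Request (required keys missing)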

kiloeyes/middleware/metric_validator.py (+0 -4)

@@ -23,7 +23,6 @@ except ImportError:
 
 class MetricValidator(object):
     """middleware that validate the metric input stream.
-
     This middleware checks if the input stream actually follows metric spec
    and all the messages in the request has valid metric data. If the body
     is valid json and compliant with the spec, then the request will forward
@@ -36,7 +35,6 @@ class MetricValidator(object):
 
     def _is_valid_metric(self, metric):
         """Validate a message
-
         The external message format is
         {
            "name":"name1",
@@ -47,10 +45,8 @@ class MetricValidator(object):
            "timestamp":1405630174,
            "value":1.0
         }
-
         Once this is validated, the message needs to be transformed into
         the following internal format:
-
         The current valid message format is as follows (interna):
         {
             "metric": {"something": "The metric as a JSON object"},

kiloeyes/tests/v2/elasticsearch/test_meters.py (+147 -0)

@@ -0,0 +1,147 @@
+# Copyright 2013 IBM Corp
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import falcon
+import mock
+from oslo_config import fixture as fixture_config
+from oslotest import base
+import requests
+
+from kiloeyes.common import kafka_conn
+from kiloeyes.v2.elasticsearch import meters
+
+try:
+    import ujson as json
+except ImportError:
+    import json
+
+
+class TestMeterDispatcher(base.BaseTestCase):
+
+    def setUp(self):
+        super(TestMeterDispatcher, self).setUp()
+        self.CONF = self.useFixture(fixture_config.Config()).conf
+        self.CONF.set_override('uri', 'fake_url', group='kafka_opts')
+        self.CONF.set_override('topic', 'fake', group='meters')
+        self.CONF.set_override('doc_type', 'fake', group='meters')
+        self.CONF.set_override('index_prefix', 'also_fake', group='meters')
+        self.CONF.set_override('index_template', 'etc/metrics.template',
+                               group='meters')
+        self.CONF.set_override('uri', 'http://fake_es_uri', group='es_conn')
+
+        res = mock.Mock()
+        res.status_code = 200
+        res.json.return_value = {"data": {"mappings": {"fake": {
+            "properties": {
+                "dimensions": {"properties": {
+                    "key1": {"type": "long"}, "key2": {"type": "long"},
+                    "rkey0": {"type": "long"}, "rkey1": {"type": "long"},
+                    "rkey2": {"type": "long"}, "rkey3": {"type": "long"}}},
+                "name": {"type": "string", "index": "not_analyzed"},
+                "timestamp": {"type": "string", "index": "not_analyzed"},
+                "value": {"type": "double"}}}}}}
+        put_res = mock.Mock()
+        put_res.status_code = '200'
+        with mock.patch.object(requests, 'get',
+                               return_value=res):
+            with mock.patch.object(requests, 'put', return_value=put_res):
+                self.dispatcher = meters.MeterDispatcher({})
+
+    def test_initialization(self):
+        # test that the kafka connection uri should be 'fake' as it was passed
+        # in from configuration
+        self.assertEqual(self.dispatcher._kafka_conn.uri, 'fake_url')
+
+        # test that the topic is meters as it was passed into dispatcher
+        self.assertEqual(self.dispatcher._kafka_conn.topic, 'fake')
+
+        # test that the doc type of the es connection is fake
+        self.assertEqual(self.dispatcher._es_conn.doc_type, 'fake')
+
+        self.assertEqual(self.dispatcher._es_conn.uri, 'http://fake_es_uri/')
+
+        # test that the query url is correctly formed
+        self.assertEqual(self.dispatcher._query_url, (
+            'http://fake_es_uri/also_fake*/fake/_search?search_type=count'))
+
+    def test_post_data(self):
+        with mock.patch.object(kafka_conn.KafkaConnection, 'send_messages',
+                               return_value=204):
+            res = mock.Mock()
+            self.dispatcher.post_data(mock.Mock(), res)
+
+        # test that the response code is 204
+        self.assertEqual(getattr(falcon, 'HTTP_204'), res.status)
+
+        with mock.patch.object(kafka_conn.KafkaConnection, 'send_messages',
+                               return_value=400):
+            res = mock.Mock()
+            self.dispatcher.post_data(mock.Mock(), res)
+
+        # test that the response code is 204
+        self.assertEqual(getattr(falcon, 'HTTP_400'), res.status)
+
+    def test_get_meters(self):
+        res = mock.Mock()
+        req = mock.Mock()
+
+        def _side_effect(arg):
+            if arg == 'name':
+                return 'tongli'
+            elif arg == 'dimensions':
+                return 'key1:100, key2:200'
+        req.get_param.side_effect = _side_effect
+
+        req_result = mock.Mock()
+        response_str = """
+        {"aggregations":{"by_name":{"doc_count_error_upper_bound":0,
+        "sum_other_doc_count":0,"buckets":[{"key":"BABMGD","doc_count":300,
+        "by_dim":{"buckets":[{"key": "64e6ce08b3b8547b7c32e5cfa5b7d81f",
+        "doc_count":300,"meters":{"hits":{"hits":[{ "_type": "metrics",
+        "_id": "AVOziWmP6-pxt0dRmr7j", "_index": "data_20160401000000",
+        "_source":{"name":"BABMGD", "value": 4,
+        "timestamp": 1461337094000,
+        "dimensions_hash": "0afdb86f508962bb5d8af52df07ef35a",
+        "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
+        "tenant_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
+        "user_agent": "openstack", "dimensions": null,
+        "user": "admin", "value_meta": null, "tenant": "admin",
+        "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff"}}]}}}]}}]}}}
+        """
+
+        req_result.json.return_value = json.loads(response_str)
+        req_result.status_code = 200
+
+        with mock.patch.object(requests, 'post', return_value=req_result):
+            self.dispatcher.get_meter(req, res)
+
+        # test that the response code is 200
+        self.assertEqual(res.status, getattr(falcon, 'HTTP_200'))
+        obj = json.loads(res.body)
+        self.assertEqual(obj[0]['name'], 'BABMGD')
+        self.assertEqual(obj[0]['meter_id'], 'AVOziWmP6-pxt0dRmr7j')
+        self.assertEqual(obj[0]['type'], 'metrics')
+        self.assertEqual(obj[0]['user_id'],
+                         'efd87807-12d2-4b38-9c70-5f5c2ac427ff')
+        self.assertEqual(obj[0]['project_id'],
+                         '35b17138-b364-4e6a-a131-8f3099c5be68')
+        self.assertEqual(len(obj), 1)
+
+    def test_post_meters(self):
+        with mock.patch.object(kafka_conn.KafkaConnection, 'send_messages',
+                               return_value=204):
+            res = mock.Mock()
+            self.dispatcher.post_meters(mock.Mock(), res)
+
+        self.assertEqual(getattr(falcon, 'HTTP_204'), res.status)

kiloeyes/v2/elasticsearch/meters.py (+208 -0)

@@ -0,0 +1,208 @@
+# Copyright 2013 IBM Corp
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import falcon
+from oslo_config import cfg
+from oslo_log import log
+import requests
+from stevedore import driver
+
+from kiloeyes.common import es_conn
+from kiloeyes.common import kafka_conn
+from kiloeyes.common import namespace
+from kiloeyes.common import resource_api
+from kiloeyes.v2.elasticsearch import metrics
+
+try:
+    import ujson as json
+except ImportError:
+    import json
+
+METERS_OPTS = [
+    cfg.StrOpt('topic', default='metrics',
+               help='The topic that meters will be published to.'),
+    cfg.StrOpt('doc_type', default='metrics',
+               help='The doc type that meters will be saved into.'),
+    cfg.StrOpt('index_strategy', default='fixed',
+               help='The index strategy used to create index name.'),
+    cfg.StrOpt('index_prefix', default='data_',
+               help='The index prefix where meters were saved to.'),
+    cfg.StrOpt('index_template', default='/etc/kiloeyes/metrics.template',
+               help='The index template which meters index should use.'),
+    cfg.IntOpt('size', default=10000,
+               help=('The query result limit. Any result set more than '
+                     'the limit will be discarded. To see all the matching '
+                     'result, narrow your search by using a small time '
+                     'window or strong matching name')),
+]
+
+cfg.CONF.register_opts(METERS_OPTS, group="meters")
+
+LOG = log.getLogger(__name__)
+
+UPDATED = str(datetime.datetime(2014, 1, 1, 0, 0, 0))
+
+
+class MeterDispatcher(object):
+    def __init__(self, global_conf):
+        LOG.debug('initializing V2API!')
+        super(MeterDispatcher, self).__init__()
+        self.topic = cfg.CONF.meters.topic
+        self.doc_type = cfg.CONF.meters.doc_type
+        self.index_template = cfg.CONF.meters.index_template
+        self.size = cfg.CONF.meters.size
+        self._kafka_conn = kafka_conn.KafkaConnection(self.topic)
+
+        # load index strategy
+        if cfg.CONF.meters.index_strategy:
+            self.index_strategy = driver.DriverManager(
+                namespace.STRATEGY_NS,
+                cfg.CONF.meters.index_strategy,
+                invoke_on_load=True,
+                invoke_kwds={}).driver
+            LOG.debug(dir(self.index_strategy))
+        else:
+            self.index_strategy = None
+
+        self.index_prefix = cfg.CONF.meters.index_prefix
+
+        self._es_conn = es_conn.ESConnection(
+            self.doc_type, self.index_strategy, self.index_prefix)
+
+        # Setup the get meters query body pattern
+        self._query_body = {
+            "query": {"bool": {"must": []}},
+            "size": self.size}
+
+        self._aggs_body = {}
+        self._stats_body = {}
+        self._sort_clause = []
+
+        # Setup the get meters query url, the url should be similar to this:
+        # http://host:port/data_20141201/meters/_search
+        # the url should be made of es_conn uri, the index prefix, meters
+        # dispatcher topic, then add the key word _search.
+        self._query_url = ''.join([self._es_conn.uri,
+                                  self._es_conn.index_prefix, '*/',
+                                  cfg.CONF.meters.topic,
+                                  '/_search?search_type=count'])
+
+        # Setup meters query aggregation command. To see the structure of
+        # the aggregation, copy and paste it to a json formatter.
+        self._meters_agg = """
+        {"by_name":{"terms":{"field":"name","size":%(size)d},
+        "aggs":{"by_dim":{"terms":{"field":"dimensions_hash","size":%(size)d},
+        "aggs":{"meters":{"top_hits":{"_source":{"exclude":
+        ["dimensions_hash","timestamp","value"]},"size":1}}}}}}}
+        """
+
+        self.setup_index_template()
+
+    def setup_index_template(self):
+        status = '400'
+        with open(self.index_template) as template_file:
+            template_path = ''.join([self._es_conn.uri,
+                                     '/_template/metrics'])
+            es_res = requests.put(template_path, data=template_file.read())
+            status = getattr(falcon, 'HTTP_%s' % es_res.status_code)
+
+        if status == '400':
+            LOG.error('Metrics template can not be created. Status code %s'
+                      % status)
+            exit(1)
+        else:
+            LOG.debug('Index template set successfully! Status %s' % status)
+
+    def post_data(self, req, res):
+        msg = ""
+        LOG.debug('@$Post Message is %s' % msg)
+        LOG.debug('Getting the call.')
+        msg = req.stream.read()
+
+        code = self._kafka_conn.send_messages(msg)
+        res.status = getattr(falcon, 'HTTP_' + str(code))
+
+    def _get_agg_response(self, res):
+        if res and res.status_code == 200:
+            obj = res.json()
+            if obj:
+                return obj.get('aggregations')
+            return None
+        else:
+            return None
+
+    @resource_api.Restify('/v2.0/meters', method='get')
+    def get_meter(self, req, res):
+        LOG.debug('The meters GET request is received')
+
+        # process query condition
+        query = []
+        metrics.ParamUtil.common(req, query)
+        _meters_ag = self._meters_agg % {"size": self.size}
+        if query:
+            body = ('{"query":{"bool":{"must":' + json.dumps(query) + '}},'
+                    '"size":' + str(self.size) + ','
+                    '"aggs":' + _meters_ag + '}')
+        else:
+            body = '{"aggs":' + _meters_ag + '}'
+
+        LOG.debug('Request body:' + body)
+        LOG.debug('Request url:' + self._query_url)
+        es_res = requests.post(self._query_url, data=body)
+        res.status = getattr(falcon, 'HTTP_%s' % es_res.status_code)
+
+        LOG.debug('Query to ElasticSearch returned: %s' % es_res.status_code)
+        res_data = self._get_agg_response(es_res)
+        if res_data:
+            # convert the response into ceilometer meter format
+            aggs = res_data['by_name']['buckets']
+            flag = {'is_first': True}
+
+            def _render_hits(item):
+                _id = item['meters']['hits']['hits'][0]['_id']
+                _type = item['meters']['hits']['hits'][0]['_type']
+                _source = item['meters']['hits']['hits'][0]['_source']
+                rslt = ('{"meter_id":' + json.dumps(_id) + ','
+                        '"name":' + json.dumps(_source['name']) + ','
+                        '"project_id":' +
+                        json.dumps(_source['project_id']) + ','
+                        '"resource_id":' +
+                        json.dumps(_source['tenant_id']) + ','
+                        '"source":' + json.dumps(_source['user_agent']) + ','
+                        '"type":' + json.dumps(_type) + ','
+                        '"unit":null,'
+                        '"user_id":' + json.dumps(_source['user_id']) + '}')
+                if flag['is_first']:
+                    flag['is_first'] = False
+                    return rslt
+                else:
+                    return ',' + rslt
+
+            def _make_body(buckets):
+                yield '['
+                for by_name in buckets:
+                    if by_name['by_dim']:
+                        for by_dim in by_name['by_dim']['buckets']:
+                            yield _render_hits(by_dim)
+                yield ']'
+
+            res.body = ''.join(_make_body(aggs))
+            res.content_type = 'application/json;charset=utf-8'
+        else:
+            res.body = ''
+
+    @resource_api.Restify('/v2.0/meters/', method='post')
+    def post_meters(self, req, res):
+        self.post_data(req, res)
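
get_meter composes the Elasticsearch request by splicing the optional query
conditions, the configured size and the fixed aggregation template into one
JSON string, then POSTs it to _query_url. A minimal sketch of what that
composed body looks like, assuming size=10000 and an illustrative match
clause (the real clause is whatever metrics.ParamUtil.common derives from the
request parameters):

import json

size = 10000
meters_agg = ('{"by_name":{"terms":{"field":"name","size":%(size)d},'
              '"aggs":{"by_dim":{"terms":{"field":"dimensions_hash",'
              '"size":%(size)d},"aggs":{"meters":{"top_hits":{"_source":'
              '{"exclude":["dimensions_hash","timestamp","value"]},'
              '"size":1}}}}}}}' % {'size': size})

query = [{'match': {'name': 'instance'}}]  # illustrative query condition
body = ('{"query":{"bool":{"must":' + json.dumps(query) + '}},'
        '"size":' + str(size) + ','
        '"aggs":' + meters_agg + '}')

# Pretty-print the composed body to inspect the nested aggregation structure.
print(json.dumps(json.loads(body), indent=2))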

setup.cfg (+2 -0)

@@ -50,6 +50,7 @@ kiloeyes.dispatcher =
     alarmdefinitions = kiloeyes.v2.elasticsearch.alarmdefinitions:AlarmDefinitionDispatcher
     notificationmethods = kiloeyes.v2.elasticsearch.notificationmethods:NotificationMethodDispatcher
     alarms = kiloeyes.v2.elasticsearch.alarms:AlarmDispatcher
+    meters = kiloeyes.v2.elasticsearch.meters:MeterDispatcher
 
 kiloeyes.index.strategy =
     timed = kiloeyes.microservice.timed_strategy:TimedStrategy
@@ -64,6 +65,7 @@ paste.filter_factory =
     login = kiloeyes.middleware.login:filter_factory
     inspector = kiloeyes.middleware.inspector:filter_factory
     metric_validator = kiloeyes.middleware.metric_validator:filter_factory
+    meter_validator = kiloeyes.middleware.meter_validator:filter_factory
 
 [pbr]
 warnerrors = True
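
The new kiloeyes.dispatcher entry point is what lets the "dispatcher = meters"
line added to etc/kiloeyes.conf resolve to MeterDispatcher. A minimal sketch of
resolving that entry point through stevedore (whether the kiloeyes server loads
dispatchers exactly this way is an assumption based on the entry-point group
name; the class is loaded without invoking it, since instantiating it would try
to reach Kafka and Elasticsearch):

from stevedore import driver

# Resolve the 'meters' plugin from the 'kiloeyes.dispatcher' namespace
# declared in setup.cfg, but do not instantiate it.
mgr = driver.DriverManager(namespace='kiloeyes.dispatcher',
                           name='meters',
                           invoke_on_load=False)
print(mgr.driver)  # <class 'kiloeyes.v2.elasticsearch.meters.MeterDispatcher'>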
