Remove events storage and API

Event storage and the events API have been moved to Panko.

Change-Id: I179eb0d436752e3bb8abaed714664cf74f5615e6
Julien Danjou 2016-12-12 19:06:30 +01:00
parent 407b726fc2
commit 8d23f431ab
49 changed files with 111 additions and 4431 deletions

View File

@@ -41,8 +41,6 @@ class Capabilities(base.Base):
     "A flattened dictionary of API capabilities"
     storage = {wtypes.text: bool}
     "A flattened dictionary of storage capabilities"
-    event_storage = {wtypes.text: bool}
-    "A flattened dictionary of event storage capabilities"

     @classmethod
     def sample(cls):
@@ -68,12 +66,9 @@ class Capabilities(base.Base):
                          'stddev': True,
                          'cardinality': True,
                          'quartile': False}}},
-                'events': {'query': {'simple': True}},
             }),
             storage=_flatten_capabilities(
                 {'storage': {'production_ready': True}}),
-            event_storage=_flatten_capabilities(
-                {'storage': {'production_ready': True}}),
         )
@@ -89,12 +84,7 @@ class CapabilitiesController(rest.RestController):
         # variation in API capabilities is effectively determined by
         # the lack of strict feature parity across storage drivers
         conn = pecan.request.storage_conn
-        event_conn = pecan.request.event_storage_conn
         driver_capabilities = conn.get_capabilities().copy()
-        driver_capabilities['events'] = event_conn.get_capabilities()['events']
         driver_perf = conn.get_storage_capabilities()
-        event_driver_perf = event_conn.get_storage_capabilities()
         return Capabilities(api=_flatten_capabilities(driver_capabilities),
-                            storage=_flatten_capabilities(driver_perf),
-                            event_storage=_flatten_capabilities(
-                                event_driver_perf))
+                            storage=_flatten_capabilities(driver_perf))
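After this change a GET /v2/capabilities response carries only the api and storage sections. A minimal sketch of the resulting payload, assuming the colon-joined key style produced by _flatten_capabilities (the exact key names are illustrative, not taken from this diff):

# Sketch of a /v2/capabilities payload after this commit. The flattened
# key names are assumptions based on _flatten_capabilities.
capabilities_response = {
    'api': {
        'statistics:aggregation:selectable:quartile': False,
        # ... other flattened API flags; 'events:query:simple' is gone ...
    },
    'storage': {'storage:production_ready': True},
    # no 'event_storage' key any more; event capabilities now live in Panko
}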

View File

@@ -1,327 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_log import log
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils as v2_utils
from ceilometer.api import rbac
from ceilometer.event import storage
from ceilometer.event.storage import models as event_models
from ceilometer.i18n import _, _LE
LOG = log.getLogger(__name__)
class TraitDescription(base.Base):
"""A description of a trait, with no associated value."""
type = wtypes.text
"the data type, defaults to string"
name = wtypes.text
"the name of the trait"
@classmethod
def sample(cls):
return cls(name='service',
type='string'
)
class EventQuery(base.Query):
"""Query arguments for Event Queries."""
_supported_types = ['integer', 'float', 'string', 'datetime']
type = wsme.wsattr(wtypes.text, default='string')
"the type of the trait filter, defaults to string"
def __repr__(self):
# for logging calls
return '<EventQuery %r %s %r %s>' % (self.field,
self.op,
self._get_value_as_type(),
self.type)
@classmethod
def sample(cls):
return cls(field="event_type",
type="string",
op="eq",
value="compute.instance.create.start")
class Trait(base.Base):
"""A Trait associated with an event."""
name = wtypes.text
"The name of the trait"
value = wtypes.text
"the value of the trait"
type = wtypes.text
"the type of the trait (string, integer, float or datetime)"
@staticmethod
def _convert_storage_trait(trait):
"""Helper method to convert a storage model into an API trait instance.
If an API trait instance is passed in, just return it.
"""
if isinstance(trait, Trait):
return trait
value = (six.text_type(trait.value)
if not trait.dtype == event_models.Trait.DATETIME_TYPE
else trait.value.isoformat())
trait_type = event_models.Trait.get_name_by_type(trait.dtype)
return Trait(name=trait.name, type=trait_type, value=value)
@classmethod
def sample(cls):
return cls(name='service',
type='string',
value='compute.hostname'
)
class Event(base.Base):
"""A System event."""
message_id = wtypes.text
"The message ID for the notification"
event_type = wtypes.text
"The type of the event"
_traits = None
def get_traits(self):
return self._traits
def set_traits(self, traits):
self._traits = map(Trait._convert_storage_trait, traits)
traits = wsme.wsproperty(wtypes.ArrayType(Trait),
get_traits,
set_traits)
"Event specific properties"
generated = datetime.datetime
"The time the event occurred"
raw = base.JsonType()
"The raw copy of notification"
@classmethod
def sample(cls):
return cls(
event_type='compute.instance.update',
generated=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
message_id='94834db1-8f1b-404d-b2ec-c35901f1b7f0',
traits={
Trait(name='request_id',
value='req-4e2d67b8-31a4-48af-bb2f-9df72a353a72'),
Trait(name='service',
value='conductor.tem-devstack-01'),
Trait(name='tenant_id',
value='7f13f2b17917463b9ee21aa92c4b36d6')
},
raw={'status': {'nested': 'started'}}
)
def _build_rbac_query_filters():
filters = {'t_filter': [], 'admin_proj': None}
# Returns user_id, proj_id for non-admins
user_id, proj_id = rbac.get_limited_to(pecan.request.headers)
# If non-admin, filter events by user and project
if user_id and proj_id:
filters['t_filter'].append({"key": "project_id", "string": proj_id,
"op": "eq"})
filters['t_filter'].append({"key": "user_id", "string": user_id,
"op": "eq"})
elif not user_id and not proj_id:
filters['admin_proj'] = pecan.request.headers.get('X-Project-Id')
return filters
def _event_query_to_event_filter(q):
evt_model_filter = {
'event_type': None,
'message_id': None,
'start_timestamp': None,
'end_timestamp': None
}
filters = _build_rbac_query_filters()
traits_filter = filters['t_filter']
admin_proj = filters['admin_proj']
for i in q:
if not i.op:
i.op = 'eq'
elif i.op not in base.operation_kind:
error = (_('Operator %(operator)s is not supported. The supported'
' operators are: %(supported)s') %
{'operator': i.op, 'supported': base.operation_kind})
raise base.ClientSideError(error)
if i.field in evt_model_filter:
if i.op != 'eq' and i.field in ('event_type', 'message_id'):
error = (_('Operator %(operator)s is not supported. Only'
' `eq\' operator is available for field'
' %(field)s') %
{'operator': i.op, 'field': i.field})
raise base.ClientSideError(error)
if i.op != 'ge' and i.field == 'start_timestamp':
error = (_('Operator %(operator)s is not supported. Only'
' `ge\' operator is available for field'
' %(field)s') %
{'operator': i.op, 'field': i.field})
raise base.ClientSideError(error)
if i.op != 'le' and i.field == 'end_timestamp':
error = (_('Operator %(operator)s is not supported. Only'
' `le\' operator is available for field'
' %(field)s') %
{'operator': i.op, 'field': i.field})
raise base.ClientSideError(error)
evt_model_filter[i.field] = i.value
else:
trait_type = i.type or 'string'
traits_filter.append({"key": i.field,
trait_type: i._get_value_as_type(),
"op": i.op})
return storage.EventFilter(traits_filter=traits_filter,
admin_proj=admin_proj, **evt_model_filter)
class TraitsController(rest.RestController):
"""Works on Event Traits."""
@v2_utils.requires_admin
@wsme_pecan.wsexpose([Trait], wtypes.text, wtypes.text)
def get_one(self, event_type, trait_name):
"""Return all instances of a trait for an event type.
:param event_type: Event type to filter traits by
:param trait_name: Trait to return values for
"""
LOG.debug("Getting traits for %s", event_type)
return [Trait._convert_storage_trait(t)
for t in pecan.request.event_storage_conn
.get_traits(event_type, trait_name)]
@v2_utils.requires_admin
@wsme_pecan.wsexpose([TraitDescription], wtypes.text)
def get_all(self, event_type):
"""Return all trait names for an event type.
:param event_type: Event type to filter traits by
"""
get_trait_name = event_models.Trait.get_name_by_type
return [TraitDescription(name=t['name'],
type=get_trait_name(t['data_type']))
for t in pecan.request.event_storage_conn
.get_trait_types(event_type)]
class EventTypesController(rest.RestController):
"""Works on Event Types in the system."""
traits = TraitsController()
@v2_utils.requires_admin
@wsme_pecan.wsexpose(None, wtypes.text)
def get_one(self, event_type):
"""Unused API, will always return 404.
:param event_type: An event type
"""
pecan.abort(404)
@v2_utils.requires_admin
@wsme_pecan.wsexpose([six.text_type])
def get_all(self):
"""Get all event types."""
return list(pecan.request.event_storage_conn.get_event_types())
class EventsController(rest.RestController):
"""Works on Events."""
@v2_utils.requires_context
@wsme_pecan.wsexpose([Event], [EventQuery], int)
def get_all(self, q=None, limit=None):
"""Return all events matching the query filters.
:param q: Filter arguments for which Events to return
:param limit: Maximum number of samples to be returned.
"""
rbac.enforce("events:index", pecan.request)
q = q or []
limit = v2_utils.enforce_limit(limit)
event_filter = _event_query_to_event_filter(q)
return [Event(message_id=event.message_id,
event_type=event.event_type,
generated=event.generated,
traits=event.traits,
raw=event.raw)
for event in
pecan.request.event_storage_conn.get_events(event_filter,
limit)]
@v2_utils.requires_context
@wsme_pecan.wsexpose(Event, wtypes.text)
def get_one(self, message_id):
"""Return a single event with the given message id.
:param message_id: Message ID of the Event to be returned
"""
rbac.enforce("events:show", pecan.request)
filters = _build_rbac_query_filters()
t_filter = filters['t_filter']
admin_proj = filters['admin_proj']
event_filter = storage.EventFilter(traits_filter=t_filter,
admin_proj=admin_proj,
message_id=message_id)
events = [event for event
in pecan.request.event_storage_conn.get_events(event_filter)]
if not events:
raise base.EntityNotFound(_("Event"), message_id)
if len(events) > 1:
LOG.error(_LE("More than one event with "
"id %s returned from storage driver") % message_id)
event = events[0]
return Event(message_id=event.message_id,
event_type=event.event_type,
generated=event.generated,
traits=event.traits,
raw=event.raw)
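For the record, this is the kind of filter the removed EventsController.get_all() consumed. The dict form mirrors EventQuery.sample() above; the query-string encoding on the last line is an assumption based on the v2 API's usual q.* parameters, not part of this diff.

# Hypothetical query against the removed events API, shaped after
# EventQuery.sample(); field/type/op/value are the EventQuery attributes.
q = [
    {'field': 'event_type', 'type': 'string', 'op': 'eq',
     'value': 'compute.instance.create.start'},
    {'field': 'start_timestamp', 'type': 'datetime', 'op': 'ge',
     'value': '2015-01-01T12:00:00'},
]
# Assumed HTTP form:
# GET /v2/events?q.field=event_type&q.op=eq&q.value=compute.instance.create.start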

View File

@@ -25,7 +25,6 @@ from oslo_utils import strutils
 import pecan

 from ceilometer.api.controllers.v2 import capabilities
-from ceilometer.api.controllers.v2 import events
 from ceilometer.api.controllers.v2 import meters
 from ceilometer.api.controllers.v2 import query
 from ceilometer.api.controllers.v2 import resources
@@ -209,14 +208,10 @@ class V2Controller(object):
             aodh_abort()
         elif kind == 'alarms' and self.aodh_url:
             _redirect(self.aodh_url)
-        elif kind == 'events':
-            if self.panko_url:
-                return _redirect(self.panko_url)
-            return events.EventsController(), remainder
-        elif kind == 'event_types':
-            if self.panko_url:
-                return _redirect(self.panko_url)
-            return events.EventTypesController(), remainder
+        elif kind == 'events' and self.panko_url:
+            return _redirect(self.panko_url)
+        elif kind == 'event_types' and self.panko_url:
+            return _redirect(self.panko_url)
         else:
             pecan.abort(404)
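Condensed, the routing change above behaves like this hypothetical helper (not the real controller, just the decision logic): event URLs are redirected when panko_url is configured and fall through to a 404 otherwise.

# Hypothetical condensation of the new V2Controller dispatch for event URLs.
def route_events(kind, panko_url):
    if kind in ('events', 'event_types') and panko_url:
        return 'redirect:%s' % panko_url   # _redirect(self.panko_url)
    return 'not found'                     # pecan.abort(404)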

View File

@@ -45,27 +45,22 @@ class ConfigHook(hooks.PecanHook):

 class DBHook(hooks.PecanHook):

     def __init__(self, conf):
-        self.storage_connection = self.get_connection(conf, 'metering')
-        self.event_storage_connection = self.get_connection(conf, 'event')
+        self.storage_connection = self.get_connection(conf)

-        if (not self.storage_connection
-                and not self.event_storage_connection):
-            raise Exception("Api failed to start. Failed to connect to "
-                            "databases, purpose: %s" %
-                            ', '.join(['metering', 'event']))
+        if not self.storage_connection:
+            raise Exception(
+                "API failed to start. Failed to connect to database")

     def before(self, state):
         state.request.storage_conn = self.storage_connection
-        state.request.event_storage_conn = self.event_storage_connection

     @staticmethod
-    def get_connection(conf, purpose):
+    def get_connection(conf):
         try:
-            return storage.get_connection_from_config(conf, purpose)
+            return storage.get_connection_from_config(conf)
         except Exception as err:
-            params = {"purpose": purpose, "err": err}
-            LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s "
-                              "retry later: %(err)s") % params)
+            LOG.exception(_LE("Failed to connect to db" "retry later: %s"),
+                          err)


 class NotifierHook(hooks.PecanHook):

View File

@@ -33,9 +33,6 @@ def upgrade():
         cfg.BoolOpt('skip-metering-database',
                     help='Skip metering database upgrade.',
                     default=False),
-        cfg.BoolOpt('skip-event-database',
-                    help='Skip event database upgrade.',
-                    default=False),
         cfg.BoolOpt('skip-gnocchi-resource-types',
                     help='Skip gnocchi resource-types upgrade.',
                     default=False),
@@ -46,13 +43,7 @@ def upgrade():
         LOG.info("Skipping metering database upgrade")
     else:
         LOG.debug("Upgrading metering database")
-        storage.get_connection_from_config(conf, 'metering').upgrade()
-
-    if conf.skip_event_database:
-        LOG.info("Skipping event database upgrade")
-    else:
-        LOG.debug("Upgrading event database")
-        storage.get_connection_from_config(conf, 'event').upgrade()
+        storage.get_connection_from_config(conf).upgrade()

     if conf.skip_gnocchi_resource_types:
         LOG.info("Skipping Gnocchi resource types upgrade")
@@ -67,40 +58,32 @@ def expirer():
     if conf.database.metering_time_to_live > 0:
         LOG.debug("Clearing expired metering data")
-        storage_conn = storage.get_connection_from_config(conf, 'metering')
+        storage_conn = storage.get_connection_from_config(conf)
         storage_conn.clear_expired_metering_data(
             conf.database.metering_time_to_live)
     else:
         LOG.info(_LI("Nothing to clean, database metering time to live "
                      "is disabled"))

-    if conf.database.event_time_to_live > 0:
-        LOG.debug("Clearing expired event data")
-        event_conn = storage.get_connection_from_config(conf, 'event')
-        event_conn.clear_expired_event_data(
-            conf.database.event_time_to_live)
-    else:
-        LOG.info(_LI("Nothing to clean, database event time to live "
-                     "is disabled"))

 def db_clean_legacy():
     conf = cfg.ConfigOpts()
     conf.register_cli_opts([
-        cfg.StrOpt('confirm-drop-alarm-table',
+        cfg.StrOpt('confirm-drop-table',
                    short='n',
-                   help='confirm to drop the legacy alarm tables')])
+                   help='confirm to drop the legacy tables')])

-    if not conf.confirm_drop_alarm_table:
-        confirm = moves.input("Do you really want to drop the legacy alarm "
-                              "tables? This will destroy data definitely "
-                              "if it exist. Please type 'YES' to confirm: ")
+    if not conf.confirm_drop_table:
+        confirm = moves.input("Do you really want to drop the legacy "
+                              "alarm and event tables? This will destroy "
+                              "data definitively if it exist. Please type "
+                              "'YES' to confirm: ")
         if confirm != 'YES':
             print("DB legacy cleanup aborted!")
             return

     service.prepare_service(conf=conf)
-    for purpose in ['metering', 'event']:
-        url = (getattr(conf.database, '%s_connection' % purpose) or
-               conf.database.connection)
+    url = (getattr(conf.database, "metering_connection") or
+           conf.database.connection)
     parsed = urlparse.urlparse(url)
@@ -110,20 +93,22 @@ def db_clean_legacy():
         masked_url = urlparse.urlunparse(masked_url)
     else:
         masked_url = url
-    LOG.info(_LI('Starting to drop alarm and alarm history tables in '
-                 '%(purpose)s backend: %(url)s'), {
-                     'purpose': purpose, 'url': masked_url})
+    LOG.info(_LI('Starting to drop event, alarm and alarm history tables in '
+                 'backend: %s'), masked_url)

     connection_scheme = parsed.scheme
-    conn = storage.get_connection_from_config(conf, purpose)
+    conn = storage.get_connection_from_config(conf)
     if connection_scheme in ('mysql', 'mysql+pymysql', 'postgresql',
                              'sqlite'):
         engine = conn._engine_facade.get_engine()
         meta = sa.MetaData(bind=engine)
-        for table_name in ['alarm', 'alarm_history']:
+        for table_name in ('alarm', 'alarm_history',
+                           'trait_text', 'trait_int',
+                           'trait_float', 'trait_datetime',
+                           'event', 'event_type'):
             if engine.has_table(table_name):
-                alarm = sa.Table(table_name, meta, autoload=True)
-                alarm.drop()
+                table = sa.Table(table_name, meta, autoload=True)
+                table.drop()
                 LOG.info(_LI("Legacy %s table of SQL backend has been "
                              "dropped."), table_name)
             else:
@@ -133,8 +118,9 @@ def db_clean_legacy():
         with conn.conn_pool.connection() as h_conn:
             tables = h_conn.tables()
             table_name_mapping = {'alarm': 'alarm',
-                                  'alarm_h': 'alarm history'}
-            for table_name in ['alarm', 'alarm_h']:
+                                  'alarm_h': 'alarm history',
+                                  'event': 'event'}
+            for table_name in ('alarm', 'alarm_h', 'event'):
                 try:
                     if table_name in tables:
                         h_conn.disable_table(table_name)
@@ -150,11 +136,11 @@ def db_clean_legacy():
                          'tables of Hbase, %s'), e)
     elif connection_scheme == 'mongodb':
-        for table_name in ['alarm', 'alarm_history']:
+        for table_name in ('alarm', 'alarm_history', 'event'):
             if table_name in conn.db.conn.collection_names():
                 conn.db.conn.drop_collection(table_name)
                 LOG.info(_LI("Legacy %s table of Mongodb backend has been "
                              "dropped."), table_name)
             else:
                 LOG.info(_LI('%s table does not exist.'), table_name)
-    LOG.info('Legacy alarm tables cleanup done.')
+    LOG.info('Legacy alarm and event tables cleanup done.')

View File

@@ -13,19 +13,17 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from debtcollector import removals
 from oslo_log import log
 from oslo_utils import timeutils

 from ceilometer import dispatcher
-from ceilometer.event.storage import models
 from ceilometer.i18n import _LE
 from ceilometer import storage

 LOG = log.getLogger(__name__)


-class DatabaseDispatcher(dispatcher.Base):
+class MeterDatabaseDispatcher(dispatcher.MeterDispatcherBase):
     """Dispatcher class for recording metering data into database.

     The dispatcher class which records each meter into a database configured
@@ -36,21 +34,15 @@ class DatabaseDispatcher(dispatcher.Base):
         [DEFAULT]
         meter_dispatchers = database
-        event_dispatchers = database
     """
     @property
     def conn(self):
         if not hasattr(self, "_conn"):
             self._conn = storage.get_connection_from_config(
-                self.conf, self.CONNECTION_TYPE)
+                self.conf)
         return self._conn

-
-class MeterDatabaseDispatcher(dispatcher.MeterDispatcherBase,
-                              DatabaseDispatcher):
-    CONNECTION_TYPE = 'metering'
-
     def record_metering_data(self, data):
         # We may have received only one counter on the wire
         if not data:
@@ -78,35 +70,3 @@ class MeterDatabaseDispatcher(dispatcher.MeterDispatcherBase,
                 LOG.error(_LE('Failed to record %(len)s: %(err)s.'),
                           {'len': len(data), 'err': err})
                 raise
-
-
-@removals.removed_class("EventDatabaseDispatcher",
-                        message="Use panko instead",
-                        removal_version="8.0.0")
-class EventDatabaseDispatcher(dispatcher.EventDispatcherBase,
-                              DatabaseDispatcher):
-    CONNECTION_TYPE = 'event'
-
-    def record_events(self, events):
-        if not isinstance(events, list):
-            events = [events]
-
-        event_list = []
-        for ev in events:
-            try:
-                event_list.append(
-                    models.Event(
-                        message_id=ev['message_id'],
-                        event_type=ev['event_type'],
-                        generated=timeutils.normalize_time(
-                            timeutils.parse_isotime(ev['generated'])),
-                        traits=[models.Trait(
-                            name, dtype,
-                            models.Trait.convert_value(dtype, value))
-                            for name, dtype, value in ev['traits']],
-                        raw=ev.get('raw', {}))
-                )
-            except Exception:
-                LOG.exception(_LE("Error processing event and it will be "
-                                  "dropped: %s"), ev)
-        self.conn.record_events(event_list)

View File

@@ -1,57 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from ceilometer import utils
class EventFilter(object):
"""Properties for building an Event query.
:param start_timestamp: UTC start datetime (mandatory)
:param end_timestamp: UTC end datetime (mandatory)
:param event_type: the name of the event. None for all.
:param message_id: the message_id of the event. None for all.
:param admin_proj: the project_id of admin role. None if non-admin user.
:param traits_filter: the trait filter dicts, all of which are optional.
This parameter is a list of dictionaries that specify trait values:
.. code-block:: python
{'key': <key>,
'string': <value>,
'integer': <value>,
'datetime': <value>,
'float': <value>,
'op': <eq, lt, le, ne, gt or ge> }
"""
def __init__(self, start_timestamp=None, end_timestamp=None,
event_type=None, message_id=None, traits_filter=None,
admin_proj=None):
self.start_timestamp = utils.sanitize_timestamp(start_timestamp)
self.end_timestamp = utils.sanitize_timestamp(end_timestamp)
self.message_id = message_id
self.event_type = event_type
self.traits_filter = traits_filter or []
self.admin_proj = admin_proj
def __repr__(self):
return ("<EventFilter(start_timestamp: %s,"
" end_timestamp: %s,"
" event_type: %s,"
" traits: %s)>" %
(self.start_timestamp,
self.end_timestamp,
self.event_type,
six.text_type(self.traits_filter)))
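A minimal usage sketch of the class removed here, assuming it were still importable; the values are illustrative, and the traits_filter entry follows the dict format documented in the docstring above.

# Illustrative construction of the removed EventFilter.
f = EventFilter(
    start_timestamp='2016-01-01T00:00:00',  # sanitized by the constructor
    end_timestamp='2016-01-02T00:00:00',
    event_type='compute.instance.create.start',
    traits_filter=[{'key': 'project_id',
                    'string': '7f13f2b17917463b9ee21aa92c4b36d6',
                    'op': 'eq'}])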

View File

@@ -1,99 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ceilometer
class Connection(object):
"""Base class for event storage system connections."""
# A dictionary representing the capabilities of this driver.
CAPABILITIES = {
'events': {'query': {'simple': False}},
}
STORAGE_CAPABILITIES = {
'storage': {'production_ready': False},
}
def __init__(self, conf, url):
self.conf = conf
@staticmethod
def upgrade():
"""Migrate the database to `version` or the most recent version."""
@staticmethod
def clear():
"""Clear database."""
@staticmethod
def record_events(events):
"""Write the events to the backend storage system.
:param events: a list of model.Event objects.
"""
raise ceilometer.NotImplementedError('Events not implemented.')
@staticmethod
def get_events(event_filter, limit=None):
"""Return an iterable of model.Event objects."""
raise ceilometer.NotImplementedError('Events not implemented.')
@staticmethod
def get_event_types():
"""Return all event types as an iterable of strings."""
raise ceilometer.NotImplementedError('Events not implemented.')
@staticmethod
def get_trait_types(event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are
returned.
:param event_type: the type of the Event
"""
raise ceilometer.NotImplementedError('Events not implemented.')
@staticmethod
def get_traits(event_type, trait_type=None):
"""Return all trait instances associated with an event_type.
If trait_type is specified, only return instances of that trait type.
:param event_type: the type of the Event to filter by
:param trait_type: the name of the Trait to filter by
"""
raise ceilometer.NotImplementedError('Events not implemented.')
@classmethod
def get_capabilities(cls):
"""Return an dictionary with the capabilities of each driver."""
return cls.CAPABILITIES
@classmethod
def get_storage_capabilities(cls):
"""Return a dictionary representing the performance capabilities.
This is needed to evaluate the performance of each driver.
"""
return cls.STORAGE_CAPABILITIES
@staticmethod
def clear_expired_event_data(ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
raise ceilometer.NotImplementedError('Clearing events not implemented')
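The contract being deleted is small. A hedged sketch of what a concrete driver had to override, with method names taken from the base class above and a purely illustrative in-memory body:

# Illustrative only: the shape of a driver built on the removed base class.
class InMemoryConnection(Connection):
    def __init__(self, conf, url):
        super(InMemoryConnection, self).__init__(conf, url)
        self._events = []

    def record_events(self, events):
        # events is a list of ceilometer.event.storage.models.Event
        self._events.extend(events)

    def get_events(self, event_filter, limit=None):
        # real drivers apply event_filter; this sketch only honors limit
        return iter(self._events[:limit])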

View File

@@ -1,288 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import operator
import elasticsearch as es
from elasticsearch import helpers
from oslo_log import log
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _LE, _LI
from ceilometer import storage
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Put the event data into an ElasticSearch db.
Events in ElasticSearch are indexed by day and stored by event_type.
An example document::
{"_index":"events_2014-10-21",
"_type":"event_type0",
"_id":"dc90e464-65ab-4a5d-bf66-ecb956b5d779",
"_score":1.0,
"_source":{"timestamp": "2014-10-21T20:02:09.274797"
"traits": {"id4_0": "2014-10-21T20:02:09.274797",
"id3_0": 0.7510790937279408,
"id2_0": 5,
"id1_0": "18c97ba1-3b74-441a-b948-a702a30cbce2"}
}
}
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
index_name = 'events'
# NOTE(gordc): mainly for testing, data is not searchable after write,
# it is only searchable after periodic refreshes.
_refresh_on_write = False
def __init__(self, conf, url):
super(Connection, self).__init__(conf, url)
url_split = netutils.urlsplit(url)
self.conn = es.Elasticsearch(url_split.netloc)
def upgrade(self):
iclient = es.client.IndicesClient(self.conn)
ts_template = {
'template': '*',
'mappings': {'_default_':
{'_timestamp': {'enabled': True,
'store': True},
'properties': {'traits': {'type': 'nested'}}}}}
iclient.put_template(name='enable_timestamp', body=ts_template)
def record_events(self, events):
def _build_bulk_index(event_list):
for ev in event_list:
traits = {t.name: t.value for t in ev.traits}
yield {'_op_type': 'create',
'_index': '%s_%s' % (self.index_name,
ev.generated.date().isoformat()),
'_type': ev.event_type,
'_id': ev.message_id,
'_source': {'timestamp': ev.generated.isoformat(),
'traits': traits,
'raw': ev.raw}}
error = None
for ok, result in helpers.streaming_bulk(
self.conn, _build_bulk_index(events)):
if not ok:
__, result = result.popitem()
if result['status'] == 409:
LOG.info(_LI('Duplicate event detected, skipping it: %s'),
result)
else:
LOG.exception(_LE('Failed to record event: %s'), result)
error = storage.StorageUnknownWriteError(result)
if self._refresh_on_write:
self.conn.indices.refresh(index='%s_*' % self.index_name)
while self.conn.cluster.pending_tasks(local=True)['tasks']:
pass
if error:
raise error
def _make_dsl_from_filter(self, indices, ev_filter):
q_args = {}
filters = []
if ev_filter.start_timestamp:
filters.append({'range': {'timestamp':
{'ge': ev_filter.start_timestamp.isoformat()}}})
while indices[0] < (
'%s_%s' % (self.index_name,
ev_filter.start_timestamp.date().isoformat())):
del indices[0]
if ev_filter.end_timestamp:
filters.append({'range': {'timestamp':
{'le': ev_filter.end_timestamp.isoformat()}}})
while indices[-1] > (
'%s_%s' % (self.index_name,
ev_filter.end_timestamp.date().isoformat())):
del indices[-1]
q_args['index'] = indices
if ev_filter.event_type:
q_args['doc_type'] = ev_filter.event_type
if ev_filter.message_id:
filters.append({'term': {'_id': ev_filter.message_id}})
if ev_filter.traits_filter or ev_filter.admin_proj:
trait_filters = []
or_cond = []
for t_filter in ev_filter.traits_filter or []:
value = None
for val_type in ['integer', 'string', 'float', 'datetime']:
if t_filter.get(val_type):
value = t_filter.get(val_type)
if isinstance(value, six.string_types):
value = value.lower()
elif isinstance(value, datetime.datetime):
value = value.isoformat()
break
if t_filter.get('op') in ['gt', 'ge', 'lt', 'le']:
op = (t_filter.get('op').replace('ge', 'gte')
.replace('le', 'lte'))
trait_filters.append(
{'range': {t_filter['key']: {op: value}}})
else:
tf = {"query": {"query_string": {
"query": "%s: \"%s\"" % (t_filter['key'], value)}}}
if t_filter.get('op') == 'ne':
tf = {"not": tf}
trait_filters.append(tf)
if ev_filter.admin_proj:
or_cond = [{'missing': {'field': 'project_id'}},
{'term': {'project_id': ev_filter.admin_proj}}]
filters.append(
{'nested': {'path': 'traits', 'query': {'filtered': {
'filter': {'bool': {'must': trait_filters,
'should': or_cond}}}}}})
q_args['body'] = {'query': {'filtered':
{'filter': {'bool': {'must': filters}}}}}
return q_args
def get_events(self, event_filter, limit=None):
if limit == 0:
return
iclient = es.client.IndicesClient(self.conn)
indices = iclient.get_mapping('%s_*' % self.index_name).keys()
if indices:
filter_args = self._make_dsl_from_filter(indices, event_filter)
if limit is not None:
filter_args['size'] = limit
results = self.conn.search(fields=['_id', 'timestamp',
'_type', '_source'],
sort='timestamp:asc',
**filter_args)
trait_mappings = {}
for record in results['hits']['hits']:
trait_list = []
if not record['_type'] in trait_mappings:
trait_mappings[record['_type']] = list(
self.get_trait_types(record['_type']))
for key in record['_source']['traits'].keys():
value = record['_source']['traits'][key]
for t_map in trait_mappings[record['_type']]:
if t_map['name'] == key:
dtype = t_map['data_type']
break
else:
dtype = models.Trait.TEXT_TYPE
trait_list.append(models.Trait(
name=key, dtype=dtype,
value=models.Trait.convert_value(dtype, value)))
gen_ts = timeutils.normalize_time(timeutils.parse_isotime(
record['_source']['timestamp']))
yield models.Event(message_id=record['_id'],
event_type=record['_type'],
generated=gen_ts,
traits=sorted(
trait_list,
key=operator.attrgetter('dtype')),
raw=record['_source']['raw'])
def get_event_types(self):
iclient = es.client.IndicesClient(self.conn)
es_mappings = iclient.get_mapping('%s_*' % self.index_name)
seen_types = set()
for index in es_mappings.keys():
for ev_type in es_mappings[index]['mappings'].keys():
seen_types.add(ev_type)
# TODO(gordc): tests assume sorted ordering but backends are not
# explicitly ordered.
# NOTE: _default_ is a type that appears in all mappings but is not a
# real 'type'
seen_types.discard('_default_')
return sorted(list(seen_types))
@staticmethod
def _remap_es_types(d_type):
if d_type == 'string':
d_type = 'text'
elif d_type == 'long':
d_type = 'int'
elif d_type == 'double':
d_type = 'float'
elif d_type == 'date' or d_type == 'date_time':
d_type = 'datetime'
return d_type
def get_trait_types(self, event_type):
iclient = es.client.IndicesClient(self.conn)
es_mappings = iclient.get_mapping('%s_*' % self.index_name)
seen_types = []
for index in es_mappings.keys():
# if event_type exists in index and has traits
if (es_mappings[index]['mappings'].get(event_type) and
es_mappings[index]['mappings'][event_type]['properties']
['traits'].get('properties')):
for t_type in (es_mappings[index]['mappings'][event_type]
['properties']['traits']['properties'].keys()):
d_type = (es_mappings[index]['mappings'][event_type]
['properties']['traits']['properties']
[t_type]['type'])
d_type = models.Trait.get_type_by_name(
self._remap_es_types(d_type))
if (t_type, d_type) not in seen_types:
yield {'name': t_type, 'data_type': d_type}
seen_types.append((t_type, d_type))
def get_traits(self, event_type, trait_type=None):
t_types = dict((res['name'], res['data_type'])
for res in self.get_trait_types(event_type))
if not t_types or (trait_type and trait_type not in t_types.keys()):
return
result = self.conn.search('%s_*' % self.index_name, event_type)
for ev in result['hits']['hits']:
if trait_type and ev['_source']['traits'].get(trait_type):
yield models.Trait(
name=trait_type,
dtype=t_types[trait_type],
value=models.Trait.convert_value(
t_types[trait_type],
ev['_source']['traits'][trait_type]))
else:
for trait in ev['_source']['traits'].keys():
yield models.Trait(
name=trait,
dtype=t_types[trait],
value=models.Trait.convert_value(
t_types[trait],
ev['_source']['traits'][trait]))
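For orientation, an assumed example of the request _make_dsl_from_filter assembles for an event_type plus start_timestamp filter, following the ElasticSearch 1.x 'filtered' syntax used above (values illustrative):

# Assumed shape of the q_args built by _make_dsl_from_filter.
q_args = {
    'index': ['events_2014-10-21'],
    'doc_type': 'compute.instance.create.start',
    'body': {'query': {'filtered': {'filter': {'bool': {'must': [
        {'range': {'timestamp': {'ge': '2014-10-21T00:00:00'}}},
    ]}}}}},
}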

View File

@@ -1,221 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_log import log
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _LE
from ceilometer.storage.hbase import base as hbase_base
from ceilometer.storage.hbase import utils as hbase_utils
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(hbase_base.Connection, base.Connection):
"""Put the event data into a HBase database
Collections:
- events:
- row_key: timestamp of event's generation + uuid of event
in format: "%s:%s" % (ts, Event.message_id)
- Column Families:
f: contains the following qualifiers:
- event_type: description of event's type
- timestamp: time stamp of event generation
- all traits for this event in format:
.. code-block:: python
"%s:%s" % (trait_name, trait_type)
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
_memory_instance = None
EVENT_TABLE = "event"
def upgrade(self):
tables = [self.EVENT_TABLE]
column_families = {'f': dict(max_versions=1)}
with self.conn_pool.connection() as conn:
hbase_utils.create_tables(conn, tables, column_families)
def clear(self):
LOG.debug('Dropping HBase schema...')
with self.conn_pool.connection() as conn:
for table in [self.EVENT_TABLE]:
try:
conn.disable_table(table)
except Exception:
LOG.debug('Cannot disable table but ignoring error')
try:
conn.delete_table(table)
except Exception:
LOG.debug('Cannot delete table but ignoring error')
def record_events(self, event_models):
"""Write the events to Hbase.
:param event_models: a list of models.Event objects.
"""
error = None
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
for event_model in event_models:
# Row key consists of timestamp and message_id from
# models.Event so that events are stored sorted by
# timestamp in the database.
ts = event_model.generated
row = hbase_utils.prepare_key(
hbase_utils.timestamp(ts, reverse=False),
event_model.message_id)
event_type = event_model.event_type
traits = {}
if event_model.traits:
for trait in event_model.traits:
key = hbase_utils.prepare_key(trait.name, trait.dtype)
traits[key] = trait.value
record = hbase_utils.serialize_entry(traits,
event_type=event_type,
timestamp=ts,
raw=event_model.raw)
try:
events_table.put(row, record)
except Exception as ex:
LOG.exception(_LE("Failed to record event: %s") % ex)
error = ex
if error:
raise error
def get_events(self, event_filter, limit=None):
"""Return an iter of models.Event objects.
:param event_filter: storage.EventFilter object, consists of filters
for events that are stored in database.
"""
if limit == 0:
return
q, start, stop = hbase_utils.make_events_query_from_filter(
event_filter)
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
gen = events_table.scan(filter=q, row_start=start, row_stop=stop,
limit=limit)
for event_id, data in gen:
traits = []
events_dict = hbase_utils.deserialize_entry(data)[0]
for key, value in events_dict.items():
if isinstance(key, tuple):
trait_name, trait_dtype = key
traits.append(models.Trait(name=trait_name,
dtype=int(trait_dtype),
value=value))
ts, mess = event_id.split(':')
yield models.Event(
message_id=hbase_utils.unquote(mess),
event_type=events_dict['event_type'],
generated=events_dict['timestamp'],
traits=sorted(traits,
key=operator.attrgetter('dtype')),
raw=events_dict['raw']
)
def get_event_types(self):
"""Return all event types as an iterable of strings."""
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
gen = events_table.scan()
event_types = set()
for event_id, data in gen:
events_dict = hbase_utils.deserialize_entry(data)[0]
for key, value in events_dict.items():
if not isinstance(key, tuple) and key.startswith('event_type'):
if value not in event_types:
event_types.add(value)
yield value
def get_trait_types(self, event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are returned.
:param event_type: the type of the Event
"""
q = hbase_utils.make_query(event_type=event_type)
trait_names = set()
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
gen = events_table.scan(filter=q)
for event_id, data in gen:
events_dict = hbase_utils.deserialize_entry(data)[0]
for key, value in events_dict.items():
if isinstance(key, tuple):
trait_name, trait_type = key
if trait_name not in trait_names:
# Ensure this method yields only unique trait types:
# if the same trait type appears in several events of
# the same event_type, it is returned only once. It is
# assumed that a given trait name has only one trait
# type.
trait_names.add(trait_name)
data_type = models.Trait.type_names[int(trait_type)]
yield {'name': trait_name, 'data_type': data_type}
def get_traits(self, event_type, trait_type=None):
"""Return all trait instances associated with an event_type.
If trait_type is specified, only return instances of that trait type.
:param event_type: the type of the Event to filter by
:param trait_type: the name of the Trait to filter by
"""
q = hbase_utils.make_query(event_type=event_type,
trait_type=trait_type)
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
gen = events_table.scan(filter=q)
for event_id, data in gen:
events_dict = hbase_utils.deserialize_entry(data)[0]
for key, value in events_dict.items():
if isinstance(key, tuple):
trait_name, trait_type = key
yield models.Trait(name=trait_name,
dtype=int(trait_type), value=value)
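A short sketch of one stored row, following the layout in the class docstring above. The exact key encoding comes from hbase_utils.prepare_key/timestamp, so the concrete values here are assumptions:

# Illustrative HBase row for a single event (values are made up).
# Row key: '<timestamp>:<message_id>' built via hbase_utils.prepare_key().
row_key = '0000001413921729:dc90e464-65ab-4a5d-bf66-ecb956b5d779'
columns = {
    'f:event_type': 'compute.instance.create.start',
    'f:timestamp': '2014-10-21T20:02:09',
    # traits use a 'trait_name:trait_dtype' column qualifier, per the docstring
    'f:service:1': 'compute.hostname',
}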

View File

@@ -1,33 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from ceilometer.event.storage import base
from ceilometer.i18n import _LI
LOG = log.getLogger(__name__)
class Connection(base.Connection):
"""Log event data."""
@staticmethod
def clear_expired_event_data(ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
LOG.info(_LI("Dropping event data with TTL %d"), ttl)

View File

@@ -1,85 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend"""
from oslo_log import log
import pymongo
from ceilometer.event.storage import pymongo_base
from ceilometer import storage
from ceilometer.storage import impl_mongodb
from ceilometer.storage.mongo import utils as pymongo_utils
LOG = log.getLogger(__name__)
class Connection(pymongo_base.Connection):
"""Put the event data into a MongoDB database."""
CONNECTION_POOL = pymongo_utils.ConnectionPool()
def __init__(self, conf, url):
super(Connection, self).__init__(conf, url)
# NOTE(jd) Use our own connection pooling on top of the Pymongo one.
# We need that, otherwise we would overflow the MongoDB instance with new
# connections since we instantiate a Pymongo client each time someone
# requires a new storage connection.
self.conn = self.CONNECTION_POOL.connect(conf, url)
# Require MongoDB 2.4 to use $setOnInsert
if self.conn.server_info()['versionArray'] < [2, 4]:
raise storage.StorageBadVersion("Need at least MongoDB 2.4")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
# NOTE(jd) Upgrading is just about creating index, so let's do this
# on connection to be sure at least the TTL is correctly updated if
# needed.
self.upgrade()
def upgrade(self):
# create collection if not present
if 'event' not in self.db.conn.collection_names():
self.db.conn.create_collection('event')
# Establish indexes
# NOTE(idegtiarov): These indexes cover get_events, get_event_types, and
# get_trait_types requests based on event_type and timestamp fields.
self.db.event.create_index(
[('event_type', pymongo.ASCENDING),
('timestamp', pymongo.ASCENDING)],
name='event_type_idx'
)
ttl = self.conf.database.event_time_to_live
impl_mongodb.Connection.update_ttl(ttl, 'event_ttl', 'timestamp',
self.db.event)
def clear(self):
self.conn.drop_database(self.db.name)
# Connection will be reopened automatically if needed
self.conn.close()
@staticmethod
def clear_expired_event_data(ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
LOG.debug("Clearing expired event data is based on native "
"MongoDB time to live feature and going in background.")

View File

@@ -1,456 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from __future__ import absolute_import
import datetime
import os
from oslo_db import exception as dbexc
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from oslo_utils import timeutils
import sqlalchemy as sa
from ceilometer.event.storage import base
from ceilometer.event.storage import models as api_models
from ceilometer.i18n import _LE, _LI
from ceilometer import storage
from ceilometer.storage.sqlalchemy import models
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
TRAIT_MAPLIST = [(api_models.Trait.NONE_TYPE, models.TraitText),
(api_models.Trait.TEXT_TYPE, models.TraitText),
(api_models.Trait.INT_TYPE, models.TraitInt),
(api_models.Trait.FLOAT_TYPE, models.TraitFloat),
(api_models.Trait.DATETIME_TYPE, models.TraitDatetime)]
TRAIT_ID_TO_MODEL = dict((x, y) for x, y in TRAIT_MAPLIST)
TRAIT_MODEL_TO_ID = dict((y, x) for x, y in TRAIT_MAPLIST)
trait_models_dict = {'string': models.TraitText,
'integer': models.TraitInt,
'datetime': models.TraitDatetime,
'float': models.TraitFloat}
def _build_trait_query(session, trait_type, key, value, op='eq'):
trait_model = trait_models_dict[trait_type]
op_dict = {'eq': (trait_model.value == value),
'lt': (trait_model.value < value),
'le': (trait_model.value <= value),
'gt': (trait_model.value > value),
'ge': (trait_model.value >= value),
'ne': (trait_model.value != value)}
conditions = [trait_model.key == key, op_dict[op]]
return (session.query(trait_model.event_id.label('ev_id'))
.filter(*conditions))
class Connection(base.Connection):
"""Put the event data into a SQLAlchemy database.
Tables::
- EventType
- event definition
- { id: event type id
desc: description of event
}
- Event
- event data
- { id: event id
message_id: message id
generated = timestamp of event
event_type_id = event type -> eventtype.id
}
- TraitInt
- int trait value
- { event_id: event -> event.id
key: trait name
value: integer value
}
- TraitDatetime
- datetime trait value
- { event_id: event -> event.id
key: trait name
value: datetime value
}
- TraitText
- text trait value
- { event_id: event -> event.id
key: trait name
value: text value
}
- TraitFloat
- float trait value
- { event_id: event -> event.id
key: trait name
value: float value
}
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def __init__(self, conf, url):
super(Connection, self).__init__(conf, url)
# Set max_retries to 0, since oslo.db in certain cases may attempt
# to retry the db connection up to max_retries ^ 2 times
# in the failure case, and db reconnection has already been implemented
# in storage.__init__.get_connection_from_config function
options = dict(self.conf.database.items())
options['max_retries'] = 0
# oslo.db doesn't support options defined by Ceilometer
for opt in storage.OPTS:
options.pop(opt.name, None)
self._engine_facade = db_session.EngineFacade(url, **options)
def upgrade(self):
# NOTE(gordc): to minimise memory, only import migration when needed
from oslo_db.sqlalchemy import migration
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'..', '..', 'storage', 'sqlalchemy',
'migrate_repo')
engine = self._engine_facade.get_engine()
from migrate import exceptions as migrate_exc
from migrate.versioning import api
from migrate.versioning import repository
repo = repository.Repository(path)
try:
api.db_version(engine, repo)
except migrate_exc.DatabaseNotControlledError:
models.Base.metadata.create_all(engine)
api.version_control(engine, repo, repo.latest)
else:
migration.db_sync(engine, path)
def clear(self):
engine = self._engine_facade.get_engine()
for table in reversed(models.Base.metadata.sorted_tables):
engine.execute(table.delete())
engine.dispose()
def _get_or_create_event_type(self, event_type, session=None):
"""Check if an event type with the supplied name is already exists.
If not, we create it and return the record. This may result in a flush.
"""
try:
if session is None:
session = self._engine_facade.get_session()
with session.begin(subtransactions=True):
et = session.query(models.EventType).filter(
models.EventType.desc == event_type).first()
if not et:
et = models.EventType(event_type)
session.add(et)
except dbexc.DBDuplicateEntry:
et = self._get_or_create_event_type(event_type, session)
return et
def record_events(self, event_models):
"""Write the events to SQL database via sqlalchemy.
:param event_models: a list of model.Event objects.
"""
session = self._engine_facade.get_session()
error = None
for event_model in event_models:
event = None
try:
with session.begin():
event_type = self._get_or_create_event_type(
event_model.event_type, session=session)
event = models.Event(event_model.message_id, event_type,
event_model.generated,
event_model.raw)
session.add(event)
session.flush()
if event_model.traits:
trait_map = {}
for trait in event_model.traits:
if trait_map.get(trait.dtype) is None:
trait_map[trait.dtype] = []
trait_map[trait.dtype].append(
{'event_id': event.id,
'key': trait.name,
'value': trait.value})
for dtype in trait_map.keys():
model = TRAIT_ID_TO_MODEL[dtype]
session.execute(model.__table__.insert(),
trait_map[dtype])
except dbexc.DBDuplicateEntry as e:
LOG.info(_LI("Duplicate event detected, skipping it: %s"), e)
except KeyError as e:
LOG.exception(_LE('Failed to record event: %s'), e)
except Exception as e:
LOG.exception(_LE('Failed to record event: %s'), e)
error = e
if error:
raise error
def get_events(self, event_filter, limit=None):
"""Return an iterable of model.Event objects.
:param event_filter: EventFilter instance
"""
if limit == 0:
return
session = self._engine_facade.get_session()
with session.begin():
# Build up the join conditions
event_join_conditions = [models.EventType.id ==
models.Event.event_type_id]
if event_filter.event_type:
event_join_conditions.append(models.EventType.desc ==
event_filter.event_type)
# Build up the where conditions
event_filter_conditions = []
if event_filter.message_id:
event_filter_conditions.append(
models.Event.message_id == event_filter.message_id)
if event_filter.start_timestamp:
event_filter_conditions.append(
models.Event.generated >= event_filter.start_timestamp)
if event_filter.end_timestamp:
event_filter_conditions.append(
models.Event.generated <= event_filter.end_timestamp)
trait_subq = None
# Build trait filter
if event_filter.traits_filter:
filters = list(event_filter.traits_filter)
trait_filter = filters.pop()
key = trait_filter.pop('key')
op = trait_filter.pop('op', 'eq')
trait_type, value = list(trait_filter.items())[0]
trait_subq = _build_trait_query(session, trait_type,
key, value, op)
for trait_filter in filters:
key = trait_filter.pop('key')
op = trait_filter.pop('op', 'eq')
trait_type, value = list(trait_filter.items())[0]
q = _build_trait_query(session, trait_type,
key, value, op)
trait_subq = trait_subq.filter(
trait_subq.subquery().c.ev_id == q.subquery().c.ev_id)
trait_subq = trait_subq.subquery()
query = (session.query(models.Event.id)
.join(models.EventType,
sa.and_(*event_join_conditions)))
if trait_subq is not None:
query = query.join(trait_subq,
trait_subq.c.ev_id == models.Event.id)
if event_filter.admin_proj:
no_proj_q = session.query(models.TraitText.event_id).filter(
models.TraitText.key == 'project_id')
admin_q = (session.query(models.TraitText.event_id).filter(
~sa.exists().where(models.TraitText.event_id ==
no_proj_q.subquery().c.event_id)).union(
session.query(models.TraitText.event_id).filter(sa.and_(
models.TraitText.key == 'project_id',
models.TraitText.value == event_filter.admin_proj,
models.Event.id == models.TraitText.event_id))))
query = query.filter(sa.exists().where(
models.Event.id ==
admin_q.subquery().c.trait_text_event_id))
if event_filter_conditions:
query = query.filter(sa.and_(*event_filter_conditions))
query = query.order_by(models.Event.generated).limit(limit)
event_list = {}
# get a list of all events that match filters
for (id_, generated, message_id,
desc, raw) in query.add_columns(
models.Event.generated, models.Event.message_id,
models.EventType.desc, models.Event.raw).all():
event_list[id_] = api_models.Event(message_id, desc,
generated, [], raw)
# Query all traits related to events.
# NOTE (gordc): cast is done because pgsql defaults to TEXT when
# handling unknown values such as null.
trait_q = (
session.query(
models.TraitDatetime.event_id,
models.TraitDatetime.key, models.TraitDatetime.value,
sa.cast(sa.null(), sa.Integer),
sa.cast(sa.null(), sa.Float(53)),
sa.cast(sa.null(), sa.String(255)))
.filter(sa.exists().where(
models.TraitDatetime.event_id == query.subquery().c.id))
).union_all(
session.query(
models.TraitInt.event_id,
models.TraitInt.key, sa.null(),
models.TraitInt.value, sa.null(), sa.null())
.filter(sa.exists().where(
models.TraitInt.event_id == query.subquery().c.id)),
session.query(
models.TraitFloat.event_id,
models.TraitFloat.key, sa.null(), sa.null(),
models.TraitFloat.value, sa.null())
.filter(sa.exists().where(
models.TraitFloat.event_id == query.subquery().c.id)),
session.query(
models.TraitText.event_id,
models.TraitText.key, sa.null(), sa.null(), sa.null(),
models.TraitText.value)
.filter(sa.exists().where(
models.TraitText.event_id == query.subquery().c.id)))
for id_, key, t_date, t_int, t_float, t_text in (
trait_q.order_by(models.TraitDatetime.key)).all():
if t_int is not None:
dtype = api_models.Trait.INT_TYPE
val = t_int
elif t_float is not None:
dtype = api_models.Trait.FLOAT_TYPE
val = t_float
elif t_date is not None:
dtype = api_models.Trait.DATETIME_TYPE
val = t_date
else:
dtype = api_models.Trait.TEXT_TYPE
val = t_text
try:
trait_model = api_models.Trait(key, dtype, val)
event_list[id_].append_trait(trait_model)
except KeyError:
# NOTE(gordc): this is expected as we do not set REPEATABLE
# READ (bug 1506717). if query is run while recording new
# event data, trait query may return more data than event
# query. they can be safely discarded.
pass
return event_list.values()
def get_event_types(self):
"""Return all event types as an iterable of strings."""
session = self._engine_facade.get_session()
with session.begin():
query = (session.query(models.EventType.desc).
order_by(models.EventType.desc))
for name in query.all():
# The query returns a tuple with one element.
yield name[0]
def get_trait_types(self, event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are returned.
:param event_type: the type of the Event
"""
session = self._engine_facade.get_session()
with session.begin():
for trait_model in [models.TraitText, models.TraitInt,
models.TraitFloat, models.TraitDatetime]:
query = (session.query(trait_model.key)
.join(models.Event,
models.Event.id == trait_model.event_id)
.join(models.EventType,
sa.and_(models.EventType.id ==
models.Event.event_type_id,
models.EventType.desc == event_type))
.distinct())
dtype = TRAIT_MODEL_TO_ID.get(trait_model)
for row in query.all():
yield {'name': row[0], 'data_type': dtype}
def get_traits(self, event_type, trait_type=None):
"""Return all trait instances associated with an event_type.
If trait_type is specified, only return instances of that trait type.
:param event_type: the type of the Event to filter by
:param trait_type: the name of the Trait to filter by
"""
session = self._engine_facade.get_session()
with session.begin():
for trait_model in [models.TraitText, models.TraitInt,
models.TraitFloat, models.TraitDatetime]:
query = (session.query(trait_model.key, trait_model.value)
.join(models.Event,
models.Event.id == trait_model.event_id)
.join(models.EventType,
sa.and_(models.EventType.id ==
models.Event.event_type_id,
models.EventType.desc == event_type))
.order_by(trait_model.key))
if trait_type:
query = query.filter(trait_model.key == trait_type)
dtype = TRAIT_MODEL_TO_ID.get(trait_model)
for k, v in query.all():
yield api_models.Trait(name=k,
dtype=dtype,
value=v)
def clear_expired_event_data(self, ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
session = self._engine_facade.get_session()
with session.begin():
end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
event_q = (session.query(models.Event.id)
.filter(models.Event.generated < end))
event_subq = event_q.subquery()
for trait_model in [models.TraitText, models.TraitInt,
models.TraitFloat, models.TraitDatetime]:
(session.query(trait_model)
.filter(trait_model.event_id.in_(event_subq))
.delete(synchronize_session="fetch"))
event_rows = event_q.delete()
# remove EventType and TraitType with no corresponding
# matching events and traits
(session.query(models.EventType)
.filter(~models.EventType.events.any())
.delete(synchronize_session="fetch"))
LOG.info(_LI("%d events are removed from database"), event_rows)
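
For reference, a minimal runnable sketch (hypothetical filter values) of the traits_filter shape the driver above consumes: each dict carries a trait key, an optional operator, and exactly one typed value entry, and the per-trait subqueries are ANDed together.

traits_filter = [
    {'key': 'project_id', 'op': 'eq', 'string': 'project-good'},
    {'key': 'ate', 'op': 'ge', 'integer': 2},
]
for trait_filter in traits_filter:
    f = dict(trait_filter)             # the driver pops entries destructively
    key = f.pop('key')
    op = f.pop('op', 'eq')
    trait_type, value = list(f.items())[0]
    print(key, op, trait_type, value)  # e.g. project_id eq string project-good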


@ -1,147 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB backend
"""
from oslo_log import log
import pymongo
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _LE, _LI
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer import utils
LOG = log.getLogger(__name__)
COMMON_AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Base event Connection class for MongoDB driver."""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
COMMON_AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def record_events(self, event_models):
"""Write the events to database.
:param event_models: a list of models.Event objects.
"""
error = None
for event_model in event_models:
traits = []
if event_model.traits:
for trait in event_model.traits:
traits.append({'trait_name': trait.name,
'trait_type': trait.dtype,
'trait_value': trait.value})
try:
self.db.event.insert_one(
{'_id': event_model.message_id,
'event_type': event_model.event_type,
'timestamp': event_model.generated,
'traits': traits, 'raw': event_model.raw})
except pymongo.errors.DuplicateKeyError as ex:
LOG.info(_LI("Duplicate event detected, skipping it: %s") % ex)
except Exception as ex:
LOG.exception(_LE("Failed to record event: %s") % ex)
error = ex
if error:
raise error
def get_events(self, event_filter, limit=None):
"""Return an iter of models.Event objects.
:param event_filter: storage.EventFilter object, consists of filters
for events that are stored in database.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return
q = pymongo_utils.make_events_query_from_filter(event_filter)
if limit is not None:
results = self.db.event.find(q, limit=limit)
else:
results = self.db.event.find(q)
for event in results:
traits = []
for trait in event['traits']:
traits.append(models.Trait(name=trait['trait_name'],
dtype=int(trait['trait_type']),
value=trait['trait_value']))
yield models.Event(message_id=event['_id'],
event_type=event['event_type'],
generated=event['timestamp'],
traits=traits, raw=event.get('raw'))
def get_event_types(self):
"""Return all event types as an iter of strings."""
return self.db.event.distinct('event_type')
def get_trait_types(self, event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are returned.
:param event_type: the type of the Event.
"""
trait_names = set()
events = self.db.event.find({'event_type': event_type})
for event in events:
for trait in event['traits']:
trait_name = trait['trait_name']
if trait_name not in trait_names:
                    # Only unique trait types should be yielded: a given
                    # trait name is assumed to map to exactly one trait
                    # type, so each name is reported once.
trait_names.add(trait_name)
yield {'name': trait_name,
'data_type': trait['trait_type']}
def get_traits(self, event_type, trait_name=None):
"""Return all trait instances associated with an event_type.
If trait_type is specified, only return instances of that trait type.
:param event_type: the type of the Event to filter by
:param trait_name: the name of the Trait to filter by
"""
if not trait_name:
events = self.db.event.find({'event_type': event_type})
else:
            # Select events that have both the given event_type and the
            # requested trait_name, projecting each returned event down to
            # just the matching trait.
events = self.db.event.find({'$and': [{'event_type': event_type},
{'traits.trait_name': trait_name}]},
{'traits': {'$elemMatch':
{'trait_name': trait_name}}
})
for event in events:
for trait in event['traits']:
yield models.Trait(name=trait['trait_name'],
dtype=trait['trait_type'],
value=trait['trait_value'])
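
For reference, a sketch (hypothetical values) of the document shape record_events() writes above: message_id doubles as the primary key, which is why a replayed event raises DuplicateKeyError and is skipped.

event_doc = {
    '_id': 'fea1b15a-1d47-4175-85a5-a4bb2c729240',  # the event's message_id
    'event_type': 'cookies_chocolate.chip',
    'timestamp': '2016-12-12T19:06:30',             # a datetime in practice
    'traits': [{'trait_name': 'type',
                'trait_type': 1,                    # 1 == string trait
                'trait_value': 'chocolate.chip'}],
    'raw': {'nested': {'inside': 'value'}},
}
# db.event.insert_one(event_doc)  # with a live pymongo collection handle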


@ -34,18 +34,10 @@ OPTS = [
"in the database for (<= 0 means forever).", "in the database for (<= 0 means forever).",
deprecated_opts=[cfg.DeprecatedOpt('time_to_live', deprecated_opts=[cfg.DeprecatedOpt('time_to_live',
'database')]), 'database')]),
cfg.IntOpt('event_time_to_live',
default=-1,
help=("Number of seconds that events are kept "
"in the database for (<= 0 means forever).")),
cfg.StrOpt('metering_connection', cfg.StrOpt('metering_connection',
secret=True, secret=True,
help='The connection string used to connect to the metering ' help='The connection string used to connect to the metering '
'database. (if unset, connection is used)'), 'database. (if unset, connection is used)'),
cfg.StrOpt('event_connection',
secret=True,
help='The connection string used to connect to the event '
'database. (if unset, connection is used)'),
cfg.BoolOpt('sql_expire_samples_only', cfg.BoolOpt('sql_expire_samples_only',
default=False, default=False,
help="Indicates if expirer expires only samples. If set true," help="Indicates if expirer expires only samples. If set true,"
@ -67,7 +59,7 @@ class StorageBadAggregate(Exception):
code = 400 code = 400
def get_connection_from_config(conf, purpose='metering'): def get_connection_from_config(conf):
retries = conf.database.max_retries retries = conf.database.max_retries
@tenacity.retry( @tenacity.retry(
@ -76,20 +68,20 @@ def get_connection_from_config(conf, purpose='metering'):
else tenacity.stop_never), else tenacity.stop_never),
reraise=True) reraise=True)
def _inner(): def _inner():
namespace = 'ceilometer.%s.storage' % purpose url = (getattr(conf.database, 'metering_connection') or
url = (getattr(conf.database, '%s_connection' % purpose) or
conf.database.connection) conf.database.connection)
return get_connection(conf, url, namespace) return get_connection(conf, url)
return _inner() return _inner()
def get_connection(conf, url, namespace): def get_connection(conf, url):
"""Return an open connection to the database.""" """Return an open connection to the database."""
connection_scheme = urlparse.urlparse(url).scheme connection_scheme = urlparse.urlparse(url).scheme
# SqlAlchemy connections specify may specify a 'dialect' or # SqlAlchemy connections specify may specify a 'dialect' or
# 'dialect+driver'. Handle the case where driver is specified. # 'dialect+driver'. Handle the case where driver is specified.
engine_name = connection_scheme.split('+')[0] engine_name = connection_scheme.split('+')[0]
namespace = 'ceilometer.metering.storage'
# NOTE: translation not applied bug #1446983 # NOTE: translation not applied bug #1446983
LOG.debug('looking for %(name)r driver in %(namespace)r', LOG.debug('looking for %(name)r driver in %(namespace)r',
{'name': engine_name, 'namespace': namespace}) {'name': engine_name, 'namespace': namespace})
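
With the namespace now fixed to 'ceilometer.metering.storage', URL resolution reduces to a single fallback; a minimal runnable sketch (hypothetical conf object standing in for oslo.config):

import types

def resolve_url(conf):
    # database.metering_connection wins when set; database.connection is
    # the fallback, mirroring _inner() above
    return (getattr(conf.database, 'metering_connection', None) or
            conf.database.connection)

conf = types.SimpleNamespace(database=types.SimpleNamespace(
    metering_connection=None,
    connection='mysql+pymysql://ceilometer@localhost/ceilometer'))
print(resolve_url(conf))  # -> mysql+pymysql://ceilometer@localhost/ceilometer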


@ -13,8 +13,6 @@
"""HBase storage backend migrations """HBase storage backend migrations
""" """
import re
from ceilometer.storage.hbase import utils as hbase_utils from ceilometer.storage.hbase import utils as hbase_utils
@ -64,35 +62,8 @@ def migrate_meter_table(conn, table):
meter_table.delete(row) meter_table.delete(row)
def migrate_event_table(conn, table):
"""Migrate table 'event' in HBase.
Change row format from ""%d_%s" % timestamp, event_id,
to new separator format "%s:%s" % timestamp, event_id
Also change trait columns from %s+%s % trait.name, trait.dtype
to %s:%s % trait.name, trait.dtype
"""
event_table = conn.table(table)
event_filter = "RowFilter(=, 'regexstring:\\d*_\\w*')"
gen = event_table.scan(filter=event_filter)
trait_pattern = re.compile("f:[\w\-_]*\+\w")
column_prefix = "f:"
for row, data in gen:
row_parts = row.split("_", 1)
update_data = {}
for column, value in data.items():
if trait_pattern.match(column):
trait_parts = column[2:].rsplit('+', 1)
column = hbase_utils.prepare_key(*trait_parts)
update_data[column_prefix + column] = value
new_row = hbase_utils.prepare_key(*row_parts)
event_table.put(new_row, update_data)
event_table.delete(row)
TABLE_MIGRATION_FUNCS = {'resource': migrate_resource_table, TABLE_MIGRATION_FUNCS = {'resource': migrate_resource_table,
'meter': migrate_meter_table, 'meter': migrate_meter_table}
'event': migrate_event_table}
def migrate_tables(conn, tables): def migrate_tables(conn, tables):

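The removed migration's key rewrite, restated as a standalone sketch (sample values; assumes hbase_utils.prepare_key joins its arguments with ':', as the docstring above states):

old_row = '1418743640_fea1b15a'            # old "%d_%s" % (timestamp, event_id)
new_row = ':'.join(old_row.split('_', 1))  # -> '1418743640:fea1b15a'

old_column = 'f:project_id+1'              # old "f:%s+%s" % (name, dtype)
name, dtype = old_column[2:].rsplit('+', 1)
new_column = 'f:%s:%s' % (name, dtype)     # -> 'f:project_id:1'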

@ -25,8 +25,6 @@ from ceilometer import utils
LOG = log.getLogger(__name__)
-EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
-                     'datetime': 4}
OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='}
# We need this additional dictionary because we have reverted timestamp in
# row-keys for stored metrics
@ -58,31 +56,6 @@ def timestamp(dt, reverse=True):
    return 0x7fffffffffffffff - ts if reverse else ts
-def make_events_query_from_filter(event_filter):
-    """Return start and stop row for filtering and a query.
-
-    Query is based on the selected parameter.
-    :param event_filter: storage.EventFilter object.
-    """
-    start = "%s" % (timestamp(event_filter.start_timestamp, reverse=False)
-                    if event_filter.start_timestamp else "")
-    stop = "%s" % (timestamp(event_filter.end_timestamp, reverse=False)
-                   if event_filter.end_timestamp else "")
-    kwargs = {'event_type': event_filter.event_type,
-              'event_id': event_filter.message_id}
-    res_q = make_query(**kwargs)
-    if event_filter.traits_filter:
-        for trait_filter in event_filter.traits_filter:
-            q_trait = make_query(trait_query=True, **trait_filter)
-            if q_trait:
-                if res_q:
-                    res_q += " AND " + q_trait
-                else:
-                    res_q = q_trait
-    return res_q, start, stop
def make_timestamp_query(func, start=None, start_op=None, end=None,
                         end_op=None, bounds_only=False, **kwargs):
    """Return a filter start and stop row for filtering and a query.
@ -127,31 +100,16 @@ def get_start_end_rts(start, end):
    return rts_start, rts_end
-def make_query(metaquery=None, trait_query=None, **kwargs):
+def make_query(metaquery=None, **kwargs):
    """Return a filter query string based on the selected parameters.

    :param metaquery: optional metaquery dict
-    :param trait_query: optional boolean, for trait_query from kwargs
    :param kwargs: key-value pairs to filter on. Key should be a real
                   column name in db
    """
    q = []
    res_q = None
-    # Query for traits differs from others. It is constructed with
-    # SingleColumnValueFilter with the possibility to choose comparison
-    # operator
-    if trait_query:
-        trait_name = kwargs.pop('key')
-        op = kwargs.pop('op', 'eq')
-        for k, v in kwargs.items():
-            if v is not None:
-                res_q = ("SingleColumnValueFilter "
-                         "('f', '%s', %s, 'binary:%s', true, true)" %
-                         (prepare_key(trait_name, EVENT_TRAIT_TYPES[k]),
-                          OP_SIGN[op], dump(v)))
-        return res_q
    # Note: we use extended constructor for SingleColumnValueFilter here.
    # It is explicitly specified that entry should not be returned if CF is not
    # found in table.
@ -161,10 +119,6 @@ def make_query(metaquery=None, trait_query=None, **kwargs):
            q.append("SingleColumnValueFilter "
                     "('f', 's_%s', =, 'binary:%s', true, true)" %
                     (value, dump('1')))
-        elif key == 'trait_type':
-            q.append("ColumnPrefixFilter('%s')" % value)
-        elif key == 'event_id':
-            q.append("RowFilter ( = , 'regexstring:\d*:%s')" % value)
        else:
            q.append("SingleColumnValueFilter "
                     "('f', '%s', =, 'binary:%s', true, true)" %

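A hypothetical rendering of the filter string the removed trait_query branch built, with prepare_key and dump inlined as plain formatting for illustration:

trait_name, trait_type_id = 'project_id', 1  # 1 == 'string' in EVENT_TRAIT_TYPES
op_sign, value = '=', 'project-good'         # OP_SIGN['eq'] and the dumped value
flt = ("SingleColumnValueFilter "
       "('f', '%s:%s', %s, 'binary:%s', true, true)"
       % (trait_name, trait_type_id, op_sign, value))
print(flt)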

@ -32,10 +32,6 @@ ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86
LOG = log.getLogger(__name__)
-EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
-                     'datetime': 4}
-OP_SIGN = {'lt': '$lt', 'le': '$lte', 'ne': '$ne', 'gt': '$gt', 'ge': '$gte'}
MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4]
COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6]
@ -73,54 +69,6 @@ def make_timestamp_range(start, end,
    return ts_range
-def make_events_query_from_filter(event_filter):
-    """Return start and stop row for filtering and a query.
-
-    Query is based on the selected parameter.
-    :param event_filter: storage.EventFilter object.
-    """
-    query = {}
-    q_list = []
-    ts_range = make_timestamp_range(event_filter.start_timestamp,
-                                    event_filter.end_timestamp)
-    if ts_range:
-        q_list.append({'timestamp': ts_range})
-    if event_filter.event_type:
-        q_list.append({'event_type': event_filter.event_type})
-    if event_filter.message_id:
-        q_list.append({'_id': event_filter.message_id})
-    if event_filter.traits_filter:
-        for trait_filter in event_filter.traits_filter:
-            op = trait_filter.pop('op', 'eq')
-            dict_query = {}
-            for k, v in six.iteritems(trait_filter):
-                if v is not None:
-                    # All parameters in EventFilter['traits'] are optional,
-                    # so check whether each one is present in the filter.
-                    if k == 'key':
-                        dict_query.setdefault('trait_name', v)
-                    elif k in ['string', 'integer', 'datetime', 'float']:
-                        dict_query.setdefault('trait_type',
-                                              EVENT_TRAIT_TYPES[k])
-                        dict_query.setdefault('trait_value',
-                                              v if op == 'eq'
-                                              else {OP_SIGN[op]: v})
-            dict_query = {'$elemMatch': dict_query}
-            q_list.append({'traits': dict_query})
-    if event_filter.admin_proj:
-        q_list.append({'$or': [
-            {'traits': {'$not': {'$elemMatch': {'trait_name': 'project_id'}}}},
-            {'traits': {
-                '$elemMatch': {'trait_name': 'project_id',
-                               'trait_value': event_filter.admin_proj}}}]})
-    if q_list:
-        query = {'$and': q_list}
-    return query
def make_query_from_filter(sample_filter, require_meter=True):
    """Return a query dictionary based on the settings in the filter.

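For reference, the kind of query dict the removed helper produced for an event_type filter plus one integer trait (values hypothetical; 2 is the 'integer' entry in EVENT_TRAIT_TYPES and '$gte' is OP_SIGN['ge']):

query = {'$and': [
    {'event_type': 'cookies_chocolate.chip'},
    {'traits': {'$elemMatch': {'trait_name': 'ate',
                               'trait_type': 2,
                               'trait_value': {'$gte': 2}}}},
]}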

@ -248,98 +248,3 @@ class FullSample(object):
    project_id = Resource.project_id
    resource_metadata = Resource.resource_metadata
    internal_id = Resource.internal_id
class EventType(Base):
"""Types of event records."""
__tablename__ = 'event_type'
id = Column(Integer, primary_key=True)
desc = Column(String(255), unique=True)
def __init__(self, event_type):
self.desc = event_type
def __repr__(self):
return "<EventType: %s>" % self.desc
class Event(Base):
__tablename__ = 'event'
__table_args__ = (
Index('ix_event_message_id', 'message_id'),
Index('ix_event_type_id', 'event_type_id'),
Index('ix_event_generated', 'generated'),
_COMMON_TABLE_ARGS,
)
id = Column(Integer, primary_key=True)
message_id = Column(String(50), unique=True)
generated = Column(PreciseTimestamp())
raw = deferred(Column(JSONEncodedDict()))
event_type_id = Column(Integer, ForeignKey('event_type.id'))
event_type = relationship("EventType", backref='events')
def __init__(self, message_id, event_type, generated, raw):
self.message_id = message_id
self.event_type = event_type
self.generated = generated
self.raw = raw
def __repr__(self):
return "<Event %d('Event: %s %s, Generated: %s')>" % (self.id,
self.message_id,
self.event_type,
self.generated)
class TraitText(Base):
"""Event text traits."""
__tablename__ = 'trait_text'
__table_args__ = (
Index('ix_trait_text_event_id_key', 'event_id', 'key'),
_COMMON_TABLE_ARGS,
)
event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
key = Column(String(255), primary_key=True)
value = Column(String(255))
class TraitInt(Base):
"""Event integer traits."""
__tablename__ = 'trait_int'
__table_args__ = (
Index('ix_trait_int_event_id_key', 'event_id', 'key'),
_COMMON_TABLE_ARGS,
)
event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
key = Column(String(255), primary_key=True)
value = Column(Integer)
class TraitFloat(Base):
"""Event float traits."""
__tablename__ = 'trait_float'
__table_args__ = (
Index('ix_trait_float_event_id_key', 'event_id', 'key'),
_COMMON_TABLE_ARGS,
)
event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
key = Column(String(255), primary_key=True)
value = Column(Float(53))
class TraitDatetime(Base):
"""Event datetime traits."""
__tablename__ = 'trait_datetime'
__table_args__ = (
Index('ix_trait_datetime_event_id_key', 'event_id', 'key'),
_COMMON_TABLE_ARGS,
)
event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
key = Column(String(255), primary_key=True)
value = Column(PreciseTimestamp())
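
The removed schema keeps one table per trait value type, each keyed by (event_id, key); a sketch of how a Python value would pick its table under that design (mapping inferred from the models above):

import datetime

TRAIT_TABLE_FOR_TYPE = {
    str: 'trait_text',
    int: 'trait_int',
    float: 'trait_float',
    datetime.datetime: 'trait_datetime',
}
print(TRAIT_TABLE_FOR_TYPE[type(3.14)])  # -> trait_float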


@ -52,10 +52,7 @@ class MongoDbManager(DBManager):
                action='ignore',
                message='.*you must provide a username and password.*')
            try:
-                self.connection = storage.get_connection(
-                    self._conf, self.url, 'ceilometer.metering.storage')
-                self.event_connection = storage.get_connection(
-                    self._conf, self.url, 'ceilometer.event.storage')
+                self.connection = storage.get_connection(self._conf, self.url)
            except storage.StorageBadVersion as e:
                raise testcase.TestSkipped(six.text_type(e))
@ -82,10 +79,7 @@ class SQLManager(DBManager):
    def setUp(self):
        super(SQLManager, self).setUp()
-        self.connection = storage.get_connection(
-            self._conf, self.url, 'ceilometer.metering.storage')
-        self.event_connection = storage.get_connection(
-            self._conf, self.url, 'ceilometer.event.storage')
+        self.connection = storage.get_connection(self._conf, self.url)
class PgSQLManager(SQLManager):
@ -102,26 +96,10 @@ class MySQLManager(SQLManager):
        conn.execute('CREATE DATABASE %s;' % db_name)
-class ElasticSearchManager(DBManager):
-    def setUp(self):
-        super(ElasticSearchManager, self).setUp()
-        self.connection = storage.get_connection(
-            self._conf, 'sqlite://', 'ceilometer.metering.storage')
-        self.event_connection = storage.get_connection(
-            self._conf, self.url, 'ceilometer.event.storage')
-        # prefix each test with unique index name
-        self.event_connection.index_name = 'events_%s' % uuid.uuid4().hex
-        # force index on write so data is queryable right away
-        self.event_connection._refresh_on_write = True
class HBaseManager(DBManager):
    def setUp(self):
        super(HBaseManager, self).setUp()
-        self.connection = storage.get_connection(
-            self._conf, self.url, 'ceilometer.metering.storage')
-        self.event_connection = storage.get_connection(
-            self._conn, self.url, 'ceilometer.event.storage')
+        self.connection = storage.get_connection(self._conf, self.url)
        # Unique prefix for each test to keep data is distinguished because
        # all test data is stored in one table
        data_prefix = str(uuid.uuid4().hex)
@ -155,10 +133,7 @@ class SQLiteManager(DBManager):
    def setUp(self):
        super(SQLiteManager, self).setUp()
        self.url = self._url
-        self.connection = storage.get_connection(
-            self._conf, self._url, 'ceilometer.metering.storage')
-        self.event_connection = storage.get_connection(
-            self._conf, self._url, 'ceilometer.event.storage')
+        self.connection = storage.get_connection(self._conf, self._url)
@six.add_metaclass(test_base.SkipNotImplementedMeta)
@ -169,7 +144,6 @@ class TestBase(test_base.BaseTestCase):
        'mysql': MySQLManager,
        'postgresql': PgSQLManager,
        'sqlite': SQLiteManager,
-        'es': ElasticSearchManager,
    }
    if mocks is not None:
        DRIVER_MANAGERS['hbase'] = HBaseManager
@ -205,9 +179,6 @@
        self.conn = self.db_manager.connection
        self.conn.upgrade()
-        self.event_conn = self.db_manager.event_connection
-        self.event_conn.upgrade()
        self.useFixture(mockpatch.Patch('ceilometer.storage.get_connection',
                                        side_effect=self._get_connection))
@ -221,15 +192,11 @@
        )
    def tearDown(self):
-        self.event_conn.clear()
-        self.event_conn = None
        self.conn.clear()
        self.conn = None
        super(TestBase, self).tearDown()
-    def _get_connection(self, conf, url, namespace):
-        if namespace == "ceilometer.event.storage":
-            return self.event_conn
+    def _get_connection(self, conf, url):
        return self.conn


@ -15,16 +15,12 @@
"""Test ACL.""" """Test ACL."""
import datetime import datetime
import os
import uuid import uuid
from keystonemiddleware import fixture as ksm_fixture from keystonemiddleware import fixture as ksm_fixture
from oslo_utils import fileutils
import six
import webtest import webtest
from ceilometer.api import app from ceilometer.api import app
from ceilometer.event.storage import models as ev_model
from ceilometer.publisher import utils from ceilometer.publisher import utils
from ceilometer import sample from ceilometer import sample
from ceilometer.tests.functional.api import v2 from ceilometer.tests.functional.api import v2
@ -182,103 +178,3 @@ class TestAPIACL(v2.FunctionalTest):
'value': 'project-naughty', 'value': 'project-naughty',
}]) }])
self.assertEqual(401, data.status_int) self.assertEqual(401, data.status_int)
class TestAPIEventACL(TestAPIACL):
PATH = '/events'
def test_non_admin_get_event_types(self):
data = self.get_json('/event_types', expect_errors=True,
headers={"X-Roles": "Member",
"X-Auth-Token": VALID_TOKEN2,
"X-Project-Id": "project-good"})
self.assertEqual(401, data.status_int)
class TestBaseApiEventRBAC(v2.FunctionalTest):
PATH = '/events'
def setUp(self):
super(TestBaseApiEventRBAC, self).setUp()
traits = [ev_model.Trait('project_id', 1, 'project-good'),
ev_model.Trait('user_id', 1, 'user-good')]
self.message_id = str(uuid.uuid4())
ev = ev_model.Event(self.message_id, 'event_type',
datetime.datetime.now(), traits, {})
self.event_conn.record_events([ev])
def test_get_events_without_project(self):
headers_no_proj = {"X-Roles": "admin", "X-User-Id": "user-good"}
resp = self.get_json(self.PATH, expect_errors=True,
headers=headers_no_proj, status=403)
self.assertEqual(403, resp.status_int)
def test_get_events_without_user(self):
headers_no_user = {"X-Roles": "admin", "X-Project-Id": "project-good"}
resp = self.get_json(self.PATH, expect_errors=True,
headers=headers_no_user, status=403)
self.assertEqual(403, resp.status_int)
def test_get_events_without_scope(self):
headers_no_user_proj = {"X-Roles": "admin"}
resp = self.get_json(self.PATH,
expect_errors=True,
headers=headers_no_user_proj,
status=403)
self.assertEqual(403, resp.status_int)
def test_get_events(self):
headers = {"X-Roles": "Member", "X-User-Id": "user-good",
"X-Project-Id": "project-good"}
self.get_json(self.PATH, headers=headers, status=200)
def test_get_event(self):
headers = {"X-Roles": "Member", "X-User-Id": "user-good",
"X-Project-Id": "project-good"}
self.get_json(self.PATH + "/" + self.message_id, headers=headers,
status=200)
class TestApiEventAdminRBAC(TestBaseApiEventRBAC):
def _make_app(self, enable_acl=False):
content = ('{"context_is_admin": "role:admin",'
'"telemetry:events:index": "rule:context_is_admin",'
'"telemetry:events:show": "rule:context_is_admin"}')
if six.PY3:
content = content.encode('utf-8')
self.tempfile = fileutils.write_to_tempfile(content=content,
prefix='policy',
suffix='.json')
self.CONF.set_override("policy_file", self.tempfile,
group='oslo_policy')
return super(TestApiEventAdminRBAC, self)._make_app()
def tearDown(self):
os.remove(self.tempfile)
super(TestApiEventAdminRBAC, self).tearDown()
def test_get_events(self):
headers_rbac = {"X-Roles": "admin", "X-User-Id": "user-good",
"X-Project-Id": "project-good"}
self.get_json(self.PATH, headers=headers_rbac, status=200)
def test_get_events_bad(self):
headers_rbac = {"X-Roles": "Member", "X-User-Id": "user-good",
"X-Project-Id": "project-good"}
self.get_json(self.PATH, headers=headers_rbac, status=403)
def test_get_event(self):
headers = {"X-Roles": "admin", "X-User-Id": "user-good",
"X-Project-Id": "project-good"}
self.get_json(self.PATH + "/" + self.message_id, headers=headers,
status=200)
def test_get_event_bad(self):
headers = {"X-Roles": "Member", "X-User-Id": "user-good",
"X-Project-Id": "project-good"}
self.get_json(self.PATH + "/" + self.message_id, headers=headers,
status=403)


@ -1,703 +0,0 @@
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test event, event_type and trait retrieval."""
import datetime
import uuid
import webtest.app
from ceilometer.event.storage import models
from ceilometer.tests import db as tests_db
from ceilometer.tests.functional.api import v2
USER_ID = uuid.uuid4().hex
PROJ_ID = uuid.uuid4().hex
HEADERS = {"X-Roles": "admin",
"X-User-Id": USER_ID,
"X-Project-Id": PROJ_ID}
class EventTestBase(v2.FunctionalTest):
def setUp(self):
super(EventTestBase, self).setUp()
self._generate_models()
def _generate_models(self):
event_models = []
base = 0
self.s_time = datetime.datetime(2013, 12, 31, 5, 0)
self.trait_time = datetime.datetime(2013, 12, 31, 5, 0)
for event_type in ['Foo', 'Bar', 'Zoo']:
trait_models = [models.Trait(name, type, value)
for name, type, value in [
('trait_A', models.Trait.TEXT_TYPE,
"my_%s_text" % event_type),
('trait_B', models.Trait.INT_TYPE,
base + 1),
('trait_C', models.Trait.FLOAT_TYPE,
float(base) + 0.123456),
('trait_D', models.Trait.DATETIME_TYPE,
self.trait_time)]]
            # The message ID of each test event is its 'base' value: '0' for
            # the first event, '100' for the second, and so on. trait_time
            # starts at self.trait_time (datetime.datetime(2013, 12, 31, 5, 0))
            # and advances one day per event, so the second event gets
            # datetime.datetime(2014, 1, 1, 5, 0), and so on.
event_models.append(
models.Event(message_id=str(base),
event_type=event_type,
generated=self.trait_time,
traits=trait_models,
raw={'status': {'nested': 'started'}}))
base += 100
self.trait_time += datetime.timedelta(days=1)
self.event_conn.record_events(event_models)
class TestEventTypeAPI(EventTestBase):
PATH = '/event_types'
def test_event_types(self):
data = self.get_json(self.PATH, headers=HEADERS)
for event_type in ['Foo', 'Bar', 'Zoo']:
self.assertIn(event_type, data)
class TestTraitAPI(EventTestBase):
PATH = '/event_types/%s/traits'
def test_get_traits_for_event(self):
path = self.PATH % "Foo"
data = self.get_json(path, headers=HEADERS)
self.assertEqual(4, len(data))
def test_get_event_invalid_path(self):
data = self.get_json('/event_types/trait_A/', headers=HEADERS,
expect_errors=True)
self.assertEqual(404, data.status_int)
def test_get_traits_for_non_existent_event(self):
path = self.PATH % "NO_SUCH_EVENT_TYPE"
data = self.get_json(path, headers=HEADERS)
self.assertEqual([], data)
def test_get_trait_data_for_event(self):
path = (self.PATH % "Foo") + "/trait_A"
data = self.get_json(path, headers=HEADERS)
self.assertEqual(1, len(data))
self.assertEqual("trait_A", data[0]['name'])
path = (self.PATH % "Foo") + "/trait_B"
data = self.get_json(path, headers=HEADERS)
self.assertEqual(1, len(data))
self.assertEqual("trait_B", data[0]['name'])
self.assertEqual("1", data[0]['value'])
path = (self.PATH % "Foo") + "/trait_D"
data = self.get_json(path, headers=HEADERS)
self.assertEqual(1, len(data))
self.assertEqual("trait_D", data[0]['name'])
self.assertEqual((self.trait_time - datetime.timedelta(days=3)).
isoformat(), data[0]['value'])
def test_get_trait_data_for_non_existent_event(self):
path = (self.PATH % "NO_SUCH_EVENT") + "/trait_A"
data = self.get_json(path, headers=HEADERS)
self.assertEqual([], data)
def test_get_trait_data_for_non_existent_trait(self):
path = (self.PATH % "Foo") + "/no_such_trait"
data = self.get_json(path, headers=HEADERS)
self.assertEqual([], data)
class TestEventAPI(EventTestBase):
PATH = '/events'
def test_get_events(self):
data = self.get_json(self.PATH, headers=HEADERS)
self.assertEqual(3, len(data))
# We expect to get native UTC generated time back
trait_time = self.s_time
for event in data:
expected_generated = trait_time.isoformat()
self.assertIn(event['event_type'], ['Foo', 'Bar', 'Zoo'])
self.assertEqual(4, len(event['traits']))
            self.assertEqual({'status': {'nested': 'started'}}, event['raw'])
self.assertEqual(expected_generated, event['generated'])
for trait_name in ['trait_A', 'trait_B',
'trait_C', 'trait_D']:
self.assertIn(trait_name, map(lambda x: x['name'],
event['traits']))
trait_time += datetime.timedelta(days=1)
def test_get_event_by_message_id(self):
event = self.get_json(self.PATH + "/100", headers=HEADERS)
expected_traits = [{'name': 'trait_A',
'type': 'string',
'value': 'my_Bar_text'},
{'name': 'trait_B',
'type': 'integer',
'value': '101'},
{'name': 'trait_C',
'type': 'float',
'value': '100.123456'},
{'name': 'trait_D',
'type': 'datetime',
'value': '2014-01-01T05:00:00'}]
self.assertEqual('100', event['message_id'])
self.assertEqual('Bar', event['event_type'])
self.assertEqual('2014-01-01T05:00:00', event['generated'])
self.assertEqual(expected_traits, event['traits'])
def test_get_event_by_message_id_no_such_id(self):
data = self.get_json(self.PATH + "/DNE", headers=HEADERS,
expect_errors=True)
self.assertEqual(404, data.status_int)
def test_get_events_filter_event_type(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'event_type',
'value': 'Foo'}])
self.assertEqual(1, len(data))
def test_get_events_filter_trait_no_type(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Foo_text'}])
self.assertEqual(1, len(data))
self.assertEqual('Foo', data[0]['event_type'])
def test_get_events_filter_trait_empty_type(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Foo_text',
'type': ''}])
self.assertEqual(1, len(data))
self.assertEqual('Foo', data[0]['event_type'])
def test_get_events_filter_trait_invalid_type(self):
resp = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Foo_text',
'type': 'whats-up'}],
expect_errors=True)
self.assertEqual(400, resp.status_code)
self.assertEqual("The data type whats-up is not supported. The "
"supported data type list is: [\'integer\', "
"\'float\', \'string\', \'datetime\']",
resp.json['error_message']['faultstring'])
def test_get_events_filter_operator_invalid_type(self):
resp = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Foo_text',
'op': 'whats-up'}],
expect_errors=True)
self.assertEqual(400, resp.status_code)
self.assertEqual("Operator whats-up is not supported. The "
"supported operators are: (\'lt\', \'le\', "
"\'eq\', \'ne\', \'ge\', \'gt\')",
resp.json['error_message']['faultstring'])
def test_get_events_filter_start_timestamp(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'start_timestamp',
'op': 'ge',
'value': '2014-01-01T00:00:00'}])
self.assertEqual(2, len(data))
sorted_types = sorted([d['event_type'] for d in data])
event_types = ['Foo', 'Bar', 'Zoo']
self.assertEqual(sorted_types, sorted(event_types[1:]))
def test_get_events_filter_start_timestamp_invalid_op(self):
resp = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'start_timestamp',
'op': 'gt',
'value': '2014-01-01T00:00:00'}],
expect_errors=True)
self.assertEqual(400, resp.status_code)
self.assertEqual(u'Operator gt is not supported. Only'
' `ge\' operator is available for field'
' start_timestamp',
resp.json['error_message']['faultstring'])
def test_get_events_filter_end_timestamp(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'end_timestamp',
'op': 'le',
'value': '2014-01-03T00:00:00'}])
self.assertEqual(3, len(data))
event_types = ['Foo', 'Bar', 'Zoo']
sorted_types = sorted([d['event_type'] for d in data])
self.assertEqual(sorted_types, sorted(event_types[:3]))
def test_get_events_filter_end_timestamp_invalid_op(self):
resp = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'end_timestamp',
'op': 'gt',
'value': '2014-01-03T00:00:00'}],
expect_errors=True)
self.assertEqual(400, resp.status_code)
self.assertEqual(u'Operator gt is not supported. Only'
' `le\' operator is available for field'
' end_timestamp',
resp.json['error_message']['faultstring'])
def test_get_events_filter_start_end_timestamp(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'start_timestamp',
'op': 'ge',
'value': '2014-01-02T00:00:00'},
{'field': 'end_timestamp',
'op': 'le',
'value': '2014-01-03T10:00:00'}])
self.assertEqual(1, len(data))
sorted_types = sorted([d['event_type'] for d in data])
event_types = ['Foo', 'Bar', 'Zoo']
self.assertEqual(sorted_types, sorted(event_types[2:3]))
def test_get_events_filter_text_trait(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Foo_text',
'type': 'string'}])
self.assertEqual(1, len(data))
self.assertEqual('Foo', data[0]['event_type'])
def test_get_events_filter_int_trait(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '101',
'type': 'integer'}])
self.assertEqual(1, len(data))
self.assertEqual('Bar', data[0]['event_type'])
traits = [x for x in data[0]['traits'] if x['name'] == 'trait_B']
self.assertEqual(1, len(traits))
self.assertEqual('integer', traits[0]['type'])
self.assertEqual('101', traits[0]['value'])
def test_get_events_filter_float_trait(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_C',
'value': '200.123456',
'type': 'float'}])
self.assertEqual(1, len(data))
self.assertEqual('Zoo', data[0]['event_type'])
traits = [x for x in data[0]['traits'] if x['name'] == 'trait_C']
self.assertEqual(1, len(traits))
self.assertEqual('float', traits[0]['type'])
self.assertEqual('200.123456', traits[0]['value'])
def test_get_events_filter_datetime_trait(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_D',
'value': '2014-01-01T05:00:00',
'type': 'datetime'}])
self.assertEqual(1, len(data))
traits = [x for x in data[0]['traits'] if x['name'] == 'trait_D']
self.assertEqual(1, len(traits))
self.assertEqual('datetime', traits[0]['type'])
self.assertEqual('2014-01-01T05:00:00', traits[0]['value'])
def test_get_events_multiple_filters(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '1',
'type': 'integer'},
{'field': 'trait_A',
'value': 'my_Foo_text',
'type': 'string'}])
self.assertEqual(1, len(data))
self.assertEqual('Foo', data[0]['event_type'])
def test_get_events_multiple_filters_no_matches(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '101',
'type': 'integer'},
{'field': 'trait_A',
'value': 'my_Foo_text',
'type': 'string'}])
self.assertEqual(0, len(data))
def test_get_events_multiple_filters_same_field_different_values(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Foo_text',
'type': 'string'},
{'field': 'trait_A',
'value': 'my_Bar_text',
'type': 'string'}])
self.assertEqual(0, len(data))
def test_get_events_not_filters(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[])
self.assertEqual(3, len(data))
def test_get_events_filter_op_string(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Foo_text',
'type': 'string',
'op': 'eq'}])
self.assertEqual(1, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Bar_text',
'type': 'string',
'op': 'lt'}])
self.assertEqual(0, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Zoo_text',
'type': 'string',
'op': 'le'}])
self.assertEqual(3, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Foo_text',
'type': 'string',
'op': 'ne'}])
self.assertEqual(2, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Bar_text',
'type': 'string',
'op': 'gt'}])
self.assertEqual(2, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_A',
'value': 'my_Zoo_text',
'type': 'string',
'op': 'ge'}])
self.assertEqual(1, len(data))
def test_get_events_filter_op_integer(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '101',
'type': 'integer',
'op': 'eq'}])
self.assertEqual(1, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '201',
'type': 'integer',
'op': 'lt'}])
self.assertEqual(2, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '1',
'type': 'integer',
'op': 'le'}])
self.assertEqual(1, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '101',
'type': 'integer',
'op': 'ne'}])
self.assertEqual(2, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '201',
'type': 'integer',
'op': 'gt'}])
self.assertEqual(0, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '1',
'type': 'integer',
'op': 'ge'}])
self.assertEqual(3, len(data))
def test_get_events_filter_op_float(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_C',
'value': '100.123456',
'type': 'float',
'op': 'eq'}])
self.assertEqual(1, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_C',
'value': '200.123456',
'type': 'float',
'op': 'lt'}])
self.assertEqual(2, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_C',
'value': '0.123456',
'type': 'float',
'op': 'le'}])
self.assertEqual(1, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_C',
'value': '100.123456',
'type': 'float',
'op': 'ne'}])
self.assertEqual(2, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_C',
'value': '200.123456',
'type': 'float',
'op': 'gt'}])
self.assertEqual(0, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_C',
'value': '0.123456',
'type': 'float',
'op': 'ge'}])
self.assertEqual(3, len(data))
def test_get_events_filter_op_datatime(self):
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_D',
'value': '2014-01-01T05:00:00',
'type': 'datetime',
'op': 'eq'}])
self.assertEqual(1, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_D',
'value': '2014-01-02T05:00:00',
'type': 'datetime',
'op': 'lt'}])
self.assertEqual(2, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_D',
'value': '2013-12-31T05:00:00',
'type': 'datetime',
'op': 'le'}])
self.assertEqual(1, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_D',
'value': '2014-01-01T05:00:00',
'type': 'datetime',
'op': 'ne'}])
self.assertEqual(2, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_D',
'value': '2014-01-02T05:00:00',
'type': 'datetime',
'op': 'gt'}])
self.assertEqual(0, len(data))
data = self.get_json(self.PATH, headers=HEADERS,
q=[{'field': 'trait_D',
'value': '2013-12-31T05:00:00',
'type': 'datetime',
'op': 'ge'}])
self.assertEqual(3, len(data))
def test_get_events_filter_wrong_op(self):
self.assertRaises(webtest.app.AppError,
self.get_json, self.PATH, headers=HEADERS,
q=[{'field': 'trait_B',
'value': '1',
'type': 'integer',
'op': 'el'}])
class AclRestrictedEventTestBase(v2.FunctionalTest):
def setUp(self):
super(AclRestrictedEventTestBase, self).setUp()
self.admin_user_id = uuid.uuid4().hex
self.admin_proj_id = uuid.uuid4().hex
self.user_id = uuid.uuid4().hex
self.proj_id = uuid.uuid4().hex
self._generate_models()
def _generate_models(self):
event_models = []
self.s_time = datetime.datetime(2013, 12, 31, 5, 0)
event_models.append(
models.Event(message_id='1',
event_type='empty_ev',
generated=self.s_time,
traits=[models.Trait('random',
models.Trait.TEXT_TYPE,
'blah')],
raw={}))
event_models.append(
models.Event(message_id='2',
event_type='admin_ev',
generated=self.s_time,
traits=[models.Trait('project_id',
models.Trait.TEXT_TYPE,
self.admin_proj_id),
models.Trait('user_id',
models.Trait.TEXT_TYPE,
self.admin_user_id)],
raw={}))
event_models.append(
models.Event(message_id='3',
event_type='user_ev',
generated=self.s_time,
traits=[models.Trait('project_id',
models.Trait.TEXT_TYPE,
self.proj_id),
models.Trait('user_id',
models.Trait.TEXT_TYPE,
self.user_id)],
raw={}))
self.event_conn.record_events(event_models)
def test_non_admin_access(self):
a_headers = {"X-Roles": "member",
"X-User-Id": self.user_id,
"X-Project-Id": self.proj_id}
data = self.get_json('/events', headers=a_headers)
self.assertEqual(1, len(data))
self.assertEqual('user_ev', data[0]['event_type'])
def test_non_admin_access_single(self):
a_headers = {"X-Roles": "member",
"X-User-Id": self.user_id,
"X-Project-Id": self.proj_id}
data = self.get_json('/events/3', headers=a_headers)
self.assertEqual('user_ev', data['event_type'])
def test_non_admin_access_incorrect_user(self):
a_headers = {"X-Roles": "member",
"X-User-Id": 'blah',
"X-Project-Id": self.proj_id}
data = self.get_json('/events', headers=a_headers)
self.assertEqual(0, len(data))
def test_non_admin_access_incorrect_proj(self):
a_headers = {"X-Roles": "member",
"X-User-Id": self.user_id,
"X-Project-Id": 'blah'}
data = self.get_json('/events', headers=a_headers)
self.assertEqual(0, len(data))
def test_non_admin_access_single_invalid(self):
a_headers = {"X-Roles": "member",
"X-User-Id": self.user_id,
"X-Project-Id": self.proj_id}
data = self.get_json('/events/1', headers=a_headers,
expect_errors=True)
self.assertEqual(404, data.status_int)
@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es')
def test_admin_access(self):
a_headers = {"X-Roles": "admin",
"X-User-Id": self.admin_user_id,
"X-Project-Id": self.admin_proj_id}
data = self.get_json('/events', headers=a_headers)
self.assertEqual(2, len(data))
self.assertEqual(set(['empty_ev', 'admin_ev']),
set(ev['event_type'] for ev in data))
@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es')
def test_admin_access_trait_filter(self):
a_headers = {"X-Roles": "admin",
"X-User-Id": self.admin_user_id,
"X-Project-Id": self.admin_proj_id}
data = self.get_json('/events', headers=a_headers,
q=[{'field': 'random',
'value': 'blah',
'type': 'string',
'op': 'eq'}])
self.assertEqual(1, len(data))
self.assertEqual('empty_ev', data[0]['event_type'])
@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es')
def test_admin_access_single(self):
a_headers = {"X-Roles": "admin",
"X-User-Id": self.admin_user_id,
"X-Project-Id": self.admin_proj_id}
data = self.get_json('/events/1', headers=a_headers)
self.assertEqual('empty_ev', data['event_type'])
data = self.get_json('/events/2', headers=a_headers)
self.assertEqual('admin_ev', data['event_type'])
@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es')
def test_admin_access_trait_filter_no_access(self):
a_headers = {"X-Roles": "admin",
"X-User-Id": self.admin_user_id,
"X-Project-Id": self.admin_proj_id}
data = self.get_json('/events', headers=a_headers,
q=[{'field': 'user_id',
'value': self.user_id,
'type': 'string',
'op': 'eq'}])
self.assertEqual(0, len(data))
class EventRestrictionTestBase(v2.FunctionalTest):
def setUp(self):
super(EventRestrictionTestBase, self).setUp()
self.CONF.set_override('default_api_return_limit', 10, group='api')
self._generate_models()
def _generate_models(self):
event_models = []
base = 0
self.s_time = datetime.datetime(2013, 12, 31, 5, 0)
self.trait_time = datetime.datetime(2013, 12, 31, 5, 0)
for i in range(20):
trait_models = [models.Trait(name, type, value)
for name, type, value in [
('trait_A', models.Trait.TEXT_TYPE,
"my_text"),
('trait_B', models.Trait.INT_TYPE,
base + 1),
('trait_C', models.Trait.FLOAT_TYPE,
float(base) + 0.123456),
('trait_D', models.Trait.DATETIME_TYPE,
self.trait_time)]]
event_models.append(
models.Event(message_id=str(uuid.uuid4()),
event_type='foo.bar',
generated=self.trait_time,
traits=trait_models,
raw={'status': {'nested': 'started'}}))
self.trait_time += datetime.timedelta(seconds=1)
self.event_conn.record_events(event_models)
class TestEventRestriction(EventRestrictionTestBase):
def test_get_limit(self):
data = self.get_json('/events?limit=1', headers=HEADERS)
self.assertEqual(1, len(data))
def test_get_limit_negative(self):
self.assertRaises(webtest.app.AppError,
self.get_json, '/events?limit=-2', headers=HEADERS)
def test_get_limit_bigger(self):
data = self.get_json('/events?limit=100', headers=HEADERS)
self.assertEqual(20, len(data))
def test_get_default_limit(self):
data = self.get_json('/events', headers=HEADERS)
self.assertEqual(10, len(data))
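
All of these tests drive the API through the same q parameter, a list of field/op/type/value dicts; one hypothetical filter matching the cases above:

q = [{'field': 'trait_B', 'value': '101', 'type': 'integer', 'op': 'eq'}]
# passed as self.get_json('/events', headers=HEADERS, q=q) in the tests above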


@ -29,7 +29,6 @@ import six
from six.moves.urllib import parse as urlparse
from ceilometer.api import app
-from ceilometer.event.storage import models
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer import service
@ -98,7 +97,6 @@ class ConfigFixture(fixture.GabbiFixture):
            database_name = '%s-%s' % (db_url, str(uuid.uuid4()))
            conf.set_override('connection', database_name, group='database')
            conf.set_override('metering_connection', '', group='database')
-            conf.set_override('event_connection', '', group='database')
            conf.set_override('gnocchi_is_enabled', False, group='api')
            conf.set_override('aodh_is_enabled', False, group='api')
@ -153,33 +151,6 @@ class SampleDataFixture(fixture.GabbiFixture):
        print('meter', self.conn.db.meter.remove({'source': self.source}))
class EventDataFixture(fixture.GabbiFixture):
"""Instantiate some sample event data for use in testing."""
def start_fixture(self):
"""Create some events."""
global LOAD_APP_KWARGS
conf = LOAD_APP_KWARGS['conf']
self.conn = storage.get_connection_from_config(conf, 'event')
events = []
name_list = ['chocolate.chip', 'peanut.butter', 'sugar']
for ix, name in enumerate(name_list):
timestamp = datetime.datetime.utcnow()
message_id = 'fea1b15a-1d47-4175-85a5-a4bb2c72924{}'.format(ix)
traits = [models.Trait('type', 1, name),
models.Trait('ate', 2, ix)]
event = models.Event(message_id,
'cookies_{}'.format(name),
timestamp,
traits, {'nested': {'inside': 'value'}})
events.append(event)
self.conn.record_events(events)
def stop_fixture(self):
"""Destroy the events."""
self.conn.db.event.remove({'event_type': '/^cookies_/'})
class CORSConfigFixture(fixture.GabbiFixture):
    """Inject mock configuration for the CORS middleware."""


@ -1,210 +0,0 @@
# These tests run against the Events API with no data preloaded into the
# datastore. This allows us to verify that requests are still processed
# normally even if data is missing for that endpoint.
fixtures:
- ConfigFixture
tests:
# this attempts to get all the events and expects an empty list back
- name: get all events
url: /v2/events
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"
# this attempts to get all the events with no role/user/project
# info in header and expects a 403
- name: get events with bad headers
url: /v2/events
status: 403
# this attempts to get all the events with no user/project
# info in header and expects a 403
- name: get events with admin only header
url: /v2/events
request_headers:
X-Roles: admin
status: 403
# this attempts to get all the events with no project
# info in header and expects a 403
- name: get events with no project header
url: /v2/events
request_headers:
X-Roles: admin
X-User-Id: user1
status: 403
# this attempts to get all the events with no user
# info in header and expects a 403
- name: get events with no user header
url: /v2/events
request_headers:
X-Roles: admin
X-Project-Id: project1
status: 403
# this attempts to get all the events with invalid parameters and expects a 400
- name: get events with bad params
url: /v2/events?bad_Stuff_here
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
status: 400
# this attempts to query the events with the correct parameterized query syntax
# and expects an empty list
- name: get events that match query
url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"
# this attempts to query the events with the correct data query syntax and
# expects an empty list
- name: get events that match query via request data
url: /v2/events
request_headers:
content-type: application/json
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
data:
q:
- field: event_type
op: eq
type: string
value: cookies_chocolate.chip
response_headers:
content-type: application/json
response_strings:
- "[]"
# this attempts to query the events with the correct parameterized query syntax
# but a bad field name and expects an empty list
- name: get events that match bad query
url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"
# this attempts to query the events with the correct data query syntax and
# a bad field name and expects an empty list
- name: get events that match bad query via request data
url: /v2/events
request_headers:
content-type: application/json
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
data:
q:
- field: bad_field
op: eq
type: string
value: cookies_chocolate.chip
response_headers:
content-type: application/json
response_strings:
- "[]"
# this attempts to query the events with the wrong data query syntax missing the
# q object but supplying the field list and a bad field name and expects a 400
- name: get events that match bad query via request data malformed list
url: /v2/events
request_headers:
content-type: application/json
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
data:
- field: bad_field
op: eq
type: string
value: cookies_chocolate.chip
status: 400
# this attempts to query the events with the wrong data query syntax missing the
# q object but supplying the field list along with a bad content-type. Should
# return a 415
- name: get events that match bad query via request data wrong type
url: /v2/events
request_headers:
content-type: text/plain
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
data:
"field: bad_field op: eq type: string value: cookies_chocolate.chip xfail: True"
status: 415
# Get a single event by message_id; no data is present, so this should return a 404
- name: get a single event
url: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
status: 404
# Getting all the event types should return an empty list
- name: get all event types
url: /v2/event_types
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"
# Get a single event type by name; this API is unused and should return a 404
- name: get event types for good event_type unused api
url: /v2/event_types/cookies_chocolate.chip
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
status: 404
# Get all traits for an event type should return an empty list
- name: get all traits for event type
url: /v2/event_types/cookies_chocolate.chip/traits
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"
# Get all traits named ate for an event type should return an empty list
- name: get all traits named ate for event type
url: /v2/event_types/cookies_chocolate.chip/traits/ate
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"


@ -1,242 +0,0 @@
# These tests run against the Events API with data preloaded into the datastore.
fixtures:
- ConfigFixture
- EventDataFixture
tests:
# this attempts to get all the events and checks to make sure they are valid
- name: get all events
url: /v2/events
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_json_paths:
$.[0].event_type: cookies_chocolate.chip
$.[0].traits.[0].value: chocolate.chip
$.[0].traits.[1].value: '0'
$.[0].raw.nested.inside: value
$.[1].event_type: cookies_peanut.butter
$.[1].traits.[0].name: type
$.[1].traits.[1].name: ate
$.[1].raw.nested.inside: value
$.[2].event_type: cookies_sugar
$.[2].traits.[0].type: string
$.[2].traits.[1].type: integer
$.[2].raw.nested.inside: value
# this attempts to get all the events with invalid parameters and expects a 400
- name: get events with bad params
url: /v2/events?bad_Stuff_here
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
status: 400
# this attempts to query the events with the correct parameterized query syntax
# and expects a matching event
- name: get events that match query
url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_json_paths:
$.[0].event_type: cookies_chocolate.chip
$.[0].traits.[0].value: chocolate.chip
# this attempts to query the events with the correct data query syntax and
# expects a matching event
- name: get events that match query via data
url: /v2/events
request_headers:
content-type: application/json
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
data:
q:
- field: event_type
op: eq
type: string
value: cookies_chocolate.chip
response_headers:
content-type: application/json
response_json_paths:
$.[0].event_type: cookies_chocolate.chip
$.[0].traits.[0].value: chocolate.chip
# this attempts to query the events with the correct parameterized query syntax
# but a bad field name and expects an empty list
- name: get events that match bad query
url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"
# this attempts to query the events with the correct data query syntax and
# a bad field name and expects an empty list
- name: get events that match bad query via data
url: /v2/events
request_headers:
content-type: application/json
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
data:
q:
- field: bad_field
op: eq
type: string
value: cookies_chocolate.chip
response_headers:
content-type: application/json
response_strings:
- "[]"
# this attempts to query the events with the wrong data query syntax missing the
# q object but supplying the field list and a bad field name and expects a 400
- name: get events that match bad query via data list
url: /v2/events
request_headers:
content-type: application/json
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
data:
- field: bad_field
op: eq
type: string
value: cookies_chocolate.chip
status: 400
# Get a single event by message_id should return an event
- name: get a single event
url: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_json_paths:
$.event_type: cookies_chocolate.chip
$.traits.[0].value: chocolate.chip
$.traits.[1].value: '0'
# Get a single event by a message_id that does not exist; it should return a 404
- name: get a single event that does not exist
url: /v2/events/bad-id
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
status: 404
# Get all the event types should return a list of event types
- name: get all event types
url: /v2/event_types
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- cookies_chocolate.chip
- cookies_peanut.butter
- cookies_sugar
# Get a single event type by valid name, this API is unused and should return a 404
- name: get event types for good event_type unused api
url: /v2/event_types/cookies_chocolate.chip
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
status: 404
# Get a single event type by invalid name, this API is unused and should return a 404
- name: get event types for bad event_type unused api
url: /v2/event_types/bad_event_type
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
status: 404
# Get all traits for a valid event type should return a list of traits
- name: get all traits for event type
url: /v2/event_types/cookies_chocolate.chip/traits
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_json_paths:
$.[0].type: string
$.[1].name: ate
# Get all traits for an invalid event type should return an empty list
- name: get all traits names for event type bad event type
url: /v2/event_types/bad_event_type/traits
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"
# Get all traits of type ate for a valid event type should return a list of
# traits
- name: get all traits of type ate for event type
url: /v2/event_types/cookies_chocolate.chip/traits/ate
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_json_paths:
$.[0].name: ate
$.[0].value: '0'
# Get all traits of type ate for an invalid event type should return an empty
# list
- name: get all traits of type for event type bad event type
url: /v2/event_types/bad_event_type/traits/ate
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"
# Get all traits of type bad_trait_name for a valid event type should return an
# empty list
- name: get all traits of type instances for event type bad trait name
url: /v2/event_types/cookies_chocolate.chip/traits/bad_trait_name
request_headers:
X-Roles: admin
X-User-Id: user1
X-Project-Id: project1
response_headers:
content-type: application/json
response_strings:
- "[]"


@ -10,5 +10,4 @@ tests:
desc: retrieve capabilities for the mongo store
url: /v2/capabilities
response_json_paths:
$.event_storage.['storage:production_ready']: true
$.storage.['storage:production_ready']: true


@ -20,7 +20,6 @@ import uuid
from oslo_utils import netutils
from ceilometer.event.storage import models as event
from ceilometer.publisher import direct
from ceilometer import sample
from ceilometer.tests import db as tests_db
@ -79,21 +78,3 @@ class TestDirectPublisher(tests_db.TestBase):
self.assertEqual(3, len(meters), 'There should be 3 samples')
self.assertEqual(['alpha', 'beta', 'gamma'], names)
class TestEventDirectPublisher(tests_db.TestBase):
test_data = [event.Event(message_id=str(uuid.uuid4()),
event_type='event_%d' % i,
generated=datetime.datetime.utcnow(),
traits=[], raw={})
for i in range(0, 5)]
def test_direct_publisher(self):
parsed_url = netutils.urlsplit('direct://dispatcher=database')
publisher = direct.DirectPublisher(self.CONF, parsed_url)
publisher.publish_events(self.test_data)
e_types = list(self.event_conn.get_event_types())
self.assertEqual(5, len(e_types))
self.assertEqual(['event_%d' % i for i in range(0, 5)],
sorted(e_types))


@ -29,7 +29,6 @@ except ImportError:
import testtools.testcase
raise testtools.testcase.TestSkipped("happybase is needed")
from ceilometer.event.storage import impl_hbase as hbase_event
from ceilometer.storage import impl_hbase as hbase
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
@ -87,14 +86,6 @@ class CapabilitiesTest(test_base.BaseTestCase):
actual_capabilities = hbase.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_event_capabilities(self):
expected_capabilities = {
'events': {'query': {'simple': True}},
}
actual_capabilities = hbase_event.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},


@ -21,7 +21,6 @@
"""
from ceilometer.event.storage import impl_mongodb as impl_mongodb_event
from ceilometer.storage import impl_mongodb
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
@ -60,10 +59,6 @@ class IndexTest(tests_db.TestBase):
self._test_ttl_index_absent(self.conn, 'meter',
'metering_time_to_live')
def test_event_ttl_index_absent(self):
self._test_ttl_index_absent(self.event_conn, 'event',
'event_time_to_live')
def _test_ttl_index_present(self, conn, coll_name, ttl_opt):
coll = getattr(conn.db, coll_name)
self.CONF.set_override(ttl_opt, 456789, group='database')
@ -81,10 +76,6 @@ class IndexTest(tests_db.TestBase):
self._test_ttl_index_present(self.conn, 'meter',
'metering_time_to_live')
def test_event_ttl_index_present(self):
self._test_ttl_index_present(self.event_conn, 'event',
'event_time_to_live')
class CapabilitiesTest(test_base.BaseTestCase):
# Check the returned capabilities list, which is specific to each DB
@ -117,13 +108,6 @@ class CapabilitiesTest(test_base.BaseTestCase):
actual_capabilities = impl_mongodb.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_event_capabilities(self):
expected_capabilities = {
'events': {'query': {'simple': True}},
}
actual_capabilities = impl_mongodb_event.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},


@ -24,10 +24,7 @@ import warnings
import mock
from oslo_db import exception
from oslo_utils import timeutils
from six.moves import reprlib
from ceilometer.event.storage import impl_sqlalchemy as impl_sqla_event
from ceilometer.event.storage import models
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.storage import impl_sqlalchemy
@ -53,71 +50,10 @@ class EngineFacadeTest(tests_db.TestBase):
@mock.patch.object(warnings, 'warn')
def test_no_not_supported_warning(self, mocked):
impl_sqlalchemy.Connection(self.CONF, 'sqlite://')
impl_sqla_event.Connection(self.CONF, 'sqlite://')
self.assertNotIn(mock.call(mock.ANY, exception.NotSupportedWarning),
mocked.call_args_list)
@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class EventTypeTest(tests_db.TestBase):
# EventType is a construct specific to sqlalchemy
# Not applicable to other drivers.
def test_event_type_exists(self):
et1 = self.event_conn._get_or_create_event_type("foo")
self.assertTrue(et1.id >= 0)
et2 = self.event_conn._get_or_create_event_type("foo")
self.assertEqual(et2.id, et1.id)
self.assertEqual(et2.desc, et1.desc)
def test_event_type_unique(self):
et1 = self.event_conn._get_or_create_event_type("foo")
self.assertTrue(et1.id >= 0)
et2 = self.event_conn._get_or_create_event_type("blah")
self.assertNotEqual(et1.id, et2.id)
self.assertNotEqual(et1.desc, et2.desc)
# Test the method __repr__ returns a string
self.assertTrue(reprlib.repr(et2))
@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class EventTest(tests_db.TestBase):
def _verify_data(self, trait, trait_table):
now = datetime.datetime.utcnow()
ev = models.Event('1', 'name', now, [trait], {})
self.event_conn.record_events([ev])
session = self.event_conn._engine_facade.get_session()
t_tables = [sql_models.TraitText, sql_models.TraitFloat,
sql_models.TraitInt, sql_models.TraitDatetime]
for table in t_tables:
if table == trait_table:
self.assertEqual(1, session.query(table).count())
else:
self.assertEqual(0, session.query(table).count())
def test_string_traits(self):
model = models.Trait("Foo", models.Trait.TEXT_TYPE, "my_text")
self._verify_data(model, sql_models.TraitText)
def test_int_traits(self):
model = models.Trait("Foo", models.Trait.INT_TYPE, 100)
self._verify_data(model, sql_models.TraitInt)
def test_float_traits(self):
model = models.Trait("Foo", models.Trait.FLOAT_TYPE, 123.456)
self._verify_data(model, sql_models.TraitFloat)
def test_datetime_traits(self):
now = datetime.datetime.utcnow()
model = models.Trait("Foo", models.Trait.DATETIME_TYPE, now)
self._verify_data(model, sql_models.TraitDatetime)
def test_event_repr(self):
ev = sql_models.Event('msg_id', None, False, {})
ev.id = 100
self.assertTrue(reprlib.repr(ev))
@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class RelationshipTest(scenarios.DBTestBase):
# Note: Do not derive from SQLAlchemyEngineTestBase, since we
@ -175,13 +111,6 @@ class CapabilitiesTest(test_base.BaseTestCase):
actual_capabilities = impl_sqlalchemy.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_event_capabilities(self):
expected_capabilities = {
'events': {'query': {'simple': True}},
}
actual_capabilities = impl_sqla_event.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},


@ -15,7 +15,6 @@
"""Base classes for DB backend implementation test"""
import datetime
import operator
import mock
from oslo_db import api
@ -24,8 +23,6 @@ from oslo_utils import timeutils
import pymongo
import ceilometer
from ceilometer.event import storage as event_storage
from ceilometer.event.storage import models as event_models
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer import storage
@ -2633,440 +2630,6 @@ class CounterDataTypeTest(DBTestBase):
self.assertEqual(1938495037.53697, results[0].counter_volume)
class EventTestBase(tests_db.TestBase):
"""Separate test base class.
We don't want to inherit all the Meter stuff.
"""
def setUp(self):
super(EventTestBase, self).setUp()
self.prepare_data()
def prepare_data(self):
self.event_models = []
base = 0
self.start = datetime.datetime(2013, 12, 31, 5, 0)
now = self.start
for event_type in ['Foo', 'Bar', 'Zoo', 'Foo', 'Bar', 'Zoo']:
trait_models = [event_models.Trait(name, dtype, value)
for name, dtype, value in [
('trait_A', event_models.Trait.TEXT_TYPE,
"my_%s_text" % event_type),
('trait_B', event_models.Trait.INT_TYPE,
base + 1),
('trait_C', event_models.Trait.FLOAT_TYPE,
float(base) + 0.123456),
('trait_D', event_models.Trait.DATETIME_TYPE,
now)]]
self.event_models.append(
event_models.Event("id_%s_%d" % (event_type, base),
event_type, now, trait_models,
{'status': {'nested': 'started'}}))
base += 100
now = now + datetime.timedelta(hours=1)
self.end = now
self.event_conn.record_events(self.event_models)
@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class EventTTLTest(EventTestBase):
@mock.patch.object(timeutils, 'utcnow')
def test_clear_expired_event_data(self, mock_utcnow):
mock_utcnow.return_value = datetime.datetime(2013, 12, 31, 10, 0)
self.event_conn.clear_expired_event_data(3600)
events = list(self.event_conn.get_events(event_storage.EventFilter()))
self.assertEqual(2, len(events))
event_types = list(self.event_conn.get_event_types())
self.assertEqual(['Bar', 'Zoo'], event_types)
for event_type in event_types:
trait_types = list(self.event_conn.get_trait_types(event_type))
self.assertEqual(4, len(trait_types))
traits = list(self.event_conn.get_traits(event_type))
self.assertEqual(4, len(traits))
@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb')
class EventTest(EventTestBase):
def test_duplicate_message_id(self):
now = datetime.datetime.utcnow()
m = [event_models.Event("1", "Foo", now, None, {}),
event_models.Event("1", "Zoo", now, [], {})]
with mock.patch('%s.LOG' %
self.event_conn.record_events.__module__) as log:
self.event_conn.record_events(m)
self.assertEqual(1, log.info.call_count)
def test_bad_event(self):
now = datetime.datetime.utcnow()
broken_event = event_models.Event("1", "Foo", now, None, {})
del(broken_event.__dict__['raw'])
m = [broken_event, broken_event]
with mock.patch('%s.LOG' %
self.event_conn.record_events.__module__) as log:
self.assertRaises(AttributeError, self.event_conn.record_events, m)
# ensure that record_events does not break on first error but
# delays exception and tries to record each event.
self.assertEqual(2, log.exception.call_count)
class GetEventTest(EventTestBase):
def test_generated_is_datetime(self):
event_filter = event_storage.EventFilter(self.start, self.end)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(6, len(events))
for i, event in enumerate(events):
self.assertIsInstance(event.generated, datetime.datetime)
self.assertEqual(event.generated,
self.event_models[i].generated)
model_traits = self.event_models[i].traits
for j, trait in enumerate(event.traits):
if trait.dtype == event_models.Trait.DATETIME_TYPE:
self.assertIsInstance(trait.value, datetime.datetime)
self.assertEqual(trait.value, model_traits[j].value)
def test_simple_get(self):
event_filter = event_storage.EventFilter(self.start, self.end)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(6, len(events))
start_time = None
for i, type in enumerate(['Foo', 'Bar', 'Zoo']):
self.assertEqual(type, events[i].event_type)
self.assertEqual(4, len(events[i].traits))
# Ensure sorted results ...
if start_time is not None:
# Python 2.6 has no assertLess :(
self.assertTrue(start_time < events[i].generated)
start_time = events[i].generated
def test_simple_get_event_type(self):
expected_trait_values = {
'id_Bar_100': {
'trait_A': 'my_Bar_text',
'trait_B': 101,
'trait_C': 100.123456,
'trait_D': self.start + datetime.timedelta(hours=1)
},
'id_Bar_400': {
'trait_A': 'my_Bar_text',
'trait_B': 401,
'trait_C': 400.123456,
'trait_D': self.start + datetime.timedelta(hours=4)
}
}
event_filter = event_storage.EventFilter(self.start, self.end, "Bar")
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(2, len(events))
self.assertEqual("Bar", events[0].event_type)
self.assertEqual("Bar", events[1].event_type)
self.assertEqual(4, len(events[0].traits))
self.assertEqual(4, len(events[1].traits))
for event in events:
trait_values = expected_trait_values.get(event.message_id,
None)
if not trait_values:
self.fail("Unexpected event ID returned:" % event.message_id)
for trait in event.traits:
expected_val = trait_values.get(trait.name)
if not expected_val:
self.fail("Unexpected trait type: %s" % trait.dtype)
self.assertEqual(expected_val, trait.value)
def test_get_event_trait_filter(self):
trait_filters = [{'key': 'trait_B', 'integer': 101}]
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(1, len(events))
self.assertEqual("Bar", events[0].event_type)
self.assertEqual(4, len(events[0].traits))
def test_get_event_trait_filter_op_string(self):
trait_filters = [{'key': 'trait_A', 'string': 'my_Foo_text',
'op': 'eq'}]
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(2, len(events))
self.assertEqual("Foo", events[0].event_type)
self.assertEqual(4, len(events[0].traits))
trait_filters[0].update({'key': 'trait_A', 'op': 'lt'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(2, len(events))
self.assertEqual("Bar", events[0].event_type)
trait_filters[0].update({'key': 'trait_A', 'op': 'le'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(4, len(events))
self.assertEqual("Bar", events[1].event_type)
trait_filters[0].update({'key': 'trait_A', 'op': 'ne'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(4, len(events))
self.assertEqual("Zoo", events[3].event_type)
trait_filters[0].update({'key': 'trait_A', 'op': 'gt'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(2, len(events))
self.assertEqual("Zoo", events[0].event_type)
trait_filters[0].update({'key': 'trait_A', 'op': 'ge'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(4, len(events))
self.assertEqual("Foo", events[2].event_type)
def test_get_event_trait_filter_op_integer(self):
trait_filters = [{'key': 'trait_B', 'integer': 101, 'op': 'eq'}]
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(1, len(events))
self.assertEqual("Bar", events[0].event_type)
self.assertEqual(4, len(events[0].traits))
trait_filters[0].update({'key': 'trait_B', 'op': 'lt'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(1, len(events))
self.assertEqual("Foo", events[0].event_type)
trait_filters[0].update({'key': 'trait_B', 'op': 'le'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(2, len(events))
self.assertEqual("Bar", events[1].event_type)
trait_filters[0].update({'key': 'trait_B', 'op': 'ne'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(5, len(events))
self.assertEqual("Zoo", events[4].event_type)
trait_filters[0].update({'key': 'trait_B', 'op': 'gt'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(4, len(events))
self.assertEqual("Zoo", events[0].event_type)
trait_filters[0].update({'key': 'trait_B', 'op': 'ge'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(5, len(events))
self.assertEqual("Foo", events[2].event_type)
def test_get_event_trait_filter_op_float(self):
trait_filters = [{'key': 'trait_C', 'float': 300.123456, 'op': 'eq'}]
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(1, len(events))
self.assertEqual("Foo", events[0].event_type)
self.assertEqual(4, len(events[0].traits))
trait_filters[0].update({'key': 'trait_C', 'op': 'lt'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(3, len(events))
self.assertEqual("Zoo", events[2].event_type)
trait_filters[0].update({'key': 'trait_C', 'op': 'le'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(4, len(events))
self.assertEqual("Bar", events[1].event_type)
trait_filters[0].update({'key': 'trait_C', 'op': 'ne'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(5, len(events))
self.assertEqual("Zoo", events[2].event_type)
trait_filters[0].update({'key': 'trait_C', 'op': 'gt'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(2, len(events))
self.assertEqual("Bar", events[0].event_type)
trait_filters[0].update({'key': 'trait_C', 'op': 'ge'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(3, len(events))
self.assertEqual("Zoo", events[2].event_type)
def test_get_event_trait_filter_op_datetime(self):
trait_filters = [{'key': 'trait_D',
'datetime': self.start + datetime.timedelta(hours=2),
'op': 'eq'}]
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(1, len(events))
self.assertEqual("Zoo", events[0].event_type)
self.assertEqual(4, len(events[0].traits))
trait_filters[0].update({'key': 'trait_D', 'op': 'lt'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(2, len(events))
trait_filters[0].update({'key': 'trait_D', 'op': 'le'})
self.assertEqual("Bar", events[1].event_type)
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(3, len(events))
self.assertEqual("Bar", events[1].event_type)
trait_filters[0].update({'key': 'trait_D', 'op': 'ne'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(5, len(events))
self.assertEqual("Foo", events[2].event_type)
trait_filters[0].update({'key': 'trait_D', 'op': 'gt'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(3, len(events))
self.assertEqual("Zoo", events[2].event_type)
trait_filters[0].update({'key': 'trait_D', 'op': 'ge'})
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(4, len(events))
self.assertEqual("Bar", events[2].event_type)
def test_get_event_multiple_trait_filter(self):
trait_filters = [{'key': 'trait_B', 'integer': 1},
{'key': 'trait_A', 'string': 'my_Foo_text'},
{'key': 'trait_C', 'float': 0.123456}]
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(1, len(events))
self.assertEqual("Foo", events[0].event_type)
self.assertEqual(4, len(events[0].traits))
def test_get_event_multiple_trait_filter_expect_none(self):
trait_filters = [{'key': 'trait_B', 'integer': 1},
{'key': 'trait_A', 'string': 'my_Zoo_text'}]
event_filter = event_storage.EventFilter(self.start, self.end,
traits_filter=trait_filters)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(0, len(events))
def test_get_event_types(self):
event_types = [e for e in
self.event_conn.get_event_types()]
self.assertEqual(3, len(event_types))
self.assertIn("Bar", event_types)
self.assertIn("Foo", event_types)
self.assertIn("Zoo", event_types)
def test_get_trait_types(self):
trait_types = [tt for tt in
self.event_conn.get_trait_types("Foo")]
self.assertEqual(4, len(trait_types))
trait_type_names = map(lambda x: x['name'], trait_types)
self.assertIn("trait_A", trait_type_names)
self.assertIn("trait_B", trait_type_names)
self.assertIn("trait_C", trait_type_names)
self.assertIn("trait_D", trait_type_names)
def test_get_trait_types_unknown_event(self):
trait_types = [tt for tt in
self.event_conn.get_trait_types("Moo")]
self.assertEqual(0, len(trait_types))
def test_get_traits(self):
traits = self.event_conn.get_traits("Bar")
# format results in a way that makes them easier to work with
trait_dict = {}
for trait in traits:
trait_dict[trait.name] = trait.dtype
self.assertIn("trait_A", trait_dict)
self.assertEqual(event_models.Trait.TEXT_TYPE, trait_dict["trait_A"])
self.assertIn("trait_B", trait_dict)
self.assertEqual(event_models.Trait.INT_TYPE, trait_dict["trait_B"])
self.assertIn("trait_C", trait_dict)
self.assertEqual(event_models.Trait.FLOAT_TYPE, trait_dict["trait_C"])
self.assertIn("trait_D", trait_dict)
self.assertEqual(event_models.Trait.DATETIME_TYPE,
trait_dict["trait_D"])
def test_get_all_traits(self):
traits = self.event_conn.get_traits("Foo")
traits = sorted([t for t in traits], key=operator.attrgetter('dtype'))
self.assertEqual(8, len(traits))
trait = traits[0]
self.assertEqual("trait_A", trait.name)
self.assertEqual(event_models.Trait.TEXT_TYPE, trait.dtype)
def test_simple_get_event_no_traits(self):
new_events = [event_models.Event("id_notraits", "NoTraits",
self.start, [], {})]
self.event_conn.record_events(new_events)
event_filter = event_storage.EventFilter(
self.start, self.end, "NoTraits")
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(1, len(events))
self.assertEqual("id_notraits", events[0].message_id)
self.assertEqual("NoTraits", events[0].event_type)
self.assertEqual(0, len(events[0].traits))
def test_simple_get_no_filters(self):
event_filter = event_storage.EventFilter(None, None, None)
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(6, len(events))
def test_get_by_message_id(self):
new_events = [event_models.Event("id_testid",
"MessageIDTest",
self.start,
[], {})]
self.event_conn.record_events(new_events)
event_filter = event_storage.EventFilter(message_id="id_testid")
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertEqual(1, len(events))
event = events[0]
self.assertEqual("id_testid", event.message_id)
def test_simple_get_raw(self):
event_filter = event_storage.EventFilter()
events = [event for event in self.event_conn.get_events(event_filter)]
self.assertTrue(events)
self.assertEqual({'status': {'nested': 'started'}}, events[0].raw)
def test_trait_type_enforced_on_none(self):
new_events = [event_models.Event(
"id_testid", "MessageIDTest", self.start,
[event_models.Trait('text', event_models.Trait.TEXT_TYPE, ''),
event_models.Trait('int', event_models.Trait.INT_TYPE, 0),
event_models.Trait('float', event_models.Trait.FLOAT_TYPE, 0.0)],
{})]
self.event_conn.record_events(new_events)
event_filter = event_storage.EventFilter(message_id="id_testid")
events = [event for event in self.event_conn.get_events(event_filter)]
options = [(event_models.Trait.TEXT_TYPE, ''),
(event_models.Trait.INT_TYPE, 0.0),
(event_models.Trait.FLOAT_TYPE, 0.0)]
for trait in events[0].traits:
options.remove((trait.dtype, trait.value))
class BigIntegerTest(tests_db.TestBase):
def test_metadata_bigint(self):
metadata = {'bigint': 99999999999999}


@ -54,8 +54,6 @@ class BinTestCase(base.BaseTestCase):
self.assertEqual(0, subp.poll())
self.assertIn(b"Nothing to clean, database metering "
b"time to live is disabled", stdout)
self.assertIn(b"Nothing to clean, database event "
b"time to live is disabled", stdout)
def _test_run_expirer_ttl_enabled(self, ttl_name, data_name):
content = ("[DEFAULT]\n"
@ -83,7 +81,6 @@ class BinTestCase(base.BaseTestCase):
self._test_run_expirer_ttl_enabled('metering_time_to_live',
'metering')
self._test_run_expirer_ttl_enabled('time_to_live', 'metering')
self._test_run_expirer_ttl_enabled('event_time_to_live', 'event')
class BinSendSampleTestCase(base.BaseTestCase):


@ -24,7 +24,6 @@ from oslotest import mockpatch
import wsme
from ceilometer.api.controllers.v2 import base as v2_base
from ceilometer.api.controllers.v2 import events
from ceilometer.api.controllers.v2 import meters
from ceilometer.api.controllers.v2 import utils
from ceilometer import storage
@ -37,10 +36,6 @@ class TestQuery(base.BaseTestCase):
super(TestQuery, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'pecan.response', mock.MagicMock()))
self.useFixture(mockpatch.Patch('ceilometer.api.controllers.v2.events'
'._build_rbac_query_filters',
return_value={'t_filter': [],
'admin_proj': None}))
def test_get_value_as_type_with_integer(self):
query = v2_base.Query(field='metadata.size',
@ -158,15 +153,6 @@ class TestQuery(base.BaseTestCase):
expected = value
self.assertEqual(expected, query._get_value_as_type())
def test_event_query_to_event_filter_with_bad_op(self):
# bug 1511592
query = v2_base.Query(field='event_type',
op='ne',
value='compute.instance.create.end',
type='string')
self.assertRaises(v2_base.ClientSideError,
events._event_query_to_event_filter, [query])
class TestValidateGroupByFields(base.BaseTestCase):


@ -13,14 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import mock
from oslo_config import fixture as fixture_config
from oslotest import base
from ceilometer.dispatcher import database
from ceilometer.event.storage import models as event_models
from ceilometer.publisher import utils
@ -31,18 +29,6 @@ class TestDispatcherDB(base.BaseTestCase):
self.CONF = self.useFixture(fixture_config.Config()).conf
self.CONF.set_override('connection', 'sqlite://', group='database')
self.meter_dispatcher = database.MeterDatabaseDispatcher(self.CONF)
self.event_dispatcher = database.EventDatabaseDispatcher(self.CONF)
def test_event_conn(self):
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch.object(self.event_dispatcher.conn,
'record_events') as record_events:
self.event_dispatcher.record_events(event)
self.assertEqual(1, len(record_events.call_args_list[0][0][0]))
def test_valid_message(self):
msg = {'counter_name': 'test',


@ -24,11 +24,6 @@ class FakeMeterDispatcher(dispatcher.MeterDispatcherBase):
pass
class FakeEventDispatcher(dispatcher.EventDispatcherBase):
def record_events(self, events):
pass
class TestDispatchManager(base.BaseTestCase):
def setUp(self):
super(TestDispatchManager, self).setUp()
@ -42,11 +37,7 @@ class TestDispatchManager(base.BaseTestCase):
self.useFixture(mockpatch.Patch(
'ceilometer.dispatcher.database.MeterDatabaseDispatcher',
new=FakeMeterDispatcher))
self.useFixture(mockpatch.Patch(
'ceilometer.dispatcher.database.EventDatabaseDispatcher',
new=FakeEventDispatcher))
def test_load(self):
sample_mg, event_mg = dispatcher.load_dispatcher_manager(self.CONF)
self.assertEqual(2, len(list(sample_mg)))
self.assertEqual(1, len(list(event_mg)))


@ -14,16 +14,10 @@
# under the License.
"""Tests for ceilometer/storage/
"""
import unittest
import mock
from oslo_config import fixture as fixture_config
from oslotest import base
try:
from ceilometer.event.storage import impl_hbase as impl_hbase_event
except ImportError:
impl_hbase_event = None
from ceilometer import storage
from ceilometer.storage import impl_log
from ceilometer.storage import impl_sqlalchemy
@ -38,15 +32,13 @@ class EngineTest(base.BaseTestCase):
def test_get_connection(self):
engine = storage.get_connection(self.CONF,
'log://localhost',
'ceilometer.metering.storage')
'log://localhost')
self.assertIsInstance(engine, impl_log.Connection)
def test_get_connection_no_such_engine(self):
try:
storage.get_connection(self.CONF,
'no-such-engine://localhost',
'ceilometer.metering.storage')
'no-such-engine://localhost')
except RuntimeError as err:
self.assertIn('no-such-engine', six.text_type(err))
@ -77,44 +69,14 @@ class ConnectionConfigTest(base.BaseTestCase):
self.CONF.set_override("connection", "log://", group="database")
conn = storage.get_connection_from_config(self.CONF)
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'metering')
self.assertIsInstance(conn, impl_log.Connection)
def test_two_urls(self):
self.CONF.set_override("connection", "log://", group="database")
conn = storage.get_connection_from_config(self.CONF)
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'metering')
self.assertIsInstance(conn, impl_log.Connection)
@unittest.skipUnless(impl_hbase_event, 'need hbase implementation')
def test_three_urls(self):
self.CONF.set_override("connection", "log://", group="database")
self.CONF.set_override("event_connection", "hbase://__test__",
group="database")
conn = storage.get_connection_from_config(self.CONF)
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'metering')
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'event')
self.assertIsInstance(conn, impl_hbase_event.Connection)
@unittest.skipUnless(impl_hbase_event, 'need hbase implementation')
def test_three_urls_no_default(self):
self.CONF.set_override("connection", None, group="database")
self.CONF.set_override("metering_connection", "log://",
group="database")
self.CONF.set_override("event_connection", "hbase://__test__",
group="database")
conn = storage.get_connection_from_config(self.CONF)
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'event')
self.assertIsInstance(conn, impl_hbase_event.Connection)
def test_sqlalchemy_driver(self):
self.CONF.set_override("connection", "sqlite+pysqlite://",
group="database")
conn = storage.get_connection_from_config(self.CONF)
self.assertIsInstance(conn, impl_sqlalchemy.Connection)
conn = storage.get_connection_from_config(self.CONF, 'metering')
self.assertIsInstance(conn, impl_sqlalchemy.Connection)


@ -1,94 +0,0 @@
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslotest import base as testbase
import six
from ceilometer.event.storage import models as event_models
from ceilometer.storage import base
from ceilometer.storage import models
class FakeModel(base.Model):
def __init__(self, arg1, arg2):
base.Model.__init__(self, arg1=arg1, arg2=arg2)
class ModelTest(testbase.BaseTestCase):
def test_create_attributes(self):
m = FakeModel(1, 2)
self.assertEqual(1, m.arg1)
self.assertEqual(2, m.arg2)
def test_as_dict(self):
m = FakeModel(1, 2)
d = m.as_dict()
self.assertEqual({'arg1': 1, 'arg2': 2}, d)
def test_as_dict_recursive(self):
m = FakeModel(1, FakeModel('a', 'b'))
d = m.as_dict()
self.assertEqual({'arg1': 1,
'arg2': {'arg1': 'a',
'arg2': 'b'}},
d)
def test_as_dict_recursive_list(self):
m = FakeModel(1, [FakeModel('a', 'b')])
d = m.as_dict()
self.assertEqual({'arg1': 1,
'arg2': [{'arg1': 'a',
'arg2': 'b'}]},
d)
def test_event_repr_no_traits(self):
x = event_models.Event("1", "name", "now", None, {})
self.assertEqual("<Event: 1, name, now, >", repr(x))
def test_get_field_names_of_sample(self):
sample_fields = ["source", "counter_name", "counter_type",
"counter_unit", "counter_volume", "user_id",
"project_id", "resource_id", "timestamp",
"resource_metadata", "message_id",
"message_signature", "recorded_at"]
self.assertEqual(set(sample_fields),
set(models.Sample.get_field_names()))
class TestTraitModel(testbase.BaseTestCase):
def test_convert_value(self):
v = event_models.Trait.convert_value(
event_models.Trait.INT_TYPE, '10')
self.assertEqual(10, v)
self.assertIsInstance(v, int)
v = event_models.Trait.convert_value(
event_models.Trait.FLOAT_TYPE, '10')
self.assertEqual(10.0, v)
self.assertIsInstance(v, float)
v = event_models.Trait.convert_value(
event_models.Trait.DATETIME_TYPE, '2013-08-08 21:05:37.123456')
self.assertEqual(datetime.datetime(2013, 8, 8, 21, 5, 37, 123456), v)
self.assertIsInstance(v, datetime.datetime)
v = event_models.Trait.convert_value(
event_models.Trait.TEXT_TYPE, 10)
self.assertEqual("10", v)
self.assertIsInstance(v, six.text_type)


@ -36,9 +36,9 @@ class TestEventDispatcherVerifier(base.BaseTestCase):
self.conf.import_opt('telemetry_secret',
'ceilometer.publisher.utils',
'publisher')
self.conf.set_override("event_dispatchers", ['database'])
self.conf.set_override("event_dispatchers", ['file'])
self.useFixture(mockpatch.Patch(
'ceilometer.dispatcher.database.EventDatabaseDispatcher',
'ceilometer.dispatcher.file.FileDispatcher',
new=FakeDispatcher))
@mock.patch('ceilometer.publisher.utils.verify_signature')
@ -50,9 +50,9 @@ class TestEventDispatcherVerifier(base.BaseTestCase):
manager = dispatcher.load_dispatcher_manager(self.conf)[1]
v = collector.EventEndpoint("secret", manager)
v.sample([sample])
self.assertEqual([], manager['database'].obj.events)
self.assertEqual([], manager['file'].obj.events)
del sample['payload'][0]['message_signature']
sample['payload'][0]['message_signature'] = utils.compute_signature(
sample['payload'][0], "secret")
v.sample([sample])
self.assertEqual(sample['payload'], manager['database'].obj.events)
self.assertEqual(sample['payload'], manager['file'].obj.events)


@ -34,7 +34,7 @@
# of Ceilometer (see within for additional settings):
#
# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600.
# CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es', 'gnocchi', 'none')
# CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'gnocchi', 'none')
# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz.
# CEILOMETER_EVENT_ALARM: Set to True to enable publisher for event alarming
@ -138,11 +138,6 @@ function _ceilometer_prepare_storage_backend {
pip_install_gr pymongo
_ceilometer_install_mongodb
fi
if [ "$CEILOMETER_BACKEND" = 'es' ] ; then
${TOP_DIR}/pkg/elasticsearch.sh download
${TOP_DIR}/pkg/elasticsearch.sh install
fi
}
@ -205,8 +200,6 @@ function _ceilometer_drop_database {
if is_service_enabled ceilometer-collector ceilometer-api ; then
if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
mongo ceilometer --eval "db.dropDatabase();"
elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
curl -XDELETE "localhost:9200/events_*"
fi
fi
}
@ -245,16 +238,9 @@ function _ceilometer_configure_storage_backend {
fi
elif [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
iniset $CEILOMETER_CONF DEFAULT meter_dispatchers database
iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
# es is only supported for events. we will use sql for metering.
iniset $CEILOMETER_CONF database event_connection es://localhost:9200
iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
${TOP_DIR}/pkg/elasticsearch.sh start
elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
iniset $CEILOMETER_CONF DEFAULT meter_dispatchers database
iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer
iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer
elif [ "$CEILOMETER_BACKEND" = 'gnocchi' ] ; then
iniset $CEILOMETER_CONF DEFAULT meter_dispatchers gnocchi
@ -371,7 +357,7 @@ function init_ceilometer {
if is_service_enabled gnocchi ; then
if [ "$CEILOMETER_BACKEND" = 'gnocchi' ]; then
set -e
$CEILOMETER_BIN_DIR/ceilometer-upgrade --skip-metering-database --skip-event-database
$CEILOMETER_BIN_DIR/ceilometer-upgrade --skip-metering-database
set +e
fi
fi


@ -194,7 +194,7 @@ Currently, processed data can be published using 7 different transports:
5. kafka, which publishes data to a Kafka message queue to be consumed by any system
that supports Kafka.
6. file, which publishes samples to a file with specified name and location;
7. database, which stores samples/events to the legacy ceilometer database system.
7. database, which stores samples to the legacy ceilometer database system.
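The database transport in item 7 is reached through the 'direct' publisher exercised by the tests removed in this commit. A minimal sketch, assuming `conf` is an oslo.config ConfigOpts and `samples` is a list of ceilometer.sample.Sample objects supplied by the caller:

from oslo_utils import netutils

from ceilometer.publisher import direct


def store_to_database(conf, samples):
    # The direct publisher pushes data straight into the configured
    # dispatcher; 'dispatcher=database' selects the legacy database path
    # (the same URL the removed TestDirectPublisher test uses).
    parsed_url = netutils.urlsplit('direct://dispatcher=database')
    publisher = direct.DirectPublisher(conf, parsed_url)
    # publish_samples is assumed here; the removed test shows the
    # analogous publish_events call on the same publisher.
    publisher.publish_samples(samples)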
Storing/Accessing the data


@ -102,26 +102,6 @@ available in the backend.
.. autotype:: ceilometer.api.controllers.v2.capabilities.Capabilities
:members:
Events and Traits
=================
.. rest-controller:: ceilometer.api.controllers.v2.events:EventTypesController
:webprefix: /v2/event_types
.. rest-controller:: ceilometer.api.controllers.v2.events:TraitsController
:webprefix: /v2/event_types/(event_type)/traits
.. rest-controller:: ceilometer.api.controllers.v2.events:EventsController
:webprefix: /v2/events
.. autotype:: ceilometer.api.controllers.v2.events.Event
:members:
.. autotype:: ceilometer.api.controllers.v2.events.Trait
:members:
.. autotype:: ceilometer.api.controllers.v2.events.TraitDescription
:members:
Filtering Queries
=================
@ -158,18 +138,6 @@ field of *Sample*). See :ref:`api-queries` for how to query the API.
.. autotype:: ceilometer.api.controllers.v2.base.Query
:members:
Event Query
+++++++++++
Event query is similar to simple query, its type EventQuery is actually
a subclass of Query, so EventQuery has every attribute Query has.
But there are some differences. If a field is one of the following:
event_type, message_id, start_timestamp, end_timestamp, then this field
will be applied on event, otherwise it will be treated as trait name and
applied on trait. See :ref:`api-queries` for how to query the API.
.. autotype:: ceilometer.api.controllers.v2.events.EventQuery
:members:
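A hedged illustration of the field routing described above, reusing the q.* parameter form from the gabbi tests in this commit; the endpoint URL and token are illustrative assumptions, not part of this change:

import requests

BASE = 'http://localhost:8777/v2/events'    # assumed API endpoint
HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}   # assumed credentials

# 'event_type' is one of the reserved fields, so the filter is applied
# to the event itself:
requests.get(BASE, headers=HEADERS, params={
    'q.field': 'event_type', 'q.op': 'eq',
    'q.type': 'string', 'q.value': 'compute.instance.create.end'})

# Any other field name, e.g. 'trait_A' from the tests above, is treated
# as a trait name and matched against each event's traits:
requests.get(BASE, headers=HEADERS, params={
    'q.field': 'trait_A', 'q.op': 'eq',
    'q.type': 'string', 'q.value': 'my_Foo_text'})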
Complex Query
+++++++++++++


@ -12,7 +12,4 @@
"telemetry:get_resource": "",
"telemetry:get_resources": "",
"telemetry:events:index": "",
"telemetry:events:show": ""
}


@ -0,0 +1,6 @@
---
other:
- >-
The Events API (exposed at /v2/events), which was deprecated, has been
removed. The Panko project is now responsible for providing this API
and can be installed separately.
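For reference, a minimal sketch of querying the same data once Panko is installed; the service host, port, and token are deployment-specific assumptions (Panko serves the same /v2/events interface this commit removes):

import requests

# 'panko-api' and port 8977 are assumptions about the deployment.
resp = requests.get('http://panko-api:8977/v2/events',
                    headers={'X-Auth-Token': 'ADMIN_TOKEN'})
for event in resp.json():
    print(event['message_id'], event['event_type'])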


@ -202,15 +202,6 @@ ceilometer.poll.central =
ceilometer.builder.poll.central =
hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster
ceilometer.event.storage =
es = ceilometer.event.storage.impl_elasticsearch:Connection
log = ceilometer.event.storage.impl_log:Connection
mongodb = ceilometer.event.storage.impl_mongodb:Connection
mysql = ceilometer.event.storage.impl_sqlalchemy:Connection
postgresql = ceilometer.event.storage.impl_sqlalchemy:Connection
sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection
hbase = ceilometer.event.storage.impl_hbase:Connection
ceilometer.metering.storage =
log = ceilometer.storage.impl_log:Connection
mongodb = ceilometer.storage.impl_mongodb:Connection
@ -285,7 +276,6 @@ ceilometer.dispatcher.meter =
gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher
ceilometer.dispatcher.event = ceilometer.dispatcher.event =
database = ceilometer.dispatcher.database:EventDatabaseDispatcher
file = ceilometer.dispatcher.file:FileDispatcher
http = ceilometer.dispatcher.http:HttpDispatcher
gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher
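For context, entry points such as the removed "ceilometer.event.storage"
namespace were resolved at runtime by stevedore; below is a minimal
sketch of that lookup (simplified, not the exact ceilometer.storage
code; the URL is illustrative):

from six.moves.urllib import parse as urlparse
from stevedore import driver

def get_connection(url, namespace):
    # The URL scheme (e.g. "mongodb") names the entry point to load
    # from the namespace; the loaded plugin is the Connection class.
    scheme = urlparse.urlparse(url).scheme
    mgr = driver.DriverManager(namespace, scheme)
    return mgr.driver(url)

# conn = get_connection("mongodb://localhost:27017/ceilometer",
#                       "ceilometer.event.storage")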

View File

@ -4,7 +4,6 @@
contextlib2>=0.4.0 # PSF License
coverage>=3.6 # Apache-2.0
elasticsearch>=1.3.0 # Apache-2.0
fixtures<2.0,>=1.3.1 # Apache-2.0/BSD
happybase!=0.7,>=0.5,<1.0.0;python_version=='2.7' # MIT
mock>=1.2 # BSD

View File

@ -1,83 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool help you debug your event definitions.
Feed it a list of test notifications in json format, and it will show
you what events will be generated.
"""
import json
import sys
from oslo_config import cfg
from stevedore import extension
from ceilometer.event import converter
from ceilometer import service
cfg.CONF.register_cli_opts([
cfg.StrOpt('input-file',
short='i',
help='File to read test notifications from.'
' (Containing a JSON list of notifications.)'
' Defaults to stdin.'),
cfg.StrOpt('output-file',
short='o',
help='File to write results to. Defaults to stdout.'),
])
TYPES = {1: 'text',
2: 'int',
3: 'float',
4: 'datetime'}
service.prepare_service()
output_file = cfg.CONF.output_file
input_file = cfg.CONF.input_file
if output_file is None:
out = sys.stdout
else:
out = open(output_file, 'w')
if input_file is None:
notifications = json.load(sys.stdin)
else:
with open(input_file, 'r') as f:
notifications = json.load(f)
out.write("Definitions file: %s\n" % cfg.CONF.event.definitions_cfg_file)
out.write("Notifications tested: %s\n" % len(notifications))
event_converter = converter.setup_events(
extension.ExtensionManager(
namespace='ceilometer.event.trait_plugin'))
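# to_event() returns None when the configured event definitions drop
# the notification; otherwise it yields an Event with typed traits.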
for notification in notifications:
event = event_converter.to_event(notification)
if event is None:
out.write("Dropped notification: %s\n" %
notification['message_id'])
continue
out.write("Event: %s at %s\n" % (event.event_type, event.generated))
for trait in event.traits:
dtype = TYPES[trait.dtype]
out.write(" Trait: name: %s, type: %s, value: %s\n" % (
trait.name, dtype, trait.value))
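A hypothetical input file for the tool above (every field value is
illustrative; the shape follows the usual notification envelope):

import json

notifications = [{
    "message_id": "9d6a2a4e-1a3b-4c5d-8e6f-7a8b9c0d1e2f",
    "event_type": "compute.instance.create.end",
    "timestamp": "2016-12-12 19:06:30.000000",
    "priority": "INFO",
    "publisher_id": "compute.host1",
    "payload": {"instance_id": "4f2c0e1a-9b8d-4e3f-a1b2-c3d4e5f60789",
                "state": "active"},
}]

# Write the file, then run the script above with: -i notifications.json
with open("notifications.json", "w") as f:
    json.dump(notifications, f)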

View File

@ -1,116 +0,0 @@
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool for creating event test data for Ceilometer.
Usage:
Generate testing data, e.g. for the default time span:
source .tox/py27/bin/activate
./tools/make_test_event_data.py --event_types 3
"""
import argparse
import datetime
import random
import uuid
from oslo_config import cfg
from oslo_utils import timeutils
from ceilometer.event.storage import models
from ceilometer import storage
def make_test_data(conn, start, end, interval, event_types):
# Compute start and end timestamps for the new data.
if isinstance(start, datetime.datetime):
timestamp = start
else:
timestamp = timeutils.parse_strtime(start)
if not isinstance(end, datetime.datetime):
end = timeutils.parse_strtime(end)
increment = datetime.timedelta(minutes=interval)
print('Adding new events')
n = 0
while timestamp <= end:
data = []
for i in range(event_types):
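# Trait dtype codes follow ceilometer.event.storage.models:
# 1=text, 2=int, 3=float, 4=datetime.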
traits = [models.Trait('id1_%d' % i, 1, str(uuid.uuid4())),
models.Trait('id2_%d' % i, 2, random.randint(1, 10)),
models.Trait('id3_%d' % i, 3, random.random()),
models.Trait('id4_%d' % i, 4, timestamp)]
data.append(models.Event(str(uuid.uuid4()),
'event_type%d' % i,
timestamp,
traits,
{}))
n += 1
conn.record_events(data)
timestamp = timestamp + increment
print('Added %d new events' % n)
def main():
cfg.CONF([], project='ceilometer')
parser = argparse.ArgumentParser(
description='generate event data',
)
parser.add_argument(
'--interval',
default=10,
type=int,
help='The period between events, in minutes.',
)
parser.add_argument(
'--start',
default=31,
type=int,
help='The number of days in the past to start timestamps.',
)
parser.add_argument(
'--end',
default=2,
type=int,
help='The number of days into the future to continue timestamps.',
)
parser.add_argument(
'--event_types',
default=3,
type=int,
help='The number of unique event_types.',
)
args = parser.parse_args()
# Connect to the event database
conn = storage.get_connection_from_config(cfg.CONF, 'event')
# Compute the correct time span
start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start)
end = datetime.datetime.utcnow() + datetime.timedelta(days=args.end)
make_test_data(conn=conn,
start=start,
end=end,
interval=args.interval,
event_types=args.event_types)
if __name__ == '__main__':
main()

View File

@ -27,14 +27,11 @@ def main(argv):
(os.getenv("CEILOMETER_TEST_STORAGE_URL"), (os.getenv("CEILOMETER_TEST_STORAGE_URL"),
os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test"))) os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test")))
conn = storage.get_connection(url, 'ceilometer.metering.storage') conn = storage.get_connection(url, 'ceilometer.metering.storage')
event_conn = storage.get_connection(url, 'ceilometer.event.storage')
for arg in argv:
if arg == "--upgrade":
conn.upgrade()
event_conn.upgrade()
if arg == "--clear": if arg == "--clear":
conn.clear() conn.clear()
event_conn.clear()
if __name__ == '__main__':

View File

@ -1,7 +1,7 @@
[tox]
minversion = 1.8
skipsdist = True
envlist = py{34,27,35},{debug,py,py34,py27,py35}-{mongodb,mysql,postgresql,elastic,functional},pep8
envlist = py{34,27,35},{debug,py,py34,py27,py35}-{mongodb,mysql,postgresql,functional},pep8
[testenv]
deps = .[mongo,mysql,postgresql,gnocchi]
@ -14,11 +14,10 @@ setenv = VIRTUAL_ENV={envdir}
CEILOMETER_TEST_BACKEND={env:CEILOMETER_TEST_BACKEND:none}
CEILOMETER_TEST_DEBUG={env:CEILOMETER_TEST_DEBUG:}
debug: CEILOMETER_TEST_DEBUG=True
{mongodb,mysql,postgresql,elastic,functional}: OS_TEST_PATH=ceilometer/tests/functional/
{mongodb,mysql,postgresql,functional}: OS_TEST_PATH=ceilometer/tests/functional/
mongodb: CEILOMETER_TEST_BACKEND=mongodb
mysql: CEILOMETER_TEST_BACKEND=mysql
postgresql: CEILOMETER_TEST_BACKEND=postgresql
elastic: CEILOMETER_TEST_BACKEND=elasticsearch
functional: CEILOMETER_TEST_BACKEND={env:CEILOMETER_TEST_BACKEND:mongodb}
passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE CEILOMETER_*
commands =