Remove deprecated storage drivers

Change-Id: I6b262dd440a72f25662b64d938ab9e5328709a97
Julien Danjou 2017-10-17 18:14:01 +02:00
parent 22138b5988
commit 9323f07f97
131 changed files with 47 additions and 13098 deletions

View File

@@ -1,50 +1,3 @@
- job:
name: ceilometer-dsvm-functional-mongodb
parent: legacy-dsvm-base
run: playbooks/legacy/ceilometer-dsvm-functional-mongodb/run
post-run: playbooks/legacy/ceilometer-dsvm-functional-mongodb/post
timeout: 7800
required-projects:
- openstack-infra/devstack-gate
- openstack/ceilometer
- job:
name: ceilometer-dsvm-functional-mysql
parent: legacy-dsvm-base
run: playbooks/legacy/ceilometer-dsvm-functional-mysql/run
post-run: playbooks/legacy/ceilometer-dsvm-functional-mysql/post
timeout: 7800
required-projects:
- openstack-infra/devstack-gate
- openstack/ceilometer
- job:
name: ceilometer-tox-py27-mongodb
parent: legacy-base
run: playbooks/legacy/ceilometer-tox-py27-mongodb/run
post-run: playbooks/legacy/ceilometer-tox-py27-mongodb/post
timeout: 2400
required-projects:
- openstack/requirements
- job:
name: ceilometer-tox-py27-mysql
parent: legacy-base
run: playbooks/legacy/ceilometer-tox-py27-mysql/run
post-run: playbooks/legacy/ceilometer-tox-py27-mysql/post
timeout: 2400
required-projects:
- openstack/requirements
- job:
name: ceilometer-tox-py27-postgresql
parent: legacy-base
run: playbooks/legacy/ceilometer-tox-py27-postgresql/run
post-run: playbooks/legacy/ceilometer-tox-py27-postgresql/post
timeout: 2400
required-projects:
- openstack/requirements
- job:
name: grenade-dsvm-ceilometer
parent: legacy-dsvm-base
@@ -76,16 +29,6 @@
name: openstack/ceilometer
check:
jobs:
- ceilometer-dsvm-functional-mongodb:
branches: ^stable/newton$
- ceilometer-dsvm-functional-mysql:
branches: ^stable/newton$
- ceilometer-tox-py27-mongodb:
branches: ^(?!stable/newton)
- ceilometer-tox-py27-mysql:
branches: ^(?!stable/newton)
- ceilometer-tox-py27-postgresql:
branches: ^(?!stable/newton)
- grenade-dsvm-ceilometer:
branches: ^(?!stable/newton).*$
irrelevant-files:
@@ -94,16 +37,6 @@
- telemetry-dsvm-integration-ceilometer
gate:
jobs:
- ceilometer-dsvm-functional-mongodb:
branches: ^stable/newton$
- ceilometer-dsvm-functional-mysql:
branches: ^stable/newton$
- ceilometer-tox-py27-mongodb:
branches: ^(?!stable/newton)
- ceilometer-tox-py27-mysql:
branches: ^(?!stable/newton)
- ceilometer-tox-py27-postgresql:
branches: ^(?!stable/newton)
- grenade-dsvm-ceilometer:
branches: ^(?!stable/newton).*$
irrelevant-files:

View File

@@ -1,11 +1,6 @@
libpq-dev [platform:dpkg]
libxml2-dev [platform:dpkg test]
libxslt-devel [platform:rpm test]
libxslt1-dev [platform:dpkg test]
postgresql [platform:dpkg]
mysql-client [platform:dpkg]
mysql-server [platform:dpkg]
build-essential [platform:dpkg]
libffi-dev [platform:dpkg]
mongodb [platform:dpkg]
gettext [platform:dpkg]

View File

@@ -16,13 +16,9 @@
from oslo_config import cfg
from oslo_log import log
from six import moves
import six.moves.urllib.parse as urlparse
import sqlalchemy as sa
import tenacity
from ceilometer import service
from ceilometer import storage
LOG = log.getLogger(__name__)
@@ -30,9 +26,6 @@ LOG = log.getLogger(__name__)
def upgrade():
conf = cfg.ConfigOpts()
conf.register_cli_opts([
cfg.BoolOpt('skip-metering-database',
help='Skip metering database upgrade.',
default=False),
cfg.BoolOpt('skip-gnocchi-resource-types',
help='Skip gnocchi resource-types upgrade.',
default=False),
@@ -43,19 +36,6 @@ def upgrade():
])
service.prepare_service(conf=conf)
if conf.skip_metering_database:
LOG.info("Skipping metering database upgrade")
else:
url = (getattr(conf.database, 'metering_connection') or
conf.database.connection)
if url:
LOG.debug("Upgrading metering database")
storage.get_connection(conf, url).upgrade()
else:
LOG.info("Skipping metering database upgrade, "
"legacy database backend not configured.")
if conf.skip_gnocchi_resource_types:
LOG.info("Skipping Gnocchi resource types upgrade")
else:
@@ -75,96 +55,3 @@ def upgrade():
exceptions.SSLError,
))
)(gnocchi_client.upgrade_resource_types, conf)
def expirer():
conf = service.prepare_service()
if conf.database.metering_time_to_live > 0:
LOG.debug("Clearing expired metering data")
storage_conn = storage.get_connection_from_config(conf)
storage_conn.clear_expired_metering_data(
conf.database.metering_time_to_live)
else:
LOG.info("Nothing to clean, database metering time to live "
"is disabled")
def db_clean_legacy():
conf = cfg.ConfigOpts()
conf.register_cli_opts([
cfg.StrOpt('confirm-drop-table',
           short='n',
           help='Confirm dropping the legacy tables.')])
if not conf.confirm_drop_table:
confirm = moves.input("Do you really want to drop the legacy "
"alarm and event tables? This will destroy "
"data definitively if it exist. Please type "
"'YES' to confirm: ")
if confirm != 'YES':
print("DB legacy cleanup aborted!")
return
service.prepare_service(conf=conf)
url = (getattr(conf.database, "metering_connection") or
conf.database.connection)
parsed = urlparse.urlparse(url)
if parsed.password:
masked_netloc = '****'.join(parsed.netloc.rsplit(parsed.password))
masked_url = parsed._replace(netloc=masked_netloc)
masked_url = urlparse.urlunparse(masked_url)
else:
masked_url = url
LOG.info('Starting to drop event, alarm and alarm history tables in '
'backend: %s', masked_url)
connection_scheme = parsed.scheme
conn = storage.get_connection_from_config(conf)
if connection_scheme in ('mysql', 'mysql+pymysql', 'postgresql',
'sqlite'):
engine = conn._engine_facade.get_engine()
meta = sa.MetaData(bind=engine)
for table_name in ('alarm', 'alarm_history',
'trait_text', 'trait_int',
'trait_float', 'trait_datetime',
'event', 'event_type'):
if engine.has_table(table_name):
table = sa.Table(table_name, meta, autoload=True)
table.drop()
LOG.info("Legacy %s table of SQL backend has been "
"dropped.", table_name)
else:
LOG.info('%s table does not exist.', table_name)
elif connection_scheme == 'hbase':
with conn.conn_pool.connection() as h_conn:
tables = h_conn.tables()
table_name_mapping = {'alarm': 'alarm',
'alarm_h': 'alarm history',
'event': 'event'}
for table_name in ('alarm', 'alarm_h', 'event'):
try:
if table_name in tables:
h_conn.disable_table(table_name)
h_conn.delete_table(table_name)
LOG.info("Legacy %s table of Hbase backend "
"has been dropped.",
table_name_mapping[table_name])
else:
LOG.info('%s table does not exist.',
table_name_mapping[table_name])
except Exception as e:
LOG.error('Error occurred while dropping alarm '
'tables of Hbase, %s', e)
elif connection_scheme == 'mongodb':
for table_name in ('alarm', 'alarm_history', 'event'):
if table_name in conn.db.conn.collection_names():
conn.db.conn.drop_collection(table_name)
LOG.info("Legacy %s table of Mongodb backend has been "
"dropped.", table_name)
else:
LOG.info('%s table does not exist.', table_name)
LOG.info('Legacy alarm and event tables cleanup done.')
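The URL-masking step in db_clean_legacy above is worth seeing in isolation. A minimal standalone sketch of the same urlparse-based masking (the helper name and sample URL are invented for illustration):

import six.moves.urllib.parse as urlparse

def mask_db_url(url):
    # Replace the password portion of the netloc with '****', mirroring
    # the masking logic in db_clean_legacy above.
    parsed = urlparse.urlparse(url)
    if parsed.password:
        masked_netloc = '****'.join(parsed.netloc.rsplit(parsed.password))
        return urlparse.urlunparse(parsed._replace(netloc=masked_netloc))
    return url

# mask_db_url('mysql+pymysql://ceilometer:s3cret@db:3306/ceilometer')
# -> 'mysql+pymysql://ceilometer:****@db:3306/ceilometer'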

View File

@@ -1,92 +0,0 @@
#
# Copyright 2013 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from oslo_log import log
import six
from stevedore import named
LOG = log.getLogger(__name__)
OPTS = [
cfg.MultiStrOpt('meter_dispatchers',
deprecated_name='dispatcher',
default=[],
deprecated_for_removal=True,
deprecated_reason='This option is only used by the '
                  'collector service; the collector '
                  'service has been deprecated and will '
                  'be removed in the future, so this '
                  'option should also be deprecated for '
                  'removal along with it.',
help='Dispatchers to process metering data.'),
cfg.MultiStrOpt('event_dispatchers',
default=[],
deprecated_name='dispatcher',
deprecated_for_removal=True,
deprecated_reason='This option is only used by the '
                  'collector service; the collector '
                  'service has been deprecated and will '
                  'be removed in the future, so this '
                  'option should also be deprecated for '
                  'removal along with it.',
help='Dispatchers to process event data.'),
]
def _load_dispatcher_manager(conf, dispatcher_type):
namespace = 'ceilometer.dispatcher.%s' % dispatcher_type
conf_name = '%s_dispatchers' % dispatcher_type
LOG.debug('loading dispatchers from %s', namespace)
# set propagate_map_exceptions to True to enable stevedore
# to propagate exceptions.
dispatcher_manager = named.NamedExtensionManager(
namespace=namespace,
names=getattr(conf, conf_name),
invoke_on_load=True,
invoke_args=[conf],
propagate_map_exceptions=True)
if not list(dispatcher_manager):
LOG.warning('Failed to load any dispatchers for %s',
namespace)
return dispatcher_manager
def load_dispatcher_manager(conf):
return (_load_dispatcher_manager(conf, 'meter'),
_load_dispatcher_manager(conf, 'event'))
class Base(object):
def __init__(self, conf):
self.conf = conf
@six.add_metaclass(abc.ABCMeta)
class MeterDispatcherBase(Base):
@abc.abstractmethod
def record_metering_data(self, data):
"""Recording metering data interface."""
@six.add_metaclass(abc.ABCMeta)
class EventDispatcherBase(Base):
@abc.abstractmethod
def record_events(self, events):
"""Record events."""

View File

@@ -1,71 +0,0 @@
#
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_utils import timeutils
from ceilometer import dispatcher
from ceilometer import storage
LOG = log.getLogger(__name__)
class MeterDatabaseDispatcher(dispatcher.MeterDispatcherBase):
"""Dispatcher class for recording metering data into database.
The dispatcher class which records each meter into a database configured
in ceilometer configuration file.
To enable this dispatcher, the following section needs to be present in
ceilometer.conf file
[DEFAULT]
meter_dispatchers = database
"""
@property
def conn(self):
if not hasattr(self, "_conn"):
self._conn = storage.get_connection_from_config(
self.conf)
return self._conn
def record_metering_data(self, data):
# We may have received only one counter on the wire
if not data:
return
if not isinstance(data, list):
data = [data]
for meter in data:
LOG.debug(
'metering data %(counter_name)s '
'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s',
{'counter_name': meter['counter_name'],
'resource_id': meter['resource_id'],
'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
'counter_volume': meter['counter_volume']})
# Convert the timestamp to a datetime instance.
# Storage engines are responsible for converting
# that value to something they can store.
if meter.get('timestamp'):
ts = timeutils.parse_isotime(meter['timestamp'])
meter['timestamp'] = timeutils.normalize_time(ts)
try:
self.conn.record_metering_data_batch(data)
except Exception as err:
LOG.error('Failed to record %(len)s: %(err)s.',
{'len': len(data), 'err': err})
raise

View File

@@ -15,15 +15,39 @@
from oslo_utils import timeutils
import six
from ceilometer.storage import base
def serialize_dt(value):
"""Serializes parameter if it is datetime."""
return value.isoformat() if hasattr(value, 'isoformat') else value
class Event(base.Model):
class Model(object):
"""Base class for storage API models."""
def __init__(self, **kwds):
self.fields = list(kwds)
for k, v in six.iteritems(kwds):
setattr(self, k, v)
def as_dict(self):
d = {}
for f in self.fields:
v = getattr(self, f)
if isinstance(v, Model):
v = v.as_dict()
elif isinstance(v, list) and v and isinstance(v[0], Model):
v = [sub.as_dict() for sub in v]
d[f] = v
return d
def __eq__(self, other):
return self.as_dict() == other.as_dict()
def __ne__(self, other):
return not self.__eq__(other)
class Event(Model):
"""A raw event from the source system. Events have Traits.
Metrics will be derived from one or more Events.
@@ -45,8 +69,8 @@ class Event(base.Model):
:param traits: list of Traits on this Event.
:param raw: Unindexed raw notification details.
"""
base.Model.__init__(self, message_id=message_id, event_type=event_type,
generated=generated, traits=traits, raw=raw)
Model.__init__(self, message_id=message_id, event_type=event_type,
generated=generated, traits=traits, raw=raw)
def append_trait(self, trait_model):
self.traits.append(trait_model)
@@ -67,7 +91,7 @@ class Event(base.Model):
'raw': self.raw}
class Trait(base.Model):
class Trait(Model):
"""A Trait is a key/value pair of data on an Event.
The value is variant record of basic data types (int, date, float, etc).
@@ -90,7 +114,7 @@ class Trait(base.Model):
def __init__(self, name, dtype, value):
if not dtype:
dtype = Trait.NONE_TYPE
base.Model.__init__(self, name=name, dtype=dtype, value=value)
Model.__init__(self, name=name, dtype=dtype, value=value)
def __repr__(self):
return "<Trait: %s %d %s>" % (self.name, self.dtype, self.value)

View File

@@ -23,7 +23,6 @@ import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.utils
import ceilometer.compute.virt.vmware.inspector
import ceilometer.compute.virt.xenapi.inspector
import ceilometer.dispatcher
import ceilometer.event.converter
import ceilometer.hardware.discovery
import ceilometer.hardware.pollsters.generic
@@ -42,7 +41,6 @@ import ceilometer.pipeline
import ceilometer.publisher.messaging
import ceilometer.publisher.utils
import ceilometer.sample
import ceilometer.storage
import ceilometer.utils
import ceilometer.volume.discovery
@@ -75,7 +73,6 @@ def list_opts():
itertools.chain(ceilometer.agent.manager.OPTS,
ceilometer.compute.virt.inspector.OPTS,
ceilometer.compute.virt.libvirt.utils.OPTS,
ceilometer.dispatcher.OPTS,
ceilometer.objectstore.swift.OPTS,
ceilometer.pipeline.OPTS,
ceilometer.sample.OPTS,
@@ -96,7 +93,6 @@ def list_opts():
help='Number of seconds between checks to see if group '
'membership has changed'),
]),
('database', ceilometer.storage.OPTS),
('dispatcher_gnocchi', (
cfg.StrOpt(
'filter_project',

View File

@@ -1,98 +0,0 @@
#
# Copyright 2015 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import six.moves.urllib.parse as urlparse
from stevedore import driver
import stevedore.exception
from ceilometer import publisher
from ceilometer.publisher import utils
LOG = log.getLogger(__name__)
class DirectPublisher(publisher.ConfigPublisherBase):
"""A publisher that allows saving directly from the pipeline.
Samples are saved to a configured dispatcher. This is useful
where it is desirable to limit the number of external services that
are required.
By default, the database dispatcher is used; to select another one, use
direct://?dispatcher=name_of_dispatcher, ...
"""
def __init__(self, conf, parsed_url):
super(DirectPublisher, self).__init__(conf, parsed_url)
default_dispatcher = parsed_url.scheme
if default_dispatcher == 'direct':
LOG.warning('Direct publisher is deprecated for removal. Use '
'an explicit publisher instead, e.g. '
'"database", "file", ...')
default_dispatcher = 'database'
options = urlparse.parse_qs(parsed_url.query)
self.dispatcher_name = options.get('dispatcher',
[default_dispatcher])[-1]
self._sample_dispatcher = None
self._event_dispatcher = None
try:
self.sample_driver = driver.DriverManager(
'ceilometer.dispatcher.meter', self.dispatcher_name).driver
except stevedore.exception.NoMatches:
self.sample_driver = None
try:
self.event_driver = driver.DriverManager(
'ceilometer.dispatcher.event', self.dispatcher_name).driver
except stevedore.exception.NoMatches:
self.event_driver = None
def get_sample_dispatcher(self):
if not self._sample_dispatcher:
self._sample_dispatcher = self.sample_driver(self.conf)
return self._sample_dispatcher
def get_event_dispatcher(self):
if not self._event_dispatcher:
if self.event_driver != self.sample_driver:
self._event_dispatcher = self.event_driver(self.conf)
else:
self._event_dispatcher = self.get_sample_dispatcher()
return self._event_dispatcher
def publish_samples(self, samples):
if not self.sample_driver:
LOG.error("Can't publish samples to a non-existing dispatcher "
"'%s'", self.dispatcher_name)
return
if not isinstance(samples, list):
samples = [samples]
# not published externally; skip signing
self.get_sample_dispatcher().record_metering_data([
utils.meter_message_from_counter(sample, secret=None)
for sample in samples])
def publish_events(self, events):
if not self.event_driver:
LOG.error("Can't publish events to a non-existing dispatcher "
"'%s'", self.dispatcher_name)
return
if not isinstance(events, list):
events = [events]
# not published externally; skip signing
self.get_event_dispatcher().record_events([
utils.message_from_event(event, secret=None) for event in events])
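A hypothetical wiring of this publisher, assuming a prepared conf object and a list of samples already exist in the caller's context, would parse the URL with oslo.utils and hand it to the constructor:

from oslo_utils import netutils

# 'conf' and 'samples' are assumed to exist; the dispatcher name is an example.
parsed_url = netutils.urlsplit('direct://?dispatcher=gnocchi')
publisher = DirectPublisher(conf, parsed_url)
publisher.publish_samples(samples)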

View File

@@ -15,7 +15,6 @@
import sys
from oslo_config import cfg
from oslo_db import options as db_options
import oslo_i18n
from oslo_log import log
from oslo_reports import guru_meditation_report as gmr
@@ -45,7 +44,6 @@ def prepare_service(argv=None, config_files=None, conf=None):
['futurist=INFO', 'neutronclient=INFO',
'keystoneclient=INFO'])
log.set_defaults(default_log_levels=log_levels)
db_options.set_defaults(conf)
conf(argv[1:], project='ceilometer', validate_default_values=True,
version=version.version_info.version_string(),

View File

@@ -1,147 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend management
"""
from oslo_config import cfg
from oslo_log import log
import six.moves.urllib.parse as urlparse
from stevedore import driver
import tenacity
from ceilometer import utils
LOG = log.getLogger(__name__)
OPTS = [
cfg.IntOpt('metering_time_to_live',
default=-1,
help="Number of seconds that samples are kept "
"in the database for (<= 0 means forever).",
deprecated_opts=[cfg.DeprecatedOpt('time_to_live',
'database')]),
cfg.StrOpt('metering_connection',
secret=True,
help='The connection string used to connect to the metering '
'database. (if unset, connection is used)'),
cfg.BoolOpt('sql_expire_samples_only',
default=False,
help="Indicates if expirer expires only samples. If set true,"
" expired samples will be deleted, but residual"
" resource and meter definition data will remain."),
]
class StorageUnknownWriteError(Exception):
"""Error raised when an unknown error occurs while recording."""
class StorageBadVersion(Exception):
"""Error raised when the storage backend version is not good enough."""
class StorageBadAggregate(Exception):
"""Error raised when an aggregate is unacceptable to storage backend."""
code = 400
def get_connection_from_config(conf):
retries = conf.database.max_retries
@tenacity.retry(
wait=tenacity.wait_fixed(conf.database.retry_interval),
stop=(tenacity.stop_after_attempt(retries) if retries >= 0
else tenacity.stop_never),
reraise=True)
def _inner():
url = (getattr(conf.database, 'metering_connection') or
conf.database.connection)
return get_connection(conf, url)
return _inner()
def get_connection(conf, url):
"""Return an open connection to the database."""
connection_scheme = urlparse.urlparse(url).scheme
# SQLAlchemy connection URLs may specify a 'dialect' or
# 'dialect+driver'. Handle the case where a driver is specified.
engine_name = connection_scheme.split('+')[0]
namespace = 'ceilometer.metering.storage'
# NOTE: translation not applied bug #1446983
LOG.debug('looking for %(name)r driver in %(namespace)r',
{'name': engine_name, 'namespace': namespace})
mgr = driver.DriverManager(namespace, engine_name)
return mgr.driver(conf, url)
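The dialect-splitting step above maps a connection URL to a stevedore driver name. A standalone sketch of the same mapping (the helper name is invented):

import six.moves.urllib.parse as urlparse

def engine_name_for(url):
    # 'mysql+pymysql://...' -> scheme 'mysql+pymysql' -> engine 'mysql'
    return urlparse.urlparse(url).scheme.split('+')[0]

# engine_name_for('mysql+pymysql://u:p@host/db')  -> 'mysql'
# engine_name_for('hbase://host:9090')            -> 'hbase'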
class SampleFilter(object):
"""Holds the properties for building a query from a meter/sample filter.
:param user: The sample owner.
:param project: The sample project.
:param start_timestamp: Earliest time point in the request.
:param start_timestamp_op: Earliest timestamp operation in the request.
:param end_timestamp: Latest time point in the request.
:param end_timestamp_op: Latest timestamp operation in the request.
:param resource: Optional filter for resource id.
:param meter: Optional filter for meter type using the meter name.
:param source: Optional source filter.
:param message_id: Optional sample_id filter.
:param metaquery: Optional filter on the metadata
"""
def __init__(self, user=None, project=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
resource=None, meter=None,
source=None, message_id=None,
metaquery=None):
self.user = user
self.project = project
self.start_timestamp = utils.sanitize_timestamp(start_timestamp)
self.start_timestamp_op = start_timestamp_op
self.end_timestamp = utils.sanitize_timestamp(end_timestamp)
self.end_timestamp_op = end_timestamp_op
self.resource = resource
self.meter = meter
self.source = source
self.metaquery = metaquery or {}
self.message_id = message_id
def __repr__(self):
return ("<SampleFilter(user: %s,"
" project: %s,"
" start_timestamp: %s,"
" start_timestamp_op: %s,"
" end_timestamp: %s,"
" end_timestamp_op: %s,"
" resource: %s,"
" meter: %s,"
" source: %s,"
" metaquery: %s,"
" message_id: %s)>" %
(self.user,
self.project,
self.start_timestamp,
self.start_timestamp_op,
self.end_timestamp,
self.end_timestamp_op,
self.resource,
self.meter,
self.source,
self.metaquery,
self.message_id))
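A minimal usage sketch of SampleFilter, with invented values; the constructor sanitizes timestamp strings into naive datetimes via utils.sanitize_timestamp:

f = SampleFilter(meter='cpu_util',
                 start_timestamp='2017-10-01T00:00:00',
                 start_timestamp_op='ge')
# f.meter == 'cpu_util'; f.start_timestamp is now a naive datetime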

View File

@@ -1,253 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for storage engines
"""
import datetime
import inspect
import math
from oslo_utils import timeutils
import six
from six import moves
import ceilometer
def iter_period(start, end, period):
"""Split a time from start to end in periods of a number of seconds.
This function yields the (start, end) time for each period composing the
time passed as argument.
:param start: When the first period starts.
:param end: When the last period ends.
:param period: The duration of the period.
"""
period_start = start
increment = datetime.timedelta(seconds=period)
for i in moves.xrange(int(math.ceil(
timeutils.delta_seconds(start, end)
/ float(period)))):
next_start = period_start + increment
yield (period_start, next_start)
period_start = next_start
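For example, splitting a ten-minute range into five-minute periods (times invented) yields two (start, end) tuples:

import datetime

start = datetime.datetime(2017, 10, 17, 12, 0)
end = datetime.datetime(2017, 10, 17, 12, 10)
list(iter_period(start, end, 300))
# -> [(12:00, 12:05), (12:05, 12:10)] as datetime pairs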
def _handle_sort_key(model_name, sort_key=None):
"""Generate sort keys according to the passed in sort key from user.
:param model_name: Database model name to be queried (meter, etc.)
:param sort_key: sort key passed from the user.
:return: sort keys list
"""
sort_keys_extra = {'meter': ['user_id', 'project_id'],
'resource': ['user_id', 'project_id', 'timestamp'],
}
sort_keys = sort_keys_extra[model_name]
if not sort_key:
return sort_keys
# NOTE(Fengqian): We need to put the sort key from user
# in the first place of sort keys list.
try:
sort_keys.remove(sort_key)
except ValueError:
pass
finally:
sort_keys.insert(0, sort_key)
return sort_keys
class Model(object):
"""Base class for storage API models."""
def __init__(self, **kwds):
self.fields = list(kwds)
for k, v in six.iteritems(kwds):
setattr(self, k, v)
def as_dict(self):
d = {}
for f in self.fields:
v = getattr(self, f)
if isinstance(v, Model):
v = v.as_dict()
elif isinstance(v, list) and v and isinstance(v[0], Model):
v = [sub.as_dict() for sub in v]
d[f] = v
return d
def __eq__(self, other):
return self.as_dict() == other.as_dict()
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def get_field_names(cls):
fields = inspect.getargspec(cls.__init__)[0]
return set(fields) - set(["self"])
class Connection(object):
"""Base class for storage system connections."""
# A dictionary representing the capabilities of this driver.
CAPABILITIES = {
'meters': {'query': {'simple': False,
'metadata': False}},
'resources': {'query': {'simple': False,
'metadata': False}},
'samples': {'query': {'simple': False,
'metadata': False,
'complex': False}},
'statistics': {'groupby': False,
'query': {'simple': False,
'metadata': False},
'aggregation': {'standard': False,
'selectable': {
'max': False,
'min': False,
'sum': False,
'avg': False,
'count': False,
'stddev': False,
'cardinality': False}}
},
}
STORAGE_CAPABILITIES = {
'storage': {'production_ready': False},
}
def __init__(self, conf, url):
self.conf = conf
@staticmethod
def upgrade():
"""Migrate the database to `version` or the most recent version."""
def record_metering_data_batch(self, samples):
"""Record the metering data in batch"""
for s in samples:
self.record_metering_data(s)
@staticmethod
def record_metering_data(data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.publisher.utils.meter_message_from_counter
All timestamps must be naive utc datetime objects.
"""
raise ceilometer.NotImplementedError(
'Recording metering data is not implemented')
@staticmethod
def clear_expired_metering_data(ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
raise ceilometer.NotImplementedError(
'Clearing samples not implemented')
@staticmethod
def get_resources(user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery=None, resource=None, limit=None):
"""Return an iterable of models.Resource instances.
Iterable items containing resource information.
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optional timestamp start range operation.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional timestamp end range operation.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
:param limit: Maximum number of results to return.
"""
raise ceilometer.NotImplementedError('Resources not implemented')
@staticmethod
def get_meters(user=None, project=None, resource=None, source=None,
metaquery=None, limit=None, unique=False):
"""Return an iterable of model.Meter instances.
Iterable items containing meter information.
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
:param limit: Maximum number of results to return.
:param unique: If set to true, return only unique meter information.
"""
raise ceilometer.NotImplementedError('Meters not implemented')
@staticmethod
def get_samples(sample_filter, limit=None):
"""Return an iterable of model.Sample instances.
:param sample_filter: Filter.
:param limit: Maximum number of results to return.
"""
raise ceilometer.NotImplementedError('Samples not implemented')
@staticmethod
def get_meter_statistics(sample_filter, period=None, groupby=None,
aggregate=None):
"""Return an iterable of model.Statistics instances.
The filter must have a meter value set.
"""
raise ceilometer.NotImplementedError('Statistics not implemented')
@staticmethod
def clear():
"""Clear database."""
@staticmethod
def query_samples(filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.Sample objects.
:param filter_expr: Filter expression for query.
:param orderby: List of field name and direction pairs for order by.
:param limit: Maximum number of results to return.
"""
raise ceilometer.NotImplementedError('Complex query for samples '
'is not implemented.')
@classmethod
def get_capabilities(cls):
"""Return an dictionary with the capabilities of each driver."""
return cls.CAPABILITIES
@classmethod
def get_storage_capabilities(cls):
"""Return a dictionary representing the performance capabilities.
This is needed to evaluate the performance of each driver.
"""
return cls.STORAGE_CAPABILITIES

View File

@@ -1,91 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import happybase
from oslo_log import log
from oslo_utils import netutils
from six.moves.urllib import parse as urlparse
from ceilometer.storage.hbase import inmemory as hbase_inmemory
LOG = log.getLogger(__name__)
class Connection(object):
"""Base connection class for HBase."""
_memory_instance = None
def __init__(self, conf, url):
"""HBase connection initialization."""
super(Connection, self).__init__(conf, url)
opts = self._parse_connection_url(url)
if opts['host'] == '__test__':
url = os.environ.get('CEILOMETER_TEST_HBASE_URL')
if url:
# Reparse URL, but from the env variable now
opts = self._parse_connection_url(url)
self.conn_pool = self._get_connection_pool(opts)
else:
# This is an in-memory instance for unit tests
if Connection._memory_instance is None:
LOG.debug('Creating a new in-memory HBase '
'Connection object')
Connection._memory_instance = (hbase_inmemory.
MConnectionPool())
self.conn_pool = Connection._memory_instance
else:
self.conn_pool = self._get_connection_pool(opts)
@staticmethod
def _get_connection_pool(conf):
"""Return a connection pool to the database.
.. note::
The tests use a subclass to override this and return an
in-memory connection pool.
"""
LOG.debug('connecting to HBase on %(host)s:%(port)s',
{'host': conf['host'], 'port': conf['port']})
return happybase.ConnectionPool(
size=100, host=conf['host'], port=conf['port'],
table_prefix=conf['table_prefix'],
table_prefix_separator=conf['table_prefix_separator'])
@staticmethod
def _parse_connection_url(url):
"""Parse connection parameters from a database url.
.. note::
HBase Thrift does not support authentication and there is no
database name, so we are not looking for these in the url.
"""
opts = {}
result = netutils.urlsplit(url)
opts['table_prefix'] = urlparse.parse_qs(
result.query).get('table_prefix', [None])[0]
opts['table_prefix_separator'] = urlparse.parse_qs(
result.query).get('table_prefix_separator', ['_'])[0]
opts['dbtype'] = result.scheme
if ':' in result.netloc:
opts['host'], port = result.netloc.split(':')
else:
opts['host'] = result.netloc
port = 9090
opts['port'] = port and int(port) or 9090
return opts

View File

@@ -1,281 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This is a very crude version of "in-memory HBase", which implements just
enough functionality of HappyBase API to support testing of our driver.
"""
import copy
import re
from oslo_log import log
import six
import ceilometer
LOG = log.getLogger(__name__)
class MTable(object):
"""HappyBase.Table mock."""
def __init__(self, name, families):
self.name = name
self.families = families
self._rows_with_ts = {}
def row(self, key, columns=None):
if key not in self._rows_with_ts:
return {}
res = copy.copy(sorted(six.iteritems(
self._rows_with_ts.get(key)))[-1][1])
if columns:
# Copy the keys first: popping from the dict while iterating over
# its live view would raise a RuntimeError on Python 3.
for key in list(res):
    if key not in columns:
        res.pop(key)
return res
def rows(self, keys):
return ((k, self.row(k)) for k in keys)
def put(self, key, data, ts=None):
# Note: we currently use timestamped puts only for the one Resource
# table; that's why we may use ts='0' when ts is None. If two kinds of
# put are ever needed in one table, ts=0 cannot be used.
if ts is None:
ts = "0"
if key not in self._rows_with_ts:
self._rows_with_ts[key] = {ts: data}
else:
if ts in self._rows_with_ts[key]:
self._rows_with_ts[key][ts].update(data)
else:
self._rows_with_ts[key].update({ts: data})
def delete(self, key):
del self._rows_with_ts[key]
def _get_latest_dict(self, row):
# The idea here is to return the latest versions of columns.
# In _rows_with_ts we store {row: {ts_1: {data}, ts_2: {data}}}.
# sorted() yields the (ts, data) items in ts order, i.e. ts_2 is the
# latest. To get the result as HBase provides it, we iterate in ts
# order and let newer data overwrite the key-values of older data.
data = {}
for i in sorted(six.iteritems(self._rows_with_ts[row])):
data.update(i[1])
return data
def scan(self, filter=None, columns=None, row_start=None, row_stop=None,
limit=None):
columns = columns or []
sorted_keys = sorted(self._rows_with_ts)
# copy data between row_start and row_stop into a dict
rows = {}
for row in sorted_keys:
if row_start and row < row_start:
continue
if row_stop and row > row_stop:
break
rows[row] = self._get_latest_dict(row)
if columns:
ret = {}
for row, data in six.iteritems(rows):
for key in data:
if key in columns:
ret[row] = data
rows = ret
if filter:
# TODO(jdanjou): we should really parse this properly,
# but at the moment we are only going to support AND here
filters = filter.split('AND')
for f in filters:
# Extract filter name and its arguments
g = re.search(r"(.*)\((.*),?\)", f)
fname = g.group(1).strip()
fargs = [s.strip().replace('\'', '')
for s in g.group(2).split(',')]
m = getattr(self, fname)
if callable(m):
# overwrite rows for filtering to take effect
# in case of multiple filters
rows = m(fargs, rows)
else:
raise ceilometer.NotImplementedError(
"%s filter is not implemented, "
"you may want to add it!")
for k in sorted(rows)[:limit]:
yield k, rows[k]
@staticmethod
def SingleColumnValueFilter(args, rows):
"""This is filter for testing "in-memory HBase".
This method is called from scan() when 'SingleColumnValueFilter'
is found in the 'filter' argument.
"""
op = args[2]
column = "%s:%s" % (args[0], args[1])
value = args[3]
if value.startswith('binary:'):
value = value[7:]
r = {}
for row in rows:
data = rows[row]
if op == '=':
if column in data and data[column] == value:
r[row] = data
elif op == '<':
if column in data and data[column] < value:
r[row] = data
elif op == '<=':
if column in data and data[column] <= value:
r[row] = data
elif op == '>':
if column in data and data[column] > value:
r[row] = data
elif op == '>=':
if column in data and data[column] >= value:
r[row] = data
elif op == '!=':
if column in data and data[column] != value:
r[row] = data
return r
@staticmethod
def ColumnPrefixFilter(args, rows):
"""This is filter for testing "in-memory HBase".
This method is called from scan() when 'ColumnPrefixFilter' is found
in the 'filter' argument.
:param args: a list of filter arguments, containing the column prefix
:param rows: a dict of rows to filter
"""
value = args[0]
column = 'f:' + value
r = {}
for row, data in rows.items():
column_dict = {}
for key in data:
if key.startswith(column):
column_dict[key] = data[key]
r[row] = column_dict
return r
@staticmethod
def RowFilter(args, rows):
"""This is filter for testing "in-memory HBase".
This method is called from scan() when 'RowFilter' is found in the
'filter' argument.
:param args: a list of filter arguments; it contains the operator and
the sought string
:param rows: a dict of rows which are filtered
"""
op = args[0]
value = args[1]
if value.startswith('regexstring:'):
value = value[len('regexstring:'):]
r = {}
for row, data in rows.items():
try:
g = re.search(value, row).group()
if op == '=':
if g == row:
r[row] = data
else:
raise ceilometer.NotImplementedError(
"In-memory "
"RowFilter doesn't support "
"the %s operation yet" % op)
except AttributeError:
pass
return r
@staticmethod
def QualifierFilter(args, rows):
"""This is filter for testing "in-memory HBase".
This method is called from scan() when 'QualifierFilter' is found in
the 'filter' argument
"""
op = args[0]
value = args[1]
is_regex = False
if value.startswith('binaryprefix:'):
value = value[len('binaryprefix:'):]
if value.startswith('regexstring:'):
value = value[len('regexstring:'):]
is_regex = True
column = 'f:' + value
if op not in ('=', '>=', '<=', '>', '<') and not is_regex:
    # Raise for genuinely unsupported operators instead of raising
    # whenever a key merely fails to match the filter.
    raise ceilometer.NotImplementedError(
        "In-memory QualifierFilter "
        "doesn't support the %s "
        "operation yet" % op)
r = {}
for row in rows:
    data = rows[row]
    r_data = {}
    for key in data:
        if ((op == '=' and key.startswith(column)) or
                (op == '>=' and key >= column) or
                (op == '<=' and key <= column) or
                (op == '>' and key > column) or
                (op == '<' and key < column) or
                (is_regex and re.search(value, key))):
            r_data[key] = data[key]
    if r_data:
        r[row] = r_data
return r
class MConnectionPool(object):
def __init__(self):
self.conn = MConnection()
def connection(self):
return self.conn
class MConnection(object):
"""HappyBase.Connection mock."""
def __init__(self):
self.tables = {}
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@staticmethod
def open():
LOG.debug("Opening in-memory HBase connection")
def create_table(self, n, families=None):
families = families or {}
if n in self.tables:
return self.tables[n]
t = MTable(n, families)
self.tables[n] = t
return t
def delete_table(self, name, use_prefix=True):
del self.tables[name]
def table(self, name):
return self.create_table(name)

View File

@@ -1,74 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""HBase storage backend migrations
"""
from ceilometer.storage.hbase import utils as hbase_utils
def migrate_resource_table(conn, table):
"""Migrate table 'resource' in HBase.
Change the qualifier format from "%s+%s+%s!%s!%s" %
(rts, source, counter_name, counter_type, counter_unit)
in columns with meters f:m_*
to the new separator format "%s:%s:%s:%s:%s" %
(rts, source, counter_name, counter_type, counter_unit)
"""
resource_table = conn.table(table)
resource_filter = ("QualifierFilter(=, "
"'regexstring:m_\\d{19}\\+"
"[\\w-\\._]*\\+[\\w-\\._!]')")
gen = resource_table.scan(filter=resource_filter)
for row, data in gen:
columns = []
updated_columns = dict()
column_prefix = "f:"
for column, value in data.items():
if column.startswith('f:m_'):
columns.append(column)
parts = column[2:].split("+", 2)
parts.extend(parts.pop(2).split("!"))
column = hbase_utils.prepare_key(*parts)
updated_columns[column_prefix + column] = value
resource_table.put(row, updated_columns)
resource_table.delete(row, columns)
def migrate_meter_table(conn, table):
"""Migrate table 'meter' in HBase.
Change row format from "%s_%d_%s" % (counter_name, rts, message_signature)
to new separator format "%s:%s:%s" % (counter_name, rts, message_signature)
"""
meter_table = conn.table(table)
meter_filter = ("RowFilter(=, "
"'regexstring:[\\w\\._-]*_\\d{19}_\\w*')")
gen = meter_table.scan(filter=meter_filter)
for row, data in gen:
parts = row.rsplit('_', 2)
new_row = hbase_utils.prepare_key(*parts)
meter_table.put(new_row, data)
meter_table.delete(row)
TABLE_MIGRATION_FUNCS = {'resource': migrate_resource_table,
'meter': migrate_meter_table}
def migrate_tables(conn, tables):
if not isinstance(tables, list):
tables = [tables]
for table in tables:
if table in TABLE_MIGRATION_FUNCS:
TABLE_MIGRATION_FUNCS.get(table)(conn, table)

View File

@@ -1,448 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Various HBase helpers"""
import copy
import datetime
import json
import bson.json_util
from happybase.hbase import ttypes
from oslo_log import log
import six
from ceilometer.i18n import _
from ceilometer import utils
LOG = log.getLogger(__name__)
OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='}
# We need this additional dictionary because we have reverted timestamp in
# row-keys for stored metrics
OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<',
'ge': '<='}
def _QualifierFilter(op, qualifier):
return "QualifierFilter (%s, 'binaryprefix:m_%s')" % (op, qualifier)
def timestamp(dt, reverse=True):
"""Timestamp is count of milliseconds since start of epoch.
If reverse=True then timestamp will be reversed. Such a technique is used
in HBase rowkey design when period queries are required. Because of the
fact that rows are sorted lexicographically it's possible to vary whether
the 'oldest' entries will be on top of the table or it should be the newest
ones (reversed timestamp case).
:param dt: datetime which is translated to timestamp
:param reverse: a boolean parameter for reverse or straight count of
timestamp in milliseconds
:return: count or reversed count of milliseconds since start of epoch
"""
epoch = datetime.datetime(1970, 1, 1)
td = dt - epoch
ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000
return 0x7fffffffffffffff - ts if reverse else ts
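A worked example of the straight and reversed counts, one second after the epoch:

import datetime

dt = datetime.datetime(1970, 1, 1, 0, 0, 1)
timestamp(dt, reverse=False)  # -> 1000000 microseconds since the epoch
timestamp(dt)                 # -> 0x7fffffffffffffff - 1000000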
def make_timestamp_query(func, start=None, start_op=None, end=None,
end_op=None, bounds_only=False, **kwargs):
"""Return a filter start and stop row for filtering and a query.
Query is based on the fact that CF-name is 'rts'.
:param start: Optional start timestamp
:param start_op: Optional start timestamp operator, like gt, ge
:param end: Optional end timestamp
:param end_op: Optional end timestamp operator, like lt, le
:param bounds_only: if True then the query will not be returned
:param func: a function that provides the row format
:param kwargs: kwargs for :param func
"""
# We don't need to dump here because get_start_end_rts returns strings
rts_start, rts_end = get_start_end_rts(start, end)
start_row, end_row = func(rts_start, rts_end, **kwargs)
if bounds_only:
return start_row, end_row
q = []
start_op = start_op or 'ge'
end_op = end_op or 'lt'
if rts_start:
q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
(OP_SIGN_REV[start_op], rts_start))
if rts_end:
q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
(OP_SIGN_REV[end_op], rts_end))
res_q = None
if len(q):
res_q = " AND ".join(q)
return start_row, end_row, res_q
def get_start_end_rts(start, end):
rts_start = str(timestamp(start)) if start else ""
rts_end = str(timestamp(end)) if end else ""
return rts_start, rts_end
def make_query(metaquery=None, **kwargs):
"""Return a filter query string based on the selected parameters.
:param metaquery: optional metaquery dict
:param kwargs: key-value pairs to filter on. Key should be a real
column name in db
"""
q = []
res_q = None
# Note: we use the extended constructor for SingleColumnValueFilter here.
# It explicitly specifies that an entry should not be returned if the CF
# is not found in the table.
for key, value in sorted(kwargs.items()):
if value is not None:
if key == 'source':
q.append("SingleColumnValueFilter "
"('f', 's_%s', =, 'binary:%s', true, true)" %
(value, dump('1')))
else:
q.append("SingleColumnValueFilter "
"('f', '%s', =, 'binary:%s', true, true)" %
(quote(key), dump(value)))
res_q = None
if len(q):
res_q = " AND ".join(q)
if metaquery:
meta_q = []
for k, v in metaquery.items():
meta_q.append(
"SingleColumnValueFilter ('f', '%s', =, 'binary:%s', "
"true, true)"
% ('r_' + k, dump(v)))
meta_q = " AND ".join(meta_q)
# join query and metaquery
if res_q is not None:
res_q += " AND " + meta_q
else:
res_q = meta_q # metaquery only
return res_q
def get_meter_columns(metaquery=None, need_timestamp=False, **kwargs):
"""Return a list of required columns in meter table to be scanned.
SingleColumnFilter has 'columns' filter that should be used to determine
what columns we are interested in. But if we want to use 'filter' and
'columns' together, we have to include the columns we are filtering by
in the columns list.
Please see an example: if we make a scan with filter
"SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')"
and columns ['f:rts'], the output will always be empty
because only 'rts' will be returned and the filter will be applied
to this data, so 's_test-1' cannot be found.
To make this request correct it should be fixed as follows:
filter = "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')",
columns = ['f:rts','f:s_test-1']}
:param metaquery: optional metaquery dict
:param need_timestamp: flag, which defines the need for timestamp columns
:param kwargs: key-value pairs to filter on. Key should be a real
column name in db
"""
columns = ['f:message', 'f:recorded_at']
columns.extend("f:%s" % k for k, v in kwargs.items()
if v is not None)
if metaquery:
columns.extend("f:r_%s" % k for k, v in metaquery.items()
if v is not None)
source = kwargs.get('source')
if source:
columns.append("f:s_%s" % source)
if need_timestamp:
columns.extend(['f:rts', 'f:timestamp'])
return columns
def make_sample_query_from_filter(sample_filter, require_meter=True):
"""Return a query dictionary based on the settings in the filter.
:param sample_filter: SampleFilter instance
:param require_meter: If true and the filter does not have a meter,
raise an error.
"""
meter = sample_filter.meter
if not meter and require_meter:
raise RuntimeError('Missing required meter specifier')
start_row, end_row, ts_query = make_timestamp_query(
make_general_rowkey_scan,
start=sample_filter.start_timestamp,
start_op=sample_filter.start_timestamp_op,
end=sample_filter.end_timestamp,
end_op=sample_filter.end_timestamp_op,
some_id=meter)
kwargs = dict(user_id=sample_filter.user,
project_id=sample_filter.project,
counter_name=meter,
resource_id=sample_filter.resource,
source=sample_filter.source,
message_id=sample_filter.message_id)
q = make_query(metaquery=sample_filter.metaquery, **kwargs)
if q:
res_q = q + " AND " + ts_query if ts_query else q
else:
res_q = ts_query if ts_query else None
need_timestamp = (sample_filter.start_timestamp or
sample_filter.end_timestamp) is not None
columns = get_meter_columns(metaquery=sample_filter.metaquery,
need_timestamp=need_timestamp, **kwargs)
return res_q, start_row, end_row, columns
def make_meter_query_for_resource(start_timestamp, start_timestamp_op,
end_timestamp, end_timestamp_op, source,
query=None):
"""This method is used when Resource table should be filtered by meters.
In this method we are looking into all qualifiers with m_ prefix.
:param start_timestamp: meter's timestamp start range.
:param start_timestamp_op: meter's start time operator, like ge, gt.
:param end_timestamp: meter's timestamp end range.
:param end_timestamp_op: meter's end time operator, like lt, le.
:param source: source filter.
:param query: a query string to concatenate with.
"""
start_rts, end_rts = get_start_end_rts(start_timestamp, end_timestamp)
mq = []
start_op = start_timestamp_op or 'ge'
end_op = end_timestamp_op or 'lt'
if start_rts:
filter_value = (start_rts + ':' + quote(source) if source
else start_rts)
mq.append(_QualifierFilter(OP_SIGN_REV[start_op], filter_value))
if end_rts:
filter_value = (end_rts + ':' + quote(source) if source
else end_rts)
mq.append(_QualifierFilter(OP_SIGN_REV[end_op], filter_value))
if mq:
meter_q = " AND ".join(mq)
# If there is filtering on a time range we need to indicate that
# qualifiers should start with m_. Otherwise, in case of e.g.
# QualifierFilter (>=, 'binaryprefix:m_9222030811134775808')
# qualifier 's_test' satisfies the filter and will be returned.
meter_q = _QualifierFilter("=", '') + " AND " + meter_q
query = meter_q if not query else query + " AND " + meter_q
return query
def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None):
"""If it's filter on some_id without start and end.
start_row = some_id while end_row = some_id + MAX_BYTE.
"""
if some_id is None:
return None, None
if not rts_start:
# NOTE(idegtiarov): Here we cannot use chr > 122 because chr >= 123
# will be quoted, and the character will be turned into a composition
# that starts with '%' (chr(37)), which is lexicographically less than
# the chr of a number.
rts_start = chr(122)
end_row = prepare_key(some_id, rts_start)
start_row = prepare_key(some_id, rts_end)
return start_row, end_row
def prepare_key(*args):
"""Prepares names for rows and columns with correct separator.
:param args: strings or numbers that we want to construct our key of
:return: key with quoted args that are separated with character ":"
"""
key_quote = []
for key in args:
if isinstance(key, six.integer_types):
key = str(key)
key_quote.append(quote(key))
return ":".join(key_quote)
def timestamp_from_record_tuple(record):
"""Extract timestamp from HBase tuple record."""
return record[0]['timestamp']
def resource_id_from_record_tuple(record):
"""Extract resource_id from HBase tuple record."""
return record[0]['resource_id']
def deserialize_entry(entry, get_raw_meta=True):
"""Return a list of flatten_result, sources, meters and metadata.
Flatten_result contains a dict of simple structures such as 'resource_id':1
sources/meters are the lists of sources and meters correspondingly.
metadata is metadata dict. This dict may be returned as flattened if
get_raw_meta is False.
:param entry: entry from HBase, without row name and timestamp
:param get_raw_meta: If true then raw metadata will be returned,
if False metadata will be constructed from
'f:r_metadata.' fields
"""
flatten_result = {}
sources = []
meters = []
metadata_flattened = {}
for k, v in entry.items():
if k.startswith('f:s_'):
sources.append(decode_unicode(k[4:]))
elif k.startswith('f:r_metadata.'):
qualifier = decode_unicode(k[len('f:r_metadata.'):])
metadata_flattened[qualifier] = load(v)
elif k.startswith("f:m_"):
meter = ([unquote(i) for i in k[4:].split(':')], load(v))
meters.append(meter)
else:
if ':' in k[2:]:
key = tuple([unquote(i) for i in k[2:].split(':')])
else:
key = unquote(k[2:])
flatten_result[key] = load(v)
if get_raw_meta:
metadata = flatten_result.get('resource_metadata', {})
else:
metadata = metadata_flattened
return flatten_result, meters, metadata
def serialize_entry(data=None, **kwargs):
"""Return a dict that is ready to be stored to HBase
:param data: dict to be serialized
:param kwargs: additional args
"""
data = data or {}
entry_dict = copy.copy(data)
entry_dict.update(**kwargs)
result = {}
for k, v in entry_dict.items():
if k == 'source':
# user, project and resource tables may contain several sources.
# Besides, resource table may contain several meters.
# To make insertion safe we need to store all meters and sources in
# a separate cell. For this purpose s_ and m_ prefixes are
# introduced.
qualifier = encode_unicode('f:s_%s' % v)
result[qualifier] = dump('1')
elif k == 'meter':
for meter, ts in v.items():
qualifier = encode_unicode('f:m_%s' % meter)
result[qualifier] = dump(ts)
elif k == 'resource_metadata':
# keep raw metadata as well as flattened to provide
# compatibility with API v2. It will be flattened in another
# way on API level. But we need flattened too for quick filtering.
flattened_meta = dump_metadata(v)
for key, m in flattened_meta.items():
metadata_qualifier = encode_unicode('f:r_metadata.' + key)
result[metadata_qualifier] = dump(m)
result['f:resource_metadata'] = dump(v)
else:
result['f:' + quote(k, ':')] = dump(v)
return result
def dump_metadata(meta):
resource_metadata = {}
for key, v in utils.dict_to_keyval(meta):
resource_metadata[key] = v
return resource_metadata
def dump(data):
return json.dumps(data, default=bson.json_util.default)
def load(data):
return json.loads(data, object_hook=object_hook)
def encode_unicode(data):
return data.encode('utf-8') if isinstance(data, six.text_type) else data
def decode_unicode(data):
return data.decode('utf-8') if isinstance(data, six.string_types) else data
# We don't want tzinfo in decoded JSON. This object_hook overrides
# json_util.object_hook for $date.
def object_hook(dct):
if "$date" in dct:
dt = bson.json_util.object_hook(dct)
return dt.replace(tzinfo=None)
return bson.json_util.object_hook(dct)
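A sketch of the round trip through dump() and load() above, assuming bson's json_util serializes datetimes as {'$date': <milliseconds>}:

import datetime

s = dump({'ts': datetime.datetime(2017, 10, 17)})
# s is JSON with the datetime encoded as {"$date": <milliseconds>}
load(s)  # -> {'ts': datetime.datetime(2017, 10, 17, 0, 0)}, tzinfo stripped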
def create_tables(conn, tables, column_families):
for table in tables:
try:
conn.create_table(table, column_families)
except ttypes.AlreadyExists:
if conn.table_prefix:
table = ("%(table_prefix)s"
"%(separator)s"
"%(table_name)s" %
dict(table_prefix=conn.table_prefix,
separator=conn.table_prefix_separator,
table_name=table))
LOG.warning(_("Cannot create table %(table_name)s, "
"it already exists. Ignoring error")
% {'table_name': table})
def quote(s, *args):
"""Return quoted string even if it is unicode one.
:param s: string that should be quoted
:param args: any symbol we want to stay unquoted
"""
s_en = s.encode('utf8')
return six.moves.urllib.parse.quote(s_en, *args)
def unquote(s):
"""Return unquoted and decoded string.
:param s: string that should be unquoted
"""
s_de = six.moves.urllib.parse.unquote(s)
return s_de.decode('utf8')

View File

@@ -1,440 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import operator
import time
from oslo_log import log
from oslo_utils import timeutils
import ceilometer
from ceilometer.storage import base
from ceilometer.storage.hbase import base as hbase_base
from ceilometer.storage.hbase import migration as hbase_migration
from ceilometer.storage.hbase import utils as hbase_utils
from ceilometer.storage import models
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'meters': {'query': {'simple': True,
'metadata': True}},
'resources': {'query': {'simple': True,
'metadata': True}},
'samples': {'query': {'simple': True,
'metadata': True}},
'statistics': {'query': {'simple': True,
'metadata': True},
'aggregation': {'standard': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(hbase_base.Connection, base.Connection):
"""Put the metering data into a HBase database
Collections:
- meter (describes sample actually):
- row-key: consists of reversed timestamp, meter and a message uuid
for purposes of uniqueness
- Column Families:
f: contains the following qualifiers:
- counter_name: <name of counter>
- counter_type: <type of counter>
- counter_unit: <unit of counter>
- counter_volume: <volume of counter>
- message: <raw incoming data>
- message_id: <id of message>
- message_signature: <signature of message>
- resource_metadata: raw metadata for corresponding resource
of the meter
- project_id: <id of project>
- resource_id: <id of resource>
- user_id: <id of user>
- recorded_at: <datetime when sample has been recorded (utc.now)>
- flattened metadata with prefix r_metadata. e.g.::
f:r_metadata.display_name or f:r_metadata.tag
- rts: <reversed timestamp of entry>
- timestamp: <meter's timestamp (came from message)>
- source for meter with prefix 's'
- resource:
- row_key: uuid of resource
- Column Families:
f: contains the following qualifiers:
- resource_metadata: raw metadata for corresponding resource
- project_id: <id of project>
- resource_id: <id of resource>
- user_id: <id of user>
- flattened metadata with prefix r_metadata. e.g.::
f:r_metadata.display_name or f:r_metadata.tag
- sources for all corresponding meters with prefix 's'
- all meters with prefix 'm' for this resource in format:
.. code-block:: python
"%s:%s:%s:%s:%s" % (rts, source, counter_name, counter_type,
counter_unit)
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
_memory_instance = None
RESOURCE_TABLE = "resource"
METER_TABLE = "meter"
def upgrade(self):
tables = [self.RESOURCE_TABLE, self.METER_TABLE]
column_families = {'f': dict(max_versions=1)}
with self.conn_pool.connection() as conn:
hbase_utils.create_tables(conn, tables, column_families)
hbase_migration.migrate_tables(conn, tables)
def clear(self):
LOG.debug('Dropping HBase schema...')
with self.conn_pool.connection() as conn:
for table in [self.RESOURCE_TABLE,
self.METER_TABLE]:
try:
conn.disable_table(table)
except Exception:
LOG.debug('Cannot disable table; ignoring error')
try:
conn.delete_table(table)
except Exception:
LOG.debug('Cannot delete table; ignoring error')
def record_metering_data(self, data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.publisher.utils.meter_message_from_counter
"""
# We must not record the monotonic time.
data.pop("monotonic_time", None)
with self.conn_pool.connection() as conn:
resource_table = conn.table(self.RESOURCE_TABLE)
meter_table = conn.table(self.METER_TABLE)
resource_metadata = data.get('resource_metadata', {})
# Determine the name of new meter
rts = hbase_utils.timestamp(data['timestamp'])
new_meter = hbase_utils.prepare_key(
rts, data['source'], data['counter_name'],
data['counter_type'], data['counter_unit'])
# TODO(nprivalova): try not to store resource_id
resource = hbase_utils.serialize_entry(**{
'source': data['source'],
'meter': {new_meter: data['timestamp']},
'resource_metadata': resource_metadata,
'resource_id': data['resource_id'],
'project_id': data['project_id'], 'user_id': data['user_id']})
# Here we put the entry into HBase with our own timestamp. This is
# needed when samples arrive out-of-order: by using
# timestamp=data['timestamp'], the newest data automatically ends up
# 'on top', which keeps the metadata up-to-date, as metadata from the
# newest samples is considered current.
ts = int(time.mktime(data['timestamp'].timetuple()) * 1000)
resource_table.put(hbase_utils.encode_unicode(data['resource_id']),
resource, ts)
# Rowkey consists of reversed timestamp, meter and a
# message uuid for purposes of uniqueness
row = hbase_utils.prepare_key(data['counter_name'], rts,
data['message_id'])
record = hbase_utils.serialize_entry(
data, **{'source': data['source'], 'rts': rts,
'message': data, 'recorded_at': timeutils.utcnow()})
meter_table.put(row, record)
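# Hedged usage sketch (minimal field set, values assumed) of the sample
# dict consumed above, as produced by meter_message_from_counter:
#
#     conn.record_metering_data({
#         'source': 'openstack', 'counter_name': 'cpu_util',
#         'counter_type': 'gauge', 'counter_unit': '%',
#         'counter_volume': 0.5, 'user_id': 'u1', 'project_id': 'p1',
#         'resource_id': 'inst-1', 'resource_metadata': {},
#         'message_id': 'mid-1',
#         'timestamp': datetime.datetime.utcnow(),
#     })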
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery=None, resource=None, limit=None):
"""Return an iterable of models.Resource instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optional start time operator, like ge, gt.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional end time operator, like lt, le.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return
q = hbase_utils.make_query(metaquery=metaquery, user_id=user,
project_id=project,
resource_id=resource, source=source)
q = hbase_utils.make_meter_query_for_resource(start_timestamp,
start_timestamp_op,
end_timestamp,
end_timestamp_op,
source, q)
with self.conn_pool.connection() as conn:
resource_table = conn.table(self.RESOURCE_TABLE)
LOG.debug("Query Resource table: %s", q)
for resource_id, data in resource_table.scan(filter=q,
limit=limit):
f_res, meters, md = hbase_utils.deserialize_entry(
data)
resource_id = hbase_utils.encode_unicode(resource_id)
# Unfortunately happybase doesn't preserve the ordering of
# results from HBase, so the min and max timestamps have to
# be found manually.
first_ts = min(meters, key=operator.itemgetter(1))[1]
last_ts = max(meters, key=operator.itemgetter(1))[1]
source = meters[0][0][1]
# If we use QualifierFilter, HBase returns only the
# qualifiers that were filtered on, not the whole entry.
# That's why we need to ask for additional qualifiers
# manually.
if 'project_id' not in f_res and 'user_id' not in f_res:
row = resource_table.row(
resource_id, columns=['f:project_id', 'f:user_id',
'f:resource_metadata'])
f_res, _m, md = hbase_utils.deserialize_entry(row)
yield models.Resource(
resource_id=resource_id,
first_sample_timestamp=first_ts,
last_sample_timestamp=last_ts,
project_id=f_res['project_id'],
source=source,
user_id=f_res['user_id'],
metadata=md)
def get_meters(self, user=None, project=None, resource=None, source=None,
metaquery=None, limit=None, unique=False):
"""Return an iterable of models.Meter instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
:param limit: Maximum number of results to return.
:param unique: If set to true, return only unique meter information.
"""
if limit == 0:
return
metaquery = metaquery or {}
with self.conn_pool.connection() as conn:
resource_table = conn.table(self.RESOURCE_TABLE)
q = hbase_utils.make_query(metaquery=metaquery, user_id=user,
project_id=project,
resource_id=resource,
source=source)
LOG.debug("Query Resource table: %s", q)
gen = resource_table.scan(filter=q)
# We need a result set to ensure the user doesn't receive
# duplicate meters. Please see bug
# https://bugs.launchpad.net/ceilometer/+bug/1301371
result = set()
for ignored, data in gen:
flatten_result, meters, md = hbase_utils.deserialize_entry(
data)
for m in meters:
if limit and len(result) >= limit:
return
_m_rts, m_source, name, m_type, unit = m[0]
if unique:
meter_dict = {'name': name,
'type': m_type,
'unit': unit,
'resource_id': None,
'project_id': None,
'user_id': None,
'source': None}
else:
meter_dict = {'name': name,
'type': m_type,
'unit': unit,
'resource_id':
flatten_result['resource_id'],
'project_id':
flatten_result['project_id'],
'user_id':
flatten_result['user_id']}
frozen_meter = frozenset(meter_dict.items())
if frozen_meter in result:
continue
result.add(frozen_meter)
if not unique:
meter_dict.update({'source': m_source
if m_source else None})
yield models.Meter(**meter_dict)
def get_samples(self, sample_filter, limit=None):
"""Return an iterable of models.Sample instances.
:param sample_filter: Filter.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return
with self.conn_pool.connection() as conn:
meter_table = conn.table(self.METER_TABLE)
q, start, stop, columns = (hbase_utils.
make_sample_query_from_filter
(sample_filter, require_meter=False))
LOG.debug("Query Meter Table: %s", q)
gen = meter_table.scan(filter=q, row_start=start, row_stop=stop,
limit=limit, columns=columns)
for ignored, meter in gen:
d_meter = hbase_utils.deserialize_entry(meter)[0]
d_meter['message']['counter_volume'] = (
float(d_meter['message']['counter_volume']))
d_meter['message']['recorded_at'] = d_meter['recorded_at']
yield models.Sample(**d_meter['message'])
@staticmethod
def _update_meter_stats(stat, meter):
"""Do the stats calculation on a requested time bucket in stats dict
:param stats: dict where aggregated stats are kept
:param index: time bucket index in stats
:param meter: meter record as returned from HBase
:param start_time: query start time
:param period: length of the time bucket
"""
vol = meter['counter_volume']
ts = meter['timestamp']
stat.unit = meter['counter_unit']
stat.min = min(vol, stat.min or vol)
stat.max = max(vol, stat.max)
stat.sum = vol + (stat.sum or 0)
stat.count += 1
stat.avg = (stat.sum / float(stat.count))
stat.duration_start = min(ts, stat.duration_start or ts)
stat.duration_end = max(ts, stat.duration_end or ts)
stat.duration = (timeutils.delta_seconds(stat.duration_start,
stat.duration_end))
def get_meter_statistics(self, sample_filter, period=None, groupby=None,
aggregate=None):
"""Return an iterable of models.Statistics instances.
The items contain meter statistics described by the query
parameters. The filter must have a meter value set.
.. note::
Due to HBase limitations the aggregations are implemented
in the driver itself, therefore this method will be quite slow
because of all the Thrift traffic it is going to create.
"""
if groupby:
raise ceilometer.NotImplementedError("Group by not implemented.")
if aggregate:
raise ceilometer.NotImplementedError(
'Selectable aggregates not implemented')
with self.conn_pool.connection() as conn:
meter_table = conn.table(self.METER_TABLE)
q, start, stop, columns = (hbase_utils.
make_sample_query_from_filter
(sample_filter))
# These fields are used in the statistics calculation
columns.extend(['f:timestamp', 'f:counter_volume',
'f:counter_unit'])
meters = map(hbase_utils.deserialize_entry,
list(meter for (ignored, meter) in
meter_table.scan(
filter=q, row_start=start,
row_stop=stop, columns=columns)))
if sample_filter.start_timestamp:
start_time = sample_filter.start_timestamp
elif meters:
start_time = meters[-1][0]['timestamp']
else:
start_time = None
if sample_filter.end_timestamp:
end_time = sample_filter.end_timestamp
elif meters:
end_time = meters[0][0]['timestamp']
else:
end_time = None
results = []
if not period:
period = 0
period_start = start_time
period_end = end_time
# Since our HBase meters are stored newest-first, we need to iterate
# in reverse order
for meter in meters[::-1]:
ts = meter[0]['timestamp']
if period:
offset = int(timeutils.delta_seconds(
start_time, ts) / period) * period
period_start = start_time + datetime.timedelta(0, offset)
if not results or not results[-1].period_start == period_start:
if period:
period_end = period_start + datetime.timedelta(
0, period)
results.append(
models.Statistics(unit='',
count=0,
min=0,
max=0,
avg=0,
sum=0,
period=period,
period_start=period_start,
period_end=period_end,
duration=None,
duration_start=None,
duration_end=None,
groupby=None)
)
self._update_meter_stats(results[-1], meter[0])
return results
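# Worked example of the bucketing above (assumed values): with
# start_time = 12:00:00, a sample at ts = 12:07:30 and period = 300 s,
# offset = int(450 / 300) * 300 = 300, so the sample falls in the bucket
# with period_start = 12:05:00 and period_end = 12:10:00.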

View File

@ -1,130 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Simple logging storage backend.
"""
from oslo_log import log
from ceilometer.storage import base
LOG = log.getLogger(__name__)
class Connection(base.Connection):
"""Log the data."""
def upgrade(self):
pass
def clear(self):
pass
def record_metering_data(self, data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.meter.meter_message_from_counter.
"""
LOG.info('metering data %(counter_name)s for %(resource_id)s: '
'%(counter_volume)s'
% ({'counter_name': data['counter_name'],
'resource_id': data['resource_id'],
'counter_volume': data['counter_volume']}))
def clear_expired_metering_data(self, ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
LOG.info("Dropping metering data with TTL %d", ttl)
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery=None, resource=None, limit=None):
"""Return an iterable of dictionaries containing resource information.
{ 'resource_id': UUID of the resource,
'project_id': UUID of project owning the resource,
'user_id': UUID of user owning the resource,
'timestamp': UTC datetime of last update to the resource,
'metadata': most current metadata for the resource,
'meter': list of the meters reporting data for the resource,
}
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optional start time operator, like gt, ge.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional end time operator, like lt, le.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
:param limit: Maximum number of results to return.
"""
return []
def get_meters(self, user=None, project=None, resource=None, source=None,
limit=None, metaquery=None, unique=False):
"""Return an iterable of dictionaries containing meter information.
{ 'name': name of the meter,
'type': type of the meter (gauge, delta, cumulative),
'resource_id': UUID of the resource,
'project_id': UUID of project owning the resource,
'user_id': UUID of user owning the resource,
}
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param limit: Maximum number of results to return.
:param metaquery: Optional dict with metadata to match on.
:param unique: If set to true, return only unique meter information.
"""
return []
def get_samples(self, sample_filter, limit=None):
"""Return an iterable of samples.
Items are created by
ceilometer.publisher.utils.meter_message_from_counter.
"""
return []
def get_meter_statistics(self, sample_filter, period=None, groupby=None,
aggregate=None):
"""Return a dictionary containing meter statistics.
Meter statistics is described by the query parameters.
The filter must have a meter value set.
{ 'min':
'max':
'avg':
'sum':
'count':
'period':
'period_start':
'period_end':
'duration':
'duration_start':
'duration_end':
}
"""
return []
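# Hedged configuration sketch: this no-op driver is selected through the
# metering connection URL in ceilometer.conf (scheme assumed):
#
#     [database]
#     connection = log://localhost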

View File

@ -1,710 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend"""
import itertools
import operator
import copy
import datetime
import uuid
import bson.code
import bson.objectid
from oslo_log import log
from oslo_utils import timeutils
import pymongo
import six
import ceilometer
from ceilometer.i18n import _
from ceilometer import storage
from ceilometer.storage import base
from ceilometer.storage import models
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer.storage import pymongo_base
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'resources': {'query': {'simple': True,
'metadata': True}},
'statistics': {'groupby': True,
'query': {'simple': True,
'metadata': True},
'aggregation': {'standard': True,
'selectable': {'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True}}}
}
class Connection(pymongo_base.Connection):
"""Put the data into a MongoDB database
Collections::
- meter
- the raw incoming data
- resource
- the metadata for resources
- { _id: uuid of resource,
metadata: metadata dictionaries
user_id: uuid
project_id: uuid
meter: [ array of {counter_name: string, counter_type: string,
counter_unit: string} ]
}
"""
CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
CONNECTION_POOL = pymongo_utils.ConnectionPool()
STANDARD_AGGREGATES = dict([(a.name, a) for a in [
pymongo_utils.SUM_AGGREGATION, pymongo_utils.AVG_AGGREGATION,
pymongo_utils.MIN_AGGREGATION, pymongo_utils.MAX_AGGREGATION,
pymongo_utils.COUNT_AGGREGATION,
]])
AGGREGATES = dict([(a.name, a) for a in [
pymongo_utils.SUM_AGGREGATION,
pymongo_utils.AVG_AGGREGATION,
pymongo_utils.MIN_AGGREGATION,
pymongo_utils.MAX_AGGREGATION,
pymongo_utils.COUNT_AGGREGATION,
pymongo_utils.STDDEV_AGGREGATION,
pymongo_utils.CARDINALITY_AGGREGATION,
]])
SORT_OPERATION_MAPPING = {'desc': (pymongo.DESCENDING, '$lt'),
'asc': (pymongo.ASCENDING, '$gt')}
MAP_RESOURCES = bson.code.Code("""
function () {
emit(this.resource_id,
{user_id: this.user_id,
project_id: this.project_id,
source: this.source,
first_timestamp: this.timestamp,
last_timestamp: this.timestamp,
metadata: this.resource_metadata})
}""")
REDUCE_RESOURCES = bson.code.Code("""
function (key, values) {
var merge = {user_id: values[0].user_id,
project_id: values[0].project_id,
source: values[0].source,
first_timestamp: values[0].first_timestamp,
last_timestamp: values[0].last_timestamp,
metadata: values[0].metadata}
values.forEach(function(value) {
if (merge.first_timestamp - value.first_timestamp > 0) {
merge.first_timestamp = value.first_timestamp;
merge.user_id = value.user_id;
merge.project_id = value.project_id;
merge.source = value.source;
} else if (merge.last_timestamp - value.last_timestamp <= 0) {
merge.last_timestamp = value.last_timestamp;
merge.metadata = value.metadata;
}
});
return merge;
}""")
_GENESIS = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)
_APOCALYPSE = datetime.datetime(year=datetime.MAXYEAR, month=12, day=31,
hour=23, minute=59, second=59)
def __init__(self, conf, url):
super(Connection, self).__init__(conf, url)
# NOTE(jd) Use our own connection pooling on top of the Pymongo one.
# Without it we would overflow the MongoDB instance with new
# connections, since we instantiate a Pymongo client each time someone
# requires a new storage connection.
self.conn = self.CONNECTION_POOL.connect(conf, url)
self.version = self.conn.server_info()['versionArray']
# Require MongoDB 2.4 to use $setOnInsert
if self.version < pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION:
raise storage.StorageBadVersion(
"Need at least MongoDB %s" %
pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION)
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
# NOTE(jd) Upgrading is just about creating index, so let's do this
# on connection to be sure at least the TTL is correctly updated if
# needed.
self.upgrade()
@staticmethod
def update_ttl(ttl, ttl_index_name, index_field, coll):
"""Update or create time_to_live indexes.
:param ttl: time to live in seconds.
:param ttl_index_name: name of the index we want to update or create.
:param index_field: field with the index that we need to update.
:param coll: collection which indexes need to be updated.
"""
indexes = coll.index_information()
if ttl <= 0:
if ttl_index_name in indexes:
coll.drop_index(ttl_index_name)
return
if ttl_index_name in indexes:
return coll.database.command(
'collMod', coll.name,
index={'keyPattern': {index_field: pymongo.ASCENDING},
'expireAfterSeconds': ttl})
coll.create_index([(index_field, pymongo.ASCENDING)],
expireAfterSeconds=ttl,
name=ttl_index_name)
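# Hedged usage sketch (mirroring the call in upgrade() below): a positive
# TTL creates or refreshes the expiring index,
#
#     self.update_ttl(86400, 'meter_ttl', 'timestamp', self.db.meter)
#
# while a TTL <= 0 drops the index again.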
def upgrade(self):
# Establish indexes
#
# We need variations for user_id vs. project_id because of the
# way the indexes are stored in b-trees. The user_id and
# project_id values are usually mutually exclusive in the
# queries, so the database won't take advantage of an index
# including both.
# create collection if not present
if 'resource' not in self.db.conn.collection_names():
self.db.conn.create_collection('resource')
if 'meter' not in self.db.conn.collection_names():
self.db.conn.create_collection('meter')
name_qualifier = dict(user_id='', project_id='project_')
background = dict(user_id=False, project_id=True)
for primary in ['user_id', 'project_id']:
name = 'meter_%sidx' % name_qualifier[primary]
self.db.meter.create_index([
('resource_id', pymongo.ASCENDING),
(primary, pymongo.ASCENDING),
('counter_name', pymongo.ASCENDING),
('timestamp', pymongo.ASCENDING),
], name=name, background=background[primary])
self.db.meter.create_index([('timestamp', pymongo.DESCENDING)],
name='timestamp_idx')
# NOTE(ityaptin) This index covers get_resource requests sorting
# and MongoDB uses part of this compound index for different
# queries based on any of user_id, project_id, last_sample_timestamp
# fields
self.db.resource.create_index([('user_id', pymongo.DESCENDING),
('project_id', pymongo.DESCENDING),
('last_sample_timestamp',
pymongo.DESCENDING)],
name='resource_user_project_timestamp',)
self.db.resource.create_index([('last_sample_timestamp',
pymongo.DESCENDING)],
name='last_sample_timestamp_idx')
# update or create time_to_live index
ttl = self.conf.database.metering_time_to_live
self.update_ttl(ttl, 'meter_ttl', 'timestamp', self.db.meter)
self.update_ttl(ttl, 'resource_ttl', 'last_sample_timestamp',
self.db.resource)
def clear(self):
self.conn.drop_database(self.db.name)
# Connection will be reopened automatically if needed
self.conn.close()
def record_metering_data(self, data):
# TODO(liusheng): this is a workaround: the storage scenario tests
# directly invoke this method and pass a single sample dict for all
# the storage backends via conn.record_metering_data. Once all the
# Ceilometer native storage backends support batch recording, the
# scenario tests should be refactored.
self.record_metering_data_batch([data])
def record_metering_data_batch(self, samples):
"""Record the metering data in batch.
:param samples: a list of samples dict.
"""
# Record the updated resource metadata - we use $setOnInsert to
# unconditionally insert sample timestamps and resource metadata
# (in the update case, this must be conditional on the sample not
# being out-of-order)
# We must not store the monotonic time
samples = copy.deepcopy(samples)
for sample in samples:
sample.pop("monotonic_time", None)
sorted_samples = sorted(
copy.deepcopy(samples),
key=lambda s: (s['resource_id'], s['timestamp']))
res_grouped_samples = itertools.groupby(
sorted_samples, key=operator.itemgetter('resource_id'))
samples_to_update_resource = []
for resource_id, g_samples in res_grouped_samples:
g_samples = list(g_samples)
g_samples[-1]['meter'] = [{'counter_name': s['counter_name'],
'counter_type': s['counter_type'],
'counter_unit': s['counter_unit'],
} for s in g_samples]
g_samples[-1]['last_sample_timestamp'] = g_samples[-1]['timestamp']
g_samples[-1]['first_sample_timestamp'] = g_samples[0]['timestamp']
samples_to_update_resource.append(g_samples[-1])
for sample in samples_to_update_resource:
sample['resource_metadata'] = pymongo_utils.improve_keys(
sample.pop('resource_metadata'))
resource = self.db.resource.find_one_and_update(
{'_id': sample['resource_id']},
{'$set': {'project_id': sample['project_id'],
'user_id': sample['user_id'],
'source': sample['source'],
},
'$setOnInsert': {
'metadata': sample['resource_metadata'],
'first_sample_timestamp': sample['timestamp'],
'last_sample_timestamp': sample['timestamp'],
},
'$addToSet': {
'meter': {'$each': sample['meter']},
},
},
upsert=True,
return_document=pymongo.ReturnDocument.AFTER,
)
# only update last sample timestamp if actually later (the usual
# in-order case)
last_sample_timestamp = resource.get('last_sample_timestamp')
if (last_sample_timestamp is None or
last_sample_timestamp <= sample['last_sample_timestamp']):
self.db.resource.update_one(
{'_id': sample['resource_id']},
{'$set': {'metadata': sample['resource_metadata'],
'last_sample_timestamp':
sample['last_sample_timestamp']}}
)
# only update first sample timestamp if actually earlier (
# the unusual out-of-order case)
# NOTE: a null first sample timestamp is not updated as this
# indicates a pre-existing resource document dating from before
# we started recording these timestamps in the resource collection
first_sample_timestamp = resource.get('first_sample_timestamp')
if (first_sample_timestamp is not None and
first_sample_timestamp > sample['first_sample_timestamp']):
self.db.resource.update_one(
{'_id': sample['resource_id']},
{'$set': {'first_sample_timestamp':
sample['first_sample_timestamp']}}
)
# Record the raw data for the meter. Use a copy so we do not
# modify a data structure owned by our caller (the driver adds
# a new key '_id').
record = copy.deepcopy(samples)
for s in record:
s['recorded_at'] = timeutils.utcnow()
s['resource_metadata'] = pymongo_utils.improve_keys(
s.pop('resource_metadata'))
self.db.meter.insert_many(record)
def clear_expired_metering_data(self, ttl):
"""Clear expired data from the backend storage system.
Clearing occurs with native MongoDB time-to-live feature.
"""
LOG.debug("Clearing expired metering data is based on native "
"MongoDB time to live feature and going in background.")
@classmethod
def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'):
"""Returns a sort_instruction and paging operator.
Sort instructions are used in the query to determine what attributes
to sort on and what direction to use.
:param sort_keys: array of attributes by which results should be sorted.
:param sort_dir: direction in which results should be sorted (asc, desc).
:return: sort instructions and paging operator
"""
sort_keys = sort_keys or []
sort_instructions = []
_sort_dir, operation = cls.SORT_OPERATION_MAPPING.get(
sort_dir, cls.SORT_OPERATION_MAPPING['desc'])
for _sort_key in sort_keys:
_instruction = (_sort_key, _sort_dir)
sort_instructions.append(_instruction)
return sort_instructions, operation
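# Worked example of the mapping above:
#
#     cls._build_sort_instructions(['timestamp'], 'asc')
#
# returns ([('timestamp', pymongo.ASCENDING)], '$gt'); an unrecognized
# sort_dir falls back to the 'desc' pair.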
def _get_time_constrained_resources(self, query,
start_timestamp, start_timestamp_op,
end_timestamp, end_timestamp_op,
metaquery, resource, limit):
"""Return an iterable of models.Resource instances
Items are constrained by sample timestamp.
:param query: project/user/source query
:param start_timestamp: modified timestamp start range.
:param start_timestamp_op: start time operator, like gt, ge.
:param end_timestamp: modified timestamp end range.
:param end_timestamp_op: end time operator, like lt, le.
:param metaquery: dict with metadata to match on.
:param resource: resource filter.
"""
if resource is not None:
query['resource_id'] = resource
# Add resource_ prefix so it matches the field in the db
query.update(dict(('resource_' + k, v)
for (k, v) in six.iteritems(metaquery)))
# FIXME(dhellmann): This may not perform very well,
# but doing any better will require changing the database
# schema and that will need more thought than I have time
# to put into it today.
# Look for resources matching the above criteria and with
# samples in the time range we care about, then change the
# resource query to return just those resources by id.
ts_range = pymongo_utils.make_timestamp_range(start_timestamp,
end_timestamp,
start_timestamp_op,
end_timestamp_op)
if ts_range:
query['timestamp'] = ts_range
sort_keys = base._handle_sort_key('resource')
sort_instructions = self._build_sort_instructions(sort_keys)[0]
# use a unique collection name for the results collection,
# as result post-sorting (as opposed to reduce pre-sorting)
# is not possible on an inline M-R
out = 'resource_list_%s' % uuid.uuid4()
self.db.meter.map_reduce(self.MAP_RESOURCES,
self.REDUCE_RESOURCES,
out=out,
sort={'resource_id': 1},
query=query)
try:
if limit is not None:
results = self.db[out].find(sort=sort_instructions,
limit=limit)
else:
results = self.db[out].find(sort=sort_instructions)
for r in results:
resource = r['value']
yield models.Resource(
resource_id=r['_id'],
user_id=resource['user_id'],
project_id=resource['project_id'],
first_sample_timestamp=resource['first_timestamp'],
last_sample_timestamp=resource['last_timestamp'],
source=resource['source'],
metadata=pymongo_utils.unquote_keys(resource['metadata']))
finally:
self.db[out].drop()
def _get_floating_resources(self, query, metaquery, resource, limit):
"""Return an iterable of models.Resource instances
Items are unconstrained by timestamp.
:param query: project/user/source query
:param metaquery: dict with metadata to match on.
:param resource: resource filter.
"""
if resource is not None:
query['_id'] = resource
query.update(dict((k, v)
for (k, v) in six.iteritems(metaquery)))
keys = base._handle_sort_key('resource')
sort_keys = ['last_sample_timestamp' if i == 'timestamp' else i
for i in keys]
sort_instructions = self._build_sort_instructions(sort_keys)[0]
if limit is not None:
results = self.db.resource.find(query, sort=sort_instructions,
limit=limit)
else:
results = self.db.resource.find(query, sort=sort_instructions)
for r in results:
yield models.Resource(
resource_id=r['_id'],
user_id=r['user_id'],
project_id=r['project_id'],
first_sample_timestamp=r.get('first_sample_timestamp',
self._GENESIS),
last_sample_timestamp=r.get('last_sample_timestamp',
self._APOCALYPSE),
source=r['source'],
metadata=pymongo_utils.unquote_keys(r['metadata']))
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery=None, resource=None, limit=None):
"""Return an iterable of models.Resource instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optional start time operator, like gt, ge.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional end time operator, like lt, le.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return
metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {}
query = {}
if user is not None:
query['user_id'] = user
if project is not None:
query['project_id'] = project
if source is not None:
query['source'] = source
if start_timestamp or end_timestamp:
return self._get_time_constrained_resources(query,
start_timestamp,
start_timestamp_op,
end_timestamp,
end_timestamp_op,
metaquery, resource,
limit)
else:
return self._get_floating_resources(query, metaquery, resource,
limit)
@staticmethod
def _make_period_dict(period, first_ts):
"""Create a period field for _id of grouped fields.
:param period: Period duration in seconds
:param first_ts: First timestamp for first period
:return:
"""
if period >= 0:
period_unique_dict = {
"period_start":
{
"$divide": [
{"$subtract": [
{"$subtract": ["$timestamp",
first_ts]},
{"$mod": [{"$subtract": ["$timestamp",
first_ts]},
period * 1000]
}
]},
period * 1000
]
}
}
else:
# Note(ityaptin) Hack for older MongoDB versions (2.4.+ and older).
# Since 2.6+ we could use $literal operator
period_unique_dict = {"$period_start": {"$add": [0, 0]}}
return period_unique_dict
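# Worked example of the pipeline expression above (assumed values): with
# first_ts at t = 0 ms, a sample at $timestamp = 720000 ms and
# period = 300 s, the expression computes
# (720000 - 720000 % 300000) / 300000 = 2, i.e. the sample belongs to
# the period bucket with index 2 (its period_start is 600 s in).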
def get_meter_statistics(self, sample_filter, period=None, groupby=None,
aggregate=None):
"""Return an iterable of models.Statistics instance.
Items are containing meter statistics described by the query
parameters. The filter must have a meter value set.
"""
# NOTE(zqfan): We have already checked at the API level, but
# still leave it here in case of direct storage calls.
if aggregate:
for a in aggregate:
if a.func not in self.AGGREGATES:
msg = _('Invalid aggregation function: %s') % a.func
raise storage.StorageBadAggregate(msg)
if (groupby and set(groupby) -
set(['user_id', 'project_id', 'resource_id', 'source',
'resource_metadata.instance_type'])):
raise ceilometer.NotImplementedError(
"Unable to group by these fields")
q = pymongo_utils.make_query_from_filter(sample_filter)
group_stage = {}
project_stage = {
"unit": "$_id.unit",
"name": "$_id.name",
"first_timestamp": "$first_timestamp",
"last_timestamp": "$last_timestamp",
"period_start": "$_id.period_start",
}
# Add timestamps to $group stage
group_stage.update({"first_timestamp": {"$min": "$timestamp"},
"last_timestamp": {"$max": "$timestamp"}})
# Define a _id field for grouped documents
unique_group_field = {"name": "$counter_name",
"unit": "$counter_unit"}
# Define a first timestamp for periods
if sample_filter.start_timestamp:
first_timestamp = sample_filter.start_timestamp
else:
first_timestamp_cursor = self.db.meter.find(
limit=1, sort=[('timestamp',
pymongo.ASCENDING)])
if first_timestamp_cursor.count():
first_timestamp = first_timestamp_cursor[0]['timestamp']
else:
first_timestamp = utils.EPOCH_TIME
# Add a start_period field to unique identifier of grouped documents
if period:
period_dict = self._make_period_dict(period,
first_timestamp)
unique_group_field.update(period_dict)
# Add the groupby fields to the unique identifier of grouped documents
if groupby:
unique_group_field.update(dict((field.replace(".", "/"),
"$%s" % field)
for field in groupby))
group_stage.update({"_id": unique_group_field})
self._compile_aggregate_stages(aggregate, group_stage, project_stage)
# Aggregation stages list. The stages run one by one, each consuming
# the documents produced by the previous stage.
aggregation_query = [{'$match': q},
{"$sort": {"timestamp": 1}},
{"$group": group_stage},
{"$sort": {"_id.period_start": 1}},
{"$project": project_stage}]
# results is dict in pymongo<=2.6.3 and CommandCursor in >=3.0
results = self.db.meter.aggregate(aggregation_query,
**self._make_aggregation_params())
return [self._stats_result_to_model(point, groupby, aggregate,
period, first_timestamp)
for point in self._get_results(results)]
def _stats_result_aggregates(self, result, aggregate):
stats_args = {}
for attr, func in Connection.STANDARD_AGGREGATES.items():
if attr in result:
stats_args.update(func.finalize(result,
version_array=self.version))
if aggregate:
stats_args['aggregate'] = {}
for agr in aggregate:
stats_args['aggregate'].update(
Connection.AGGREGATES[agr.func].finalize(
result, agr.param, self.version))
return stats_args
def _stats_result_to_model(self, result, groupby, aggregate, period,
first_timestamp):
if period is None:
period = 0
first_timestamp = pymongo_utils.from_unix_timestamp(first_timestamp)
stats_args = self._stats_result_aggregates(result, aggregate)
stats_args['unit'] = result['unit']
stats_args['duration'] = (result["last_timestamp"] -
result["first_timestamp"]).total_seconds()
stats_args['duration_start'] = result['first_timestamp']
stats_args['duration_end'] = result['last_timestamp']
stats_args['period'] = period
start = result.get("period_start", 0) * period
stats_args['period_start'] = (first_timestamp +
datetime.timedelta(seconds=start))
stats_args['period_end'] = (first_timestamp +
datetime.timedelta(seconds=start + period)
if period else result['last_timestamp'])
stats_args['groupby'] = (
dict((g, result['_id'].get(g.replace(".", "/")))
for g in groupby) if groupby else None)
return models.Statistics(**stats_args)
def _compile_aggregate_stages(self, aggregate, group_stage, project_stage):
if not aggregate:
for aggregation in Connection.STANDARD_AGGREGATES.values():
group_stage.update(
aggregation.group(version_array=self.version)
)
project_stage.update(
aggregation.project(
version_array=self.version
)
)
else:
for description in aggregate:
aggregation = Connection.AGGREGATES.get(description.func)
if aggregation:
if not aggregation.validate(description.param):
raise storage.StorageBadAggregate(
'Bad aggregate: %s.%s' % (description.func,
description.param))
group_stage.update(
aggregation.group(description.param,
version_array=self.version)
)
project_stage.update(
aggregation.project(description.param,
version_array=self.version)
)
@staticmethod
def _get_results(results):
if isinstance(results, dict):
return results.get('result', [])
else:
return results
def _make_aggregation_params(self):
if self.version >= pymongo_utils.COMPLETE_AGGREGATE_COMPATIBLE_VERSION:
return {"allowDiskUse": True}
return {}

View File

@ -1,838 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from __future__ import absolute_import
import datetime
import hashlib
import os
from oslo_db import api
from oslo_db import exception as dbexc
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import distinct
from sqlalchemy import func
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import cast
import ceilometer
from ceilometer.i18n import _
from ceilometer import storage
from ceilometer.storage import base
from ceilometer.storage import models as api_models
from ceilometer.storage.sqlalchemy import models
from ceilometer.storage.sqlalchemy import utils as sql_utils
from ceilometer import utils
LOG = log.getLogger(__name__)
STANDARD_AGGREGATES = dict(
avg=func.avg(models.Sample.volume).label('avg'),
sum=func.sum(models.Sample.volume).label('sum'),
min=func.min(models.Sample.volume).label('min'),
max=func.max(models.Sample.volume).label('max'),
count=func.count(models.Sample.volume).label('count')
)
UNPARAMETERIZED_AGGREGATES = dict(
stddev=func.stddev_pop(models.Sample.volume).label('stddev')
)
PARAMETERIZED_AGGREGATES = dict(
validate=dict(
cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id']
),
compute=dict(
cardinality=lambda p: func.count(
distinct(getattr(models.Resource, p))
).label('cardinality/%s' % p)
)
)
AVAILABLE_CAPABILITIES = {
'meters': {'query': {'simple': True,
'metadata': True}},
'resources': {'query': {'simple': True,
'metadata': True}},
'samples': {'query': {'simple': True,
'metadata': True,
'complex': True}},
'statistics': {'groupby': True,
'query': {'simple': True,
'metadata': True},
'aggregation': {'standard': True,
'selectable': {
'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True}}
},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
def apply_metaquery_filter(session, query, metaquery):
"""Apply provided metaquery filter to existing query.
:param session: session used for original query
:param query: Query instance
:param metaquery: dict with metadata to match on.
"""
for k, value in six.iteritems(metaquery):
key = k[9:] # strip out 'metadata.' prefix
try:
_model = sql_utils.META_TYPE_MAP[type(value)]
except KeyError:
raise ceilometer.NotImplementedError(
'Query on %(key)s is of %(value)s '
'type and is not supported' %
{"key": k, "value": type(value)})
else:
meta_alias = aliased(_model)
on_clause = and_(models.Resource.internal_id == meta_alias.id,
meta_alias.meta_key == key)
# outer join is needed to support metaquery
# with or operator on non existent metadata field
# see: test_query_non_existing_metadata_with_result
# test case.
query = query.outerjoin(meta_alias, on_clause)
query = query.filter(meta_alias.value == value)
return query
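# Hedged usage sketch: a metaquery such as
#
#     {'metadata.display_name': 'vm-1'}
#
# strips the 'metadata.' prefix, resolves the string type via
# META_TYPE_MAP to a text metadata model, and outer-joins it with
# meta_key == 'display_name' and value == 'vm-1'.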
def make_query_from_filter(session, query, sample_filter, require_meter=True):
"""Return a query dictionary based on the settings in the filter.
:param session: session used for original query
:param query: Query instance
:param sample_filter: SampleFilter instance
:param require_meter: If true and the filter does not have a meter,
raise an error.
"""
if sample_filter.meter:
query = query.filter(models.Meter.name == sample_filter.meter)
elif require_meter:
raise RuntimeError('Missing required meter specifier')
if sample_filter.source:
query = query.filter(
models.Resource.source_id == sample_filter.source)
if sample_filter.start_timestamp:
ts_start = sample_filter.start_timestamp
if sample_filter.start_timestamp_op == 'gt':
query = query.filter(models.Sample.timestamp > ts_start)
else:
query = query.filter(models.Sample.timestamp >= ts_start)
if sample_filter.end_timestamp:
ts_end = sample_filter.end_timestamp
if sample_filter.end_timestamp_op == 'le':
query = query.filter(models.Sample.timestamp <= ts_end)
else:
query = query.filter(models.Sample.timestamp < ts_end)
if sample_filter.user:
if sample_filter.user == 'None':
sample_filter.user = None
query = query.filter(models.Resource.user_id == sample_filter.user)
if sample_filter.project:
if sample_filter.project == 'None':
sample_filter.project = None
query = query.filter(
models.Resource.project_id == sample_filter.project)
if sample_filter.resource:
query = query.filter(
models.Resource.resource_id == sample_filter.resource)
if sample_filter.message_id:
query = query.filter(
models.Sample.message_id == sample_filter.message_id)
if sample_filter.metaquery:
query = apply_metaquery_filter(session, query,
sample_filter.metaquery)
return query
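# Hedged usage sketch (fields as handled above): a SampleFilter with
# meter='cpu_util', start_timestamp=t0 and start_timestamp_op='gt'
# narrows the query to Meter.name == 'cpu_util' and
# Sample.timestamp > t0; without an explicit operator the comparison
# defaults to >= for the start and < for the end of the range.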
class Connection(base.Connection):
"""Put the data into a SQLAlchemy database.
Tables::
- meter
- meter definition
- { id: meter id
name: meter name
type: meter type
unit: meter unit
}
- resource
- resource definition
- { internal_id: resource id
resource_id: resource uuid
user_id: user uuid
project_id: project uuid
source_id: source id
resource_metadata: metadata dictionary
metadata_hash: metadata dictionary hash
}
- sample
- the raw incoming data
- { id: sample id
meter_id: meter id (->meter.id)
resource_id: resource id (->resource.internal_id)
volume: sample volume
timestamp: datetime
recorded_at: datetime
message_signature: message signature
message_id: message uuid
}
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def __init__(self, conf, url):
super(Connection, self).__init__(conf, url)
# Set max_retries to 0, since oslo.db may in certain failure cases
# attempt to retry making the db connection up to max_retries ** 2
# times, and db reconnection has already been implemented in the
# storage.__init__.get_connection_from_config function
options = dict(self.conf.database.items())
options['max_retries'] = 0
# oslo.db doesn't support options defined by Ceilometer
for opt in storage.OPTS:
options.pop(opt.name, None)
self._engine_facade = db_session.EngineFacade(url, **options)
def upgrade(self):
# NOTE(gordc): to minimise memory, only import migration when needed
from oslo_db.sqlalchemy import migration
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'sqlalchemy', 'migrate_repo')
engine = self._engine_facade.get_engine()
from migrate import exceptions as migrate_exc
from migrate.versioning import api
from migrate.versioning import repository
repo = repository.Repository(path)
try:
api.db_version(engine, repo)
except migrate_exc.DatabaseNotControlledError:
models.Base.metadata.create_all(engine)
api.version_control(engine, repo, repo.latest)
else:
migration.db_sync(engine, path)
def clear(self):
engine = self._engine_facade.get_engine()
for table in reversed(models.Base.metadata.sorted_tables):
engine.execute(table.delete())
engine.dispose()
@staticmethod
def _create_meter(conn, name, type, unit):
# TODO(gordc): implement lru_cache to improve performance
try:
meter = models.Meter.__table__
trans = conn.begin_nested()
if conn.dialect.name == 'sqlite':
trans = conn.begin()
with trans:
meter_row = conn.execute(
sa.select([meter.c.id])
.where(sa.and_(meter.c.name == name,
meter.c.type == type,
meter.c.unit == unit))).first()
meter_id = meter_row[0] if meter_row else None
if meter_id is None:
result = conn.execute(meter.insert(), name=name,
type=type, unit=unit)
meter_id = result.inserted_primary_key[0]
except dbexc.DBDuplicateEntry:
# retry function to pick up duplicate committed object
meter_id = Connection._create_meter(conn, name, type, unit)
return meter_id
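# Hedged usage sketch (values assumed) of the get-or-create above:
#
#     meter_id = Connection._create_meter(conn, 'cpu_util', 'gauge', '%')
#
# A concurrent insert raising DBDuplicateEntry is resolved by the retry,
# which picks up the row committed by the other transaction.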
@staticmethod
def _create_resource(conn, res_id, user_id, project_id, source_id,
rmeta):
# TODO(gordc): implement lru_cache to improve performance
try:
res = models.Resource.__table__
m_hash = jsonutils.dumps(rmeta, sort_keys=True)
if six.PY3:
m_hash = m_hash.encode('utf-8')
m_hash = hashlib.md5(m_hash).hexdigest()
trans = conn.begin_nested()
if conn.dialect.name == 'sqlite':
trans = conn.begin()
with trans:
res_row = conn.execute(
sa.select([res.c.internal_id])
.where(sa.and_(res.c.resource_id == res_id,
res.c.user_id == user_id,
res.c.project_id == project_id,
res.c.source_id == source_id,
res.c.metadata_hash == m_hash))).first()
internal_id = res_row[0] if res_row else None
if internal_id is None:
result = conn.execute(res.insert(), resource_id=res_id,
user_id=user_id,
project_id=project_id,
source_id=source_id,
resource_metadata=rmeta,
metadata_hash=m_hash)
internal_id = result.inserted_primary_key[0]
if rmeta and isinstance(rmeta, dict):
meta_map = {}
for key, v in utils.dict_to_keyval(rmeta):
try:
_model = sql_utils.META_TYPE_MAP[type(v)]
if meta_map.get(_model) is None:
meta_map[_model] = []
meta_map[_model].append(
{'id': internal_id, 'meta_key': key,
'value': v})
except KeyError:
LOG.warning(_("Unknown metadata type. Key "
"(%s) will not be queryable."),
key)
for _model in meta_map.keys():
conn.execute(_model.__table__.insert(),
meta_map[_model])
except dbexc.DBDuplicateEntry:
# retry function to pick up duplicate committed object
internal_id = Connection._create_resource(
conn, res_id, user_id, project_id, source_id, rmeta)
return internal_id
# FIXME(sileht): use set_defaults to pass cfg.CONF.database.retry_interval
# and cfg.CONF.database.max_retries to this method once the global config
# has been removed (passing cfg.CONF directly doesn't work because it
# copies the default instead of the configured value)
@api.wrap_db_retry(retry_interval=10, max_retries=10,
retry_on_deadlock=True)
def record_metering_data(self, data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.publisher.utils.meter_message_from_counter
"""
engine = self._engine_facade.get_engine()
with engine.begin() as conn:
# Record the raw data for the sample.
m_id = self._create_meter(conn,
data['counter_name'],
data['counter_type'],
data['counter_unit'])
res_id = self._create_resource(conn,
data['resource_id'],
data['user_id'],
data['project_id'],
data['source'],
data['resource_metadata'])
sample = models.Sample.__table__
conn.execute(sample.insert(), meter_id=m_id,
resource_id=res_id,
timestamp=data['timestamp'],
volume=data['counter_volume'],
message_signature=data['message_signature'],
message_id=data['message_id'])
def clear_expired_metering_data(self, ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
# Prevent database deadlocks from occurring by
# using a separate transaction for each delete
session = self._engine_facade.get_session()
with session.begin():
end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
sample_q = (session.query(models.Sample)
.filter(models.Sample.timestamp < end))
rows = sample_q.delete()
LOG.info("%d samples removed from database", rows)
if not self.conf.database.sql_expire_samples_only:
with session.begin():
# remove Meter definitions with no matching samples
(session.query(models.Meter)
.filter(~models.Meter.samples.any())
.delete(synchronize_session=False))
with session.begin():
resource_q = (session.query(models.Resource.internal_id)
.filter(~models.Resource.samples.any()))
# mark resource with no matching samples for delete
resource_q.update({models.Resource.metadata_hash: "delete_"
+ cast(models.Resource.internal_id,
sa.String)},
synchronize_session=False)
# remove metadata of resources marked for delete
for table in [models.MetaText, models.MetaBigInt,
models.MetaFloat, models.MetaBool]:
with session.begin():
resource_q = (session.query(models.Resource.internal_id)
.filter(models.Resource.metadata_hash
.like('delete_%')))
resource_subq = resource_q.subquery()
(session.query(table)
.filter(table.id.in_(resource_subq))
.delete(synchronize_session=False))
# remove resource marked for delete
with session.begin():
resource_q = (session.query(models.Resource.internal_id)
.filter(models.Resource.metadata_hash
.like('delete_%')))
resource_q.delete(synchronize_session=False)
LOG.info("Expired residual resource and"
" meter definition data")
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery=None, resource=None, limit=None):
"""Return an iterable of api_models.Resource instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optional start time operator, like gt, ge.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional end time operator, like lt, le.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return
s_filter = storage.SampleFilter(user=user,
project=project,
source=source,
start_timestamp=start_timestamp,
start_timestamp_op=start_timestamp_op,
end_timestamp=end_timestamp,
end_timestamp_op=end_timestamp_op,
metaquery=metaquery,
resource=resource)
session = self._engine_facade.get_session()
# get list of resource_ids
has_timestamp = start_timestamp or end_timestamp
# NOTE: When sql_expire_samples_only is enabled, there will be some
# resources without any sample; in such cases we should use an inner
# join on the sample table to avoid wrong results.
if self.conf.database.sql_expire_samples_only or has_timestamp:
res_q = session.query(distinct(models.Resource.resource_id)).join(
models.Sample,
models.Sample.resource_id == models.Resource.internal_id)
else:
res_q = session.query(distinct(models.Resource.resource_id))
res_q = make_query_from_filter(session, res_q, s_filter,
require_meter=False)
res_q = res_q.limit(limit) if limit else res_q
for res_id in res_q.all():
# get max and min sample timestamp value
min_max_q = (session.query(func.max(models.Sample.timestamp)
.label('max_timestamp'),
func.min(models.Sample.timestamp)
.label('min_timestamp'))
.join(models.Resource,
models.Resource.internal_id ==
models.Sample.resource_id)
.filter(models.Resource.resource_id ==
res_id[0]))
min_max_q = make_query_from_filter(session, min_max_q, s_filter,
require_meter=False)
min_max = min_max_q.first()
# get resource details for latest sample
res_q = (session.query(models.Resource.resource_id,
models.Resource.user_id,
models.Resource.project_id,
models.Resource.source_id,
models.Resource.resource_metadata)
.join(models.Sample,
models.Sample.resource_id ==
models.Resource.internal_id)
.filter(models.Sample.timestamp ==
min_max.max_timestamp)
.filter(models.Resource.resource_id ==
res_id[0])
.order_by(models.Sample.id.desc()).limit(1))
res = res_q.first()
yield api_models.Resource(
resource_id=res.resource_id,
project_id=res.project_id,
first_sample_timestamp=min_max.min_timestamp,
last_sample_timestamp=min_max.max_timestamp,
source=res.source_id,
user_id=res.user_id,
metadata=res.resource_metadata
)
def get_meters(self, user=None, project=None, resource=None, source=None,
metaquery=None, limit=None, unique=False):
"""Return an iterable of api_models.Meter instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional ID of the resource.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
:param limit: Maximum number of results to return.
:param unique: If set to true, return only unique meter information.
"""
if limit == 0:
return
s_filter = storage.SampleFilter(user=user,
project=project,
source=source,
metaquery=metaquery,
resource=resource)
# NOTE(gordc): get the latest sample of each meter/resource. We do
# not filter here, as we want to filter only on the latest record.
session = self._engine_facade.get_session()
subq = session.query(func.max(models.Sample.id).label('id')).join(
models.Resource,
models.Resource.internal_id == models.Sample.resource_id)
if unique:
subq = subq.group_by(models.Sample.meter_id)
else:
subq = subq.group_by(models.Sample.meter_id,
models.Resource.resource_id)
if resource:
subq = subq.filter(models.Resource.resource_id == resource)
subq = subq.subquery()
# get meter details for samples.
query_sample = (session.query(models.Sample.meter_id,
models.Meter.name, models.Meter.type,
models.Meter.unit,
models.Resource.resource_id,
models.Resource.project_id,
models.Resource.source_id,
models.Resource.user_id).join(
subq, subq.c.id == models.Sample.id)
.join(models.Meter, models.Meter.id == models.Sample.meter_id)
.join(models.Resource,
models.Resource.internal_id == models.Sample.resource_id))
query_sample = make_query_from_filter(session, query_sample, s_filter,
require_meter=False)
query_sample = query_sample.limit(limit) if limit else query_sample
if unique:
for row in query_sample.all():
yield api_models.Meter(
name=row.name,
type=row.type,
unit=row.unit,
resource_id=None,
project_id=None,
source=None,
user_id=None)
else:
for row in query_sample.all():
yield api_models.Meter(
name=row.name,
type=row.type,
unit=row.unit,
resource_id=row.resource_id,
project_id=row.project_id,
source=row.source_id,
user_id=row.user_id)
@staticmethod
def _retrieve_samples(query):
samples = query.all()
for s in samples:
# Remove the id generated by the database when
# the sample was inserted. It is an implementation
# detail that should not leak outside of the driver.
yield api_models.Sample(
source=s.source_id,
counter_name=s.counter_name,
counter_type=s.counter_type,
counter_unit=s.counter_unit,
counter_volume=s.counter_volume,
user_id=s.user_id,
project_id=s.project_id,
resource_id=s.resource_id,
timestamp=s.timestamp,
recorded_at=s.recorded_at,
resource_metadata=s.resource_metadata,
message_id=s.message_id,
message_signature=s.message_signature,
)
def get_samples(self, sample_filter, limit=None):
"""Return an iterable of api_models.Samples.
:param sample_filter: Filter.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return []
session = self._engine_facade.get_session()
query = session.query(models.Sample.timestamp,
models.Sample.recorded_at,
models.Sample.message_id,
models.Sample.message_signature,
models.Sample.volume.label('counter_volume'),
models.Meter.name.label('counter_name'),
models.Meter.type.label('counter_type'),
models.Meter.unit.label('counter_unit'),
models.Resource.source_id,
models.Resource.user_id,
models.Resource.project_id,
models.Resource.resource_metadata,
models.Resource.resource_id).join(
models.Meter, models.Meter.id == models.Sample.meter_id).join(
models.Resource,
models.Resource.internal_id == models.Sample.resource_id).order_by(
models.Sample.timestamp.desc())
query = make_query_from_filter(session, query, sample_filter,
require_meter=False)
if limit:
query = query.limit(limit)
return self._retrieve_samples(query)
def query_samples(self, filter_expr=None, orderby=None, limit=None):
if limit == 0:
return []
session = self._engine_facade.get_session()
engine = self._engine_facade.get_engine()
query = session.query(models.Sample.timestamp,
models.Sample.recorded_at,
models.Sample.message_id,
models.Sample.message_signature,
models.Sample.volume.label('counter_volume'),
models.Meter.name.label('counter_name'),
models.Meter.type.label('counter_type'),
models.Meter.unit.label('counter_unit'),
models.Resource.source_id,
models.Resource.user_id,
models.Resource.project_id,
models.Resource.resource_metadata,
models.Resource.resource_id).join(
models.Meter, models.Meter.id == models.Sample.meter_id).join(
models.Resource,
models.Resource.internal_id == models.Sample.resource_id)
transformer = sql_utils.QueryTransformer(models.FullSample, query,
dialect=engine.dialect.name)
if filter_expr is not None:
transformer.apply_filter(filter_expr)
transformer.apply_options(orderby, limit)
return self._retrieve_samples(transformer.get_query())
@staticmethod
def _get_aggregate_functions(aggregate):
if not aggregate:
return [f for f in STANDARD_AGGREGATES.values()]
functions = []
for a in aggregate:
if a.func in STANDARD_AGGREGATES:
functions.append(STANDARD_AGGREGATES[a.func])
elif a.func in UNPARAMETERIZED_AGGREGATES:
functions.append(UNPARAMETERIZED_AGGREGATES[a.func])
elif a.func in PARAMETERIZED_AGGREGATES['compute']:
validate = PARAMETERIZED_AGGREGATES['validate'].get(a.func)
if not (validate and validate(a.param)):
raise storage.StorageBadAggregate('Bad aggregate: %s.%s'
% (a.func, a.param))
compute = PARAMETERIZED_AGGREGATES['compute'][a.func]
functions.append(compute(a.param))
else:
# NOTE(zqfan): We have already checked this at the API level, but
# still leave it here in case of direct storage calls.
msg = _('Invalid aggregation function: %s') % a.func
raise storage.StorageBadAggregate(msg)
return functions
def _make_stats_query(self, sample_filter, groupby, aggregate):
select = [
func.min(models.Sample.timestamp).label('tsmin'),
func.max(models.Sample.timestamp).label('tsmax'),
models.Meter.unit
]
select.extend(self._get_aggregate_functions(aggregate))
session = self._engine_facade.get_session()
if groupby:
group_attributes = []
for g in groupby:
if g != 'resource_metadata.instance_type':
group_attributes.append(getattr(models.Resource, g))
else:
group_attributes.append(
getattr(models.MetaText, 'value')
.label('resource_metadata.instance_type'))
select.extend(group_attributes)
query = (
session.query(*select)
.join(models.Meter,
models.Meter.id == models.Sample.meter_id)
.join(models.Resource,
models.Resource.internal_id == models.Sample.resource_id)
.group_by(models.Meter.unit))
if groupby:
for g in groupby:
if g == 'resource_metadata.instance_type':
query = query.join(
models.MetaText,
models.Resource.internal_id == models.MetaText.id)
query = query.filter(
models.MetaText.meta_key == 'instance_type')
query = query.group_by(*group_attributes)
return make_query_from_filter(session, query, sample_filter)
@staticmethod
def _stats_result_aggregates(result, aggregate):
stats_args = {}
if isinstance(result.count, six.integer_types):
stats_args['count'] = result.count
for attr in ['min', 'max', 'sum', 'avg']:
if hasattr(result, attr):
stats_args[attr] = getattr(result, attr)
if aggregate:
stats_args['aggregate'] = {}
for a in aggregate:
key = '%s%s' % (a.func, '/%s' % a.param if a.param else '')
stats_args['aggregate'][key] = getattr(result, key)
return stats_args
@staticmethod
def _stats_result_to_model(result, period, period_start,
period_end, groupby, aggregate):
stats_args = Connection._stats_result_aggregates(result, aggregate)
stats_args['unit'] = result.unit
duration = (timeutils.delta_seconds(result.tsmin, result.tsmax)
if result.tsmin is not None and result.tsmax is not None
else None)
stats_args['duration'] = duration
stats_args['duration_start'] = result.tsmin
stats_args['duration_end'] = result.tsmax
stats_args['period'] = period
stats_args['period_start'] = period_start
stats_args['period_end'] = period_end
stats_args['groupby'] = (dict(
(g, getattr(result, g)) for g in groupby) if groupby else None)
return api_models.Statistics(**stats_args)
def get_meter_statistics(self, sample_filter, period=None, groupby=None,
aggregate=None):
"""Return an iterable of api_models.Statistics instances.
Items contain meter statistics described by the query
parameters. The filter must have a meter value set.
"""
if groupby:
for group in groupby:
if group not in ['user_id', 'project_id', 'resource_id',
'resource_metadata.instance_type']:
raise ceilometer.NotImplementedError('Unable to group by '
'these fields')
if not period:
for res in self._make_stats_query(sample_filter,
groupby,
aggregate):
if res.count:
yield self._stats_result_to_model(res, 0,
res.tsmin, res.tsmax,
groupby,
aggregate)
return
if not (sample_filter.start_timestamp and sample_filter.end_timestamp):
res = self._make_stats_query(sample_filter,
None,
aggregate).first()
if not res:
# NOTE(liusheng): 'res' may be None because no sample was
# found matching the sample filter(s).
return
query = self._make_stats_query(sample_filter, groupby, aggregate)
# HACK(jd) This is an awful method to compute stats by period, but
# since we're trying to be SQL agnostic we have to write portable
# code, so here it is, admire! We're going to do one request to get
# stats by period. We would like to use GROUP BY, but there's no
# portable way to manipulate timestamp in SQL, so we can't.
for period_start, period_end in base.iter_period(
sample_filter.start_timestamp or res.tsmin,
sample_filter.end_timestamp or res.tsmax,
period):
q = query.filter(models.Sample.timestamp >= period_start)
q = q.filter(models.Sample.timestamp < period_end)
for r in q.all():
if r.count:
yield self._stats_result_to_model(
result=r,
period=int(timeutils.delta_seconds(period_start,
period_end)),
period_start=period_start,
period_end=period_end,
groupby=groupby,
aggregate=aggregate
)
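# A minimal sketch of the period iteration described in the HACK note
# above; iter_period_sketch is a hypothetical stand-in for the real
# base.iter_period helper imported from ceilometer.storage.base.
import datetime

def iter_period_sketch(start, end, period):
    """Yield consecutive (period_start, period_end) windows of `period` seconds."""
    delta = datetime.timedelta(seconds=period)
    while start < end:
        yield start, start + delta
        start += delta

# Each yielded window is then applied as a pair of timestamp filters on the
# base query, which keeps the computation portable across SQL dialects.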


@ -1,148 +0,0 @@
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Model classes for use in the storage API.
"""
from ceilometer.storage import base
class Resource(base.Model):
"""Something for which sample data has been collected."""
def __init__(self, resource_id, project_id,
first_sample_timestamp,
last_sample_timestamp,
source, user_id, metadata):
"""Create a new resource.
:param resource_id: UUID of the resource
:param project_id: UUID of project owning the resource
:param first_sample_timestamp: first sample timestamp captured
:param last_sample_timestamp: last sample timestamp captured
:param source: the identifier for the user/project id definition
:param user_id: UUID of user owning the resource
:param metadata: most current metadata for the resource (a dict)
"""
base.Model.__init__(self,
resource_id=resource_id,
first_sample_timestamp=first_sample_timestamp,
last_sample_timestamp=last_sample_timestamp,
project_id=project_id,
source=source,
user_id=user_id,
metadata=metadata,
)
class Meter(base.Model):
"""Definition of a meter for which sample data has been collected."""
def __init__(self, name, type, unit, resource_id, project_id, source,
user_id):
"""Create a new meter.
:param name: name of the meter
:param type: type of the meter (gauge, delta, cumulative)
:param unit: unit of the meter
:param resource_id: UUID of the resource
:param project_id: UUID of project owning the resource
:param source: the identifier for the user/project id definition
:param user_id: UUID of user owning the resource
"""
base.Model.__init__(self,
name=name,
type=type,
unit=unit,
resource_id=resource_id,
project_id=project_id,
source=source,
user_id=user_id,
)
class Sample(base.Model):
"""One collected data point."""
def __init__(self,
source,
counter_name, counter_type, counter_unit, counter_volume,
user_id, project_id, resource_id,
timestamp, resource_metadata,
message_id,
message_signature,
recorded_at,
):
"""Create a new sample.
:param source: the identifier for the user/project id definition
:param counter_name: the name of the measurement being taken
:param counter_type: the type of the measurement
:param counter_unit: the units for the measurement
:param counter_volume: the measured value
:param user_id: the user that triggered the measurement
:param project_id: the project that owns the resource
:param resource_id: the thing on which the measurement was taken
:param timestamp: the time of the measurement
:param resource_metadata: extra details about the resource
:param message_id: a message identifier
:param recorded_at: sample record timestamp
:param message_signature: a hash created from the rest of the
message data
"""
base.Model.__init__(self,
source=source,
counter_name=counter_name,
counter_type=counter_type,
counter_unit=counter_unit,
counter_volume=counter_volume,
user_id=user_id,
project_id=project_id,
resource_id=resource_id,
timestamp=timestamp,
resource_metadata=resource_metadata,
message_id=message_id,
message_signature=message_signature,
recorded_at=recorded_at)
class Statistics(base.Model):
"""Computed statistics based on a set of sample data."""
def __init__(self, unit,
period, period_start, period_end,
duration, duration_start, duration_end,
groupby, **data):
"""Create a new statistics object.
:param unit: The unit type of the data set
:param period: The length of the time range covered by these stats
:param period_start: The timestamp for the start of the period
:param period_end: The timestamp for the end of the period
:param duration: The total time for the matching samples
:param duration_start: The earliest time for the matching samples
:param duration_end: The latest time for the matching samples
:param groupby: The fields used to group the samples.
:param data: some or all of the following aggregates
min: The smallest volume found
max: The largest volume found
avg: The average of all volumes found
sum: The total of all volumes found
count: The number of samples found
aggregate: name-value pairs for selectable aggregates
"""
base.Model.__init__(self, unit=unit,
period=period, period_start=period_start,
period_end=period_end, duration=duration,
duration_start=duration_start,
duration_end=duration_end,
groupby=groupby,
**data)


@ -1,590 +0,0 @@
#
# Copyright Ericsson AB 2013. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB backend
"""
import datetime
import time
import weakref
from oslo_log import log
from oslo_utils import netutils
import pymongo
import pymongo.errors
import six
from six.moves.urllib import parse
from ceilometer.i18n import _
ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86
LOG = log.getLogger(__name__)
MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4]
COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6]
FINALIZE_FLOAT_LAMBDA = lambda result, param=None: float(result)
FINALIZE_INT_LAMBDA = lambda result, param=None: int(result)
CARDINALITY_VALIDATION = (lambda name, param: param in ['resource_id',
'user_id',
'project_id',
'source'])
def make_timestamp_range(start, end,
start_timestamp_op=None, end_timestamp_op=None):
"""Create the query document to find timestamps within that range.
This is done given two possible datetimes and their comparison
operations. By default, $gte is used for the lower bound and $lt for
the upper bound.
"""
ts_range = {}
if start:
if start_timestamp_op == 'gt':
start_timestamp_op = '$gt'
else:
start_timestamp_op = '$gte'
ts_range[start_timestamp_op] = start
if end:
if end_timestamp_op == 'le':
end_timestamp_op = '$lte'
else:
end_timestamp_op = '$lt'
ts_range[end_timestamp_op] = end
return ts_range
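# Illustrative usage with hypothetical values:
#
#     >>> import datetime
#     >>> make_timestamp_range(datetime.datetime(2017, 1, 1),
#     ...                      datetime.datetime(2017, 1, 2),
#     ...                      start_timestamp_op='gt')
#     {'$gt': datetime.datetime(2017, 1, 1, 0, 0),
#      '$lt': datetime.datetime(2017, 1, 2, 0, 0)}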
def make_query_from_filter(sample_filter, require_meter=True):
"""Return a query dictionary based on the settings in the filter.
:param sample_filter: SampleFilter instance
:param require_meter: If true and the filter does not have a meter,
raise an error.
"""
q = {}
if sample_filter.user:
q['user_id'] = sample_filter.user
if sample_filter.project:
q['project_id'] = sample_filter.project
if sample_filter.meter:
q['counter_name'] = sample_filter.meter
elif require_meter:
raise RuntimeError('Missing required meter specifier')
ts_range = make_timestamp_range(sample_filter.start_timestamp,
sample_filter.end_timestamp,
sample_filter.start_timestamp_op,
sample_filter.end_timestamp_op)
if ts_range:
q['timestamp'] = ts_range
if sample_filter.resource:
q['resource_id'] = sample_filter.resource
if sample_filter.source:
q['source'] = sample_filter.source
if sample_filter.message_id:
q['message_id'] = sample_filter.message_id
# The samples API calls metadata 'resource_metadata', so we convert
# to that name here.
q.update(dict(
('resource_%s' % k, v) for (k, v) in six.iteritems(
improve_keys(sample_filter.metaquery, metaquery=True))))
return q
def quote_key(key, reverse=False):
"""Prepare key for storage data in MongoDB.
:param key: key that should be quoted
:param reverse: boolean, True --- if we need a reverse order of the keys
parts
:return: iter of quoted part of the key
"""
r = -1 if reverse else 1
for k in key.split('.')[::r]:
if k.startswith('$'):
k = parse.quote(k)
yield k
def improve_keys(data, metaquery=False):
"""Improves keys in dict if they contained '.' or started with '$'.
:param data: is a dictionary where keys need to be checked and improved
:param metaquery: boolean, if True dots are not escaped from the keys
:return: improved dictionary if keys contained dots or started with '$':
{'a.b': 'v'} -> {'a': {'b': 'v'}}
{'$ab': 'v'} -> {'%24ab': 'v'}
"""
if not isinstance(data, dict):
return data
if metaquery:
for key in six.iterkeys(data):
if '.$' in key:
key_list = []
for k in quote_key(key):
key_list.append(k)
new_key = '.'.join(key_list)
data[new_key] = data.pop(key)
else:
for key, value in data.items():
if isinstance(value, dict):
improve_keys(value)
if '.' in key:
new_dict = {}
for k in quote_key(key, reverse=True):
new = {}
new[k] = new_dict if new_dict else data.pop(key)
new_dict = new
data.update(new_dict)
else:
if key.startswith('$'):
new_key = parse.quote(key)
data[new_key] = data.pop(key)
return data
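# Illustrative transformations with hypothetical values:
#
#     >>> improve_keys({'a.b.c': 1})
#     {'a': {'b': {'c': 1}}}
#     >>> improve_keys({'$gt': 1})
#     {'%24gt': 1}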
def unquote_keys(data):
"""Restores initial view of 'quoted' keys in dictionary data
:param data: is a dictionary
:return: data with restored keys if they were 'quoted'.
"""
if isinstance(data, dict):
for key, value in data.items():
if isinstance(value, dict):
unquote_keys(value)
if key.startswith('%24'):
k = parse.unquote(key)
data[k] = data.pop(key)
return data
class ConnectionPool(object):
def __init__(self):
self._pool = {}
def connect(self, conf, url):
connection_options = pymongo.uri_parser.parse_uri(url)
del connection_options['database']
del connection_options['username']
del connection_options['password']
del connection_options['collection']
pool_key = tuple(connection_options)
if pool_key in self._pool:
client = self._pool.get(pool_key)()
if client:
return client
splitted_url = netutils.urlsplit(url)
log_data = {'db': splitted_url.scheme,
'nodelist': connection_options['nodelist']}
LOG.info('Connecting to %(db)s on %(nodelist)s' % log_data)
client = self._mongo_connect(conf, url)
self._pool[pool_key] = weakref.ref(client)
return client
@staticmethod
def _mongo_connect(conf, url):
try:
return MongoProxy(conf, pymongo.MongoClient(url))
except pymongo.errors.ConnectionFailure as e:
LOG.warning(_('Unable to connect to the database server: '
'%(errmsg)s.') % {'errmsg': e})
raise
class QueryTransformer(object):
operators = {"<": "$lt",
">": "$gt",
"<=": "$lte",
"=<": "$lte",
">=": "$gte",
"=>": "$gte",
"!=": "$ne",
"in": "$in",
"=~": "$regex"}
complex_operators = {"or": "$or",
"and": "$and"}
ordering_functions = {"asc": pymongo.ASCENDING,
"desc": pymongo.DESCENDING}
def transform_orderby(self, orderby):
orderby_filter = []
for field in orderby:
field_name = list(field.keys())[0]
ordering = self.ordering_functions[list(field.values())[0]]
orderby_filter.append((field_name, ordering))
return orderby_filter
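# Illustrative input/output with hypothetical values:
#
#     >>> QueryTransformer().transform_orderby([{'timestamp': 'desc'}])
#     [('timestamp', -1)]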
@staticmethod
def _move_negation_to_leaf(condition):
"""Moves every not operator to the leafs.
Moving is going by applying the De Morgan rules and annihilating
double negations.
"""
def _apply_de_morgan(tree, negated_subtree, negated_op):
if negated_op == "and":
new_op = "or"
else:
new_op = "and"
tree[new_op] = [{"not": child}
for child in negated_subtree[negated_op]]
del tree["not"]
def transform(subtree):
op = list(subtree.keys())[0]
if op in ["and", "or"]:
[transform(child) for child in subtree[op]]
elif op == "not":
negated_tree = subtree[op]
negated_op = list(negated_tree.keys())[0]
if negated_op == "and":
_apply_de_morgan(subtree, negated_tree, negated_op)
transform(subtree)
elif negated_op == "or":
_apply_de_morgan(subtree, negated_tree, negated_op)
transform(subtree)
elif negated_op == "not":
# two consecutive not operators cancel each other out
value = list(negated_tree.values())[0]
new_op = list(value.keys())[0]
subtree[new_op] = negated_tree[negated_op][new_op]
del subtree["not"]
transform(subtree)
transform(condition)
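# Worked example with a hypothetical filter: _move_negation_to_leaf turns
#
#     {"not": {"and": [{"=": {"counter_name": "cpu"}},
#                      {">": {"counter_volume": 10}}]}}
#
# into
#
#     {"or": [{"not": {"=": {"counter_name": "cpu"}}},
#             {"not": {">": {"counter_volume": 10}}}]}
#
# after which each remaining leaf "not" is resolved by _handle_not_op.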
def transform_filter(self, condition):
# In Mongo the not operator can only be applied to
# simple expressions, so we have to move every
# not operator to the leaves of the expression tree.
self._move_negation_to_leaf(condition)
return self._process_json_tree(condition)
def _handle_complex_op(self, complex_op, nodes):
element_list = []
for node in nodes:
element = self._process_json_tree(node)
element_list.append(element)
complex_operator = self.complex_operators[complex_op]
op = {complex_operator: element_list}
return op
def _handle_not_op(self, negated_tree):
# assumes that not has already been moved to a leaf,
# so we are right next to a leaf
negated_op = list(negated_tree.keys())[0]
negated_field = list(negated_tree[negated_op].keys())[0]
value = negated_tree[negated_op][negated_field]
if negated_op == "=":
return {negated_field: {"$ne": value}}
elif negated_op == "!=":
return {negated_field: value}
else:
return {negated_field: {"$not":
{self.operators[negated_op]: value}}}
def _handle_simple_op(self, simple_op, nodes):
field_name = list(nodes.keys())[0]
field_value = list(nodes.values())[0]
# no operator for equal in Mongo
if simple_op == "=":
op = {field_name: field_value}
return op
operator = self.operators[simple_op]
op = {field_name: {operator: field_value}}
return op
def _process_json_tree(self, condition_tree):
operator_node = list(condition_tree.keys())[0]
nodes = list(condition_tree.values())[0]
if operator_node in self.complex_operators:
return self._handle_complex_op(operator_node, nodes)
if operator_node == "not":
negated_tree = condition_tree[operator_node]
return self._handle_not_op(negated_tree)
return self._handle_simple_op(operator_node, nodes)
def safe_mongo_call(call):
def closure(self, *args, **kwargs):
# NOTE(idegtiarov) The max_retries and retry_interval options are
# registered in storage.__init__ via oslo_db.options.set_defaults;
# the default value for both options is 10.
max_retries = self.conf.database.max_retries
retry_interval = self.conf.database.retry_interval
attempts = 0
while True:
try:
return call(self, *args, **kwargs)
except pymongo.errors.AutoReconnect as err:
if 0 <= max_retries <= attempts:
LOG.error('Unable to reconnect to the primary mongodb '
'after %(retries)d retries. Giving up.' %
{'retries': max_retries})
raise
LOG.warning(_('Unable to reconnect to the primary '
'mongodb: %(errmsg)s. Trying again in '
'%(retry_interval)d seconds.') %
{'errmsg': err, 'retry_interval': retry_interval})
attempts += 1
time.sleep(retry_interval)
return closure
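# Illustrative usage on a hypothetical connection class: the decorated
# method is retried on pymongo.errors.AutoReconnect according to the
# configured max_retries and retry_interval options.
#
#     class ExampleConn(object):
#         def __init__(self, conf, db):
#             self.conf = conf
#             self.db = db
#
#         @safe_mongo_call
#         def record_sample(self, sample):
#             self.db.meter.insert_one(sample)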
class MongoConn(object):
def __init__(self, conf, method):
self.conf = conf
self.method = method
@safe_mongo_call
def __call__(self, *args, **kwargs):
return self.method(*args, **kwargs)
MONGO_METHODS = set([typ for typ in dir(pymongo.collection.Collection)
if not typ.startswith('_')])
MONGO_METHODS.update(set([typ for typ in dir(pymongo.MongoClient)
if not typ.startswith('_')]))
MONGO_METHODS.update(set([typ for typ in dir(pymongo)
if not typ.startswith('_')]))
class MongoProxy(object):
def __init__(self, conf, conn):
self.conn = conn
self.conf = conf
def __getitem__(self, item):
"""Create and return proxy around the method in the connection.
:param item: name of the connection
"""
return MongoProxy(self.conf, self.conn[item])
def find(self, *args, **kwargs):
# We need this overriding method to return a CursorProxy object so
# that we can wrap the cursor's next() calls and catch the
# AutoReconnect exception.
return CursorProxy(self.conf, self.conn.find(*args, **kwargs))
def create_index(self, keys, name=None, *args, **kwargs):
try:
self.conn.create_index(keys, name=name, *args, **kwargs)
except pymongo.errors.OperationFailure as e:
if e.code == ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS:
LOG.info("Index %s will be recreated.", name)
self._recreate_index(keys, name, *args, **kwargs)
@safe_mongo_call
def _recreate_index(self, keys, name, *args, **kwargs):
self.conn.drop_index(name)
self.conn.create_index(keys, name=name, *args, **kwargs)
def __getattr__(self, item):
"""Wrap MongoDB connection.
If item is the name of an executable method, for example find or
insert, wrap it in MongoConn.
Otherwise wrap the attribute access in MongoProxy.
"""
if item in ("conf",):
return super(MongoProxy, self).__getattr__(item)
elif item in ('name', 'database'):
return getattr(self.conn, item)
elif item in MONGO_METHODS:
return MongoConn(self.conf, getattr(self.conn, item))
return MongoProxy(self.conf, getattr(self.conn, item))
def __call__(self, *args, **kwargs):
return self.conn(*args, **kwargs)
class CursorProxy(pymongo.cursor.Cursor):
def __init__(self, conf, cursor):
self.cursor = cursor
self.conf = conf
def __getitem__(self, item):
return self.cursor[item]
@safe_mongo_call
def next(self):
"""Wrap Cursor next method.
This method will be executed before each Cursor next method call.
"""
try:
save_cursor = self.cursor.clone()
return self.cursor.next()
except pymongo.errors.AutoReconnect:
self.cursor = save_cursor
raise
def __getattr__(self, item):
return getattr(self.cursor, item)
class AggregationFields(object):
def __init__(self, version,
group,
project,
finalize=None,
parametrized=False,
validate=None):
self._finalize = finalize or FINALIZE_FLOAT_LAMBDA
self.group = lambda *args: group(*args) if parametrized else group
self.project = (lambda *args: project(*args)
if parametrized else project)
self.version = version
self.validate = validate or (lambda name, param: True)
def finalize(self, name, data, param=None):
field = ("%s" % name) + ("/%s" % param if param else "")
return {field: (self._finalize(data.get(field))
if self._finalize else data.get(field))}
class Aggregation(object):
def __init__(self, name, aggregation_fields):
self.name = name
aggregation_fields = (aggregation_fields
if isinstance(aggregation_fields, list)
else [aggregation_fields])
self.aggregation_fields = sorted(aggregation_fields,
key=lambda af: getattr(af, "version"),
reverse=True)
def _get_compatible_aggregation_field(self, version_array):
if version_array:
version_array = version_array[0:2]
else:
version_array = MINIMUM_COMPATIBLE_MONGODB_VERSION
for aggregation_field in self.aggregation_fields:
if version_array >= aggregation_field.version:
return aggregation_field
def group(self, param=None, version_array=None):
af = self._get_compatible_aggregation_field(version_array)
return af.group(param)
def project(self, param=None, version_array=None):
af = self._get_compatible_aggregation_field(version_array)
return af.project(param)
def finalize(self, data, param=None, version_array=None):
af = self._get_compatible_aggregation_field(version_array)
return af.finalize(self.name, data, param)
def validate(self, param=None, version_array=None):
af = self._get_compatible_aggregation_field(version_array)
return af.validate(self.name, param)
SUM_AGGREGATION = Aggregation(
"sum", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
{"sum": {"$sum": "$counter_volume"}},
{"sum": "$sum"},
))
AVG_AGGREGATION = Aggregation(
"avg", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
{"avg": {"$avg": "$counter_volume"}},
{"avg": "$avg"},
))
MIN_AGGREGATION = Aggregation(
"min", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
{"min": {"$min": "$counter_volume"}},
{"min": "$min"},
))
MAX_AGGREGATION = Aggregation(
"max", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
{"max": {"$max": "$counter_volume"}},
{"max": "$max"},
))
COUNT_AGGREGATION = Aggregation(
"count", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
{"count": {"$sum": 1}},
{"count": "$count"},
FINALIZE_INT_LAMBDA))
STDDEV_AGGREGATION = Aggregation(
"stddev",
AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
{"std_square": {
"$sum": {
"$multiply": ["$counter_volume",
"$counter_volume"]
}},
"std_count": {"$sum": 1},
"std_sum": {"$sum": "$counter_volume"}},
{"stddev": {
"count": "$std_count",
"sum": "$std_sum",
"square_sum": "$std_square"}},
lambda stddev: ((stddev['square_sum']
* stddev['count']
- stddev["sum"] ** 2) ** 0.5
/ stddev['count'])))
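# The finalize lambda above computes the population standard deviation
# from the three running sums: sqrt(n * sum(x^2) - (sum(x))^2) / n, which
# is algebraically equal to sqrt(mean(x^2) - mean(x)^2). A quick sanity
# check with hypothetical data [1.0, 2.0, 3.0]:
#
#     >>> acc = {'count': 3, 'sum': 6.0, 'square_sum': 14.0}
#     >>> ((acc['square_sum'] * acc['count'] - acc['sum'] ** 2) ** 0.5
#     ...  / acc['count'])   # ~0.8165, i.e. sqrt(2/3)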
CARDINALITY_AGGREGATION = Aggregation(
"cardinality",
# $cond operator available only in MongoDB 2.6+
[AggregationFields(COMPLETE_AGGREGATE_COMPATIBLE_VERSION,
lambda field: ({"cardinality/%s" % field:
{"$addToSet": "$%s" % field}}),
lambda field: {
"cardinality/%s" % field: {
"$cond": [
{"$eq": ["$cardinality/%s" % field, None]},
0,
{"$size": "$cardinality/%s" % field}]
}},
validate=CARDINALITY_VALIDATION,
parametrized=True),
AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
lambda field: ({"cardinality/%s" % field:
{"$addToSet": "$%s" % field}}),
lambda field: ({"cardinality/%s" % field:
"$cardinality/%s" % field}),
finalize=len,
validate=CARDINALITY_VALIDATION,
parametrized=True)]
)
def from_unix_timestamp(timestamp):
if (isinstance(timestamp, six.integer_types) or
isinstance(timestamp, float)):
return datetime.datetime.fromtimestamp(timestamp)
return timestamp


@ -1,175 +0,0 @@
#
# Copyright Ericsson AB 2013. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB backend."""
import pymongo
from ceilometer.storage import base
from ceilometer.storage import models
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer import utils
COMMON_AVAILABLE_CAPABILITIES = {
'meters': {'query': {'simple': True,
'metadata': True}},
'samples': {'query': {'simple': True,
'metadata': True,
'complex': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Base Connection class for MongoDB driver."""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
COMMON_AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def get_meters(self, user=None, project=None, resource=None, source=None,
metaquery=None, limit=None, unique=False):
"""Return an iterable of models.Meter instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
:param limit: Maximum number of results to return.
:param unique: If set to true, return only unique meter information.
"""
if limit == 0:
return
metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {}
q = {}
if user == 'None':
q['user_id'] = None
elif user is not None:
q['user_id'] = user
if project == 'None':
q['project_id'] = None
elif project is not None:
q['project_id'] = project
if resource == 'None':
q['_id'] = None
elif resource is not None:
q['_id'] = resource
if source is not None:
q['source'] = source
q.update(metaquery)
count = 0
if unique:
meter_names = set()
for r in self.db.resource.find(q):
for r_meter in r['meter']:
if unique:
if r_meter['counter_name'] in meter_names:
continue
else:
meter_names.add(r_meter['counter_name'])
if limit and count >= limit:
return
else:
count += 1
if unique:
yield models.Meter(
name=r_meter['counter_name'],
type=r_meter['counter_type'],
# Return an empty string if 'counter_unit' is missing,
# for backward compatibility.
unit=r_meter.get('counter_unit', ''),
resource_id=None,
project_id=None,
source=None,
user_id=None)
else:
yield models.Meter(
name=r_meter['counter_name'],
type=r_meter['counter_type'],
# Return an empty string if 'counter_unit' is missing,
# for backward compatibility.
unit=r_meter.get('counter_unit', ''),
resource_id=r['_id'],
project_id=r['project_id'],
source=r['source'],
user_id=r['user_id'])
def get_samples(self, sample_filter, limit=None):
"""Return an iterable of model.Sample instances.
:param sample_filter: Filter.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return []
q = pymongo_utils.make_query_from_filter(sample_filter,
require_meter=False)
return self._retrieve_samples(q,
[("timestamp", pymongo.DESCENDING)],
limit)
def query_samples(self, filter_expr=None, orderby=None, limit=None):
if limit == 0:
return []
query_filter = {}
orderby_filter = [("timestamp", pymongo.DESCENDING)]
transformer = pymongo_utils.QueryTransformer()
if orderby is not None:
orderby_filter = transformer.transform_orderby(orderby)
if filter_expr is not None:
query_filter = transformer.transform_filter(filter_expr)
return self._retrieve_samples(query_filter, orderby_filter, limit)
def _retrieve_samples(self, query, orderby, limit):
if limit is not None:
samples = self.db.meter.find(query,
limit=limit,
sort=orderby)
else:
samples = self.db.meter.find(query,
sort=orderby)
for s in samples:
# Remove the ObjectId generated by the database when
# the sample was inserted. It is an implementation
# detail that should not leak outside of the driver.
del s['_id']
# Backward compatibility for samples without units
s['counter_unit'] = s.get('counter_unit', '')
# Compatibility with MongoDB 3.+
s['counter_volume'] = float(s.get('counter_volume'))
# Tolerate absence of recorded_at in older datapoints
s['recorded_at'] = s.get('recorded_at')
# Check samples for metadata and "unquote" keys that originally
# started with '$'.
if s.get('resource_metadata'):
s['resource_metadata'] = pymongo_utils.unquote_keys(
s.get('resource_metadata'))
yield models.Sample(**s)


@ -1,4 +0,0 @@
sqlalchemy-migrate is DEPRECATED.
All new migrations should be written using alembic.
Please see ceilometer/storage/sqlalchemy/alembic/README


@ -1,5 +0,0 @@
#!/usr/bin/env python
from migrate.versioning.shell import main
if __name__ == '__main__':
main(debug='False')


@ -1,25 +0,0 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=ceilometer
# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version
# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]
# When creating new change scripts, Migrate will stamp the new script with
# a version number. By default this is latest_version + 1. You can set this
# to 'true' to tell Migrate to use the UTC timestamp instead.
use_timestamp_numbering=False


@ -1,95 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import UniqueConstraint
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
meter = Table(
'meter', meta,
Column('id', Integer, primary_key=True, index=True),
Column('counter_name', String(255)),
Column('user_id', String(255), index=True),
Column('project_id', String(255), index=True),
Column('resource_id', String(255)),
Column('resource_metadata', String(5000)),
Column('counter_type', String(255)),
Column('counter_volume', Integer),
Column('counter_duration', Integer),
Column('timestamp', DateTime(timezone=False), index=True),
Column('message_signature', String(1000)),
Column('message_id', String(1000)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
resource = Table(
'resource', meta,
Column('id', String(255), primary_key=True, index=True),
Column('resource_metadata', String(5000)),
Column('project_id', String(255), index=True),
Column('received_timestamp', DateTime(timezone=False)),
Column('timestamp', DateTime(timezone=False), index=True),
Column('user_id', String(255), index=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
user = Table(
'user', meta,
Column('id', String(255), primary_key=True, index=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
project = Table(
'project', meta,
Column('id', String(255), primary_key=True, index=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
sourceassoc = Table(
'sourceassoc', meta,
Column('source_id', String(255), index=True),
Column('user_id', String(255)),
Column('project_id', String(255)),
Column('resource_id', String(255)),
Column('meter_id', Integer),
Index('idx_su', 'source_id', 'user_id'),
Index('idx_sp', 'source_id', 'project_id'),
Index('idx_sr', 'source_id', 'resource_id'),
Index('idx_sm', 'source_id', 'meter_id'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
source = Table(
'source', meta,
Column('id', String(255), primary_key=True, index=True),
UniqueConstraint('id'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
tables = [meter, project, resource, user, source, sourceassoc]
for i in sorted(tables, key=lambda table: table.fullname):
i.create()


@ -1,23 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
meter = Table('meter', meta, autoload=True)
duration = Column('counter_duration', Integer)
meter.drop_column(duration)


@ -1,29 +0,0 @@
# Copyright 2012 Canonical.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def upgrade(migrate_engine):
if migrate_engine.name == "mysql":
tables = ['meter', 'user', 'resource', 'project', 'source',
'sourceassoc']
migrate_engine.execute("SET foreign_key_checks = 0")
for table in tables:
migrate_engine.execute(
"ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table)
migrate_engine.execute("SET foreign_key_checks = 1")
migrate_engine.execute(
"ALTER DATABASE %s DEFAULT CHARACTER SET utf8" %
migrate_engine.url.database)


@ -1,23 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
meter = Table('meter', meta, autoload=True)
unit = Column('counter_unit', String(255))
meter.create_column(unit)


@ -1,24 +0,0 @@
#
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column, DateTime
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
resource = Table('resource', meta, autoload=True)
timestamp = Column('timestamp', DateTime)
resource.drop_column(timestamp)
received_timestamp = Column('received_timestamp', DateTime)
resource.drop_column(received_timestamp)


@ -1,25 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Float
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
meter = Table('meter', meta, autoload=True)
meter.c.counter_volume.alter(type=Float(53))


@ -1,46 +0,0 @@
#
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column, Text
from sqlalchemy import Boolean, Integer, String, DateTime, Float
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
alarm = Table(
'alarm', meta,
Column('id', String(255), primary_key=True, index=True),
Column('enabled', Boolean),
Column('name', Text()),
Column('description', Text()),
Column('timestamp', DateTime(timezone=False)),
Column('counter_name', String(255), index=True),
Column('user_id', String(255), index=True),
Column('project_id', String(255), index=True),
Column('comparison_operator', String(2)),
Column('threshold', Float),
Column('statistic', String(255)),
Column('evaluation_periods', Integer),
Column('period', Integer),
Column('state', String(255)),
Column('state_timestamp', DateTime(timezone=False)),
Column('ok_actions', Text()),
Column('alarm_actions', Text()),
Column('insufficient_data_actions', Text()),
Column('matching_metadata', Text()),
mysql_engine='InnoDB',
mysql_charset='utf8')
alarm.create()


@ -1,60 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
unique_name = Table(
'unique_name', meta,
Column('id', Integer, primary_key=True),
Column('key', String(32), index=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
unique_name.create()
event = Table(
'event', meta,
Column('id', Integer, primary_key=True),
Column('generated', Float(asdecimal=True), index=True),
Column('unique_name_id', Integer, ForeignKey('unique_name.id')),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
event.create()
trait = Table(
'trait', meta,
Column('id', Integer, primary_key=True),
Column('name_id', Integer, ForeignKey('unique_name.id')),
Column('t_type', Integer, index=True),
Column('t_string', String(32), nullable=True, default=None,
index=True),
Column('t_float', Float, nullable=True, default=None, index=True),
Column('t_int', Integer, nullable=True, default=None, index=True),
Column('t_datetime', Float(asdecimal=True), nullable=True,
default=None, index=True),
Column('event_id', Integer, ForeignKey('event.id')),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
trait.create()


@ -1,24 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import VARCHAR
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
name = Table('unique_name', meta, autoload=True)
name.c.key.alter(type=VARCHAR(length=255))
trait = Table('trait', meta, autoload=True)
trait.c.t_string.alter(type=VARCHAR(length=255))


@ -1,23 +0,0 @@
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
meter = sa.Table('meter', meta, autoload=True)
index = sa.Index('idx_meter_rid_cname', meter.c.resource_id,
meter.c.counter_name)
index.create(bind=migrate_engine)


@ -1,37 +0,0 @@
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
INDEXES = {
# `table_name`: ((`index_name`, `column`),)
"user": (('ix_user_id', 'id'),),
"source": (('ix_source_id', 'id'),),
"project": (('ix_project_id', 'id'),),
"meter": (('ix_meter_id', 'id'),),
"alarm": (('ix_alarm_id', 'id'),),
"resource": (('ix_resource_id', 'id'),)
}
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.drop()


@ -1,58 +0,0 @@
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import MetaData, Table
from sqlalchemy.sql.expression import select
TABLES = ['resource', 'sourceassoc', 'user',
'project', 'meter', 'source', 'alarm']
INDEXES = {
"resource": (('user_id', 'user', 'id'),
('project_id', 'project', 'id')),
"sourceassoc": (('user_id', 'user', 'id'),
('project_id', 'project', 'id'),
('resource_id', 'resource', 'id'),
('meter_id', 'meter', 'id'),
('source_id', 'source', 'id')),
"alarm": (('user_id', 'user', 'id'),
('project_id', 'project', 'id')),
"meter": (('user_id', 'user', 'id'),
('project_id', 'project', 'id'),
('resource_id', 'resource', 'id'),)
}
def upgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in TABLES)
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for column, ref_table_name, ref_column_name in indexes:
ref_table = load_tables[ref_table_name]
subq = select([getattr(ref_table.c, ref_column_name)])
sql_del = table.delete().where(
~ getattr(table.c, column).in_(subq))
migrate_engine.execute(sql_del)
params = {'columns': [table.c[column]],
'refcolumns': [ref_table.c[ref_column_name]]}
if migrate_engine.name == 'mysql':
params['name'] = "_".join(('fk', table_name, column))
fkey = ForeignKeyConstraint(**params)
fkey.create()
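# For each (table, column) pair above this emits SQL along these lines
# (illustrative):
#
#     DELETE FROM resource
#     WHERE resource.user_id NOT IN (SELECT "user".id FROM "user")
#
# so that the foreign key can then be created without integrity errors.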


@ -1,23 +0,0 @@
#
# Copyright 2013 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
alarm = Table('alarm', meta, autoload=True)
alarm.c.counter_name.alter(name='meter_name')


@ -1,44 +0,0 @@
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset.constraint import UniqueConstraint
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
event = sqlalchemy.Table('event', meta, autoload=True)
message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50))
event.create_column(message_id)
cons = UniqueConstraint('message_id', table=event)
cons.create()
index = sqlalchemy.Index('idx_event_message_id', event.c.message_id)
index.create(bind=migrate_engine)
# Populate the new column ...
trait = sqlalchemy.Table('trait', meta, autoload=True)
unique_name = sqlalchemy.Table('unique_name', meta, autoload=True)
join = trait.join(unique_name, unique_name.c.id == trait.c.name_id)
traits = sqlalchemy.select([trait.c.event_id, trait.c.t_string],
whereclause=(unique_name.c.key == 'message_id'),
from_obj=join)
for event_id, value in traits.execute():
(event.update().where(event.c.id == event_id).values(message_id=value).
execute())
# Leave the Trait; it makes the rollback easier and won't really hurt anyone.


@ -1,63 +0,0 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import MetaData, Table, Column, Index
from sqlalchemy import String, DateTime
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
project = Table('project', meta, autoload=True)
user = Table('user', meta, autoload=True)
alarm_history = Table(
'alarm_history', meta,
Column('event_id', String(255), primary_key=True, index=True),
Column('alarm_id', String(255)),
Column('on_behalf_of', String(255)),
Column('project_id', String(255)),
Column('user_id', String(255)),
Column('type', String(20)),
Column('detail', String(255)),
Column('timestamp', DateTime(timezone=False)),
mysql_engine='InnoDB',
mysql_charset='utf8')
alarm_history.create()
if migrate_engine.name in ['mysql', 'postgresql']:
indices = [Index('ix_alarm_history_alarm_id',
alarm_history.c.alarm_id),
Index('ix_alarm_history_on_behalf_of',
alarm_history.c.on_behalf_of),
Index('ix_alarm_history_project_id',
alarm_history.c.project_id),
Index('ix_alarm_history_on_user_id',
alarm_history.c.user_id)]
for index in indices:
index.create(migrate_engine)
fkeys = [ForeignKeyConstraint(columns=[alarm_history.c.on_behalf_of],
refcolumns=[project.c.id]),
ForeignKeyConstraint(columns=[alarm_history.c.project_id],
refcolumns=[project.c.id]),
ForeignKeyConstraint(columns=[alarm_history.c.user_id],
refcolumns=[user.c.id])]
for fkey in fkeys:
fkey.create(engine=migrate_engine)


@ -1,60 +0,0 @@
#
# Copyright 2013 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from sqlalchemy import MetaData, Table, Column, Index
from sqlalchemy import String, Text
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table('alarm', meta, autoload=True)
type = Column('type', String(50), default='threshold')
type.create(table, populate_default=True)
rule = Column('rule', Text())
rule.create(table)
for row in table.select().execute().fetchall():
query = []
if row.matching_metadata is not None:
matching_metadata = json.loads(row.matching_metadata)
for key in matching_metadata:
query.append({'field': key,
'op': 'eq',
'value': matching_metadata[key]})
rule = {
'meter_name': row.meter_name,
'comparison_operator': row.comparison_operator,
'threshold': row.threshold,
'statistic': row.statistic,
'evaluation_periods': row.evaluation_periods,
'period': row.period,
'query': query
}
table.update().where(table.c.id == row.id).values(rule=rule).execute()
index = Index('ix_alarm_counter_name', table.c.meter_name)
index.drop(bind=migrate_engine)
table.c.meter_name.drop()
table.c.comparison_operator.drop()
table.c.threshold.drop()
table.c.statistic.drop()
table.c.evaluation_periods.drop()
table.c.period.drop()
table.c.matching_metadata.drop()
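# Illustrative before/after for one migrated row (hypothetical values):
#
#     matching_metadata = '{"metadata.user_metadata.group": "prod"}'
#
# becomes, combined with the row's threshold columns,
#
#     rule = {'meter_name': 'cpu_util', 'comparison_operator': 'gt',
#             'threshold': 80.0, 'statistic': 'avg',
#             'evaluation_periods': 3, 'period': 60,
#             'query': [{'field': 'metadata.user_metadata.group',
#                        'op': 'eq', 'value': 'prod'}]}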


@ -1,54 +0,0 @@
#
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from ceilometer.storage.sqlalchemy import migration
from ceilometer.storage.sqlalchemy import models
_col = 'timestamp'
def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
temp_col_n = 'convert_data_type_temp_col'
# Override column we're going to convert with from_t, since the type we're
    # replacing could be custom and we need to tell SQLAlchemy how to perform
# CRUD operations with it.
table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
extend_existing=True)
sa.Column(temp_col_n, to_t).create(table)
key_attr = getattr(table.c, pk_attr)
orig_col = getattr(table.c, col)
new_col = getattr(table.c, temp_col_n)
query = sa.select([key_attr, orig_col])
for key, value in migration.paged(query):
(table.update().where(key_attr == key).values({temp_col_n: value}).
execute())
orig_col.drop()
new_col.alter(name=col)
if index:
sa.Index('ix_%s_%s' % (table.name, col), new_col).create()
def upgrade(migrate_engine):
if migrate_engine.name == 'mysql':
meta = sa.MetaData(bind=migrate_engine)
meter = sa.Table('meter', meta, autoload=True)
_convert_data_type(meter, _col, sa.DateTime(),
models.PreciseTimestamp(),
pk_attr='id', index=True)
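
The helper above is the portable recipe for changing a column's type when a direct
ALTER is not reliable: add a scratch column of the target type, copy values across,
drop the original, and rename. A condensed sketch under the same assumptions (bound
metadata, sqlalchemy-migrate loaded, hypothetical column names):

import migrate  # noqa: importing migrate enables Column.create/.drop/.alter
import sqlalchemy as sa

def change_type(table):
    sa.Column('tmp', sa.Numeric(20, 6)).create(table)   # 1. scratch column
    for row in sa.select([table.c.id, table.c.ts]).execute():
        table.update().where(table.c.id == row.id).values(tmp=row.ts).execute()
    table.c.ts.drop()                                   # 2. drop the old column
    table.c.tmp.alter(name='ts')                        # 3. take over its name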


@ -1,26 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
resource = Table('resource', meta, autoload=True)
resource.c.resource_metadata.alter(type=Text)


@ -1,26 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
alm_hist = Table('alarm_history', meta, autoload=True)
alm_hist.c.detail.alter(type=Text)


@ -1,68 +0,0 @@
#
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import six
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Text
from ceilometer import utils
tables = [('metadata_text', Text, True),
('metadata_bool', Boolean, False),
('metadata_int', Integer, False),
('metadata_float', Float, False)]
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
meter = Table('meter', meta, autoload=True)
meta_tables = {}
for t_name, t_type, t_nullable in tables:
meta_tables[t_name] = Table(
t_name, meta,
Column('id', Integer, ForeignKey('meter.id'), primary_key=True),
Column('meta_key', String(255), index=True, primary_key=True),
Column('value', t_type, nullable=t_nullable),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
meta_tables[t_name].create()
for row in select([meter]).execute():
if row['resource_metadata']:
meter_id = row['id']
rmeta = json.loads(row['resource_metadata'])
for key, v in utils.dict_to_keyval(rmeta):
ins = None
if isinstance(v, six.string_types) or v is None:
ins = meta_tables['metadata_text'].insert()
elif isinstance(v, bool):
ins = meta_tables['metadata_bool'].insert()
elif isinstance(v, six.integer_types):
ins = meta_tables['metadata_int'].insert()
elif isinstance(v, float):
ins = meta_tables['metadata_float'].insert()
if ins is not None:
ins.values(id=meter_id, meta_key=key, value=v).execute()
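
utils.dict_to_keyval (not part of this diff) flattens nested resource metadata into
key/value pairs so each value can be routed to the table matching its Python type;
roughly, assuming a dotted key separator:

metadata = {'disk': {'size': 20}, 'name': 'vm1', 'active': True}
# would yield pairs such as:
#   ('disk.size', 20)   -> metadata_int
#   ('name', 'vm1')     -> metadata_text
#   ('active', True)    -> metadata_bool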


@ -1,77 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from ceilometer.storage.sqlalchemy import migration
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
event_type = Table(
'event_type', meta,
Column('id', Integer, primary_key=True),
Column('desc', String(255), unique=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
event_type.create()
event = Table('event', meta, autoload=True)
unique_name = Table('unique_name', meta, autoload=True)
# Event type is a specialization of Unique name, so
# we insert into the event_type table all the distinct
# unique names from the event.unique_name field along
# with the key from the unique_name table, and
# then rename the event.unique_name field to event.event_type
conn = migrate_engine.connect()
sql = ("INSERT INTO event_type "
"SELECT unique_name.id, unique_name.key FROM event "
"INNER JOIN unique_name "
"ON event.unique_name_id = unique_name.id "
"GROUP BY unique_name.id")
conn.execute(sql)
conn.close()
# Now we need to drop the foreign key constraint, rename
# the event.unique_name column, and re-add a new foreign
# key constraint
params = {'columns': [event.c.unique_name_id],
'refcolumns': [unique_name.c.id]}
if migrate_engine.name == 'mysql':
params['name'] = "event_ibfk_1"
fkey = ForeignKeyConstraint(**params)
fkey.drop()
Column('event_type_id', Integer).create(event)
# Move data from unique_name_id column into event_type_id column
# and delete the entry from the unique_name table
query = select([event.c.id, event.c.unique_name_id])
for key, value in migration.paged(query):
(event.update().where(event.c.id == key).
values({"event_type_id": value}).execute())
unique_name.delete().where(unique_name.c.id == key).execute()
params = {'columns': [event.c.event_type_id],
'refcolumns': [event_type.c.id]}
if migrate_engine.name == 'mysql':
params['name'] = "_".join(('fk', 'event_type', 'id'))
fkey = ForeignKeyConstraint(**params)
fkey.create()
event.c.unique_name_id.drop()


@ -1,29 +0,0 @@
CREATE TABLE event_type (
id INTEGER PRIMARY KEY ASC,
    'desc' STRING NOT NULL
);
INSERT INTO event_type
SELECT un.id, un.key
FROM unique_name un
JOIN event e ON un.id = e.unique_name_id
GROUP BY un.id;
ALTER TABLE event RENAME TO event_orig;
CREATE TABLE event (
id INTEGER PRIMARY KEY ASC,
generated FLOAT NOT NULL,
message_id VARCHAR(50) UNIQUE,
event_type_id INTEGER NOT NULL,
FOREIGN KEY (event_type_id) REFERENCES event_type (id)
);
INSERT INTO event
SELECT id, generated, message_id, unique_name_id
FROM event_orig;
DROP TABLE event_orig;
DELETE FROM unique_name
WHERE id IN (SELECT id FROM event_type);


@ -1,26 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import BigInteger
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
resource = Table('metadata_int', meta, autoload=True)
resource.c.value.alter(type=BigInteger)


@ -1,86 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import UniqueConstraint
from ceilometer.storage.sqlalchemy import migration
def upgrade(migrate_engine):
meta = MetaData(migrate_engine)
trait_type = Table(
'trait_type', meta,
Column('id', Integer, primary_key=True),
Column('desc', String(255)),
Column('data_type', Integer),
UniqueConstraint('desc', 'data_type', name="tt_unique"),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
trait = Table('trait', meta, autoload=True)
unique_name = Table('unique_name', meta, autoload=True)
trait_type.create(migrate_engine)
    # Trait type extracts data from Trait and Unique name.
    # We take all trait names from Unique Name and data types
    # from Trait, then drop the redundant name_id and t_type
    # columns from trait.
conn = migrate_engine.connect()
sql = ("INSERT INTO trait_type "
"SELECT unique_name.id, unique_name.key, trait.t_type FROM trait "
"INNER JOIN unique_name "
"ON trait.name_id = unique_name.id "
"GROUP BY unique_name.id, unique_name.key, trait.t_type")
conn.execute(sql)
conn.close()
# Now we need to drop the foreign key constraint, rename
# the trait.name column, and re-add a new foreign
# key constraint
params = {'columns': [trait.c.name_id],
'refcolumns': [unique_name.c.id]}
if migrate_engine.name == 'mysql':
params['name'] = "trait_ibfk_1" # foreign key to the unique name table
fkey = ForeignKeyConstraint(**params)
fkey.drop()
Column('trait_type_id', Integer).create(trait)
# Move data from name_id column into trait_type_id column
query = select([trait.c.id, trait.c.name_id])
for key, value in migration.paged(query):
(trait.update().where(trait.c.id == key).
values({"trait_type_id": value}).execute())
trait.c.name_id.drop()
params = {'columns': [trait.c.trait_type_id],
'refcolumns': [trait_type.c.id]}
if migrate_engine.name == 'mysql':
params['name'] = "_".join(('fk', 'trait_type', 'id'))
fkey = ForeignKeyConstraint(**params)
fkey.create()
    # Drop the t_type column; its data now lives in trait_type.data_type.
trait.c.t_type.drop()
# Finally, drop the unique_name table - we don't need it
# anymore.
unique_name.drop()


@ -1,34 +0,0 @@
ALTER TABLE trait RENAME TO trait_orig;
CREATE TABLE trait_type (
id INTEGER PRIMARY KEY ASC,
'desc' STRING NOT NULL,
data_type INTEGER NOT NULL,
UNIQUE ('desc', data_type)
);
INSERT INTO trait_type
SELECT un.id, un.key, t.t_type
FROM unique_name un
JOIN trait_orig t ON un.id = t.name_id
GROUP BY un.id;
CREATE TABLE trait (
id INTEGER PRIMARY KEY ASC,
t_string VARCHAR(255),
t_int INTEGER,
t_float FLOAT,
t_datetime FLOAT,
trait_type_id INTEGER NOT NULL,
event_id INTEGER NOT NULL,
    FOREIGN KEY (trait_type_id) REFERENCES trait_type (id),
FOREIGN KEY (event_id) REFERENCES event (id)
);
INSERT INTO trait
SELECT t.id, t.t_string, t.t_int, t.t_float, t.t_datetime, t.name_id,
t.event_id
FROM trait_orig t;
DROP TABLE trait_orig;
DROP TABLE unique_name;


@ -1,56 +0,0 @@
#
# Copyright 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from ceilometer.storage.sqlalchemy import migration
from ceilometer.storage.sqlalchemy import models
def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
temp_col_n = 'convert_data_type_temp_col'
# Override column we're going to convert with from_t, since the type we're
    # replacing could be custom and we need to tell SQLAlchemy how to perform
# CRUD operations with it.
table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
extend_existing=True)
sa.Column(temp_col_n, to_t).create(table)
key_attr = getattr(table.c, pk_attr)
orig_col = getattr(table.c, col)
new_col = getattr(table.c, temp_col_n)
query = sa.select([key_attr, orig_col])
for key, value in migration.paged(query):
(table.update().where(key_attr == key).values({temp_col_n: value}).
execute())
orig_col.drop()
new_col.alter(name=col)
if index:
sa.Index('ix_%s_%s' % (table.name, col), new_col).create()
def upgrade(migrate_engine):
if migrate_engine.name == 'mysql':
meta = sa.MetaData(bind=migrate_engine)
event = sa.Table('event', meta, autoload=True)
_convert_data_type(event, 'generated', sa.Float(),
models.PreciseTimestamp(),
pk_attr='id', index=True)
trait = sa.Table('trait', meta, autoload=True)
_convert_data_type(trait, 't_datetime', sa.Float(),
models.PreciseTimestamp(),
pk_attr='id', index=True)


@ -1,58 +0,0 @@
#
# Copyright 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from ceilometer.storage.sqlalchemy import migration
from ceilometer.storage.sqlalchemy import models
def _convert_data_type(table, col, from_t, to_t, pk_attr='id'):
temp_col_n = 'convert_data_type_temp_col'
# Override column we're going to convert with from_t, since the type we're
    # replacing could be custom and we need to tell SQLAlchemy how to perform
# CRUD operations with it.
table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
extend_existing=True)
sa.Column(temp_col_n, to_t).create(table)
key_attr = getattr(table.c, pk_attr)
orig_col = getattr(table.c, col)
new_col = getattr(table.c, temp_col_n)
query = sa.select([key_attr, orig_col])
for key, value in migration.paged(query):
(table.update().where(key_attr == key).values({temp_col_n: value}).
execute())
orig_col.drop()
new_col.alter(name=col)
to_convert = [
('alarm', 'timestamp', 'id'),
('alarm', 'state_timestamp', 'id'),
('alarm_history', 'timestamp', 'alarm_id'),
]
def upgrade(migrate_engine):
if migrate_engine.name == 'mysql':
meta = sa.MetaData(bind=migrate_engine)
for table_name, col_name, pk_attr in to_convert:
table = sa.Table(table_name, meta, autoload=True)
_convert_data_type(table, col_name, sa.DateTime(),
models.PreciseTimestamp(),
pk_attr=pk_attr)


@ -1,24 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Float
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
metadata_float = Table('metadata_float', meta, autoload=True)
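    # Float(53) requests a 53-bit mantissa, i.e. double precision on most backends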
metadata_float.c.value.alter(type=Float(53))
trait = Table('trait', meta, autoload=True)
trait.c.t_float.alter(type=Float(53))


@ -1,42 +0,0 @@
#
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import MetaData, Table
TABLES = ['user', 'project', 'alarm']
INDEXES = {
"alarm": (('user_id', 'user', 'id'),
('project_id', 'project', 'id')),
}
def upgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in TABLES)
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for column, ref_table_name, ref_column_name in indexes:
ref_table = load_tables[ref_table_name]
params = {'columns': [table.c[column]],
'refcolumns': [ref_table.c[ref_column_name]]}
if migrate_engine.name == 'mysql':
params['name'] = "_".join(('fk', table_name, column))
fkey = ForeignKeyConstraint(**params)
fkey.drop()


@ -1,138 +0,0 @@
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
import sqlalchemy as sa
def get_alembic_version(meta):
"""Return Alembic version or None if no Alembic table exists."""
try:
a_ver = sa.Table(
'alembic_version',
meta,
autoload=True)
return sa.select([a_ver.c.version_num]).scalar()
except sa.exc.NoSuchTableError:
return None
def delete_alembic(meta):
try:
sa.Table(
'alembic_version',
meta,
autoload=True).drop(checkfirst=True)
except sa.exc.NoSuchTableError:
pass
INDEXES = (
    # ([dialects], table_name, index_name, columns, create, unique, limited)
(['mysql', 'sqlite', 'postgresql'],
'resource',
'resource_user_id_project_id_key',
('user_id', 'project_id'), True, False, True),
(['mysql'], 'source', 'id', ('id',), False, True, False))
def index_cleanup(meta, table_name, uniq_name, columns,
create, unique, limited):
table = sa.Table(table_name, meta, autoload=True)
if create:
if limited and meta.bind.engine.name == 'mysql':
# For some versions of mysql we can get an error
# "Specified key was too long; max key length is 1000 bytes".
# We should create an index by hand in this case with limited
# length of columns.
columns_mysql = ",".join((c + "(100)" for c in columns))
sql = ("create index %s ON %s (%s)" % (uniq_name, table,
columns_mysql))
meta.bind.engine.execute(sql)
else:
cols = [table.c[col] for col in columns]
sa.Index(uniq_name, *cols, unique=unique).create()
else:
if unique:
migrate.UniqueConstraint(*columns, table=table,
name=uniq_name).drop()
else:
cols = [table.c[col] for col in columns]
sa.Index(uniq_name, *cols).drop()
def change_uniq(meta):
uniq_name = 'uniq_sourceassoc0meter_id0user_id'
columns = ('meter_id', 'user_id')
if meta.bind.engine.name == 'sqlite':
return
sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
meter = sa.Table('meter', meta, autoload=True)
user = sa.Table('user', meta, autoload=True)
if meta.bind.engine.name == 'mysql':
# For mysql dialect all dependent FK should be removed
# before renaming of constraint.
params = {'columns': [sourceassoc.c.meter_id],
'refcolumns': [meter.c.id],
'name': 'fk_sourceassoc_meter_id'}
migrate.ForeignKeyConstraint(**params).drop()
params = {'columns': [sourceassoc.c.user_id],
'refcolumns': [user.c.id],
'name': 'fk_sourceassoc_user_id'}
migrate.ForeignKeyConstraint(**params).drop()
migrate.UniqueConstraint(*columns, table=sourceassoc,
name=uniq_name).create()
if meta.bind.engine.name == 'mysql':
params = {'columns': [sourceassoc.c.meter_id],
'refcolumns': [meter.c.id],
'name': 'fk_sourceassoc_meter_id'}
migrate.ForeignKeyConstraint(**params).create()
params = {'columns': [sourceassoc.c.user_id],
'refcolumns': [user.c.id],
'name': 'fk_sourceassoc_user_id'}
migrate.ForeignKeyConstraint(**params).create()
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
a_ver = get_alembic_version(meta)
if not a_ver:
alarm = sa.Table('alarm', meta, autoload=True)
repeat_act = sa.Column('repeat_actions', sa.Boolean,
server_default=sa.sql.expression.false())
alarm.create_column(repeat_act)
a_ver = '43b1a023dfaa'
if a_ver == '43b1a023dfaa':
meter = sa.Table('meter', meta, autoload=True)
meter.c.resource_metadata.alter(type=sa.Text)
a_ver = '17738166b91'
if a_ver == '17738166b91':
for (engine_names, table_name, uniq_name,
columns, create, uniq, limited) in INDEXES:
if migrate_engine.name in engine_names:
index_cleanup(meta, table_name, uniq_name,
columns, create, uniq, limited)
a_ver = 'b6ae66d05e3'
if a_ver == 'b6ae66d05e3':
change_uniq(meta)
delete_alembic(meta)


@ -1,24 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
import sqlalchemy
from ceilometer.storage.sqlalchemy import models
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
meter = sqlalchemy.Table('meter', meta, autoload=True)
c = sqlalchemy.Column('recorded_at', models.PreciseTimestamp(),
default=timeutils.utcnow)
meter.create_column(c)


@ -1,110 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
import sqlalchemy as sa
def _handle_meter_indices(meta):
if meta.bind.engine.name == 'sqlite':
return
resource = sa.Table('resource', meta, autoload=True)
project = sa.Table('project', meta, autoload=True)
user = sa.Table('user', meta, autoload=True)
meter = sa.Table('meter', meta, autoload=True)
indices = [(sa.Index('ix_meter_timestamp', meter.c.timestamp),
sa.Index('ix_sample_timestamp', meter.c.timestamp)),
(sa.Index('ix_meter_user_id', meter.c.user_id),
sa.Index('ix_sample_user_id', meter.c.user_id)),
(sa.Index('ix_meter_project_id', meter.c.project_id),
sa.Index('ix_sample_project_id', meter.c.project_id)),
(sa.Index('idx_meter_rid_cname', meter.c.resource_id,
meter.c.counter_name),
sa.Index('idx_sample_rid_cname', meter.c.resource_id,
meter.c.counter_name))]
fk_params = [({'columns': [meter.c.resource_id],
'refcolumns': [resource.c.id]},
'fk_meter_resource_id',
'fk_sample_resource_id'),
({'columns': [meter.c.project_id],
'refcolumns': [project.c.id]},
'fk_meter_project_id',
'fk_sample_project_id'),
({'columns': [meter.c.user_id],
'refcolumns': [user.c.id]},
'fk_meter_user_id',
'fk_sample_user_id')]
for fk in fk_params:
params = fk[0]
if meta.bind.engine.name == 'mysql':
params['name'] = fk[1]
migrate.ForeignKeyConstraint(**params).drop()
for meter_ix, sample_ix in indices:
meter_ix.drop()
sample_ix.create()
for fk in fk_params:
params = fk[0]
if meta.bind.engine.name == 'mysql':
params['name'] = fk[2]
migrate.ForeignKeyConstraint(**params).create()
def _alter_sourceassoc(meta, t_name, ix_name, post_action=False):
if meta.bind.engine.name == 'sqlite':
return
sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
table = sa.Table(t_name, meta, autoload=True)
user = sa.Table('user', meta, autoload=True)
c_name = '%s_id' % t_name
col = getattr(sourceassoc.c, c_name)
uniq_name = 'uniq_sourceassoc0%s0user_id' % c_name
uniq_cols = (c_name, 'user_id')
param = {'columns': [col],
'refcolumns': [table.c.id]}
user_param = {'columns': [sourceassoc.c.user_id],
'refcolumns': [user.c.id]}
if meta.bind.engine.name == 'mysql':
param['name'] = 'fk_sourceassoc_%s' % c_name
user_param['name'] = 'fk_sourceassoc_user_id'
actions = [migrate.ForeignKeyConstraint(**user_param),
migrate.ForeignKeyConstraint(**param),
sa.Index(ix_name, sourceassoc.c.source_id, col),
migrate.UniqueConstraint(*uniq_cols, table=sourceassoc,
name=uniq_name)]
for action in actions:
action.create() if post_action else action.drop()
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
_handle_meter_indices(meta)
meter = sa.Table('meter', meta, autoload=True)
meter.rename('sample')
_alter_sourceassoc(meta, 'meter', 'idx_sm')
sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
sourceassoc.c.meter_id.alter(name='sample_id')
# re-bind metadata to pick up alter name change
meta = sa.MetaData(bind=migrate_engine)
_alter_sourceassoc(meta, 'sample', 'idx_ss', True)


@ -1,87 +0,0 @@
#
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
import sqlalchemy as sa
def handle_rid_index(meta):
if meta.bind.engine.name == 'sqlite':
return
resource = sa.Table('resource', meta, autoload=True)
sample = sa.Table('sample', meta, autoload=True)
params = {'columns': [sample.c.resource_id],
'refcolumns': [resource.c.id],
'name': 'fk_sample_resource_id'}
if meta.bind.engine.name == 'mysql':
# For mysql dialect all dependent FK should be removed
# before index create/delete
migrate.ForeignKeyConstraint(**params).drop()
index = sa.Index('idx_sample_rid_cname', sample.c.resource_id,
sample.c.counter_name)
index.drop()
if meta.bind.engine.name == 'mysql':
migrate.ForeignKeyConstraint(**params).create()
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
meter = sa.Table(
'meter', meta,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('type', sa.String(255)),
sa.Column('unit', sa.String(255)),
sa.UniqueConstraint('name', 'type', 'unit', name='def_unique'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
meter.create()
sample = sa.Table('sample', meta, autoload=True)
query = sa.select([sample.c.counter_name, sample.c.counter_type,
sample.c.counter_unit]).distinct()
for row in query.execute():
meter.insert().values(name=row['counter_name'],
type=row['counter_type'],
unit=row['counter_unit']).execute()
meter_id = sa.Column('meter_id', sa.Integer)
meter_id.create(sample)
params = {'columns': [sample.c.meter_id],
'refcolumns': [meter.c.id]}
if migrate_engine.name == 'mysql':
params['name'] = 'fk_sample_meter_id'
if migrate_engine.name != 'sqlite':
migrate.ForeignKeyConstraint(**params).create()
index = sa.Index('ix_meter_name', meter.c.name)
index.create(bind=migrate_engine)
for row in sa.select([meter]).execute():
(sample.update().
where(sa.and_(sample.c.counter_name == row['name'],
sample.c.counter_type == row['type'],
sample.c.counter_unit == row['unit'])).
values({sample.c.meter_id: row['id']}).execute())
handle_rid_index(meta)
sample.c.counter_name.drop()
sample.c.counter_type.drop()
sample.c.counter_unit.drop()
sample.c.counter_volume.alter(name='volume')
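
In miniature, the normalisation performed above, with hypothetical data: the distinct
(name, type, unit) triples move into the new meter table and each sample keeps only
the meter id.

samples = [('cpu', 'cumulative', 'ns'), ('cpu', 'cumulative', 'ns'),
           ('mem', 'gauge', 'MB')]
meters = {triple: i + 1 for i, triple in enumerate(sorted(set(samples)))}
# meters == {('cpu', 'cumulative', 'ns'): 1, ('mem', 'gauge', 'MB'): 2}
meter_ids = [meters[s] for s in samples]   # [1, 1, 2] replaces the inline triples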


@ -1,23 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
alarm = Table('alarm', meta, autoload=True)
time_constraints = Column('time_constraints', Text())
alarm.create_column(time_constraints)


@ -1,21 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
users = Table('alarm', meta, autoload=True)
users.c.id.alter(name='alarm_id')


@ -1,33 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
TABLES_012 = ['resource', 'sourceassoc', 'user',
'project', 'meter', 'source', 'alarm']
TABLES_027 = ['user', 'project', 'alarm']
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
for table_name in TABLES_027:
try:
(sa.Table('dump027_' + table_name, meta, autoload=True).
drop(checkfirst=True))
except sa.exc.NoSuchTableError:
pass
for table_name in TABLES_012:
try:
(sa.Table('dump_' + table_name, meta, autoload=True).
drop(checkfirst=True))
except sa.exc.NoSuchTableError:
pass


@ -1,84 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint, UniqueConstraint
import sqlalchemy as sa
TABLES_DROP = ['user', 'project']
TABLES = ['user', 'project', 'sourceassoc', 'sample',
'resource', 'alarm_history']
INDEXES = {
"sample": (('user_id', 'user', 'id'),
('project_id', 'project', 'id')),
"sourceassoc": (('user_id', 'user', 'id'),
('project_id', 'project', 'id')),
"resource": (('user_id', 'user', 'id'),
('project_id', 'project', 'id')),
"alarm_history": (('user_id', 'user', 'id'),
('project_id', 'project', 'id'),
('on_behalf_of', 'project', 'id')),
}
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
load_tables = dict((table_name, sa.Table(table_name, meta,
autoload=True))
for table_name in TABLES)
if migrate_engine.name != 'sqlite':
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for column, ref_table_name, ref_column_name in indexes:
ref_table = load_tables[ref_table_name]
params = {'columns': [table.c[column]],
'refcolumns': [ref_table.c[ref_column_name]]}
if (migrate_engine.name == "mysql" and
table_name != 'alarm_history'):
params['name'] = "_".join(('fk', table_name, column))
elif (migrate_engine.name == "postgresql" and
table_name == "sample"):
# The fk contains the old table name
params['name'] = "_".join(('meter', column, 'fkey'))
fkey = ForeignKeyConstraint(**params)
fkey.drop()
sourceassoc = load_tables['sourceassoc']
if migrate_engine.name != 'sqlite':
idx = sa.Index('idx_su', sourceassoc.c.source_id,
sourceassoc.c.user_id)
idx.drop(bind=migrate_engine)
idx = sa.Index('idx_sp', sourceassoc.c.source_id,
sourceassoc.c.project_id)
idx.drop(bind=migrate_engine)
params = {}
if migrate_engine.name == "mysql":
params = {'name': 'uniq_sourceassoc0sample_id'}
uc = UniqueConstraint('sample_id', table=sourceassoc, **params)
uc.create()
params = {}
if migrate_engine.name == "mysql":
params = {'name': 'uniq_sourceassoc0sample_id0user_id'}
uc = UniqueConstraint('sample_id', 'user_id',
table=sourceassoc, **params)
uc.drop()
sourceassoc.c.user_id.drop()
sourceassoc.c.project_id.drop()
for table_name in TABLES_DROP:
sa.Table(table_name, meta, autoload=True).drop()


@ -1,68 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
import sqlalchemy as sa
from ceilometer.storage.sqlalchemy import migration
TABLES = ['sample', 'resource', 'source', 'sourceassoc']
DROP_TABLES = ['resource', 'source', 'sourceassoc']
INDEXES = {
"sample": (('resource_id', 'resource', 'id'),),
"sourceassoc": (('sample_id', 'sample', 'id'),
('resource_id', 'resource', 'id'),
('source_id', 'source', 'id'))
}
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
load_tables = dict((table_name, sa.Table(table_name, meta,
autoload=True))
for table_name in TABLES)
# drop foreign keys
if migrate_engine.name != 'sqlite':
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for column, ref_table_name, ref_column_name in indexes:
ref_table = load_tables[ref_table_name]
params = {'columns': [table.c[column]],
'refcolumns': [ref_table.c[ref_column_name]]}
fk_table_name = table_name
if migrate_engine.name == "mysql":
params['name'] = "_".join(('fk', fk_table_name, column))
elif (migrate_engine.name == "postgresql" and
table_name == 'sample'):
# fk was not renamed in script 030
params['name'] = "_".join(('meter', column, 'fkey'))
fkey = ForeignKeyConstraint(**params)
fkey.drop()
# create source field in sample
sample = load_tables['sample']
sample.create_column(sa.Column('source_id', sa.String(255)))
# move source values to samples
sourceassoc = load_tables['sourceassoc']
query = (sa.select([sourceassoc.c.sample_id, sourceassoc.c.source_id]).
where(sourceassoc.c.sample_id.isnot(None)))
for sample_id, source_id in migration.paged(query):
(sample.update().where(sample_id == sample.c.id).
values({'source_id': source_id}).execute())
# drop tables
for table_name in DROP_TABLES:
sa.Table(table_name, meta, autoload=True).drop()


@ -1,44 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint
import sqlalchemy as sa
class ForeignKeyHandle(object):
def __init__(self, meta):
sample = sa.Table('sample', meta, autoload=True)
meter = sa.Table('meter', meta, autoload=True)
self.sample_params = {'columns': [sample.c.meter_id],
'refcolumns': [meter.c.id]}
if meta.bind.engine.name == 'mysql':
self.sample_params['name'] = "fk_sample_meter_id"
def __enter__(self):
ForeignKeyConstraint(**self.sample_params).drop()
def __exit__(self, type, value, traceback):
ForeignKeyConstraint(**self.sample_params).create()
def upgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
meta = sa.MetaData(bind=migrate_engine)
sample = sa.Table('sample', meta, autoload=True)
with ForeignKeyHandle(meta):
# remove stray indexes implicitly created by InnoDB
for index in sample.indexes:
if index.name in ['fk_sample_meter_id', 'fk_sample_resource_id']:
index.drop()
sa.Index('ix_sample_meter_id', sample.c.meter_id).create()
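
A usage sketch for the context manager above: MySQL refuses to drop or rebuild an
index that backs a foreign key, so the constraint is dropped on entry and restored on
exit.

meta = sa.MetaData(bind=migrate_engine)
sample = sa.Table('sample', meta, autoload=True)
with ForeignKeyHandle(meta):
    sa.Index('ix_sample_meter_id', sample.c.meter_id).create()  # safe in the block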


@ -1,131 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import migrate
from oslo_serialization import jsonutils
import sqlalchemy as sa
m_tables = [('metadata_text', sa.Text, True),
('metadata_bool', sa.Boolean, False),
('metadata_int', sa.BigInteger, False),
('metadata_float', sa.Float(53), False)]
def _migrate_meta_tables(meta, col, new_col, new_fk):
for t_name, t_type, t_nullable in m_tables:
m_table = sa.Table(t_name, meta, autoload=True)
m_table_new = sa.Table(
'%s_new' % t_name, meta,
sa.Column('id', sa.Integer, sa.ForeignKey(new_fk),
primary_key=True),
sa.Column('meta_key', sa.String(255),
primary_key=True),
sa.Column('value', t_type, nullable=t_nullable),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
m_table_new.create()
if m_table.select().scalar() is not None:
m_table_new.insert().from_select(
['id', 'meta_key', 'value'],
sa.select([new_col, m_table.c.meta_key,
m_table.c.value]).where(
col == m_table.c.id).group_by(
new_col, m_table.c.meta_key, m_table.c.value)).execute()
m_table.drop()
if meta.bind.engine.name != 'sqlite':
sa.Index('ix_%s_meta_key' % t_name,
m_table_new.c.meta_key).create()
m_table_new.rename(t_name)
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
resource = sa.Table(
'resource', meta,
sa.Column('internal_id', sa.Integer, primary_key=True),
sa.Column('resource_id', sa.String(255)),
sa.Column('user_id', sa.String(255)),
sa.Column('project_id', sa.String(255)),
sa.Column('source_id', sa.String(255)),
sa.Column('resource_metadata', sa.Text),
sa.Column('metadata_hash', sa.String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8')
resource.create()
# copy resource data in to resource table
sample = sa.Table('sample', meta, autoload=True)
sa.Column('metadata_hash', sa.String(32)).create(sample)
for row in sa.select([sample.c.id, sample.c.resource_metadata]).execute():
sample.update().where(sample.c.id == row['id']).values(
{sample.c.metadata_hash:
hashlib.md5(jsonutils.dumps(
row['resource_metadata'],
sort_keys=True)).hexdigest()}).execute()
query = sa.select([sample.c.resource_id, sample.c.user_id,
sample.c.project_id, sample.c.source_id,
sample.c.resource_metadata,
sample.c.metadata_hash]).distinct()
for row in query.execute():
resource.insert().values(
resource_id=row['resource_id'],
user_id=row['user_id'],
project_id=row['project_id'],
source_id=row['source_id'],
resource_metadata=row['resource_metadata'],
metadata_hash=row['metadata_hash']).execute()
# link sample records to new resource records
sa.Column('resource_id_new', sa.Integer).create(sample)
for row in sa.select([resource]).execute():
(sample.update().
where(sa.and_(
sample.c.resource_id == row['resource_id'],
sample.c.user_id == row['user_id'],
sample.c.project_id == row['project_id'],
sample.c.source_id == row['source_id'],
sample.c.metadata_hash == row['metadata_hash'])).
values({sample.c.resource_id_new: row['internal_id']}).execute())
sample.c.resource_id.drop()
sample.c.metadata_hash.drop()
sample.c.resource_id_new.alter(name='resource_id')
# re-bind metadata to pick up alter name change
meta = sa.MetaData(bind=migrate_engine)
sample = sa.Table('sample', meta, autoload=True)
resource = sa.Table('resource', meta, autoload=True)
if migrate_engine.name != 'sqlite':
sa.Index('ix_resource_resource_id', resource.c.resource_id).create()
sa.Index('ix_sample_user_id', sample.c.user_id).drop()
sa.Index('ix_sample_project_id', sample.c.project_id).drop()
sa.Index('ix_sample_resource_id', sample.c.resource_id).create()
sa.Index('ix_sample_meter_id_resource_id',
sample.c.meter_id, sample.c.resource_id).create()
params = {'columns': [sample.c.resource_id],
'refcolumns': [resource.c.internal_id]}
if migrate_engine.name == 'mysql':
params['name'] = 'fk_sample_resource_internal_id'
migrate.ForeignKeyConstraint(**params).create()
sample.c.user_id.drop()
sample.c.project_id.drop()
sample.c.source_id.drop()
sample.c.resource_metadata.drop()
_migrate_meta_tables(meta, sample.c.id, sample.c.resource_id,
'resource.internal_id')
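
The deduplication key used above is an md5 of the canonical (sorted-keys) JSON of the
metadata, so two samples whose metadata differ only in key order collapse into a
single resource row. A runnable illustration (the .encode() is added for Python 3):

import hashlib
import json

def meta_hash(m):
    return hashlib.md5(json.dumps(m, sort_keys=True).encode('utf-8')).hexdigest()

assert meta_hash({'a': 1, 'b': 2}) == meta_hash({'b': 2, 'a': 1})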


@ -1,56 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(gordc): this is a copy of the 024 migration script, which missed pgsql
import sqlalchemy as sa
from ceilometer.storage.sqlalchemy import migration
from ceilometer.storage.sqlalchemy import models
def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
temp_col_n = 'convert_data_type_temp_col'
# Override column we're going to convert with from_t, since the type we're
    # replacing could be custom and we need to tell SQLAlchemy how to perform
# CRUD operations with it.
table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
extend_existing=True)
sa.Column(temp_col_n, to_t).create(table)
key_attr = getattr(table.c, pk_attr)
orig_col = getattr(table.c, col)
new_col = getattr(table.c, temp_col_n)
query = sa.select([key_attr, orig_col])
for key, value in migration.paged(query):
(table.update().where(key_attr == key).values({temp_col_n: value}).
execute())
orig_col.drop()
new_col.alter(name=col)
if index:
sa.Index('ix_%s_%s' % (table.name, col), new_col).create()
def upgrade(migrate_engine):
if migrate_engine.name == 'postgresql':
meta = sa.MetaData(bind=migrate_engine)
event = sa.Table('event', meta, autoload=True)
_convert_data_type(event, 'generated', sa.Float(),
models.PreciseTimestamp(),
pk_attr='id', index=True)
trait = sa.Table('trait', meta, autoload=True)
_convert_data_type(trait, 't_datetime', sa.Float(),
models.PreciseTimestamp(),
pk_attr='id', index=True)


@ -1,24 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
alarm = Table('alarm', meta, autoload=True)
severity = Column('severity', String(50))
alarm.create_column(severity)


@ -1,54 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from ceilometer.storage.sqlalchemy import models
tables = [('trait_text', sa.String(255), True, 't_string', 1),
('trait_int', sa.Integer, False, 't_int', 2),
('trait_float', sa.Float(53), False, 't_float', 3),
('trait_datetime', models.PreciseTimestamp(),
False, 't_datetime', 4)]
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
trait = sa.Table('trait', meta, autoload=True)
event = sa.Table('event', meta, autoload=True)
trait_type = sa.Table('trait_type', meta, autoload=True)
for t_name, t_type, t_nullable, col_name, __ in tables:
t_table = sa.Table(
t_name, meta,
sa.Column('event_id', sa.Integer,
sa.ForeignKey(event.c.id), primary_key=True),
sa.Column('key', sa.String(255), primary_key=True),
sa.Column('value', t_type, nullable=t_nullable),
sa.Index('ix_%s_event_id_key' % t_name,
'event_id', 'key'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
t_table.create()
query = sa.select(
[trait.c.event_id,
trait_type.c.desc,
trait.c[col_name]]).select_from(
trait.join(trait_type,
trait.c.trait_type_id == trait_type.c.id)).where(
trait.c[col_name] != sa.null())
if query.alias().select().scalar() is not None:
t_table.insert().from_select(
['event_id', 'key', 'value'], query).execute()
trait.drop()
trait_type.drop()


@ -1,21 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
event = sa.Table('event', meta, autoload=True)
raw = sa.Column('raw', sa.Text)
event.create_column(raw)


@ -1,19 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def upgrade(migrate_engine):
    # NOTE(gordc): this is a noop script to handle bug 1468916.
    # The previous lowering of id length will fail if the db contains
    # longer data; this skips the migration for those failures. The next
    # script will resize if the original migration passed.
pass


@ -1,37 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
resource = Table('resource', meta, autoload=True)
resource.c.user_id.alter(type=String(255))
resource.c.project_id.alter(type=String(255))
resource.c.resource_id.alter(type=String(255))
resource.c.source_id.alter(type=String(255))
sample = Table('sample', meta, autoload=True)
sample.c.message_signature.alter(type=String(64))
sample.c.message_id.alter(type=String(128))
alarm = Table('alarm', meta, autoload=True)
alarm.c.alarm_id.alter(type=String(128))
alarm.c.user_id.alter(type=String(255))
alarm.c.project_id.alter(type=String(255))
alarm_history = Table('alarm_history', meta, autoload=True)
alarm_history.c.alarm_id.alter(type=String(128))
alarm_history.c.user_id.alter(type=String(255))
alarm_history.c.project_id.alter(type=String(255))
alarm_history.c.event_id.alter(type=String(128))
alarm_history.c.on_behalf_of.alter(type=String(255))


@ -1,21 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
# Add index on metadata_hash column of resource
def upgrade(migrate_engine):
meta = sa.MetaData(bind=migrate_engine)
resource = sa.Table('resource', meta, autoload=True)
index = sa.Index('ix_resource_metadata_hash', resource.c.metadata_hash)
index.create(bind=migrate_engine)


@ -1,29 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def paged(query, size=1000):
"""Page query results
:param query: the SQLAlchemy query to execute
:param size: the max page size
    :return: generator with query data
"""
offset = 0
while True:
page = query.offset(offset).limit(size).execute()
if page.rowcount <= 0:
# There are no more rows
break
for row in page:
yield row
offset += size
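
A usage sketch for the helper above, assuming a table bound to an engine; rows arrive
in fixed-size windows instead of one unbounded result set:

sample = sa.Table('sample', meta, autoload=True)
query = sa.select([sample.c.id, sample.c.volume])
for sample_id, volume in paged(query, size=500):
    pass  # at most 500 rows are buffered per round trip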


@ -1,250 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for Ceilometer data.
"""
import hashlib
import json
from oslo_utils import timeutils
import six
from sqlalchemy import (Column, Integer, String, ForeignKey, Index,
UniqueConstraint, BigInteger)
from sqlalchemy import event
from sqlalchemy import Float, Boolean, Text, DateTime
from sqlalchemy.dialects.mysql import DECIMAL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import deferred
from sqlalchemy.orm import relationship
from sqlalchemy.types import TypeDecorator
from ceilometer import utils
class JSONEncodedDict(TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = Text
@staticmethod
def process_bind_param(value, dialect):
if value is not None:
value = json.dumps(value)
return value
@staticmethod
def process_result_value(value, dialect):
if value is not None:
value = json.loads(value)
return value
class PreciseTimestamp(TypeDecorator):
"""Represents a timestamp precise to the microsecond."""
impl = DateTime
def load_dialect_impl(self, dialect):
if dialect.name == 'mysql':
return dialect.type_descriptor(DECIMAL(precision=20,
scale=6,
asdecimal=True))
return self.impl
@staticmethod
def process_bind_param(value, dialect):
if value is None:
return value
elif dialect.name == 'mysql':
return utils.dt_to_decimal(value)
return value
@staticmethod
def process_result_value(value, dialect):
if value is None:
return value
elif dialect.name == 'mysql':
return utils.decimal_to_dt(value)
return value
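# On MySQL the decorator above stores timestamps as DECIMAL(20, 6): DATETIME columns
# had whole-second resolution before MySQL 5.6.4. A hedged round-trip sketch using
# utils.dt_to_decimal/decimal_to_dt (defined in ceilometer.utils, not in this diff):
#
#     ts = datetime.datetime(2017, 10, 17, 18, 14, 1, 123456)
#     assert utils.decimal_to_dt(utils.dt_to_decimal(ts)) == ts  # microseconds survive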
_COMMON_TABLE_ARGS = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"}
class CeilometerBase(object):
"""Base class for Ceilometer Models."""
__table_args__ = _COMMON_TABLE_ARGS
__table_initialized__ = False
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
Base = declarative_base(cls=CeilometerBase)
class MetaText(Base):
"""Metering text metadata."""
__tablename__ = 'metadata_text'
__table_args__ = (
Index('ix_meta_text_key', 'meta_key'),
_COMMON_TABLE_ARGS,
)
id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True)
meta_key = Column(String(255), primary_key=True)
value = Column(Text)
class MetaBool(Base):
"""Metering boolean metadata."""
__tablename__ = 'metadata_bool'
__table_args__ = (
Index('ix_meta_bool_key', 'meta_key'),
_COMMON_TABLE_ARGS,
)
id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True)
meta_key = Column(String(255), primary_key=True)
value = Column(Boolean)
class MetaBigInt(Base):
"""Metering integer metadata."""
__tablename__ = 'metadata_int'
__table_args__ = (
Index('ix_meta_int_key', 'meta_key'),
_COMMON_TABLE_ARGS,
)
id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True)
meta_key = Column(String(255), primary_key=True)
value = Column(BigInteger, default=False)
class MetaFloat(Base):
"""Metering float metadata."""
__tablename__ = 'metadata_float'
__table_args__ = (
Index('ix_meta_float_key', 'meta_key'),
_COMMON_TABLE_ARGS,
)
id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True)
meta_key = Column(String(255), primary_key=True)
value = Column(Float(53), default=False)
class Meter(Base):
"""Meter definition data."""
__tablename__ = 'meter'
__table_args__ = (
UniqueConstraint('name', 'type', 'unit', name='def_unique'),
Index('ix_meter_name', 'name'),
_COMMON_TABLE_ARGS,
)
id = Column(Integer, primary_key=True)
name = Column(String(255), nullable=False)
type = Column(String(255))
unit = Column(String(255))
samples = relationship("Sample", backref="meter")
class Resource(Base):
"""Resource data."""
__tablename__ = 'resource'
__table_args__ = (
# TODO(gordc): this should exist but the attribute values we set
# for user/project/source/resource id's are too large
# for an uuid.
# UniqueConstraint('resource_id', 'user_id', 'project_id',
# 'source_id', 'metadata_hash',
# name='res_def_unique'),
Index('ix_resource_resource_id', 'resource_id'),
Index('ix_resource_metadata_hash', 'metadata_hash'),
_COMMON_TABLE_ARGS,
)
internal_id = Column(Integer, primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
source_id = Column(String(255))
resource_id = Column(String(255), nullable=False)
resource_metadata = deferred(Column(JSONEncodedDict()))
metadata_hash = deferred(Column(String(32)))
samples = relationship("Sample", backref="resource")
meta_text = relationship("MetaText", backref="resource",
cascade="all, delete-orphan")
meta_float = relationship("MetaFloat", backref="resource",
cascade="all, delete-orphan")
meta_int = relationship("MetaBigInt", backref="resource",
cascade="all, delete-orphan")
meta_bool = relationship("MetaBool", backref="resource",
cascade="all, delete-orphan")
@event.listens_for(Resource, "before_insert")
def before_insert(mapper, connection, target):
    # Hash the canonical (sorted-key) JSON form; encode it first so the
    # md5 call also works on Python 3.
    metadata = json.dumps(target.resource_metadata, sort_keys=True)
    target.metadata_hash = hashlib.md5(metadata.encode('utf-8')).hexdigest()
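# A helper sketch (not part of the original model) mirroring the hashing
# rule above, e.g. for looking up a resource row by metadata without
# loading the JSON blob.
def _metadata_hash(resource_metadata):
    canonical = json.dumps(resource_metadata, sort_keys=True)
    return hashlib.md5(canonical.encode('utf-8')).hexdigest()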
class Sample(Base):
"""Metering data."""
__tablename__ = 'sample'
__table_args__ = (
Index('ix_sample_timestamp', 'timestamp'),
Index('ix_sample_resource_id', 'resource_id'),
Index('ix_sample_meter_id', 'meter_id'),
Index('ix_sample_meter_id_resource_id', 'meter_id', 'resource_id'),
_COMMON_TABLE_ARGS,
)
id = Column(Integer, primary_key=True)
meter_id = Column(Integer, ForeignKey('meter.id'))
resource_id = Column(Integer, ForeignKey('resource.internal_id'))
volume = Column(Float(53))
timestamp = Column(PreciseTimestamp(), default=lambda: timeutils.utcnow())
recorded_at = Column(PreciseTimestamp(),
default=lambda: timeutils.utcnow())
message_signature = Column(String(64))
message_id = Column(String(128))
class FullSample(object):
"""A fake model for query samples."""
id = Sample.id
timestamp = Sample.timestamp
message_id = Sample.message_id
message_signature = Sample.message_signature
recorded_at = Sample.recorded_at
counter_name = Meter.name
counter_type = Meter.type
counter_unit = Meter.unit
counter_volume = Sample.volume
resource_id = Resource.resource_id
source_id = Resource.source_id
user_id = Resource.user_id
project_id = Resource.project_id
resource_metadata = Resource.resource_metadata
internal_id = Resource.internal_id
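# FullSample only aliases columns; the query side still has to join the
# three tables it spans. A sketch of that join, with `session` assumed
# to come from the storage connection's engine facade:
def _full_sample_query(session):
    return (session.query(Sample)
            .join(Meter, Sample.meter_id == Meter.id)
            .join(Resource, Sample.resource_id == Resource.internal_id))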

View File

@ -1,131 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import operator
import six
from sqlalchemy import and_
from sqlalchemy import asc
from sqlalchemy import desc
from sqlalchemy import not_
from sqlalchemy import or_
from sqlalchemy.orm import aliased
import ceilometer
from ceilometer.storage.sqlalchemy import models
META_TYPE_MAP = {bool: models.MetaBool,
str: models.MetaText,
six.text_type: models.MetaText,
type(None): models.MetaText,
int: models.MetaBigInt,
float: models.MetaFloat}
if six.PY2:
META_TYPE_MAP[long] = models.MetaBigInt
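# Routing sketch: the type of the queried value picks the metadata table,
# e.g. resource_metadata.cpu_count (an int) lands in metadata_int while
# resource_metadata.display_name (text) lands in metadata_text.
_INT_META_TABLE = META_TYPE_MAP[type(4)]       # models.MetaBigInt
_TEXT_META_TABLE = META_TYPE_MAP[type(u'vm')]  # models.MetaText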
class QueryTransformer(object):
operators = {"=": operator.eq,
"<": operator.lt,
">": operator.gt,
"<=": operator.le,
"=<": operator.le,
">=": operator.ge,
"=>": operator.ge,
"!=": operator.ne,
"in": lambda field_name, values: field_name.in_(values),
"=~": lambda field, value: field.op("regexp")(value)}
# operators which are different for different dialects
dialect_operators = {'postgresql': {'=~': (lambda field, value:
field.op("~")(value))}}
complex_operators = {"or": or_,
"and": and_,
"not": not_}
ordering_functions = {"asc": asc,
"desc": desc}
def __init__(self, table, query, dialect='mysql'):
self.table = table
self.query = query
self.dialect_name = dialect
def _get_operator(self, op):
return (self.dialect_operators.get(self.dialect_name, {}).get(op)
or self.operators[op])
def _handle_complex_op(self, complex_op, nodes):
op = self.complex_operators[complex_op]
if op == not_:
nodes = [nodes]
element_list = []
for node in nodes:
element = self._transform(node)
element_list.append(element)
return op(*element_list)
def _handle_simple_op(self, simple_op, nodes):
op = self._get_operator(simple_op)
field_name, value = list(nodes.items())[0]
if field_name.startswith('resource_metadata.'):
return self._handle_metadata(op, field_name, value)
else:
return op(getattr(self.table, field_name), value)
def _handle_metadata(self, op, field_name, value):
if op == self.operators["in"]:
raise ceilometer.NotImplementedError('Metadata query with in '
'operator is not implemented')
field_name = field_name[len('resource_metadata.'):]
meta_table = META_TYPE_MAP[type(value)]
meta_alias = aliased(meta_table)
on_clause = and_(self.table.internal_id == meta_alias.id,
meta_alias.meta_key == field_name)
        # An outer join is needed to support a metaquery that uses the
        # "or" operator on a non-existent metadata field; see the
        # test_query_non_existing_metadata_with_result test case.
self.query = self.query.outerjoin(meta_alias, on_clause)
return op(meta_alias.value, value)
def _transform(self, sub_tree):
operator, nodes = list(sub_tree.items())[0]
if operator in self.complex_operators:
return self._handle_complex_op(operator, nodes)
else:
return self._handle_simple_op(operator, nodes)
def apply_filter(self, expression_tree):
condition = self._transform(expression_tree)
self.query = self.query.filter(condition)
def apply_options(self, orderby, limit):
self._apply_order_by(orderby)
if limit is not None:
self.query = self.query.limit(limit)
def _apply_order_by(self, orderby):
if orderby is not None:
for field in orderby:
attr, order = list(field.items())[0]
ordering_function = self.ordering_functions[order]
self.query = self.query.order_by(ordering_function(
getattr(self.table, attr)))
else:
self.query = self.query.order_by(desc(self.table.timestamp))
def get_query(self):
return self.query
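# A usage sketch, assuming `query` already joins the tables behind
# models.FullSample (as the storage driver's complex-query code did):
def _filtered_samples(query):
    transformer = QueryTransformer(models.FullSample, query,
                                   dialect='postgresql')
    transformer.apply_filter({"and": [
        {"=": {"counter_name": "cpu_util"}},
        {">=": {"counter_volume": 0.8}},
    ]})
    transformer.apply_options(orderby=[{"timestamp": "desc"}], limit=10)
    return transformer.get_query().all()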

View File

@ -1,214 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for API tests."""
import os
import uuid
import warnings
import fixtures
import mock
import six
from six.moves.urllib import parse as urlparse
import sqlalchemy
from testtools import testcase
from ceilometer import service
from ceilometer import storage
from ceilometer.tests import base as test_base
try:
from ceilometer.tests import mocks
except ImportError:
mocks = None # happybase module is not Python 3 compatible yet
class DBManager(fixtures.Fixture):
def __init__(self, conf, url):
self._url = url
self._conf = conf
class MongoDbManager(DBManager):
def setUp(self):
super(MongoDbManager, self).setUp()
with warnings.catch_warnings():
warnings.filterwarnings(
action='ignore',
message='.*you must provide a username and password.*')
try:
self.connection = storage.get_connection(self._conf, self.url)
except storage.StorageBadVersion as e:
raise testcase.TestSkipped(six.text_type(e))
@property
def url(self):
return '%(url)s_%(db)s' % {
'url': self._url,
'db': uuid.uuid4().hex
}
class SQLManager(DBManager):
def __init__(self, conf, url):
super(SQLManager, self).__init__(conf, url)
db_name = 'ceilometer_%s' % uuid.uuid4().hex
engine = sqlalchemy.create_engine(url)
conn = engine.connect()
self._create_database(conn, db_name)
conn.close()
engine.dispose()
parsed = list(urlparse.urlparse(url))
parsed[2] = '/' + db_name
self.url = urlparse.urlunparse(parsed)
def setUp(self):
super(SQLManager, self).setUp()
self.connection = storage.get_connection(self._conf, self.url)
class PgSQLManager(SQLManager):
@staticmethod
def _create_database(conn, db_name):
conn.connection.set_isolation_level(0)
conn.execute('CREATE DATABASE %s WITH TEMPLATE template0;' % db_name)
conn.connection.set_isolation_level(1)
class MySQLManager(SQLManager):
@staticmethod
def _create_database(conn, db_name):
conn.execute('CREATE DATABASE %s;' % db_name)
class HBaseManager(DBManager):
def setUp(self):
super(HBaseManager, self).setUp()
self.connection = storage.get_connection(self._conf, self.url)
        # Use a unique prefix for each test so its data can be told
        # apart, because all test data is stored in one table.
data_prefix = str(uuid.uuid4().hex)
def table(conn, name):
return mocks.MockHBaseTable(name, conn, data_prefix)
        # Patch only the real HBase connection: its "table" method is
        # replaced so tests get MockHBaseTable instances.
mock.patch('happybase.Connection.table', new=table).start()
        # We shouldn't delete data and tables after each test because
        # that takes too long; all test tables are deleted in
        # setup-test-env.sh.
mock.patch("happybase.Connection.disable_table",
new=mock.MagicMock()).start()
mock.patch("happybase.Connection.delete_table",
new=mock.MagicMock()).start()
mock.patch("happybase.Connection.create_table",
new=mock.MagicMock()).start()
@property
def url(self):
return '%s?table_prefix=%s&table_prefix_separator=%s' % (
self._url,
os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test"),
os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR", "_")
)
class SQLiteManager(DBManager):
def setUp(self):
super(SQLiteManager, self).setUp()
self.url = self._url
self.connection = storage.get_connection(self._conf, self._url)
@six.add_metaclass(test_base.SkipNotImplementedMeta)
class TestBase(test_base.BaseTestCase):
DRIVER_MANAGERS = {
'mongodb': MongoDbManager,
'mysql': MySQLManager,
'postgresql': PgSQLManager,
'sqlite': SQLiteManager,
}
if mocks is not None:
DRIVER_MANAGERS['hbase'] = HBaseManager
def setUp(self):
super(TestBase, self).setUp()
db_url = os.environ.get('PIFPAF_URL', "sqlite://").replace(
"mysql://", "mysql+pymysql://")
engine = urlparse.urlparse(db_url).scheme
# in case some drivers have additional specification, for example:
# PyMySQL will have scheme mysql+pymysql
engine = engine.split('+')[0]
# NOTE(Alexei_987) Shortcut to skip expensive db setUp
test_method = self._get_test_method()
if (hasattr(test_method, '_run_with')
and engine not in test_method._run_with):
raise testcase.TestSkipped(
'Test is not applicable for %s' % engine)
self.CONF = service.prepare_service([], [])
manager = self.DRIVER_MANAGERS.get(engine)
if not manager:
self.skipTest("missing driver manager: %s" % engine)
self.db_manager = manager(self.CONF, db_url)
self.useFixture(self.db_manager)
self.conn = self.db_manager.connection
self.conn.upgrade()
self.useFixture(fixtures.MockPatch('ceilometer.storage.get_connection',
side_effect=self._get_connection))
# Set a default location for the pipeline config file so the
# tests work even if ceilometer is not installed globally on
# the system.
self.CONF.import_opt('pipeline_cfg_file', 'ceilometer.pipeline')
def tearDown(self):
self.conn.clear()
self.conn = None
super(TestBase, self).tearDown()
def _get_connection(self, conf, url):
return self.conn
def run_with(*drivers):
"""Used to mark tests that are only applicable for certain db driver.
Skips test if driver is not available.
"""
def decorator(test):
if isinstance(test, type) and issubclass(test, TestBase):
# Decorate all test methods
for attr in dir(test):
value = getattr(test, attr)
if callable(value) and attr.startswith('test_'):
if six.PY3:
value._run_with = drivers
else:
value.__func__._run_with = drivers
else:
test._run_with = drivers
return test
return decorator
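# Illustrative only (these are not real test cases): run_with can gate a
# whole class or a single test method.
@run_with('mysql', 'postgresql')
class _SQLOnlyExample(TestBase):
    def test_sql_specific_behaviour(self):
        pass

class _MixedExample(TestBase):
    @run_with('mongodb')
    def test_mongo_specific_behaviour(self):
        pass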

View File

@ -1,51 +0,0 @@
#!/bin/bash -xe
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is executed inside post_test_hook function in devstack gate.
function generate_testr_results {
if [ -f .testrepository/0 ]; then
sudo .tox/py-functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit
sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit
sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html
sudo gzip -9 $BASE/logs/testrepository.subunit
sudo gzip -9 $BASE/logs/testr_results.html
sudo chown $USER:$USER $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
fi
}
export CEILOMETER_DIR="$BASE/new/ceilometer"
# Go to the ceilometer dir
cd $CEILOMETER_DIR
if [[ -z "$STACK_USER" ]]; then
export STACK_USER=stack
fi
sudo chown -R $STACK_USER:stack $CEILOMETER_DIR
# Run tests
echo "Running ceilometer functional test suite"
set +e
# NOTE(ityaptin) Expects a script parameter containing the backend name
CEILOMETER_TEST_BACKEND="$1" sudo -E -H -u ${STACK_USER:-${USER}} tox -epy-functional
EXIT_CODE=$?
set -e
# Collect and parse result
generate_testr_results
exit $EXIT_CODE

View File

@ -1,80 +0,0 @@
#
# Copyright 2015 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/direct.py
"""
import datetime
import uuid
from oslo_utils import netutils
from ceilometer.publisher import direct
from ceilometer import sample
from ceilometer.tests import db as tests_db
class TestDirectPublisher(tests_db.TestBase):
resource_id = str(uuid.uuid4())
test_data = [
sample.Sample(
name='alpha',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id=resource_id,
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='beta',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id=resource_id,
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='gamma',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id=resource_id,
timestamp=datetime.datetime.now().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
]
def test_direct_publisher(self):
"""Test samples are saved."""
self.CONF.set_override('connection', self.db_manager.url,
group='database')
parsed_url = netutils.urlsplit('direct://')
publisher = direct.DirectPublisher(self.CONF, parsed_url)
publisher.publish_samples(self.test_data)
meters = list(self.conn.get_meters(resource=self.resource_id))
names = sorted([meter.name for meter in meters])
        self.assertEqual(3, len(meters), 'There should be 3 meters')
self.assertEqual(['alpha', 'beta', 'gamma'], names)

View File

@ -1,94 +0,0 @@
#
# Copyright 2012, 2013 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_hbase.py
.. note::
  In order to run the tests against a real HBase server, set the environment
variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before
running the tests. Make sure the Thrift server is running on that server.
"""
import mock
try:
import happybase # noqa
except ImportError:
import testtools.testcase
raise testtools.testcase.TestSkipped("happybase is needed")
from ceilometer.storage import impl_hbase as hbase
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
class ConnectionTest(tests_db.TestBase):
@tests_db.run_with('hbase')
def test_hbase_connection(self):
class TestConn(object):
def __init__(self, host, port):
self.netloc = '%s:%s' % (host, port)
def open(self):
pass
def get_connection_pool(conf):
return TestConn(conf['host'], conf['port'])
with mock.patch.object(hbase.Connection, '_get_connection_pool',
side_effect=get_connection_pool):
conn = hbase.Connection(self.CONF, 'hbase://test_hbase:9090')
self.assertIsInstance(conn.conn_pool, TestConn)
class CapabilitiesTest(test_base.BaseTestCase):
# Check the returned capabilities list, which is specific to each DB
# driver
def test_capabilities(self):
expected_capabilities = {
'meters': {'query': {'simple': True,
'metadata': True}},
'resources': {'query': {'simple': True,
'metadata': True}},
'samples': {'query': {'simple': True,
'metadata': True,
'complex': False}},
'statistics': {'groupby': False,
'query': {'simple': True,
'metadata': True},
'aggregation': {'standard': True,
'selectable': {
'max': False,
'min': False,
'sum': False,
'avg': False,
'count': False,
'stddev': False,
'cardinality': False}}
},
}
actual_capabilities = hbase.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},
}
actual_capabilities = hbase.Connection.get_storage_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)

View File

@ -1,29 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_log.py
"""
from oslotest import base
from ceilometer.storage import impl_log
class ConnectionTest(base.BaseTestCase):
@staticmethod
def test_get_connection():
conn = impl_log.Connection(None, None)
conn.record_metering_data({'counter_name': 'test',
'resource_id': __name__,
'counter_volume': 1,
})

View File

@ -1,117 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_mongodb.py
.. note::
  In order to run the tests against another MongoDB server, set the
environment variable CEILOMETER_TEST_MONGODB_URL to point to a MongoDB
server before running the tests.
"""
from ceilometer.storage import impl_mongodb
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
@tests_db.run_with('mongodb')
class MongoDBConnection(tests_db.TestBase):
def test_connection_pooling(self):
test_conn = impl_mongodb.Connection(self.CONF, self.db_manager.url)
self.assertEqual(self.conn.conn, test_conn.conn)
def test_replica_set(self):
url = self.db_manager._url + '?replicaSet=foobar'
conn = impl_mongodb.Connection(self.CONF, url)
self.assertTrue(conn.conn)
@tests_db.run_with('mongodb')
class IndexTest(tests_db.TestBase):
def _test_ttl_index_absent(self, conn, coll_name, ttl_opt):
# create a fake index and check it is deleted
coll = getattr(conn.db, coll_name)
index_name = '%s_ttl' % coll_name
self.CONF.set_override(ttl_opt, -1, group='database')
conn.upgrade()
self.assertNotIn(index_name, coll.index_information())
self.CONF.set_override(ttl_opt, 456789, group='database')
conn.upgrade()
self.assertEqual(456789,
coll.index_information()
[index_name]['expireAfterSeconds'])
def test_meter_ttl_index_absent(self):
self._test_ttl_index_absent(self.conn, 'meter',
'metering_time_to_live')
def _test_ttl_index_present(self, conn, coll_name, ttl_opt):
coll = getattr(conn.db, coll_name)
self.CONF.set_override(ttl_opt, 456789, group='database')
conn.upgrade()
index_name = '%s_ttl' % coll_name
self.assertEqual(456789,
coll.index_information()
[index_name]['expireAfterSeconds'])
self.CONF.set_override(ttl_opt, -1, group='database')
conn.upgrade()
self.assertNotIn(index_name, coll.index_information())
def test_meter_ttl_index_present(self):
self._test_ttl_index_present(self.conn, 'meter',
'metering_time_to_live')
class CapabilitiesTest(test_base.BaseTestCase):
# Check the returned capabilities list, which is specific to each DB
# driver
def test_capabilities(self):
expected_capabilities = {
'meters': {'query': {'simple': True,
'metadata': True}},
'resources': {'query': {'simple': True,
'metadata': True}},
'samples': {'query': {'simple': True,
'metadata': True,
'complex': True}},
'statistics': {'groupby': True,
'query': {'simple': True,
'metadata': True},
'aggregation': {'standard': True,
'selectable': {
'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True}}
},
}
actual_capabilities = impl_mongodb.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},
}
actual_capabilities = (impl_mongodb.Connection.
get_storage_capabilities())
self.assertEqual(expected_capabilities, actual_capabilities)

View File

@ -1,154 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_sqlalchemy.py
.. note::
  In order to run the tests against a real SQL server, set the environment
variable CEILOMETER_TEST_SQL_URL to point to a SQL server before running
the tests.
"""
import datetime
import warnings
import mock
from oslo_db import exception
from oslo_utils import timeutils
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.storage import impl_sqlalchemy
from ceilometer.storage.sqlalchemy import models as sql_models
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
from ceilometer.tests.functional.storage \
import test_storage_scenarios as scenarios
@tests_db.run_with('sqlite', 'mysql', 'postgresql')
class CeilometerBaseTest(tests_db.TestBase):
def test_ceilometer_base(self):
base = sql_models.CeilometerBase()
base['key'] = 'value'
self.assertEqual('value', base['key'])
@tests_db.run_with('sqlite')
class EngineFacadeTest(tests_db.TestBase):
@mock.patch.object(warnings, 'warn')
def test_no_not_supported_warning(self, mocked):
impl_sqlalchemy.Connection(self.CONF, 'sqlite://')
self.assertNotIn(mock.call(mock.ANY, exception.NotSupportedWarning),
mocked.call_args_list)
@tests_db.run_with('sqlite', 'mysql', 'postgresql')
class RelationshipTest(scenarios.DBTestBase):
# Note: Do not derive from SQLAlchemyEngineTestBase, since we
# don't want to automatically inherit all the Meter setup.
@mock.patch.object(timeutils, 'utcnow')
def test_clear_metering_data_meta_tables(self, mock_utcnow):
mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45)
self.conn.clear_expired_metering_data(3 * 60)
session = self.conn._engine_facade.get_session()
self.assertEqual(5, session.query(sql_models.Sample).count())
resource_ids = (session.query(sql_models.Resource.internal_id)
.group_by(sql_models.Resource.internal_id))
meta_tables = [sql_models.MetaText, sql_models.MetaFloat,
sql_models.MetaBigInt, sql_models.MetaBool]
s = set()
for table in meta_tables:
self.assertEqual(0, (session.query(table)
.filter(~table.id.in_(resource_ids)).count()
))
s.update(session.query(table.id).all())
self.assertEqual(set(resource_ids.all()), s)
class CapabilitiesTest(test_base.BaseTestCase):
# Check the returned capabilities list, which is specific to each DB
# driver
def test_capabilities(self):
expected_capabilities = {
'meters': {'query': {'simple': True,
'metadata': True}},
'resources': {'query': {'simple': True,
'metadata': True}},
'samples': {'query': {'simple': True,
'metadata': True,
'complex': True}},
'statistics': {'groupby': True,
'query': {'simple': True,
'metadata': True},
'aggregation': {'standard': True,
'selectable': {
'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True}}
},
}
actual_capabilities = impl_sqlalchemy.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},
}
actual_capabilities = (impl_sqlalchemy.
Connection.get_storage_capabilities())
self.assertEqual(expected_capabilities, actual_capabilities)
@tests_db.run_with('sqlite', 'mysql', 'postgresql')
class FilterQueryTestForMeters(scenarios.DBTestBase):
def prepare_data(self):
self.counters = []
c = sample.Sample(
'volume.size',
'gauge',
'GiB',
5,
user_id=None,
project_id=None,
resource_id='fake_id',
timestamp=datetime.datetime(2012, 9, 25, 10, 30),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.counter',
},
source='test',
)
self.counters.append(c)
msg = utils.meter_message_from_counter(
c,
secret='not-so-secret')
self.conn.record_metering_data(msg)
def test_get_meters_by_user(self):
meters = list(self.conn.get_meters(user='None'))
self.assertEqual(1, len(meters))
def test_get_meters_by_project(self):
meters = list(self.conn.get_meters(project='None'))
self.assertEqual(1, len(meters))

View File

@ -1,145 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the mongodb functionality
"""
import copy
import datetime
import mock
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.tests import db as tests_db
from ceilometer.tests.functional.storage import test_storage_scenarios
@tests_db.run_with('mongodb')
class CompatibilityTest(test_storage_scenarios.DBTestBase):
def prepare_data(self):
def old_record_metering_data(self, data):
received_timestamp = datetime.datetime.utcnow()
self.db.resource.update(
{'_id': data['resource_id']},
{'$set': {'project_id': data['project_id'],
'user_id': data['user_id'],
# Current metadata being used and when it was
# last updated.
'timestamp': data['timestamp'],
'received_timestamp': received_timestamp,
'metadata': data['resource_metadata'],
'source': data['source'],
},
'$addToSet': {'meter': {'counter_name': data['counter_name'],
'counter_type': data['counter_type'],
},
},
},
upsert=True,
)
record = copy.copy(data)
self.db.meter.insert(record)
        # Stub out record_metering_data with the old DB schema, the one
        # without 'counter_unit'.
with mock.patch.object(self.conn, 'record_metering_data',
side_effect=old_record_metering_data):
self.counters = []
c = sample.Sample(
'volume.size',
'gauge',
'GiB',
5,
'user-id',
'project1',
'resource-id',
timestamp=datetime.datetime(2012, 9, 25, 10, 30),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.counter',
},
source='test',
)
self.counters.append(c)
msg = utils.meter_message_from_counter(
c,
secret='not-so-secret')
self.conn.record_metering_data(self.conn, msg)
def test_counter_unit(self):
meters = list(self.conn.get_meters())
self.assertEqual(1, len(meters))
@tests_db.run_with('mongodb')
class FilterQueryTestForMeters(test_storage_scenarios.DBTestBase):
def prepare_data(self):
def old_record_metering_data(self, data):
received_timestamp = datetime.datetime.utcnow()
self.db.resource.update(
{'_id': data['resource_id']},
{'$set': {'project_id': data['project_id'],
'user_id': data['user_id'],
# Current metadata being used and when it was
# last updated.
'timestamp': data['timestamp'],
'received_timestamp': received_timestamp,
'metadata': data['resource_metadata'],
'source': data['source'],
},
'$addToSet': {'meter': {'counter_name': data['counter_name'],
'counter_type': data['counter_type'],
},
},
},
upsert=True,
)
record = copy.copy(data)
self.db.meter.insert(record)
        # Stub out record_metering_data with the old DB schema, the one
        # without 'counter_unit'.
with mock.patch.object(self.conn, 'record_metering_data',
side_effect=old_record_metering_data):
self.counters = []
c = sample.Sample(
'volume.size',
'gauge',
'GiB',
5,
None,
None,
None,
timestamp=datetime.datetime(2012, 9, 25, 10, 30),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.counter',
},
source='test',
)
self.counters.append(c)
msg = utils.meter_message_from_counter(
c,
secret='not-so-secret')
self.conn.record_metering_data(self.conn, msg)
def test_get_meters_by_user(self):
meters = list(self.conn.get_meters(user='None'))
self.assertEqual(1, len(meters))
def test_get_meters_by_resource(self):
meters = list(self.conn.get_meters(resource='None'))
self.assertEqual(1, len(meters))
def test_get_meters_by_project(self):
meters = list(self.conn.get_meters(project='None'))
self.assertEqual(1, len(meters))

File diff suppressed because it is too large

View File

@ -1,81 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import happybase
class MockHBaseTable(happybase.Table):
def __init__(self, name, connection, data_prefix):
# data_prefix is added to all rows which are written
# in this test. It allows to divide data from different tests
self.data_prefix = data_prefix
# We create happybase Table with prefix from
# CEILOMETER_TEST_HBASE_TABLE_PREFIX
prefix = os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", 'test')
separator = os.getenv(
"CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR", '_')
super(MockHBaseTable, self).__init__(
"%s%s%s" % (prefix, separator, name),
connection)
def put(self, row, *args, **kwargs):
row = self.data_prefix + row
return super(MockHBaseTable, self).put(row, *args,
**kwargs)
def scan(self, row_start=None, row_stop=None, row_prefix=None,
columns=None, filter=None, timestamp=None,
include_timestamp=False, batch_size=10, scan_batching=None,
limit=None, sorted_columns=False):
        # Add the data prefix to the row parameters; row_prefix cannot
        # be combined with row_start or row_stop.
if not row_start and not row_stop:
row_prefix = self.data_prefix + (row_prefix or "")
row_start = None
row_stop = None
elif row_start and not row_stop:
            # Adding data_prefix to row_start and row_stop does not work
            # if it ends up as row_start = %data_prefix%foo and
            # row_stop = %data_prefix%, because then row_start > row_stop.
filter = self._update_filter_row(filter)
row_start = self.data_prefix + row_start
else:
row_start = self.data_prefix + (row_start or "")
row_stop = self.data_prefix + (row_stop or "")
gen = super(MockHBaseTable, self).scan(row_start, row_stop,
row_prefix, columns,
filter, timestamp,
include_timestamp, batch_size,
scan_batching, limit,
sorted_columns)
data_prefix_len = len(self.data_prefix)
# Restore original row format
for row, data in gen:
yield (row[data_prefix_len:], data)
def row(self, row, *args, **kwargs):
row = self.data_prefix + row
return super(MockHBaseTable, self).row(row, *args, **kwargs)
def delete(self, row, *args, **kwargs):
row = self.data_prefix + row
return super(MockHBaseTable, self).delete(row, *args, **kwargs)
def _update_filter_row(self, filter):
if filter:
return "PrefixFilter(%s) AND %s" % (self.data_prefix, filter)
else:
return "PrefixFilter(%s)" % self.data_prefix

View File

@ -1,85 +0,0 @@
#
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslotest import base
from ceilometer.dispatcher import database
from ceilometer.publisher import utils
from ceilometer import service
class TestDispatcherDB(base.BaseTestCase):
def setUp(self):
super(TestDispatcherDB, self).setUp()
self.CONF = service.prepare_service([], [])
self.CONF.set_override('connection', 'sqlite://', group='database')
self.meter_dispatcher = database.MeterDatabaseDispatcher(self.CONF)
def test_valid_message(self):
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
}
msg['message_signature'] = utils.compute_signature(
msg, self.CONF.publisher.telemetry_secret,
)
with mock.patch.object(self.meter_dispatcher.conn,
'record_metering_data') as record_metering_data:
self.meter_dispatcher.record_metering_data(msg)
record_metering_data.assert_called_once_with(msg)
def test_timestamp_conversion(self):
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
'timestamp': '2012-07-02T13:53:40Z',
}
msg['message_signature'] = utils.compute_signature(
msg, self.CONF.publisher.telemetry_secret,
)
expected = msg.copy()
expected['timestamp'] = datetime.datetime(2012, 7, 2, 13, 53, 40)
with mock.patch.object(self.meter_dispatcher.conn,
'record_metering_data') as record_metering_data:
self.meter_dispatcher.record_metering_data(msg)
record_metering_data.assert_called_once_with(expected)
def test_timestamp_tzinfo_conversion(self):
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
'timestamp': '2012-09-30T15:31:50.262-08:00',
}
msg['message_signature'] = utils.compute_signature(
msg, self.CONF.publisher.telemetry_secret,
)
expected = msg.copy()
expected['timestamp'] = datetime.datetime(2012, 9, 30, 23,
31, 50, 262000)
with mock.patch.object(self.meter_dispatcher.conn,
'record_metering_data') as record_metering_data:
self.meter_dispatcher.record_metering_data(msg)
record_metering_data.assert_called_once_with(expected)

View File

@ -1,42 +0,0 @@
# Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_config import fixture
from ceilometer import dispatcher
from ceilometer import service
from ceilometer.tests import base
class FakeMeterDispatcher(dispatcher.MeterDispatcherBase):
def record_metering_data(self, data):
pass
class TestDispatchManager(base.BaseTestCase):
def setUp(self):
super(TestDispatchManager, self).setUp()
conf = service.prepare_service([], [])
self.conf = self.useFixture(fixture.Config(conf))
self.conf.config(meter_dispatchers=['database'],
event_dispatchers=['database'])
self.CONF = self.conf.conf
self.useFixture(fixtures.MockPatch(
'ceilometer.dispatcher.database.MeterDatabaseDispatcher',
new=FakeMeterDispatcher))
def test_load(self):
sample_mg, event_mg = dispatcher.load_dispatcher_manager(self.CONF)
self.assertEqual(1, len(list(sample_mg)))

View File

@ -124,7 +124,6 @@ class TestEventEndpoint(tests_base.BaseTestCase):
def setUp(self):
super(TestEventEndpoint, self).setUp()
self.CONF = service.prepare_service([], [])
self.CONF.set_override("connection", "log://", group='database')
self.setup_messaging(self.CONF)
self.useFixture(fixtures.MockPatchObject(

View File

@ -1,96 +0,0 @@
#
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslotest import base
import sqlalchemy
from sqlalchemy.dialects.mysql import DECIMAL
from sqlalchemy.types import NUMERIC
from ceilometer.storage.sqlalchemy import models
from ceilometer import utils
class PreciseTimestampTest(base.BaseTestCase):
@staticmethod
def fake_dialect(name):
def _type_descriptor_mock(desc):
if type(desc) == DECIMAL:
return NUMERIC(precision=desc.precision, scale=desc.scale)
dialect = mock.MagicMock()
dialect.name = name
dialect.type_descriptor = _type_descriptor_mock
return dialect
def setUp(self):
super(PreciseTimestampTest, self).setUp()
self._mysql_dialect = self.fake_dialect('mysql')
self._postgres_dialect = self.fake_dialect('postgres')
self._type = models.PreciseTimestamp()
self._date = datetime.datetime(2012, 7, 2, 10, 44)
def test_load_dialect_impl_mysql(self):
result = self._type.load_dialect_impl(self._mysql_dialect)
self.assertEqual(NUMERIC, type(result))
self.assertEqual(20, result.precision)
self.assertEqual(6, result.scale)
self.assertTrue(result.asdecimal)
def test_load_dialect_impl_postgres(self):
result = self._type.load_dialect_impl(self._postgres_dialect)
self.assertEqual(sqlalchemy.DateTime, type(result))
def test_process_bind_param_store_decimal_mysql(self):
expected = utils.dt_to_decimal(self._date)
result = self._type.process_bind_param(self._date, self._mysql_dialect)
self.assertEqual(expected, result)
def test_process_bind_param_store_datetime_postgres(self):
result = self._type.process_bind_param(self._date,
self._postgres_dialect)
self.assertEqual(self._date, result)
def test_process_bind_param_store_none_mysql(self):
result = self._type.process_bind_param(None, self._mysql_dialect)
self.assertIsNone(result)
def test_process_bind_param_store_none_postgres(self):
result = self._type.process_bind_param(None,
self._postgres_dialect)
self.assertIsNone(result)
def test_process_result_value_datetime_mysql(self):
dec_value = utils.dt_to_decimal(self._date)
result = self._type.process_result_value(dec_value,
self._mysql_dialect)
self.assertEqual(self._date, result)
def test_process_result_value_datetime_postgres(self):
result = self._type.process_result_value(self._date,
self._postgres_dialect)
self.assertEqual(self._date, result)
def test_process_result_value_none_mysql(self):
result = self._type.process_result_value(None,
self._mysql_dialect)
self.assertIsNone(result)
def test_process_result_value_none_postgres(self):
result = self._type.process_result_value(None,
self._postgres_dialect)
self.assertIsNone(result)

Some files were not shown because too many files have changed in this diff